1 /* 2 * QEMU RISC-V CPU 3 * 4 * Copyright (c) 2016-2017 Sagar Karandikar, sagark@eecs.berkeley.edu 5 * Copyright (c) 2017-2018 SiFive, Inc. 6 * 7 * This program is free software; you can redistribute it and/or modify it 8 * under the terms and conditions of the GNU General Public License, 9 * version 2 or later, as published by the Free Software Foundation. 10 * 11 * This program is distributed in the hope it will be useful, but WITHOUT 12 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or 13 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for 14 * more details. 15 * 16 * You should have received a copy of the GNU General Public License along with 17 * this program. If not, see <http://www.gnu.org/licenses/>. 18 */ 19 20 #include "qemu/osdep.h" 21 #include "qemu/qemu-print.h" 22 #include "qemu/ctype.h" 23 #include "qemu/log.h" 24 #include "cpu.h" 25 #include "cpu_vendorid.h" 26 #include "internals.h" 27 #include "exec/exec-all.h" 28 #include "qapi/error.h" 29 #include "qapi/visitor.h" 30 #include "qemu/error-report.h" 31 #include "hw/qdev-properties.h" 32 #include "hw/core/qdev-prop-internal.h" 33 #include "migration/vmstate.h" 34 #include "fpu/softfloat-helpers.h" 35 #include "sysemu/device_tree.h" 36 #include "sysemu/kvm.h" 37 #include "sysemu/tcg.h" 38 #include "kvm/kvm_riscv.h" 39 #include "tcg/tcg-cpu.h" 40 #include "tcg/tcg.h" 41 42 /* RISC-V CPU definitions */ 43 static const char riscv_single_letter_exts[] = "IEMAFDQCBPVH"; 44 const uint32_t misa_bits[] = {RVI, RVE, RVM, RVA, RVF, RVD, RVV, 45 RVC, RVS, RVU, RVH, RVJ, RVG, RVB, 0}; 46 47 /* 48 * From vector_helper.c 49 * Note that vector data is stored in host-endian 64-bit chunks, 50 * so addressing bytes needs a host-endian fixup. 
 */
#if HOST_BIG_ENDIAN
#define BYTE(x) ((x) ^ 7)
#else
#define BYTE(x) (x)
#endif

/* True if this hart's machine XLEN (misa.MXL) is RV32. */
bool riscv_cpu_is_32bit(RISCVCPU *cpu)
{
    return riscv_cpu_mxl(&cpu->env) == MXL_RV32;
}

/* Hash that stores general user set numeric options */
static GHashTable *general_user_opts;

/*
 * Record that the user explicitly set 'optname' to 'value'.
 * NOTE: the key string is inserted by pointer, not copied, so callers
 * must pass a string that outlives the hash table.
 */
static void cpu_option_add_user_setting(const char *optname, uint32_t value)
{
    g_hash_table_insert(general_user_opts, (gpointer)optname,
                        GUINT_TO_POINTER(value));
}

/* Return whether the user explicitly set 'optname'. */
bool riscv_cpu_option_set(const char *optname)
{
    return g_hash_table_contains(general_user_opts, optname);
}

/* Expands to a {name, min priv version, cfg flag offset} initializer. */
#define ISA_EXT_DATA_ENTRY(_name, _min_ver, _prop) \
    {#_name, _min_ver, CPU_CFG_OFFSET(_prop)}

/*
 * Here are the ordering rules of extension naming defined by RISC-V
 * specification :
 * 1. All extensions should be separated from other multi-letter extensions
 *    by an underscore.
 * 2. The first letter following the 'Z' conventionally indicates the most
 *    closely related alphabetical extension category, IMAFDQLCBKJTPVH.
 *    If multiple 'Z' extensions are named, they should be ordered first
 *    by category, then alphabetically within a category.
 * 3. Standard supervisor-level extensions (starts with 'S') should be
 *    listed after standard unprivileged extensions. If multiple
 *    supervisor-level extensions are listed, they should be ordered
 *    alphabetically.
 * 4. Non-standard extensions (starts with 'X') must be listed after all
 *    standard extensions. They must be separated from other multi-letter
 *    extensions by an underscore.
 *
 * Single letter extensions are checked in riscv_cpu_validate_misa_priv()
 * instead.
 */
/*
 * Each entry maps an extension name to the earliest privileged-spec
 * version QEMU accepts it with, and to the offset of its enable flag
 * inside RISCVCPUConfig (via CPU_CFG_OFFSET).
 */
const RISCVIsaExtData isa_edata_arr[] = {
    ISA_EXT_DATA_ENTRY(zicbom, PRIV_VERSION_1_12_0, ext_zicbom),
    ISA_EXT_DATA_ENTRY(zicbop, PRIV_VERSION_1_12_0, ext_zicbop),
    ISA_EXT_DATA_ENTRY(zicboz, PRIV_VERSION_1_12_0, ext_zicboz),
    ISA_EXT_DATA_ENTRY(zicond, PRIV_VERSION_1_12_0, ext_zicond),
    ISA_EXT_DATA_ENTRY(zicntr, PRIV_VERSION_1_12_0, ext_zicntr),
    ISA_EXT_DATA_ENTRY(zicsr, PRIV_VERSION_1_10_0, ext_zicsr),
    ISA_EXT_DATA_ENTRY(zifencei, PRIV_VERSION_1_10_0, ext_zifencei),
    ISA_EXT_DATA_ENTRY(zihintntl, PRIV_VERSION_1_10_0, ext_zihintntl),
    ISA_EXT_DATA_ENTRY(zihintpause, PRIV_VERSION_1_10_0, ext_zihintpause),
    ISA_EXT_DATA_ENTRY(zihpm, PRIV_VERSION_1_12_0, ext_zihpm),
    ISA_EXT_DATA_ENTRY(zmmul, PRIV_VERSION_1_12_0, ext_zmmul),
    ISA_EXT_DATA_ENTRY(zaamo, PRIV_VERSION_1_12_0, ext_zaamo),
    ISA_EXT_DATA_ENTRY(zacas, PRIV_VERSION_1_12_0, ext_zacas),
    ISA_EXT_DATA_ENTRY(zalrsc, PRIV_VERSION_1_12_0, ext_zalrsc),
    ISA_EXT_DATA_ENTRY(zawrs, PRIV_VERSION_1_12_0, ext_zawrs),
    ISA_EXT_DATA_ENTRY(zfa, PRIV_VERSION_1_12_0, ext_zfa),
    ISA_EXT_DATA_ENTRY(zfbfmin, PRIV_VERSION_1_12_0, ext_zfbfmin),
    ISA_EXT_DATA_ENTRY(zfh, PRIV_VERSION_1_11_0, ext_zfh),
    ISA_EXT_DATA_ENTRY(zfhmin, PRIV_VERSION_1_11_0, ext_zfhmin),
    ISA_EXT_DATA_ENTRY(zfinx, PRIV_VERSION_1_12_0, ext_zfinx),
    ISA_EXT_DATA_ENTRY(zdinx, PRIV_VERSION_1_12_0, ext_zdinx),
    ISA_EXT_DATA_ENTRY(zca, PRIV_VERSION_1_12_0, ext_zca),
    ISA_EXT_DATA_ENTRY(zcb, PRIV_VERSION_1_12_0, ext_zcb),
    ISA_EXT_DATA_ENTRY(zcf, PRIV_VERSION_1_12_0, ext_zcf),
    ISA_EXT_DATA_ENTRY(zcd, PRIV_VERSION_1_12_0, ext_zcd),
    ISA_EXT_DATA_ENTRY(zce, PRIV_VERSION_1_12_0, ext_zce),
    ISA_EXT_DATA_ENTRY(zcmp, PRIV_VERSION_1_12_0, ext_zcmp),
    ISA_EXT_DATA_ENTRY(zcmt, PRIV_VERSION_1_12_0, ext_zcmt),
    ISA_EXT_DATA_ENTRY(zba, PRIV_VERSION_1_12_0, ext_zba),
    ISA_EXT_DATA_ENTRY(zbb, PRIV_VERSION_1_12_0, ext_zbb),
    ISA_EXT_DATA_ENTRY(zbc, PRIV_VERSION_1_12_0, ext_zbc),
    ISA_EXT_DATA_ENTRY(zbkb, PRIV_VERSION_1_12_0, ext_zbkb),
    ISA_EXT_DATA_ENTRY(zbkc, PRIV_VERSION_1_12_0, ext_zbkc),
    ISA_EXT_DATA_ENTRY(zbkx, PRIV_VERSION_1_12_0, ext_zbkx),
    ISA_EXT_DATA_ENTRY(zbs, PRIV_VERSION_1_12_0, ext_zbs),
    ISA_EXT_DATA_ENTRY(zk, PRIV_VERSION_1_12_0, ext_zk),
    ISA_EXT_DATA_ENTRY(zkn, PRIV_VERSION_1_12_0, ext_zkn),
    ISA_EXT_DATA_ENTRY(zknd, PRIV_VERSION_1_12_0, ext_zknd),
    ISA_EXT_DATA_ENTRY(zkne, PRIV_VERSION_1_12_0, ext_zkne),
    ISA_EXT_DATA_ENTRY(zknh, PRIV_VERSION_1_12_0, ext_zknh),
    ISA_EXT_DATA_ENTRY(zkr, PRIV_VERSION_1_12_0, ext_zkr),
    ISA_EXT_DATA_ENTRY(zks, PRIV_VERSION_1_12_0, ext_zks),
    ISA_EXT_DATA_ENTRY(zksed, PRIV_VERSION_1_12_0, ext_zksed),
    ISA_EXT_DATA_ENTRY(zksh, PRIV_VERSION_1_12_0, ext_zksh),
    ISA_EXT_DATA_ENTRY(zkt, PRIV_VERSION_1_12_0, ext_zkt),
    ISA_EXT_DATA_ENTRY(zvbb, PRIV_VERSION_1_12_0, ext_zvbb),
    ISA_EXT_DATA_ENTRY(zvbc, PRIV_VERSION_1_12_0, ext_zvbc),
    ISA_EXT_DATA_ENTRY(zve32f, PRIV_VERSION_1_10_0, ext_zve32f),
    ISA_EXT_DATA_ENTRY(zve64f, PRIV_VERSION_1_10_0, ext_zve64f),
    ISA_EXT_DATA_ENTRY(zve64d, PRIV_VERSION_1_10_0, ext_zve64d),
    ISA_EXT_DATA_ENTRY(zvfbfmin, PRIV_VERSION_1_12_0, ext_zvfbfmin),
    ISA_EXT_DATA_ENTRY(zvfbfwma, PRIV_VERSION_1_12_0, ext_zvfbfwma),
    ISA_EXT_DATA_ENTRY(zvfh, PRIV_VERSION_1_12_0, ext_zvfh),
    ISA_EXT_DATA_ENTRY(zvfhmin, PRIV_VERSION_1_12_0, ext_zvfhmin),
    ISA_EXT_DATA_ENTRY(zvkb, PRIV_VERSION_1_12_0, ext_zvkb),
    ISA_EXT_DATA_ENTRY(zvkg, PRIV_VERSION_1_12_0, ext_zvkg),
    ISA_EXT_DATA_ENTRY(zvkn, PRIV_VERSION_1_12_0, ext_zvkn),
    ISA_EXT_DATA_ENTRY(zvknc, PRIV_VERSION_1_12_0, ext_zvknc),
    ISA_EXT_DATA_ENTRY(zvkned, PRIV_VERSION_1_12_0, ext_zvkned),
    ISA_EXT_DATA_ENTRY(zvkng, PRIV_VERSION_1_12_0, ext_zvkng),
    ISA_EXT_DATA_ENTRY(zvknha, PRIV_VERSION_1_12_0, ext_zvknha),
    ISA_EXT_DATA_ENTRY(zvknhb, PRIV_VERSION_1_12_0, ext_zvknhb),
    ISA_EXT_DATA_ENTRY(zvks, PRIV_VERSION_1_12_0, ext_zvks),
    ISA_EXT_DATA_ENTRY(zvksc, PRIV_VERSION_1_12_0, ext_zvksc),
    ISA_EXT_DATA_ENTRY(zvksed, PRIV_VERSION_1_12_0, ext_zvksed),
    ISA_EXT_DATA_ENTRY(zvksg, PRIV_VERSION_1_12_0, ext_zvksg),
    ISA_EXT_DATA_ENTRY(zvksh, PRIV_VERSION_1_12_0, ext_zvksh),
    ISA_EXT_DATA_ENTRY(zvkt, PRIV_VERSION_1_12_0, ext_zvkt),
    ISA_EXT_DATA_ENTRY(zhinx, PRIV_VERSION_1_12_0, ext_zhinx),
    ISA_EXT_DATA_ENTRY(zhinxmin, PRIV_VERSION_1_12_0, ext_zhinxmin),
    ISA_EXT_DATA_ENTRY(smaia, PRIV_VERSION_1_12_0, ext_smaia),
    ISA_EXT_DATA_ENTRY(smepmp, PRIV_VERSION_1_12_0, ext_smepmp),
    ISA_EXT_DATA_ENTRY(smstateen, PRIV_VERSION_1_12_0, ext_smstateen),
    ISA_EXT_DATA_ENTRY(ssaia, PRIV_VERSION_1_12_0, ext_ssaia),
    ISA_EXT_DATA_ENTRY(sscofpmf, PRIV_VERSION_1_12_0, ext_sscofpmf),
    ISA_EXT_DATA_ENTRY(sstc, PRIV_VERSION_1_12_0, ext_sstc),
    ISA_EXT_DATA_ENTRY(svadu, PRIV_VERSION_1_12_0, ext_svadu),
    ISA_EXT_DATA_ENTRY(svinval, PRIV_VERSION_1_12_0, ext_svinval),
    ISA_EXT_DATA_ENTRY(svnapot, PRIV_VERSION_1_12_0, ext_svnapot),
    ISA_EXT_DATA_ENTRY(svpbmt, PRIV_VERSION_1_12_0, ext_svpbmt),
    ISA_EXT_DATA_ENTRY(xtheadba, PRIV_VERSION_1_11_0, ext_xtheadba),
    ISA_EXT_DATA_ENTRY(xtheadbb, PRIV_VERSION_1_11_0, ext_xtheadbb),
    ISA_EXT_DATA_ENTRY(xtheadbs, PRIV_VERSION_1_11_0, ext_xtheadbs),
    ISA_EXT_DATA_ENTRY(xtheadcmo, PRIV_VERSION_1_11_0, ext_xtheadcmo),
    ISA_EXT_DATA_ENTRY(xtheadcondmov, PRIV_VERSION_1_11_0, ext_xtheadcondmov),
    ISA_EXT_DATA_ENTRY(xtheadfmemidx, PRIV_VERSION_1_11_0, ext_xtheadfmemidx),
    ISA_EXT_DATA_ENTRY(xtheadfmv, PRIV_VERSION_1_11_0, ext_xtheadfmv),
    ISA_EXT_DATA_ENTRY(xtheadmac, PRIV_VERSION_1_11_0, ext_xtheadmac),
    ISA_EXT_DATA_ENTRY(xtheadmemidx, PRIV_VERSION_1_11_0, ext_xtheadmemidx),
    ISA_EXT_DATA_ENTRY(xtheadmempair, PRIV_VERSION_1_11_0, ext_xtheadmempair),
    ISA_EXT_DATA_ENTRY(xtheadsync, PRIV_VERSION_1_11_0, ext_xtheadsync),
    ISA_EXT_DATA_ENTRY(xventanacondops, PRIV_VERSION_1_12_0, ext_XVentanaCondOps),

    DEFINE_PROP_END_OF_LIST(),
};

/* Read the boolean extension-enable flag stored at 'ext_offset' in cpu->cfg. */
bool isa_ext_is_enabled(RISCVCPU *cpu, uint32_t ext_offset)
{
    bool *ext_enabled = (void *)&cpu->cfg + ext_offset;

    return *ext_enabled;
}

/* Write the boolean extension-enable flag stored at 'ext_offset' in cpu->cfg. */
void isa_ext_update_enabled(RISCVCPU *cpu, uint32_t ext_offset, bool en)
{
    bool *ext_enabled = (void *)&cpu->cfg + ext_offset;

    *ext_enabled = en;
}

/* True if 'cpu_obj' is an instance of a vendor (non-generic) CPU type. */
bool riscv_cpu_is_vendor(Object *cpu_obj)
{
    return object_dynamic_cast(cpu_obj, TYPE_RISCV_VENDOR_CPU) != NULL;
}

/* Integer register names, "xN/ABI-name", indexed by register number. */
const char * const riscv_int_regnames[] = {
    "x0/zero", "x1/ra", "x2/sp", "x3/gp", "x4/tp", "x5/t0", "x6/t1",
    "x7/t2", "x8/s0", "x9/s1", "x10/a0", "x11/a1", "x12/a2", "x13/a3",
    "x14/a4", "x15/a5", "x16/a6", "x17/a7", "x18/s2", "x19/s3", "x20/s4",
    "x21/s5", "x22/s6", "x23/s7", "x24/s8", "x25/s9", "x26/s10", "x27/s11",
    "x28/t3", "x29/t4", "x30/t5", "x31/t6"
};

/* High-half integer register names (RV128 gdb view). */
const char * const riscv_int_regnamesh[] = {
    "x0h/zeroh", "x1h/rah", "x2h/sph", "x3h/gph", "x4h/tph", "x5h/t0h",
    "x6h/t1h", "x7h/t2h", "x8h/s0h", "x9h/s1h", "x10h/a0h", "x11h/a1h",
    "x12h/a2h", "x13h/a3h", "x14h/a4h", "x15h/a5h", "x16h/a6h", "x17h/a7h",
    "x18h/s2h", "x19h/s3h", "x20h/s4h", "x21h/s5h", "x22h/s6h", "x23h/s7h",
    "x24h/s8h", "x25h/s9h", "x26h/s10h", "x27h/s11h", "x28h/t3h", "x29h/t4h",
    "x30h/t5h", "x31h/t6h"
};

/* Floating-point register names, "fN/ABI-name". */
const char * const riscv_fpr_regnames[] = {
    "f0/ft0", "f1/ft1", "f2/ft2", "f3/ft3", "f4/ft4", "f5/ft5",
    "f6/ft6", "f7/ft7", "f8/fs0", "f9/fs1", "f10/fa0", "f11/fa1",
    "f12/fa2", "f13/fa3", "f14/fa4", "f15/fa5", "f16/fa6", "f17/fa7",
    "f18/fs2", "f19/fs3", "f20/fs4", "f21/fs5", "f22/fs6", "f23/fs7",
    "f24/fs8", "f25/fs9", "f26/fs10", "f27/fs11", "f28/ft8", "f29/ft9",
    "f30/ft10", "f31/ft11"
};

/* Vector register names. */
const char * const riscv_rvv_regnames[] = {
    "v0", "v1", "v2", "v3", "v4", "v5", "v6",
    "v7", "v8", "v9", "v10", "v11", "v12", "v13",
    "v14", "v15", "v16", "v17", "v18", "v19", "v20",
    "v21", "v22", "v23", "v24", "v25", "v26", "v27",
    "v28", "v29", "v30", "v31"
};

/* Synchronous trap names, indexed by exception cause code. */
static const char * const riscv_excp_names[] = {
    "misaligned_fetch",
    "fault_fetch",
    "illegal_instruction",
    "breakpoint",
    "misaligned_load",
    "fault_load",
    "misaligned_store",
    "fault_store",
    "user_ecall",
    "supervisor_ecall",
    "hypervisor_ecall",
    "machine_ecall",
    "exec_page_fault",
    "load_page_fault",
    "reserved",
    "store_page_fault",
    "reserved",
    "reserved",
    "reserved",
    "reserved",
    "guest_exec_page_fault",
    "guest_load_page_fault",
    "reserved",
    "guest_store_page_fault",
};

/* Asynchronous trap names, indexed by interrupt cause code. */
static const char * const riscv_intr_names[] = {
    "u_software",
    "s_software",
    "vs_software",
    "m_software",
    "u_timer",
    "s_timer",
    "vs_timer",
    "m_timer",
    "u_external",
    "s_external",
    "vs_external",
    "m_external",
    "reserved",
    "reserved",
    "reserved",
    "reserved"
};

/*
 * Return a printable name for trap 'cause'; 'async' selects between
 * the interrupt and exception tables. Out-of-range causes yield
 * "(unknown)".
 */
const char *riscv_cpu_get_trap_name(target_ulong cause, bool async)
{
    if (async) {
        return (cause < ARRAY_SIZE(riscv_intr_names)) ?
               riscv_intr_names[cause] : "(unknown)";
    } else {
        return (cause < ARRAY_SIZE(riscv_excp_names)) ?
               riscv_excp_names[cause] : "(unknown)";
    }
}

/* Set misa_ext and its write mask to the same extension bitmap. */
void riscv_cpu_set_misa_ext(CPURISCVState *env, uint32_t ext)
{
    env->misa_ext_mask = env->misa_ext = ext;
}

/* Max XLEN in bits for this CPU class: 16 << MXL (32/64/128). */
int riscv_cpu_max_xlen(RISCVCPUClass *mcc)
{
    return 16 << mcc->misa_mxl_max;
}

#ifndef CONFIG_USER_ONLY
/*
 * Translate a satp mode name to its VM_1_10_* encoding.
 * Only the known names are accepted; anything else asserts.
 */
static uint8_t satp_mode_from_str(const char *satp_mode_str)
{
    if (!strncmp(satp_mode_str, "mbare", 5)) {
        return VM_1_10_MBARE;
    }

    if (!strncmp(satp_mode_str, "sv32", 4)) {
        return VM_1_10_SV32;
    }

    if (!strncmp(satp_mode_str, "sv39", 4)) {
        return VM_1_10_SV39;
    }

    if (!strncmp(satp_mode_str, "sv48", 4)) {
        return VM_1_10_SV48;
    }

    if (!strncmp(satp_mode_str, "sv57", 4)) {
        return VM_1_10_SV57;
    }

    if (!strncmp(satp_mode_str, "sv64", 4)) {
        return VM_1_10_SV64;
    }

    g_assert_not_reached();
}

/* Highest satp mode bit set in 'map' (i.e. index of the MSB). */
uint8_t satp_mode_max_from_map(uint32_t map)
{
    /*
     * 'map = 0' will make us return (31 - 32), which C will
     * happily overflow to UINT_MAX. There's no good result to
     * return if 'map = 0' (e.g. returning 0 will be ambiguous
     * with the result for 'map = 1').
     *
     * Assert out if map = 0. Callers will have to deal with
     * it outside of this function.
     */
    g_assert(map > 0);

    /* map here has at least one bit set, so no problem with clz */
    return 31 - __builtin_clz(map);
}

/*
 * Human-readable name for a satp mode value; the valid set of modes
 * depends on whether the hart is 32-bit. Unknown values assert.
 */
const char *satp_mode_str(uint8_t satp_mode, bool is_32_bit)
{
    if (is_32_bit) {
        switch (satp_mode) {
        case VM_1_10_SV32:
            return "sv32";
        case VM_1_10_MBARE:
            return "none";
        }
    } else {
        switch (satp_mode) {
        case VM_1_10_SV64:
            return "sv64";
        case VM_1_10_SV57:
            return "sv57";
        case VM_1_10_SV48:
            return "sv48";
        case VM_1_10_SV39:
            return "sv39";
        case VM_1_10_MBARE:
            return "none";
        }
    }

    g_assert_not_reached();
}

/*
 * Mark every mode up to and including 'satp_mode' that the per-XLEN
 * valid_vm table allows as supported in cpu->cfg.satp_mode.supported.
 */
static void set_satp_mode_max_supported(RISCVCPU *cpu,
                                        uint8_t satp_mode)
{
    bool rv32 = riscv_cpu_mxl(&cpu->env) == MXL_RV32;
    const bool *valid_vm = rv32 ? valid_vm_1_10_32 : valid_vm_1_10_64;

    for (int i = 0; i <= satp_mode; ++i) {
        if (valid_vm[i]) {
            cpu->cfg.satp_mode.supported |= (1 << i);
        }
    }
}

/* Set the satp mode to the max supported */
static void set_satp_mode_default_map(RISCVCPU *cpu)
{
    /*
     * Bare CPUs do not default to the max available.
     * Users must set a valid satp_mode in the command
     * line.
     */
    if (object_dynamic_cast(OBJECT(cpu), TYPE_RISCV_BARE_CPU) != NULL) {
        warn_report("No satp mode set. Defaulting to 'bare'");
        cpu->cfg.satp_mode.map = (1 << VM_1_10_MBARE);
        return;
    }

    cpu->cfg.satp_mode.map = cpu->cfg.satp_mode.supported;
}
#endif

/* Generic 'any' CPU: IMAFDCU, latest priv spec. Deprecated (see realize). */
static void riscv_any_cpu_init(Object *obj)
{
    RISCVCPU *cpu = RISCV_CPU(obj);
    CPURISCVState *env = &cpu->env;
    riscv_cpu_set_misa_ext(env, RVI | RVM | RVA | RVF | RVD | RVC | RVU);

#ifndef CONFIG_USER_ONLY
    set_satp_mode_max_supported(RISCV_CPU(obj),
        riscv_cpu_mxl(&RISCV_CPU(obj)->env) == MXL_RV32 ?
        VM_1_10_SV32 : VM_1_10_SV57);
#endif

    env->priv_ver = PRIV_VERSION_LATEST;

    /* inherited from parent obj via riscv_cpu_init() */
    cpu->cfg.ext_zifencei = true;
    cpu->cfg.ext_zicsr = true;
    cpu->cfg.mmu = true;
    cpu->cfg.pmp = true;
}

/* 'max' CPU: latest priv spec; extensions are enabled elsewhere per accel. */
static void riscv_max_cpu_init(Object *obj)
{
    RISCVCPU *cpu = RISCV_CPU(obj);
    CPURISCVState *env = &cpu->env;

    cpu->cfg.mmu = true;
    cpu->cfg.pmp = true;

    env->priv_ver = PRIV_VERSION_LATEST;
#ifndef CONFIG_USER_ONLY
#ifdef TARGET_RISCV32
    set_satp_mode_max_supported(cpu, VM_1_10_SV32);
#else
    set_satp_mode_max_supported(cpu, VM_1_10_SV57);
#endif
#endif
}

#if defined(TARGET_RISCV64)
/* rv64 generic base CPU: MMU + PMP, latest priv spec, no misa defaults. */
static void rv64_base_cpu_init(Object *obj)
{
    RISCVCPU *cpu = RISCV_CPU(obj);
    CPURISCVState *env = &cpu->env;

    cpu->cfg.mmu = true;
    cpu->cfg.pmp = true;

    /* Set latest version of privileged specification */
    env->priv_ver = PRIV_VERSION_LATEST;
#ifndef CONFIG_USER_ONLY
    set_satp_mode_max_supported(RISCV_CPU(obj), VM_1_10_SV57);
#endif
}

/* SiFive U54-like core: RV64GC with S/U modes, priv 1.10, Sv39. */
static void rv64_sifive_u_cpu_init(Object *obj)
{
    RISCVCPU *cpu = RISCV_CPU(obj);
    CPURISCVState *env = &cpu->env;
    riscv_cpu_set_misa_ext(env, RVI | RVM | RVA | RVF | RVD | RVC | RVS | RVU);
    env->priv_ver = PRIV_VERSION_1_10_0;
#ifndef CONFIG_USER_ONLY
    set_satp_mode_max_supported(RISCV_CPU(obj), VM_1_10_SV39);
#endif

    /* inherited from parent obj via riscv_cpu_init() */
    cpu->cfg.ext_zifencei = true;
    cpu->cfg.ext_zicsr = true;
    cpu->cfg.mmu = true;
    cpu->cfg.pmp = true;
}

/* SiFive E51-like embedded core: RV64IMACU, no MMU. */
static void rv64_sifive_e_cpu_init(Object *obj)
{
    CPURISCVState *env = &RISCV_CPU(obj)->env;
    RISCVCPU *cpu = RISCV_CPU(obj);

    riscv_cpu_set_misa_ext(env, RVI | RVM | RVA | RVC | RVU);
    env->priv_ver = PRIV_VERSION_1_10_0;
#ifndef CONFIG_USER_ONLY
    set_satp_mode_max_supported(cpu, VM_1_10_MBARE);
#endif

    /* inherited from parent obj via riscv_cpu_init() */
    cpu->cfg.ext_zifencei = true;
    cpu->cfg.ext_zicsr = true;
    cpu->cfg.pmp = true;
}

/* T-Head C906: RV64GCSU, priv 1.11, Sv39, with the XThead* vendor exts. */
static void rv64_thead_c906_cpu_init(Object *obj)
{
    CPURISCVState *env = &RISCV_CPU(obj)->env;
    RISCVCPU *cpu = RISCV_CPU(obj);

    riscv_cpu_set_misa_ext(env, RVG | RVC | RVS | RVU);
    env->priv_ver = PRIV_VERSION_1_11_0;

    cpu->cfg.ext_zfa = true;
    cpu->cfg.ext_zfh = true;
    cpu->cfg.mmu = true;
    cpu->cfg.ext_xtheadba = true;
    cpu->cfg.ext_xtheadbb = true;
    cpu->cfg.ext_xtheadbs = true;
    cpu->cfg.ext_xtheadcmo = true;
    cpu->cfg.ext_xtheadcondmov = true;
    cpu->cfg.ext_xtheadfmemidx = true;
    cpu->cfg.ext_xtheadmac = true;
    cpu->cfg.ext_xtheadmemidx = true;
    cpu->cfg.ext_xtheadmempair = true;
    cpu->cfg.ext_xtheadsync = true;

    cpu->cfg.mvendorid = THEAD_VENDOR_ID;
#ifndef CONFIG_USER_ONLY
    set_satp_mode_max_supported(cpu, VM_1_10_SV39);
#endif

    /* inherited from parent obj via riscv_cpu_init() */
    cpu->cfg.pmp = true;
}

/* Ventana Veyron V1: RV64GCSUH, priv 1.12, Sv48, AIA + XVentanaCondOps. */
static void rv64_veyron_v1_cpu_init(Object *obj)
{
    CPURISCVState *env = &RISCV_CPU(obj)->env;
    RISCVCPU *cpu = RISCV_CPU(obj);

    riscv_cpu_set_misa_ext(env, RVG | RVC | RVS | RVU | RVH);
    env->priv_ver = PRIV_VERSION_1_12_0;

    /* Enable ISA extensions */
    cpu->cfg.mmu = true;
    cpu->cfg.ext_zifencei = true;
    cpu->cfg.ext_zicsr = true;
    cpu->cfg.pmp = true;
    cpu->cfg.ext_zicbom = true;
    cpu->cfg.cbom_blocksize = 64;
    cpu->cfg.cboz_blocksize = 64;
    cpu->cfg.ext_zicboz = true;
    cpu->cfg.ext_smaia = true;
    cpu->cfg.ext_ssaia = true;
    cpu->cfg.ext_sscofpmf = true;
    cpu->cfg.ext_sstc = true;
    cpu->cfg.ext_svinval = true;
    cpu->cfg.ext_svnapot = true;
    cpu->cfg.ext_svpbmt = true;
    cpu->cfg.ext_smstateen = true;
    cpu->cfg.ext_zba = true;
    cpu->cfg.ext_zbb = true;
    cpu->cfg.ext_zbc = true;
    cpu->cfg.ext_zbs = true;
    cpu->cfg.ext_XVentanaCondOps = true;

    cpu->cfg.mvendorid = VEYRON_V1_MVENDORID;
    cpu->cfg.marchid = VEYRON_V1_MARCHID;
    cpu->cfg.mimpid = VEYRON_V1_MIMPID;

#ifndef CONFIG_USER_ONLY
    set_satp_mode_max_supported(cpu, VM_1_10_SV48);
#endif
}

/* rv128 base CPU; requires single-threaded TCG (no 128-bit atomics). */
static void rv128_base_cpu_init(Object *obj)
{
    RISCVCPU *cpu = RISCV_CPU(obj);
    CPURISCVState *env = &cpu->env;

    if (qemu_tcg_mttcg_enabled()) {
        /* Missing 128-bit aligned atomics */
        error_report("128-bit RISC-V currently does not work with Multi "
                     "Threaded TCG. Please use: -accel tcg,thread=single");
        exit(EXIT_FAILURE);
    }

    cpu->cfg.mmu = true;
    cpu->cfg.pmp = true;

    /* Set latest version of privileged specification */
    env->priv_ver = PRIV_VERSION_LATEST;
#ifndef CONFIG_USER_ONLY
    set_satp_mode_max_supported(RISCV_CPU(obj), VM_1_10_SV57);
#endif
}

/* Bare rv64i CPU: RVI only, oldest supported priv spec, no counter exts. */
static void rv64i_bare_cpu_init(Object *obj)
{
    CPURISCVState *env = &RISCV_CPU(obj)->env;
    riscv_cpu_set_misa_ext(env, RVI);

    /* Remove the defaults from the parent class */
    RISCV_CPU(obj)->cfg.ext_zicntr = false;
    RISCV_CPU(obj)->cfg.ext_zihpm = false;

    /* Set to QEMU's first supported priv version */
    env->priv_ver = PRIV_VERSION_1_10_0;

    /*
     * Support all available satp_mode settings. The default
     * value will be set to MBARE if the user doesn't set
     * satp_mode manually (see set_satp_mode_default_map()).
     */
#ifndef CONFIG_USER_ONLY
    set_satp_mode_max_supported(RISCV_CPU(obj), VM_1_10_SV64);
#endif
}
#else
/* rv32 generic base CPU: MMU + PMP, latest priv spec, no misa defaults. */
static void rv32_base_cpu_init(Object *obj)
{
    RISCVCPU *cpu = RISCV_CPU(obj);
    CPURISCVState *env = &cpu->env;

    cpu->cfg.mmu = true;
    cpu->cfg.pmp = true;

    /* Set latest version of privileged specification */
    env->priv_ver = PRIV_VERSION_LATEST;
#ifndef CONFIG_USER_ONLY
    set_satp_mode_max_supported(RISCV_CPU(obj), VM_1_10_SV32);
#endif
}

/* SiFive U34-like core: RV32GC with S/U modes, priv 1.10, Sv32. */
static void rv32_sifive_u_cpu_init(Object *obj)
{
    RISCVCPU *cpu = RISCV_CPU(obj);
    CPURISCVState *env = &cpu->env;
    riscv_cpu_set_misa_ext(env, RVI | RVM | RVA | RVF | RVD | RVC | RVS | RVU);
    env->priv_ver = PRIV_VERSION_1_10_0;
#ifndef CONFIG_USER_ONLY
    set_satp_mode_max_supported(RISCV_CPU(obj), VM_1_10_SV32);
#endif

    /* inherited from parent obj via riscv_cpu_init() */
    cpu->cfg.ext_zifencei = true;
    cpu->cfg.ext_zicsr = true;
    cpu->cfg.mmu = true;
    cpu->cfg.pmp = true;
}

/* SiFive E31-like embedded core: RV32IMACU, no MMU. */
static void rv32_sifive_e_cpu_init(Object *obj)
{
    CPURISCVState *env = &RISCV_CPU(obj)->env;
    RISCVCPU *cpu = RISCV_CPU(obj);

    riscv_cpu_set_misa_ext(env, RVI | RVM | RVA | RVC | RVU);
    env->priv_ver = PRIV_VERSION_1_10_0;
#ifndef CONFIG_USER_ONLY
    set_satp_mode_max_supported(cpu, VM_1_10_MBARE);
#endif

    /* inherited from parent obj via riscv_cpu_init() */
    cpu->cfg.ext_zifencei = true;
    cpu->cfg.ext_zicsr = true;
    cpu->cfg.pmp = true;
}

/* lowRISC Ibex: RV32IMCU, priv 1.12, no MMU, Smepmp. */
static void rv32_ibex_cpu_init(Object *obj)
{
    CPURISCVState *env = &RISCV_CPU(obj)->env;
    RISCVCPU *cpu = RISCV_CPU(obj);

    riscv_cpu_set_misa_ext(env, RVI | RVM | RVC | RVU);
    env->priv_ver = PRIV_VERSION_1_12_0;
#ifndef CONFIG_USER_ONLY
    set_satp_mode_max_supported(cpu, VM_1_10_MBARE);
#endif
    /* inherited from parent obj via riscv_cpu_init() */
    cpu->cfg.ext_zifencei = true;
    cpu->cfg.ext_zicsr = true;
    cpu->cfg.pmp = true;
    cpu->cfg.ext_smepmp = true;
}

/* RV32IMAFCU core without MMU, priv 1.10. */
static void rv32_imafcu_nommu_cpu_init(Object *obj)
{
    CPURISCVState *env = &RISCV_CPU(obj)->env;
    RISCVCPU *cpu = RISCV_CPU(obj);

    riscv_cpu_set_misa_ext(env, RVI | RVM | RVA | RVF | RVC | RVU);
    env->priv_ver = PRIV_VERSION_1_10_0;
#ifndef CONFIG_USER_ONLY
    set_satp_mode_max_supported(cpu, VM_1_10_MBARE);
#endif

    /* inherited from parent obj via riscv_cpu_init() */
    cpu->cfg.ext_zifencei = true;
    cpu->cfg.ext_zicsr = true;
    cpu->cfg.pmp = true;
}
#endif

/*
 * Resolve a '-cpu' model string to its QOM class. Only the model name
 * before the first ',' is used; returns NULL if no such type exists.
 */
static ObjectClass *riscv_cpu_class_by_name(const char *cpu_model)
{
    ObjectClass *oc;
    char *typename;
    char **cpuname;

    cpuname = g_strsplit(cpu_model, ",", 1);
    typename = g_strdup_printf(RISCV_CPU_TYPE_NAME("%s"), cpuname[0]);
    oc = object_class_by_name(typename);
    g_strfreev(cpuname);
    g_free(typename);

    return oc;
}

/*
 * Return the user-visible model name derived from the CPU's type name.
 * The caller owns (and must free) the returned string.
 */
char *riscv_cpu_get_name(RISCVCPU *cpu)
{
    RISCVCPUClass *rcc = RISCV_CPU_GET_CLASS(cpu);
    const char *typename = object_class_get_name(OBJECT_CLASS(rcc));

    g_assert(g_str_has_suffix(typename, RISCV_CPU_TYPE_SUFFIX));

    return cpu_model_from_type(typename);
}

/*
 * Dump CPU state to 'f' for 'info registers' and debugging:
 * pc, selected CSRs (system mode), GPRs, and optionally FPRs and
 * vector state depending on 'flags'.
 */
static void riscv_cpu_dump_state(CPUState *cs, FILE *f, int flags)
{
    RISCVCPU *cpu = RISCV_CPU(cs);
    CPURISCVState *env = &cpu->env;
    int i, j;
    uint8_t *p;

#if !defined(CONFIG_USER_ONLY)
    if (riscv_has_ext(env, RVH)) {
        qemu_fprintf(f, " %s %d\n", "V      =  ", env->virt_enabled);
    }
#endif
    qemu_fprintf(f, " %s " TARGET_FMT_lx "\n", "pc      ", env->pc);
#ifndef CONFIG_USER_ONLY
    {
        static const int dump_csrs[] = {
            CSR_MHARTID,
            CSR_MSTATUS,
            CSR_MSTATUSH,
            /*
             * CSR_SSTATUS is intentionally omitted here as its value
             * can be figured out by looking at CSR_MSTATUS
             */
            CSR_HSTATUS,
            CSR_VSSTATUS,
            CSR_MIP,
            CSR_MIE,
            CSR_MIDELEG,
            CSR_HIDELEG,
            CSR_MEDELEG,
            CSR_HEDELEG,
            CSR_MTVEC,
            CSR_STVEC,
            CSR_VSTVEC,
            CSR_MEPC,
            CSR_SEPC,
            CSR_VSEPC,
            CSR_MCAUSE,
            CSR_SCAUSE,
            CSR_VSCAUSE,
            CSR_MTVAL,
            CSR_STVAL,
            CSR_HTVAL,
            CSR_MTVAL2,
            CSR_MSCRATCH,
            CSR_SSCRATCH,
            CSR_SATP,
            CSR_MMTE,
            CSR_UPMBASE,
            CSR_UPMMASK,
            CSR_SPMBASE,
            CSR_SPMMASK,
            CSR_MPMBASE,
            CSR_MPMMASK,
        };

        for (i = 0; i < ARRAY_SIZE(dump_csrs); ++i) {
            int csrno = dump_csrs[i];
            target_ulong val = 0;
            RISCVException res = riscv_csrrw_debug(env, csrno, &val, 0, 0);

            /*
             * Rely on the smode, hmode, etc, predicates within csr.c
             * to do the filtering of the registers that are present.
             */
            if (res == RISCV_EXCP_NONE) {
                qemu_fprintf(f, " %-8s " TARGET_FMT_lx "\n",
                             csr_ops[csrno].name, val);
            }
        }
    }
#endif

    for (i = 0; i < 32; i++) {
        qemu_fprintf(f, " %-8s " TARGET_FMT_lx,
                     riscv_int_regnames[i], env->gpr[i]);
        if ((i & 3) == 3) {
            qemu_fprintf(f, "\n");
        }
    }
    if (flags & CPU_DUMP_FPU) {
        for (i = 0; i < 32; i++) {
            qemu_fprintf(f, " %-8s %016" PRIx64,
                         riscv_fpr_regnames[i], env->fpr[i]);
            if ((i & 3) == 3) {
                qemu_fprintf(f, "\n");
            }
        }
    }
    if (riscv_has_ext(env, RVV) && (flags & CPU_DUMP_VPU)) {
        static const int dump_rvv_csrs[] = {
            CSR_VSTART,
            CSR_VXSAT,
            CSR_VXRM,
            CSR_VCSR,
            CSR_VL,
            CSR_VTYPE,
            CSR_VLENB,
        };
        for (i = 0; i < ARRAY_SIZE(dump_rvv_csrs); ++i) {
            int csrno = dump_rvv_csrs[i];
            target_ulong val = 0;
            RISCVException res = riscv_csrrw_debug(env, csrno, &val, 0, 0);

            /*
             * Rely on the smode, hmode, etc, predicates within csr.c
             * to do the filtering of the registers that are present.
             */
            if (res == RISCV_EXCP_NONE) {
                qemu_fprintf(f, " %-8s " TARGET_FMT_lx "\n",
                             csr_ops[csrno].name, val);
            }
        }
        uint16_t vlenb = cpu->cfg.vlenb;

        /* Vector registers are stored host-endian; BYTE() fixes up order. */
        for (i = 0; i < 32; i++) {
            qemu_fprintf(f, " %-8s ", riscv_rvv_regnames[i]);
            p = (uint8_t *)env->vreg;
            for (j = vlenb - 1 ; j >= 0; j--) {
                qemu_fprintf(f, "%02x", *(p + i * vlenb + BYTE(j)));
            }
            qemu_fprintf(f, "\n");
        }
    }
}

/* Set pc, sign-extending to 32 bits when the effective XLEN is RV32. */
static void riscv_cpu_set_pc(CPUState *cs, vaddr value)
{
    RISCVCPU *cpu = RISCV_CPU(cs);
    CPURISCVState *env = &cpu->env;

    if (env->xl == MXL_RV32) {
        env->pc = (int32_t)value;
    } else {
        env->pc = value;
    }
}

/* Return pc, truncated to 32 bits when the effective XLEN is RV32. */
static vaddr riscv_cpu_get_pc(CPUState *cs)
{
    RISCVCPU *cpu = RISCV_CPU(cs);
    CPURISCVState *env = &cpu->env;

    /* Match cpu_get_tb_cpu_state. */
    if (env->xl == MXL_RV32) {
        return env->pc & UINT32_MAX;
    }
    return env->pc;
}

/* True when an interrupt is pending that could wake the hart from WFI. */
static bool riscv_cpu_has_work(CPUState *cs)
{
#ifndef CONFIG_USER_ONLY
    RISCVCPU *cpu = RISCV_CPU(cs);
    CPURISCVState *env = &cpu->env;
    /*
     * Definition of the WFI instruction requires it to ignore the privilege
     * mode and delegation registers, but respect individual enables
     */
    return riscv_cpu_all_pending(env) != 0 ||
        riscv_cpu_sirq_pending(env) != RISCV_EXCP_NONE ||
        riscv_cpu_vsirq_pending(env) != RISCV_EXCP_NONE;
#else
    return true;
#endif
}

/* MMU index for the current state; delegates to riscv_env_mmu_index(). */
static int riscv_cpu_mmu_index(CPUState *cs, bool ifetch)
{
    return riscv_env_mmu_index(cpu_env(cs), ifetch);
}

/*
 * 'hold' phase of CPU reset: restore architectural reset state
 * (M-mode, pc = resetvec, default interrupt priorities, PMP unlock, ...)
 * after the parent class's hold phase has run.
 */
static void riscv_cpu_reset_hold(Object *obj)
{
#ifndef CONFIG_USER_ONLY
    uint8_t iprio;
    int i, irq, rdzero;
#endif
    CPUState *cs = CPU(obj);
    RISCVCPU *cpu = RISCV_CPU(cs);
    RISCVCPUClass *mcc = RISCV_CPU_GET_CLASS(cpu);
    CPURISCVState *env = &cpu->env;

    if (mcc->parent_phases.hold) {
        mcc->parent_phases.hold(obj);
    }
#ifndef CONFIG_USER_ONLY
    env->misa_mxl = mcc->misa_mxl_max;
    env->priv = PRV_M;
    env->mstatus &= ~(MSTATUS_MIE | MSTATUS_MPRV);
    if (env->misa_mxl > MXL_RV32) {
        /*
         * The reset status of SXL/UXL is undefined, but mstatus is WARL
         * and we must ensure that the value after init is valid for read.
         */
        env->mstatus = set_field(env->mstatus, MSTATUS64_SXL, env->misa_mxl);
        env->mstatus = set_field(env->mstatus, MSTATUS64_UXL, env->misa_mxl);
        if (riscv_has_ext(env, RVH)) {
            env->vsstatus = set_field(env->vsstatus,
                                      MSTATUS64_SXL, env->misa_mxl);
            env->vsstatus = set_field(env->vsstatus,
                                      MSTATUS64_UXL, env->misa_mxl);
            env->mstatus_hs = set_field(env->mstatus_hs,
                                        MSTATUS64_SXL, env->misa_mxl);
            env->mstatus_hs = set_field(env->mstatus_hs,
                                        MSTATUS64_UXL, env->misa_mxl);
        }
    }
    env->mcause = 0;
    env->miclaim = MIP_SGEIP;
    env->pc = env->resetvec;
    env->bins = 0;
    env->two_stage_lookup = false;

    env->menvcfg = (cpu->cfg.ext_svpbmt ? MENVCFG_PBMTE : 0) |
                   (cpu->cfg.ext_svadu ? MENVCFG_ADUE : 0);
    env->henvcfg = (cpu->cfg.ext_svpbmt ? HENVCFG_PBMTE : 0) |
                   (cpu->cfg.ext_svadu ? HENVCFG_ADUE : 0);

    /* Initialize default priorities of local interrupts. */
    for (i = 0; i < ARRAY_SIZE(env->miprio); i++) {
        iprio = riscv_cpu_default_priority(i);
        env->miprio[i] = (i == IRQ_M_EXT) ? 0 : iprio;
        env->siprio[i] = (i == IRQ_S_EXT) ? 0 : iprio;
        env->hviprio[i] = 0;
    }
    i = 0;
    while (!riscv_cpu_hviprio_index2irq(i, &irq, &rdzero)) {
        if (!rdzero) {
            env->hviprio[irq] = env->miprio[irq];
        }
        i++;
    }
    /* mmte is supposed to have pm.current hardwired to 1 */
    env->mmte |= (EXT_STATUS_INITIAL | MMTE_M_PM_CURRENT);

    /*
     * Bits 10, 6, 2 and 12 of mideleg are read only 1 when the Hypervisor
     * extension is enabled.
     */
    if (riscv_has_ext(env, RVH)) {
        env->mideleg |= HS_MODE_INTERRUPTS;
    }

    /*
     * Clear mseccfg and unlock all the PMP entries upon reset.
     * This is allowed as per the priv and smepmp specifications
     * and is needed to clear stale entries across reboots.
     */
    if (riscv_cpu_cfg(env)->ext_smepmp) {
        env->mseccfg = 0;
    }

    pmp_unlock_entries(env);
#endif
    env->xl = riscv_cpu_mxl(env);
    riscv_cpu_update_mask(env);
    cs->exception_index = RISCV_EXCP_NONE;
    env->load_res = -1;
    set_default_nan_mode(1, &env->fp_status);

#ifndef CONFIG_USER_ONLY
    if (cpu->cfg.debug) {
        riscv_trigger_reset_hold(env);
    }

    if (kvm_enabled()) {
        kvm_riscv_reset_vcpu(cpu);
    }
#endif
}

/* Select the disassembler matching the hart's effective XLEN. */
static void riscv_cpu_disas_set_info(CPUState *s, disassemble_info *info)
{
    RISCVCPU *cpu = RISCV_CPU(s);
    CPURISCVState *env = &cpu->env;
    info->target_info = &cpu->cfg;

    switch (env->xl) {
    case MXL_RV32:
        info->print_insn = print_insn_riscv32;
        break;
    case MXL_RV64:
        info->print_insn = print_insn_riscv64;
        break;
    case MXL_RV128:
        info->print_insn = print_insn_riscv128;
        break;
    default:
        g_assert_not_reached();
    }
}

#ifndef CONFIG_USER_ONLY
/*
 * Validate and complete cpu->cfg.satp_mode: fill in a default map when
 * the user set nothing, reject unsupported or inconsistent requests,
 * and expand the map to cover all lower valid modes.
 */
static void riscv_cpu_satp_mode_finalize(RISCVCPU *cpu, Error **errp)
{
    bool rv32 = riscv_cpu_is_32bit(cpu);
    uint8_t satp_mode_map_max, satp_mode_supported_max;

    /* The CPU wants the OS to decide which satp mode to use */
    if (cpu->cfg.satp_mode.supported == 0) {
        return;
    }

    satp_mode_supported_max =
        satp_mode_max_from_map(cpu->cfg.satp_mode.supported);

    if (cpu->cfg.satp_mode.map == 0) {
        if (cpu->cfg.satp_mode.init == 0) {
            /* If unset by the user, we fallback to the default satp mode.
*/ 1045 set_satp_mode_default_map(cpu); 1046 } else { 1047 /* 1048 * Find the lowest level that was disabled and then enable the 1049 * first valid level below which can be found in 1050 * valid_vm_1_10_32/64. 1051 */ 1052 for (int i = 1; i < 16; ++i) { 1053 if ((cpu->cfg.satp_mode.init & (1 << i)) && 1054 (cpu->cfg.satp_mode.supported & (1 << i))) { 1055 for (int j = i - 1; j >= 0; --j) { 1056 if (cpu->cfg.satp_mode.supported & (1 << j)) { 1057 cpu->cfg.satp_mode.map |= (1 << j); 1058 break; 1059 } 1060 } 1061 break; 1062 } 1063 } 1064 } 1065 } 1066 1067 satp_mode_map_max = satp_mode_max_from_map(cpu->cfg.satp_mode.map); 1068 1069 /* Make sure the user asked for a supported configuration (HW and qemu) */ 1070 if (satp_mode_map_max > satp_mode_supported_max) { 1071 error_setg(errp, "satp_mode %s is higher than hw max capability %s", 1072 satp_mode_str(satp_mode_map_max, rv32), 1073 satp_mode_str(satp_mode_supported_max, rv32)); 1074 return; 1075 } 1076 1077 /* 1078 * Make sure the user did not ask for an invalid configuration as per 1079 * the specification. 
1080 */ 1081 if (!rv32) { 1082 for (int i = satp_mode_map_max - 1; i >= 0; --i) { 1083 if (!(cpu->cfg.satp_mode.map & (1 << i)) && 1084 (cpu->cfg.satp_mode.init & (1 << i)) && 1085 (cpu->cfg.satp_mode.supported & (1 << i))) { 1086 error_setg(errp, "cannot disable %s satp mode if %s " 1087 "is enabled", satp_mode_str(i, false), 1088 satp_mode_str(satp_mode_map_max, false)); 1089 return; 1090 } 1091 } 1092 } 1093 1094 /* Finally expand the map so that all valid modes are set */ 1095 for (int i = satp_mode_map_max - 1; i >= 0; --i) { 1096 if (cpu->cfg.satp_mode.supported & (1 << i)) { 1097 cpu->cfg.satp_mode.map |= (1 << i); 1098 } 1099 } 1100 } 1101 #endif 1102 1103 void riscv_cpu_finalize_features(RISCVCPU *cpu, Error **errp) 1104 { 1105 Error *local_err = NULL; 1106 1107 #ifndef CONFIG_USER_ONLY 1108 riscv_cpu_satp_mode_finalize(cpu, &local_err); 1109 if (local_err != NULL) { 1110 error_propagate(errp, local_err); 1111 return; 1112 } 1113 #endif 1114 1115 if (tcg_enabled()) { 1116 riscv_tcg_cpu_finalize_features(cpu, &local_err); 1117 if (local_err != NULL) { 1118 error_propagate(errp, local_err); 1119 return; 1120 } 1121 } else if (kvm_enabled()) { 1122 riscv_kvm_cpu_finalize_features(cpu, &local_err); 1123 if (local_err != NULL) { 1124 error_propagate(errp, local_err); 1125 return; 1126 } 1127 } 1128 } 1129 1130 static void riscv_cpu_realize(DeviceState *dev, Error **errp) 1131 { 1132 CPUState *cs = CPU(dev); 1133 RISCVCPU *cpu = RISCV_CPU(dev); 1134 RISCVCPUClass *mcc = RISCV_CPU_GET_CLASS(dev); 1135 Error *local_err = NULL; 1136 1137 if (object_dynamic_cast(OBJECT(dev), TYPE_RISCV_CPU_ANY) != NULL) { 1138 warn_report("The 'any' CPU is deprecated and will be " 1139 "removed in the future."); 1140 } 1141 1142 cpu_exec_realizefn(cs, &local_err); 1143 if (local_err != NULL) { 1144 error_propagate(errp, local_err); 1145 return; 1146 } 1147 1148 riscv_cpu_finalize_features(cpu, &local_err); 1149 if (local_err != NULL) { 1150 error_propagate(errp, local_err); 1151 
return; 1152 } 1153 1154 riscv_cpu_register_gdb_regs_for_features(cs); 1155 1156 #ifndef CONFIG_USER_ONLY 1157 if (cpu->cfg.debug) { 1158 riscv_trigger_realize(&cpu->env); 1159 } 1160 #endif 1161 1162 qemu_init_vcpu(cs); 1163 cpu_reset(cs); 1164 1165 mcc->parent_realize(dev, errp); 1166 } 1167 1168 bool riscv_cpu_accelerator_compatible(RISCVCPU *cpu) 1169 { 1170 if (tcg_enabled()) { 1171 return riscv_cpu_tcg_compatible(cpu); 1172 } 1173 1174 return true; 1175 } 1176 1177 #ifndef CONFIG_USER_ONLY 1178 static void cpu_riscv_get_satp(Object *obj, Visitor *v, const char *name, 1179 void *opaque, Error **errp) 1180 { 1181 RISCVSATPMap *satp_map = opaque; 1182 uint8_t satp = satp_mode_from_str(name); 1183 bool value; 1184 1185 value = satp_map->map & (1 << satp); 1186 1187 visit_type_bool(v, name, &value, errp); 1188 } 1189 1190 static void cpu_riscv_set_satp(Object *obj, Visitor *v, const char *name, 1191 void *opaque, Error **errp) 1192 { 1193 RISCVSATPMap *satp_map = opaque; 1194 uint8_t satp = satp_mode_from_str(name); 1195 bool value; 1196 1197 if (!visit_type_bool(v, name, &value, errp)) { 1198 return; 1199 } 1200 1201 satp_map->map = deposit32(satp_map->map, satp, 1, value); 1202 satp_map->init |= 1 << satp; 1203 } 1204 1205 void riscv_add_satp_mode_properties(Object *obj) 1206 { 1207 RISCVCPU *cpu = RISCV_CPU(obj); 1208 1209 if (cpu->env.misa_mxl == MXL_RV32) { 1210 object_property_add(obj, "sv32", "bool", cpu_riscv_get_satp, 1211 cpu_riscv_set_satp, NULL, &cpu->cfg.satp_mode); 1212 } else { 1213 object_property_add(obj, "sv39", "bool", cpu_riscv_get_satp, 1214 cpu_riscv_set_satp, NULL, &cpu->cfg.satp_mode); 1215 object_property_add(obj, "sv48", "bool", cpu_riscv_get_satp, 1216 cpu_riscv_set_satp, NULL, &cpu->cfg.satp_mode); 1217 object_property_add(obj, "sv57", "bool", cpu_riscv_get_satp, 1218 cpu_riscv_set_satp, NULL, &cpu->cfg.satp_mode); 1219 object_property_add(obj, "sv64", "bool", cpu_riscv_get_satp, 1220 cpu_riscv_set_satp, NULL, &cpu->cfg.satp_mode); 1221 
} 1222 } 1223 1224 static void riscv_cpu_set_irq(void *opaque, int irq, int level) 1225 { 1226 RISCVCPU *cpu = RISCV_CPU(opaque); 1227 CPURISCVState *env = &cpu->env; 1228 1229 if (irq < IRQ_LOCAL_MAX) { 1230 switch (irq) { 1231 case IRQ_U_SOFT: 1232 case IRQ_S_SOFT: 1233 case IRQ_VS_SOFT: 1234 case IRQ_M_SOFT: 1235 case IRQ_U_TIMER: 1236 case IRQ_S_TIMER: 1237 case IRQ_VS_TIMER: 1238 case IRQ_M_TIMER: 1239 case IRQ_U_EXT: 1240 case IRQ_VS_EXT: 1241 case IRQ_M_EXT: 1242 if (kvm_enabled()) { 1243 kvm_riscv_set_irq(cpu, irq, level); 1244 } else { 1245 riscv_cpu_update_mip(env, 1 << irq, BOOL_TO_MASK(level)); 1246 } 1247 break; 1248 case IRQ_S_EXT: 1249 if (kvm_enabled()) { 1250 kvm_riscv_set_irq(cpu, irq, level); 1251 } else { 1252 env->external_seip = level; 1253 riscv_cpu_update_mip(env, 1 << irq, 1254 BOOL_TO_MASK(level | env->software_seip)); 1255 } 1256 break; 1257 default: 1258 g_assert_not_reached(); 1259 } 1260 } else if (irq < (IRQ_LOCAL_MAX + IRQ_LOCAL_GUEST_MAX)) { 1261 /* Require H-extension for handling guest local interrupts */ 1262 if (!riscv_has_ext(env, RVH)) { 1263 g_assert_not_reached(); 1264 } 1265 1266 /* Compute bit position in HGEIP CSR */ 1267 irq = irq - IRQ_LOCAL_MAX + 1; 1268 if (env->geilen < irq) { 1269 g_assert_not_reached(); 1270 } 1271 1272 /* Update HGEIP CSR */ 1273 env->hgeip &= ~((target_ulong)1 << irq); 1274 if (level) { 1275 env->hgeip |= (target_ulong)1 << irq; 1276 } 1277 1278 /* Update mip.SGEIP bit */ 1279 riscv_cpu_update_mip(env, MIP_SGEIP, 1280 BOOL_TO_MASK(!!(env->hgeie & env->hgeip))); 1281 } else { 1282 g_assert_not_reached(); 1283 } 1284 } 1285 #endif /* CONFIG_USER_ONLY */ 1286 1287 static bool riscv_cpu_is_dynamic(Object *cpu_obj) 1288 { 1289 return object_dynamic_cast(cpu_obj, TYPE_RISCV_DYNAMIC_CPU) != NULL; 1290 } 1291 1292 static void riscv_cpu_post_init(Object *obj) 1293 { 1294 accel_cpu_instance_init(CPU(obj)); 1295 } 1296 1297 static void riscv_cpu_init(Object *obj) 1298 { 1299 RISCVCPUClass *mcc = 
RISCV_CPU_GET_CLASS(obj); 1300 RISCVCPU *cpu = RISCV_CPU(obj); 1301 CPURISCVState *env = &cpu->env; 1302 1303 env->misa_mxl = mcc->misa_mxl_max; 1304 1305 #ifndef CONFIG_USER_ONLY 1306 qdev_init_gpio_in(DEVICE(obj), riscv_cpu_set_irq, 1307 IRQ_LOCAL_MAX + IRQ_LOCAL_GUEST_MAX); 1308 #endif /* CONFIG_USER_ONLY */ 1309 1310 general_user_opts = g_hash_table_new(g_str_hash, g_str_equal); 1311 1312 /* 1313 * The timer and performance counters extensions were supported 1314 * in QEMU before they were added as discrete extensions in the 1315 * ISA. To keep compatibility we'll always default them to 'true' 1316 * for all CPUs. Each accelerator will decide what to do when 1317 * users disable them. 1318 */ 1319 RISCV_CPU(obj)->cfg.ext_zicntr = true; 1320 RISCV_CPU(obj)->cfg.ext_zihpm = true; 1321 1322 /* Default values for non-bool cpu properties */ 1323 cpu->cfg.pmu_mask = MAKE_64BIT_MASK(3, 16); 1324 cpu->cfg.vlenb = 128 >> 3; 1325 cpu->cfg.elen = 64; 1326 cpu->cfg.cbom_blocksize = 64; 1327 cpu->cfg.cbop_blocksize = 64; 1328 cpu->cfg.cboz_blocksize = 64; 1329 cpu->env.vext_ver = VEXT_VERSION_1_00_0; 1330 } 1331 1332 typedef struct misa_ext_info { 1333 const char *name; 1334 const char *description; 1335 } MISAExtInfo; 1336 1337 #define MISA_INFO_IDX(_bit) \ 1338 __builtin_ctz(_bit) 1339 1340 #define MISA_EXT_INFO(_bit, _propname, _descr) \ 1341 [MISA_INFO_IDX(_bit)] = {.name = _propname, .description = _descr} 1342 1343 static const MISAExtInfo misa_ext_info_arr[] = { 1344 MISA_EXT_INFO(RVA, "a", "Atomic instructions"), 1345 MISA_EXT_INFO(RVC, "c", "Compressed instructions"), 1346 MISA_EXT_INFO(RVD, "d", "Double-precision float point"), 1347 MISA_EXT_INFO(RVF, "f", "Single-precision float point"), 1348 MISA_EXT_INFO(RVI, "i", "Base integer instruction set"), 1349 MISA_EXT_INFO(RVE, "e", "Base integer instruction set (embedded)"), 1350 MISA_EXT_INFO(RVM, "m", "Integer multiplication and division"), 1351 MISA_EXT_INFO(RVS, "s", "Supervisor-level instructions"), 1352 
MISA_EXT_INFO(RVU, "u", "User-level instructions"), 1353 MISA_EXT_INFO(RVH, "h", "Hypervisor"), 1354 MISA_EXT_INFO(RVJ, "x-j", "Dynamic translated languages"), 1355 MISA_EXT_INFO(RVV, "v", "Vector operations"), 1356 MISA_EXT_INFO(RVG, "g", "General purpose (IMAFD_Zicsr_Zifencei)"), 1357 MISA_EXT_INFO(RVB, "x-b", "Bit manipulation (Zba_Zbb_Zbs)") 1358 }; 1359 1360 static void riscv_cpu_validate_misa_mxl(RISCVCPUClass *mcc) 1361 { 1362 CPUClass *cc = CPU_CLASS(mcc); 1363 1364 /* Validate that MISA_MXL is set properly. */ 1365 switch (mcc->misa_mxl_max) { 1366 #ifdef TARGET_RISCV64 1367 case MXL_RV64: 1368 case MXL_RV128: 1369 cc->gdb_core_xml_file = "riscv-64bit-cpu.xml"; 1370 break; 1371 #endif 1372 case MXL_RV32: 1373 cc->gdb_core_xml_file = "riscv-32bit-cpu.xml"; 1374 break; 1375 default: 1376 g_assert_not_reached(); 1377 } 1378 } 1379 1380 static int riscv_validate_misa_info_idx(uint32_t bit) 1381 { 1382 int idx; 1383 1384 /* 1385 * Our lowest valid input (RVA) is 1 and 1386 * __builtin_ctz() is UB with zero. 
     */
    g_assert(bit != 0);
    idx = MISA_INFO_IDX(bit);

    g_assert(idx < ARRAY_SIZE(misa_ext_info_arr));
    return idx;
}

/* Return the property name ("a", "zve32f"-style letter) for a MISA bit. */
const char *riscv_get_misa_ext_name(uint32_t bit)
{
    int idx = riscv_validate_misa_info_idx(bit);
    const char *val = misa_ext_info_arr[idx].name;

    g_assert(val != NULL);
    return val;
}

/* Return the human-readable description for a MISA bit. */
const char *riscv_get_misa_ext_description(uint32_t bit)
{
    int idx = riscv_validate_misa_info_idx(bit);
    const char *val = misa_ext_info_arr[idx].description;

    g_assert(val != NULL);
    return val;
}

/* Describe one boolean multi-letter extension: name, cfg offset, default. */
#define MULTI_EXT_CFG_BOOL(_name, _prop, _defval) \
    {.name = _name, .offset = CPU_CFG_OFFSET(_prop), \
     .enabled = _defval}

const RISCVCPUMultiExtConfig riscv_cpu_extensions[] = {
    /* Defaults for standard extensions */
    MULTI_EXT_CFG_BOOL("sscofpmf", ext_sscofpmf, false),
    MULTI_EXT_CFG_BOOL("zifencei", ext_zifencei, true),
    MULTI_EXT_CFG_BOOL("zicsr", ext_zicsr, true),
    MULTI_EXT_CFG_BOOL("zihintntl", ext_zihintntl, true),
    MULTI_EXT_CFG_BOOL("zihintpause", ext_zihintpause, true),
    MULTI_EXT_CFG_BOOL("zacas", ext_zacas, false),
    MULTI_EXT_CFG_BOOL("zawrs", ext_zawrs, true),
    MULTI_EXT_CFG_BOOL("zfa", ext_zfa, true),
    MULTI_EXT_CFG_BOOL("zfh", ext_zfh, false),
    MULTI_EXT_CFG_BOOL("zfhmin", ext_zfhmin, false),
    MULTI_EXT_CFG_BOOL("zve32f", ext_zve32f, false),
    MULTI_EXT_CFG_BOOL("zve64f", ext_zve64f, false),
    MULTI_EXT_CFG_BOOL("zve64d", ext_zve64d, false),
    MULTI_EXT_CFG_BOOL("sstc", ext_sstc, true),

    MULTI_EXT_CFG_BOOL("smepmp", ext_smepmp, false),
    MULTI_EXT_CFG_BOOL("smstateen", ext_smstateen, false),
    MULTI_EXT_CFG_BOOL("svadu", ext_svadu, true),
    MULTI_EXT_CFG_BOOL("svinval", ext_svinval, false),
    MULTI_EXT_CFG_BOOL("svnapot", ext_svnapot, false),
    MULTI_EXT_CFG_BOOL("svpbmt", ext_svpbmt, false),

    MULTI_EXT_CFG_BOOL("zicntr",
ext_zicntr, true),
    MULTI_EXT_CFG_BOOL("zihpm", ext_zihpm, true),

    MULTI_EXT_CFG_BOOL("zba", ext_zba, true),
    MULTI_EXT_CFG_BOOL("zbb", ext_zbb, true),
    MULTI_EXT_CFG_BOOL("zbc", ext_zbc, true),
    MULTI_EXT_CFG_BOOL("zbkb", ext_zbkb, false),
    MULTI_EXT_CFG_BOOL("zbkc", ext_zbkc, false),
    MULTI_EXT_CFG_BOOL("zbkx", ext_zbkx, false),
    MULTI_EXT_CFG_BOOL("zbs", ext_zbs, true),
    MULTI_EXT_CFG_BOOL("zk", ext_zk, false),
    MULTI_EXT_CFG_BOOL("zkn", ext_zkn, false),
    MULTI_EXT_CFG_BOOL("zknd", ext_zknd, false),
    MULTI_EXT_CFG_BOOL("zkne", ext_zkne, false),
    MULTI_EXT_CFG_BOOL("zknh", ext_zknh, false),
    MULTI_EXT_CFG_BOOL("zkr", ext_zkr, false),
    MULTI_EXT_CFG_BOOL("zks", ext_zks, false),
    MULTI_EXT_CFG_BOOL("zksed", ext_zksed, false),
    MULTI_EXT_CFG_BOOL("zksh", ext_zksh, false),
    MULTI_EXT_CFG_BOOL("zkt", ext_zkt, false),

    MULTI_EXT_CFG_BOOL("zdinx", ext_zdinx, false),
    MULTI_EXT_CFG_BOOL("zfinx", ext_zfinx, false),
    MULTI_EXT_CFG_BOOL("zhinx", ext_zhinx, false),
    MULTI_EXT_CFG_BOOL("zhinxmin", ext_zhinxmin, false),

    MULTI_EXT_CFG_BOOL("zicbom", ext_zicbom, true),
    MULTI_EXT_CFG_BOOL("zicbop", ext_zicbop, true),
    MULTI_EXT_CFG_BOOL("zicboz", ext_zicboz, true),

    MULTI_EXT_CFG_BOOL("zmmul", ext_zmmul, false),

    MULTI_EXT_CFG_BOOL("zca", ext_zca, false),
    MULTI_EXT_CFG_BOOL("zcb", ext_zcb, false),
    MULTI_EXT_CFG_BOOL("zcd", ext_zcd, false),
    MULTI_EXT_CFG_BOOL("zce", ext_zce, false),
    MULTI_EXT_CFG_BOOL("zcf", ext_zcf, false),
    MULTI_EXT_CFG_BOOL("zcmp", ext_zcmp, false),
    MULTI_EXT_CFG_BOOL("zcmt", ext_zcmt, false),
    MULTI_EXT_CFG_BOOL("zicond", ext_zicond, false),

    /* Vector cryptography extensions */
    MULTI_EXT_CFG_BOOL("zvbb", ext_zvbb, false),
    MULTI_EXT_CFG_BOOL("zvbc", ext_zvbc, false),
    /* Fix: "zvkb" must toggle ext_zvkb, not ext_zvkg */
    MULTI_EXT_CFG_BOOL("zvkb", ext_zvkb, false),
    MULTI_EXT_CFG_BOOL("zvkg", ext_zvkg, false),
MULTI_EXT_CFG_BOOL("zvkned", ext_zvkned, false), 1488 MULTI_EXT_CFG_BOOL("zvknha", ext_zvknha, false), 1489 MULTI_EXT_CFG_BOOL("zvknhb", ext_zvknhb, false), 1490 MULTI_EXT_CFG_BOOL("zvksed", ext_zvksed, false), 1491 MULTI_EXT_CFG_BOOL("zvksh", ext_zvksh, false), 1492 MULTI_EXT_CFG_BOOL("zvkt", ext_zvkt, false), 1493 MULTI_EXT_CFG_BOOL("zvkn", ext_zvkn, false), 1494 MULTI_EXT_CFG_BOOL("zvknc", ext_zvknc, false), 1495 MULTI_EXT_CFG_BOOL("zvkng", ext_zvkng, false), 1496 MULTI_EXT_CFG_BOOL("zvks", ext_zvks, false), 1497 MULTI_EXT_CFG_BOOL("zvksc", ext_zvksc, false), 1498 MULTI_EXT_CFG_BOOL("zvksg", ext_zvksg, false), 1499 1500 DEFINE_PROP_END_OF_LIST(), 1501 }; 1502 1503 const RISCVCPUMultiExtConfig riscv_cpu_vendor_exts[] = { 1504 MULTI_EXT_CFG_BOOL("xtheadba", ext_xtheadba, false), 1505 MULTI_EXT_CFG_BOOL("xtheadbb", ext_xtheadbb, false), 1506 MULTI_EXT_CFG_BOOL("xtheadbs", ext_xtheadbs, false), 1507 MULTI_EXT_CFG_BOOL("xtheadcmo", ext_xtheadcmo, false), 1508 MULTI_EXT_CFG_BOOL("xtheadcondmov", ext_xtheadcondmov, false), 1509 MULTI_EXT_CFG_BOOL("xtheadfmemidx", ext_xtheadfmemidx, false), 1510 MULTI_EXT_CFG_BOOL("xtheadfmv", ext_xtheadfmv, false), 1511 MULTI_EXT_CFG_BOOL("xtheadmac", ext_xtheadmac, false), 1512 MULTI_EXT_CFG_BOOL("xtheadmemidx", ext_xtheadmemidx, false), 1513 MULTI_EXT_CFG_BOOL("xtheadmempair", ext_xtheadmempair, false), 1514 MULTI_EXT_CFG_BOOL("xtheadsync", ext_xtheadsync, false), 1515 MULTI_EXT_CFG_BOOL("xventanacondops", ext_XVentanaCondOps, false), 1516 1517 DEFINE_PROP_END_OF_LIST(), 1518 }; 1519 1520 /* These are experimental so mark with 'x-' */ 1521 const RISCVCPUMultiExtConfig riscv_cpu_experimental_exts[] = { 1522 MULTI_EXT_CFG_BOOL("x-smaia", ext_smaia, false), 1523 MULTI_EXT_CFG_BOOL("x-ssaia", ext_ssaia, false), 1524 1525 MULTI_EXT_CFG_BOOL("x-zaamo", ext_zaamo, false), 1526 MULTI_EXT_CFG_BOOL("x-zalrsc", ext_zalrsc, false), 1527 1528 MULTI_EXT_CFG_BOOL("x-zvfh", ext_zvfh, false), 1529 MULTI_EXT_CFG_BOOL("x-zvfhmin", ext_zvfhmin, false), 
1530 1531 MULTI_EXT_CFG_BOOL("x-zfbfmin", ext_zfbfmin, false), 1532 MULTI_EXT_CFG_BOOL("x-zvfbfmin", ext_zvfbfmin, false), 1533 MULTI_EXT_CFG_BOOL("x-zvfbfwma", ext_zvfbfwma, false), 1534 1535 DEFINE_PROP_END_OF_LIST(), 1536 }; 1537 1538 const RISCVCPUMultiExtConfig riscv_cpu_named_features[] = { 1539 MULTI_EXT_CFG_BOOL("svade", svade, true), 1540 MULTI_EXT_CFG_BOOL("zic64b", zic64b, true), 1541 1542 DEFINE_PROP_END_OF_LIST(), 1543 }; 1544 1545 /* Deprecated entries marked for future removal */ 1546 const RISCVCPUMultiExtConfig riscv_cpu_deprecated_exts[] = { 1547 MULTI_EXT_CFG_BOOL("Zifencei", ext_zifencei, true), 1548 MULTI_EXT_CFG_BOOL("Zicsr", ext_zicsr, true), 1549 MULTI_EXT_CFG_BOOL("Zihintntl", ext_zihintntl, true), 1550 MULTI_EXT_CFG_BOOL("Zihintpause", ext_zihintpause, true), 1551 MULTI_EXT_CFG_BOOL("Zawrs", ext_zawrs, true), 1552 MULTI_EXT_CFG_BOOL("Zfa", ext_zfa, true), 1553 MULTI_EXT_CFG_BOOL("Zfh", ext_zfh, false), 1554 MULTI_EXT_CFG_BOOL("Zfhmin", ext_zfhmin, false), 1555 MULTI_EXT_CFG_BOOL("Zve32f", ext_zve32f, false), 1556 MULTI_EXT_CFG_BOOL("Zve64f", ext_zve64f, false), 1557 MULTI_EXT_CFG_BOOL("Zve64d", ext_zve64d, false), 1558 1559 DEFINE_PROP_END_OF_LIST(), 1560 }; 1561 1562 static void cpu_set_prop_err(RISCVCPU *cpu, const char *propname, 1563 Error **errp) 1564 { 1565 g_autofree char *cpuname = riscv_cpu_get_name(cpu); 1566 error_setg(errp, "CPU '%s' does not allow changing the value of '%s'", 1567 cpuname, propname); 1568 } 1569 1570 static void prop_pmu_num_set(Object *obj, Visitor *v, const char *name, 1571 void *opaque, Error **errp) 1572 { 1573 RISCVCPU *cpu = RISCV_CPU(obj); 1574 uint8_t pmu_num, curr_pmu_num; 1575 uint32_t pmu_mask; 1576 1577 visit_type_uint8(v, name, &pmu_num, errp); 1578 1579 curr_pmu_num = ctpop32(cpu->cfg.pmu_mask); 1580 1581 if (pmu_num != curr_pmu_num && riscv_cpu_is_vendor(obj)) { 1582 cpu_set_prop_err(cpu, name, errp); 1583 error_append_hint(errp, "Current '%s' val: %u\n", 1584 name, curr_pmu_num); 1585 return; 
1586 } 1587 1588 if (pmu_num > (RV_MAX_MHPMCOUNTERS - 3)) { 1589 error_setg(errp, "Number of counters exceeds maximum available"); 1590 return; 1591 } 1592 1593 if (pmu_num == 0) { 1594 pmu_mask = 0; 1595 } else { 1596 pmu_mask = MAKE_64BIT_MASK(3, pmu_num); 1597 } 1598 1599 warn_report("\"pmu-num\" property is deprecated; use \"pmu-mask\""); 1600 cpu->cfg.pmu_mask = pmu_mask; 1601 cpu_option_add_user_setting("pmu-mask", pmu_mask); 1602 } 1603 1604 static void prop_pmu_num_get(Object *obj, Visitor *v, const char *name, 1605 void *opaque, Error **errp) 1606 { 1607 RISCVCPU *cpu = RISCV_CPU(obj); 1608 uint8_t pmu_num = ctpop32(cpu->cfg.pmu_mask); 1609 1610 visit_type_uint8(v, name, &pmu_num, errp); 1611 } 1612 1613 static const PropertyInfo prop_pmu_num = { 1614 .name = "pmu-num", 1615 .get = prop_pmu_num_get, 1616 .set = prop_pmu_num_set, 1617 }; 1618 1619 static void prop_pmu_mask_set(Object *obj, Visitor *v, const char *name, 1620 void *opaque, Error **errp) 1621 { 1622 RISCVCPU *cpu = RISCV_CPU(obj); 1623 uint32_t value; 1624 uint8_t pmu_num; 1625 1626 visit_type_uint32(v, name, &value, errp); 1627 1628 if (value != cpu->cfg.pmu_mask && riscv_cpu_is_vendor(obj)) { 1629 cpu_set_prop_err(cpu, name, errp); 1630 error_append_hint(errp, "Current '%s' val: %x\n", 1631 name, cpu->cfg.pmu_mask); 1632 return; 1633 } 1634 1635 pmu_num = ctpop32(value); 1636 1637 if (pmu_num > (RV_MAX_MHPMCOUNTERS - 3)) { 1638 error_setg(errp, "Number of counters exceeds maximum available"); 1639 return; 1640 } 1641 1642 cpu_option_add_user_setting(name, value); 1643 cpu->cfg.pmu_mask = value; 1644 } 1645 1646 static void prop_pmu_mask_get(Object *obj, Visitor *v, const char *name, 1647 void *opaque, Error **errp) 1648 { 1649 uint8_t pmu_mask = RISCV_CPU(obj)->cfg.pmu_mask; 1650 1651 visit_type_uint8(v, name, &pmu_mask, errp); 1652 } 1653 1654 static const PropertyInfo prop_pmu_mask = { 1655 .name = "pmu-mask", 1656 .get = prop_pmu_mask_get, 1657 .set = prop_pmu_mask_set, 1658 }; 1659 1660 
static void prop_mmu_set(Object *obj, Visitor *v, const char *name, 1661 void *opaque, Error **errp) 1662 { 1663 RISCVCPU *cpu = RISCV_CPU(obj); 1664 bool value; 1665 1666 visit_type_bool(v, name, &value, errp); 1667 1668 if (cpu->cfg.mmu != value && riscv_cpu_is_vendor(obj)) { 1669 cpu_set_prop_err(cpu, "mmu", errp); 1670 return; 1671 } 1672 1673 cpu_option_add_user_setting(name, value); 1674 cpu->cfg.mmu = value; 1675 } 1676 1677 static void prop_mmu_get(Object *obj, Visitor *v, const char *name, 1678 void *opaque, Error **errp) 1679 { 1680 bool value = RISCV_CPU(obj)->cfg.mmu; 1681 1682 visit_type_bool(v, name, &value, errp); 1683 } 1684 1685 static const PropertyInfo prop_mmu = { 1686 .name = "mmu", 1687 .get = prop_mmu_get, 1688 .set = prop_mmu_set, 1689 }; 1690 1691 static void prop_pmp_set(Object *obj, Visitor *v, const char *name, 1692 void *opaque, Error **errp) 1693 { 1694 RISCVCPU *cpu = RISCV_CPU(obj); 1695 bool value; 1696 1697 visit_type_bool(v, name, &value, errp); 1698 1699 if (cpu->cfg.pmp != value && riscv_cpu_is_vendor(obj)) { 1700 cpu_set_prop_err(cpu, name, errp); 1701 return; 1702 } 1703 1704 cpu_option_add_user_setting(name, value); 1705 cpu->cfg.pmp = value; 1706 } 1707 1708 static void prop_pmp_get(Object *obj, Visitor *v, const char *name, 1709 void *opaque, Error **errp) 1710 { 1711 bool value = RISCV_CPU(obj)->cfg.pmp; 1712 1713 visit_type_bool(v, name, &value, errp); 1714 } 1715 1716 static const PropertyInfo prop_pmp = { 1717 .name = "pmp", 1718 .get = prop_pmp_get, 1719 .set = prop_pmp_set, 1720 }; 1721 1722 static int priv_spec_from_str(const char *priv_spec_str) 1723 { 1724 int priv_version = -1; 1725 1726 if (!g_strcmp0(priv_spec_str, PRIV_VER_1_12_0_STR)) { 1727 priv_version = PRIV_VERSION_1_12_0; 1728 } else if (!g_strcmp0(priv_spec_str, PRIV_VER_1_11_0_STR)) { 1729 priv_version = PRIV_VERSION_1_11_0; 1730 } else if (!g_strcmp0(priv_spec_str, PRIV_VER_1_10_0_STR)) { 1731 priv_version = PRIV_VERSION_1_10_0; 1732 } 1733 1734 return 
priv_version; 1735 } 1736 1737 static const char *priv_spec_to_str(int priv_version) 1738 { 1739 switch (priv_version) { 1740 case PRIV_VERSION_1_10_0: 1741 return PRIV_VER_1_10_0_STR; 1742 case PRIV_VERSION_1_11_0: 1743 return PRIV_VER_1_11_0_STR; 1744 case PRIV_VERSION_1_12_0: 1745 return PRIV_VER_1_12_0_STR; 1746 default: 1747 return NULL; 1748 } 1749 } 1750 1751 static void prop_priv_spec_set(Object *obj, Visitor *v, const char *name, 1752 void *opaque, Error **errp) 1753 { 1754 RISCVCPU *cpu = RISCV_CPU(obj); 1755 g_autofree char *value = NULL; 1756 int priv_version = -1; 1757 1758 visit_type_str(v, name, &value, errp); 1759 1760 priv_version = priv_spec_from_str(value); 1761 if (priv_version < 0) { 1762 error_setg(errp, "Unsupported privilege spec version '%s'", value); 1763 return; 1764 } 1765 1766 if (priv_version != cpu->env.priv_ver && riscv_cpu_is_vendor(obj)) { 1767 cpu_set_prop_err(cpu, name, errp); 1768 error_append_hint(errp, "Current '%s' val: %s\n", name, 1769 object_property_get_str(obj, name, NULL)); 1770 return; 1771 } 1772 1773 cpu_option_add_user_setting(name, priv_version); 1774 cpu->env.priv_ver = priv_version; 1775 } 1776 1777 static void prop_priv_spec_get(Object *obj, Visitor *v, const char *name, 1778 void *opaque, Error **errp) 1779 { 1780 RISCVCPU *cpu = RISCV_CPU(obj); 1781 const char *value = priv_spec_to_str(cpu->env.priv_ver); 1782 1783 visit_type_str(v, name, (char **)&value, errp); 1784 } 1785 1786 static const PropertyInfo prop_priv_spec = { 1787 .name = "priv_spec", 1788 .get = prop_priv_spec_get, 1789 .set = prop_priv_spec_set, 1790 }; 1791 1792 static void prop_vext_spec_set(Object *obj, Visitor *v, const char *name, 1793 void *opaque, Error **errp) 1794 { 1795 RISCVCPU *cpu = RISCV_CPU(obj); 1796 g_autofree char *value = NULL; 1797 1798 visit_type_str(v, name, &value, errp); 1799 1800 if (g_strcmp0(value, VEXT_VER_1_00_0_STR) != 0) { 1801 error_setg(errp, "Unsupported vector spec version '%s'", value); 1802 return; 1803 } 
1804 1805 cpu_option_add_user_setting(name, VEXT_VERSION_1_00_0); 1806 cpu->env.vext_ver = VEXT_VERSION_1_00_0; 1807 } 1808 1809 static void prop_vext_spec_get(Object *obj, Visitor *v, const char *name, 1810 void *opaque, Error **errp) 1811 { 1812 const char *value = VEXT_VER_1_00_0_STR; 1813 1814 visit_type_str(v, name, (char **)&value, errp); 1815 } 1816 1817 static const PropertyInfo prop_vext_spec = { 1818 .name = "vext_spec", 1819 .get = prop_vext_spec_get, 1820 .set = prop_vext_spec_set, 1821 }; 1822 1823 static void prop_vlen_set(Object *obj, Visitor *v, const char *name, 1824 void *opaque, Error **errp) 1825 { 1826 RISCVCPU *cpu = RISCV_CPU(obj); 1827 uint16_t value; 1828 1829 if (!visit_type_uint16(v, name, &value, errp)) { 1830 return; 1831 } 1832 1833 if (!is_power_of_2(value)) { 1834 error_setg(errp, "Vector extension VLEN must be power of 2"); 1835 return; 1836 } 1837 1838 if (value != cpu->cfg.vlenb && riscv_cpu_is_vendor(obj)) { 1839 cpu_set_prop_err(cpu, name, errp); 1840 error_append_hint(errp, "Current '%s' val: %u\n", 1841 name, cpu->cfg.vlenb << 3); 1842 return; 1843 } 1844 1845 cpu_option_add_user_setting(name, value); 1846 cpu->cfg.vlenb = value >> 3; 1847 } 1848 1849 static void prop_vlen_get(Object *obj, Visitor *v, const char *name, 1850 void *opaque, Error **errp) 1851 { 1852 uint16_t value = RISCV_CPU(obj)->cfg.vlenb << 3; 1853 1854 visit_type_uint16(v, name, &value, errp); 1855 } 1856 1857 static const PropertyInfo prop_vlen = { 1858 .name = "vlen", 1859 .get = prop_vlen_get, 1860 .set = prop_vlen_set, 1861 }; 1862 1863 static void prop_elen_set(Object *obj, Visitor *v, const char *name, 1864 void *opaque, Error **errp) 1865 { 1866 RISCVCPU *cpu = RISCV_CPU(obj); 1867 uint16_t value; 1868 1869 if (!visit_type_uint16(v, name, &value, errp)) { 1870 return; 1871 } 1872 1873 if (!is_power_of_2(value)) { 1874 error_setg(errp, "Vector extension ELEN must be power of 2"); 1875 return; 1876 } 1877 1878 if (value != cpu->cfg.elen && 
riscv_cpu_is_vendor(obj)) { 1879 cpu_set_prop_err(cpu, name, errp); 1880 error_append_hint(errp, "Current '%s' val: %u\n", 1881 name, cpu->cfg.elen); 1882 return; 1883 } 1884 1885 cpu_option_add_user_setting(name, value); 1886 cpu->cfg.elen = value; 1887 } 1888 1889 static void prop_elen_get(Object *obj, Visitor *v, const char *name, 1890 void *opaque, Error **errp) 1891 { 1892 uint16_t value = RISCV_CPU(obj)->cfg.elen; 1893 1894 visit_type_uint16(v, name, &value, errp); 1895 } 1896 1897 static const PropertyInfo prop_elen = { 1898 .name = "elen", 1899 .get = prop_elen_get, 1900 .set = prop_elen_set, 1901 }; 1902 1903 static void prop_cbom_blksize_set(Object *obj, Visitor *v, const char *name, 1904 void *opaque, Error **errp) 1905 { 1906 RISCVCPU *cpu = RISCV_CPU(obj); 1907 uint16_t value; 1908 1909 if (!visit_type_uint16(v, name, &value, errp)) { 1910 return; 1911 } 1912 1913 if (value != cpu->cfg.cbom_blocksize && riscv_cpu_is_vendor(obj)) { 1914 cpu_set_prop_err(cpu, name, errp); 1915 error_append_hint(errp, "Current '%s' val: %u\n", 1916 name, cpu->cfg.cbom_blocksize); 1917 return; 1918 } 1919 1920 cpu_option_add_user_setting(name, value); 1921 cpu->cfg.cbom_blocksize = value; 1922 } 1923 1924 static void prop_cbom_blksize_get(Object *obj, Visitor *v, const char *name, 1925 void *opaque, Error **errp) 1926 { 1927 uint16_t value = RISCV_CPU(obj)->cfg.cbom_blocksize; 1928 1929 visit_type_uint16(v, name, &value, errp); 1930 } 1931 1932 static const PropertyInfo prop_cbom_blksize = { 1933 .name = "cbom_blocksize", 1934 .get = prop_cbom_blksize_get, 1935 .set = prop_cbom_blksize_set, 1936 }; 1937 1938 static void prop_cbop_blksize_set(Object *obj, Visitor *v, const char *name, 1939 void *opaque, Error **errp) 1940 { 1941 RISCVCPU *cpu = RISCV_CPU(obj); 1942 uint16_t value; 1943 1944 if (!visit_type_uint16(v, name, &value, errp)) { 1945 return; 1946 } 1947 1948 if (value != cpu->cfg.cbop_blocksize && riscv_cpu_is_vendor(obj)) { 1949 cpu_set_prop_err(cpu, name, errp); 
1950 error_append_hint(errp, "Current '%s' val: %u\n", 1951 name, cpu->cfg.cbop_blocksize); 1952 return; 1953 } 1954 1955 cpu_option_add_user_setting(name, value); 1956 cpu->cfg.cbop_blocksize = value; 1957 } 1958 1959 static void prop_cbop_blksize_get(Object *obj, Visitor *v, const char *name, 1960 void *opaque, Error **errp) 1961 { 1962 uint16_t value = RISCV_CPU(obj)->cfg.cbop_blocksize; 1963 1964 visit_type_uint16(v, name, &value, errp); 1965 } 1966 1967 static const PropertyInfo prop_cbop_blksize = { 1968 .name = "cbop_blocksize", 1969 .get = prop_cbop_blksize_get, 1970 .set = prop_cbop_blksize_set, 1971 }; 1972 1973 static void prop_cboz_blksize_set(Object *obj, Visitor *v, const char *name, 1974 void *opaque, Error **errp) 1975 { 1976 RISCVCPU *cpu = RISCV_CPU(obj); 1977 uint16_t value; 1978 1979 if (!visit_type_uint16(v, name, &value, errp)) { 1980 return; 1981 } 1982 1983 if (value != cpu->cfg.cboz_blocksize && riscv_cpu_is_vendor(obj)) { 1984 cpu_set_prop_err(cpu, name, errp); 1985 error_append_hint(errp, "Current '%s' val: %u\n", 1986 name, cpu->cfg.cboz_blocksize); 1987 return; 1988 } 1989 1990 cpu_option_add_user_setting(name, value); 1991 cpu->cfg.cboz_blocksize = value; 1992 } 1993 1994 static void prop_cboz_blksize_get(Object *obj, Visitor *v, const char *name, 1995 void *opaque, Error **errp) 1996 { 1997 uint16_t value = RISCV_CPU(obj)->cfg.cboz_blocksize; 1998 1999 visit_type_uint16(v, name, &value, errp); 2000 } 2001 2002 static const PropertyInfo prop_cboz_blksize = { 2003 .name = "cboz_blocksize", 2004 .get = prop_cboz_blksize_get, 2005 .set = prop_cboz_blksize_set, 2006 }; 2007 2008 static void prop_mvendorid_set(Object *obj, Visitor *v, const char *name, 2009 void *opaque, Error **errp) 2010 { 2011 bool dynamic_cpu = riscv_cpu_is_dynamic(obj); 2012 RISCVCPU *cpu = RISCV_CPU(obj); 2013 uint32_t prev_val = cpu->cfg.mvendorid; 2014 uint32_t value; 2015 2016 if (!visit_type_uint32(v, name, &value, errp)) { 2017 return; 2018 } 2019 2020 if 
(!dynamic_cpu && prev_val != value) { 2021 error_setg(errp, "Unable to change %s mvendorid (0x%x)", 2022 object_get_typename(obj), prev_val); 2023 return; 2024 } 2025 2026 cpu->cfg.mvendorid = value; 2027 } 2028 2029 static void prop_mvendorid_get(Object *obj, Visitor *v, const char *name, 2030 void *opaque, Error **errp) 2031 { 2032 uint32_t value = RISCV_CPU(obj)->cfg.mvendorid; 2033 2034 visit_type_uint32(v, name, &value, errp); 2035 } 2036 2037 static const PropertyInfo prop_mvendorid = { 2038 .name = "mvendorid", 2039 .get = prop_mvendorid_get, 2040 .set = prop_mvendorid_set, 2041 }; 2042 2043 static void prop_mimpid_set(Object *obj, Visitor *v, const char *name, 2044 void *opaque, Error **errp) 2045 { 2046 bool dynamic_cpu = riscv_cpu_is_dynamic(obj); 2047 RISCVCPU *cpu = RISCV_CPU(obj); 2048 uint64_t prev_val = cpu->cfg.mimpid; 2049 uint64_t value; 2050 2051 if (!visit_type_uint64(v, name, &value, errp)) { 2052 return; 2053 } 2054 2055 if (!dynamic_cpu && prev_val != value) { 2056 error_setg(errp, "Unable to change %s mimpid (0x%" PRIu64 ")", 2057 object_get_typename(obj), prev_val); 2058 return; 2059 } 2060 2061 cpu->cfg.mimpid = value; 2062 } 2063 2064 static void prop_mimpid_get(Object *obj, Visitor *v, const char *name, 2065 void *opaque, Error **errp) 2066 { 2067 uint64_t value = RISCV_CPU(obj)->cfg.mimpid; 2068 2069 visit_type_uint64(v, name, &value, errp); 2070 } 2071 2072 static const PropertyInfo prop_mimpid = { 2073 .name = "mimpid", 2074 .get = prop_mimpid_get, 2075 .set = prop_mimpid_set, 2076 }; 2077 2078 static void prop_marchid_set(Object *obj, Visitor *v, const char *name, 2079 void *opaque, Error **errp) 2080 { 2081 bool dynamic_cpu = riscv_cpu_is_dynamic(obj); 2082 RISCVCPU *cpu = RISCV_CPU(obj); 2083 uint64_t prev_val = cpu->cfg.marchid; 2084 uint64_t value, invalid_val; 2085 uint32_t mxlen = 0; 2086 2087 if (!visit_type_uint64(v, name, &value, errp)) { 2088 return; 2089 } 2090 2091 if (!dynamic_cpu && prev_val != value) { 2092 
error_setg(errp, "Unable to change %s marchid (0x%" PRIu64 ")", 2093 object_get_typename(obj), prev_val); 2094 return; 2095 } 2096 2097 switch (riscv_cpu_mxl(&cpu->env)) { 2098 case MXL_RV32: 2099 mxlen = 32; 2100 break; 2101 case MXL_RV64: 2102 case MXL_RV128: 2103 mxlen = 64; 2104 break; 2105 default: 2106 g_assert_not_reached(); 2107 } 2108 2109 invalid_val = 1LL << (mxlen - 1); 2110 2111 if (value == invalid_val) { 2112 error_setg(errp, "Unable to set marchid with MSB (%u) bit set " 2113 "and the remaining bits zero", mxlen); 2114 return; 2115 } 2116 2117 cpu->cfg.marchid = value; 2118 } 2119 2120 static void prop_marchid_get(Object *obj, Visitor *v, const char *name, 2121 void *opaque, Error **errp) 2122 { 2123 uint64_t value = RISCV_CPU(obj)->cfg.marchid; 2124 2125 visit_type_uint64(v, name, &value, errp); 2126 } 2127 2128 static const PropertyInfo prop_marchid = { 2129 .name = "marchid", 2130 .get = prop_marchid_get, 2131 .set = prop_marchid_set, 2132 }; 2133 2134 /* 2135 * RVA22U64 defines some 'named features' or 'synthetic extensions' 2136 * that are cache related: Za64rs, Zic64b, Ziccif, Ziccrse, Ziccamoa 2137 * and Zicclsm. We do not implement caching in QEMU so we'll consider 2138 * all these named features as always enabled. 2139 * 2140 * There's no riscv,isa update for them (nor for zic64b, despite it 2141 * having a cfg offset) at this moment. 
2142 */ 2143 static RISCVCPUProfile RVA22U64 = { 2144 .parent = NULL, 2145 .name = "rva22u64", 2146 .misa_ext = RVI | RVM | RVA | RVF | RVD | RVC | RVU, 2147 .priv_spec = RISCV_PROFILE_ATTR_UNUSED, 2148 .satp_mode = RISCV_PROFILE_ATTR_UNUSED, 2149 .ext_offsets = { 2150 CPU_CFG_OFFSET(ext_zicsr), CPU_CFG_OFFSET(ext_zihintpause), 2151 CPU_CFG_OFFSET(ext_zba), CPU_CFG_OFFSET(ext_zbb), 2152 CPU_CFG_OFFSET(ext_zbs), CPU_CFG_OFFSET(ext_zfhmin), 2153 CPU_CFG_OFFSET(ext_zkt), CPU_CFG_OFFSET(ext_zicntr), 2154 CPU_CFG_OFFSET(ext_zihpm), CPU_CFG_OFFSET(ext_zicbom), 2155 CPU_CFG_OFFSET(ext_zicbop), CPU_CFG_OFFSET(ext_zicboz), 2156 2157 /* mandatory named features for this profile */ 2158 CPU_CFG_OFFSET(zic64b), 2159 2160 RISCV_PROFILE_EXT_LIST_END 2161 } 2162 }; 2163 2164 /* 2165 * As with RVA22U64, RVA22S64 also defines 'named features'. 2166 * 2167 * Cache related features that we consider enabled since we don't 2168 * implement cache: Ssccptr 2169 * 2170 * Other named features that we already implement: Sstvecd, Sstvala, 2171 * Sscounterenw 2172 * 2173 * Named features that we need to enable: svade 2174 * 2175 * The remaining features/extensions comes from RVA22U64. 
2176 */ 2177 static RISCVCPUProfile RVA22S64 = { 2178 .parent = &RVA22U64, 2179 .name = "rva22s64", 2180 .misa_ext = RVS, 2181 .priv_spec = PRIV_VERSION_1_12_0, 2182 .satp_mode = VM_1_10_SV39, 2183 .ext_offsets = { 2184 /* rva22s64 exts */ 2185 CPU_CFG_OFFSET(ext_zifencei), CPU_CFG_OFFSET(ext_svpbmt), 2186 CPU_CFG_OFFSET(ext_svinval), 2187 2188 /* rva22s64 named features */ 2189 CPU_CFG_OFFSET(svade), 2190 2191 RISCV_PROFILE_EXT_LIST_END 2192 } 2193 }; 2194 2195 RISCVCPUProfile *riscv_profiles[] = { 2196 &RVA22U64, 2197 &RVA22S64, 2198 NULL, 2199 }; 2200 2201 static Property riscv_cpu_properties[] = { 2202 DEFINE_PROP_BOOL("debug", RISCVCPU, cfg.debug, true), 2203 2204 {.name = "pmu-mask", .info = &prop_pmu_mask}, 2205 {.name = "pmu-num", .info = &prop_pmu_num}, /* Deprecated */ 2206 2207 {.name = "mmu", .info = &prop_mmu}, 2208 {.name = "pmp", .info = &prop_pmp}, 2209 2210 {.name = "priv_spec", .info = &prop_priv_spec}, 2211 {.name = "vext_spec", .info = &prop_vext_spec}, 2212 2213 {.name = "vlen", .info = &prop_vlen}, 2214 {.name = "elen", .info = &prop_elen}, 2215 2216 {.name = "cbom_blocksize", .info = &prop_cbom_blksize}, 2217 {.name = "cbop_blocksize", .info = &prop_cbop_blksize}, 2218 {.name = "cboz_blocksize", .info = &prop_cboz_blksize}, 2219 2220 {.name = "mvendorid", .info = &prop_mvendorid}, 2221 {.name = "mimpid", .info = &prop_mimpid}, 2222 {.name = "marchid", .info = &prop_marchid}, 2223 2224 #ifndef CONFIG_USER_ONLY 2225 DEFINE_PROP_UINT64("resetvec", RISCVCPU, env.resetvec, DEFAULT_RSTVEC), 2226 #endif 2227 2228 DEFINE_PROP_BOOL("short-isa-string", RISCVCPU, cfg.short_isa_string, false), 2229 2230 DEFINE_PROP_BOOL("rvv_ta_all_1s", RISCVCPU, cfg.rvv_ta_all_1s, false), 2231 DEFINE_PROP_BOOL("rvv_ma_all_1s", RISCVCPU, cfg.rvv_ma_all_1s, false), 2232 2233 /* 2234 * write_misa() is marked as experimental for now so mark 2235 * it with -x and default to 'false'. 
     */
    DEFINE_PROP_BOOL("x-misa-w", RISCVCPU, cfg.misa_w, false),
    DEFINE_PROP_END_OF_LIST(),
};

#if defined(TARGET_RISCV64)
/*
 * Profile CPUs are rv64i bare CPUs with the corresponding global
 * profile marked enabled; the profile's extension set is applied
 * elsewhere (not visible in this file chunk — presumably during
 * realize/finalize).
 */
static void rva22u64_profile_cpu_init(Object *obj)
{
    rv64i_bare_cpu_init(obj);

    RVA22U64.enabled = true;
}

static void rva22s64_profile_cpu_init(Object *obj)
{
    rv64i_bare_cpu_init(obj);

    RVA22S64.enabled = true;
}
#endif

/*
 * Architecture name reported to gdb. Note that RV128 is presented
 * to gdb as rv64.
 */
static const gchar *riscv_gdb_arch_name(CPUState *cs)
{
    RISCVCPU *cpu = RISCV_CPU(cs);
    CPURISCVState *env = &cpu->env;

    switch (riscv_cpu_mxl(env)) {
    case MXL_RV32:
        return "riscv:rv32";
    case MXL_RV64:
    case MXL_RV128:
        return "riscv:rv64";
    default:
        g_assert_not_reached();
    }
}

/*
 * Serve the per-CPU generated gdb XML descriptions (CSRs and vector
 * registers); returns NULL for any other requested XML name.
 */
static const char *riscv_gdb_get_dynamic_xml(CPUState *cs, const char *xmlname)
{
    RISCVCPU *cpu = RISCV_CPU(cs);

    if (strcmp(xmlname, "riscv-csr.xml") == 0) {
        return cpu->dyn_csr_xml;
    } else if (strcmp(xmlname, "riscv-vector.xml") == 0) {
        return cpu->dyn_vreg_xml;
    }

    return NULL;
}

#ifndef CONFIG_USER_ONLY
/* The architectural CPU id is the hart id */
static int64_t riscv_get_arch_id(CPUState *cs)
{
    RISCVCPU *cpu = RISCV_CPU(cs);

    return cpu->env.mhartid;
}

#include "hw/core/sysemu-cpu-ops.h"

/* System-emulation-only hooks (debug page walk, ELF notes, migration) */
static const struct SysemuCPUOps riscv_sysemu_ops = {
    .get_phys_page_debug = riscv_cpu_get_phys_page_debug,
    .write_elf64_note = riscv_cpu_write_elf64_note,
    .write_elf32_note = riscv_cpu_write_elf32_note,
    .legacy_vmsd = &vmstate_riscv_cpu,
};
#endif

/*
 * Class init shared by every RISC-V CPU model: wires the CPUClass,
 * DeviceClass and ResettableClass virtual methods and registers the
 * common property list.
 */
static void riscv_cpu_common_class_init(ObjectClass *c, void *data)
{
    RISCVCPUClass *mcc = RISCV_CPU_CLASS(c);
    CPUClass *cc = CPU_CLASS(c);
    DeviceClass *dc = DEVICE_CLASS(c);
    ResettableClass *rc = RESETTABLE_CLASS(c);

    device_class_set_parent_realize(dc, riscv_cpu_realize,
                                    &mcc->parent_realize);

    resettable_class_set_parent_phases(rc, NULL, riscv_cpu_reset_hold, NULL,
                                       &mcc->parent_phases);

    cc->class_by_name = riscv_cpu_class_by_name;
    cc->has_work = riscv_cpu_has_work;
    cc->mmu_index = riscv_cpu_mmu_index;
    cc->dump_state = riscv_cpu_dump_state;
    cc->set_pc = riscv_cpu_set_pc;
    cc->get_pc = riscv_cpu_get_pc;
    cc->gdb_read_register = riscv_cpu_gdb_read_register;
    cc->gdb_write_register = riscv_cpu_gdb_write_register;
    /* 32 GPRs + pc */
    cc->gdb_num_core_regs = 33;
    cc->gdb_stop_before_watchpoint = true;
    cc->disas_set_info = riscv_cpu_disas_set_info;
#ifndef CONFIG_USER_ONLY
    cc->sysemu_ops = &riscv_sysemu_ops;
    cc->get_arch_id = riscv_get_arch_id;
#endif
    cc->gdb_arch_name = riscv_gdb_arch_name;
    cc->gdb_get_dynamic_xml = riscv_gdb_get_dynamic_xml;

    device_class_set_props(dc, riscv_cpu_properties);
}

/*
 * Per-model class init: 'data' carries the model's maximum MXL
 * (see the DEFINE_*_CPU macros' .class_data), validated here.
 */
static void riscv_cpu_class_init(ObjectClass *c, void *data)
{
    RISCVCPUClass *mcc = RISCV_CPU_CLASS(c);

    mcc->misa_mxl_max = (uint32_t)(uintptr_t)data;
    riscv_cpu_validate_misa_mxl(mcc);
}

/*
 * Append "_<ext>" to *isa_str for every enabled multi-letter
 * extension, reallocating the string each time. On return *isa_str
 * points to the (possibly new) heap string; intermediate strings are
 * freed. NOTE(review): max_str_len is not used in this body.
 */
static void riscv_isa_string_ext(RISCVCPU *cpu, char **isa_str,
                                 int max_str_len)
{
    const RISCVIsaExtData *edata;
    char *old = *isa_str;
    char *new = *isa_str;

    for (edata = isa_edata_arr; edata && edata->name; edata++) {
        if (isa_ext_is_enabled(cpu, edata->ext_enable_offset)) {
            new = g_strconcat(old, "_", edata->name, NULL);
            g_free(old);
            old = new;
        }
    }

    *isa_str = new;
}

/*
 * Build the "riscv,isa"-style string, e.g. "rv64imafdc_zicsr...".
 * Starts with "rv<xlen>" plus the enabled single-letter extensions;
 * unless cfg.short_isa_string is set, multi-letter extensions are
 * appended by riscv_isa_string_ext(). Caller frees the result.
 */
char *riscv_isa_string(RISCVCPU *cpu)
{
    RISCVCPUClass *mcc = RISCV_CPU_GET_CLASS(cpu);
    int i;
    /* "rv128" is the longest prefix; the rest bounds the letters + NUL */
    const size_t maxlen = sizeof("rv128") + sizeof(riscv_single_letter_exts);
    char *isa_str = g_new(char, maxlen);
    int xlen = riscv_cpu_max_xlen(mcc);
    char *p = isa_str + snprintf(isa_str, maxlen, "rv%d", xlen);

    for (i = 0; i < sizeof(riscv_single_letter_exts) - 1; i++) {
        if (cpu->env.misa_ext & RV(riscv_single_letter_exts[i])) {
            *p++ = qemu_tolower(riscv_single_letter_exts[i]);
        }
    }
    *p = '\0';
    if (!cpu->cfg.short_isa_string) {
        riscv_isa_string_ext(cpu, &isa_str, maxlen);
    }
    return isa_str;
}

#ifndef CONFIG_USER_ONLY
/*
 * Return a newly-allocated array of newly-allocated extension-name
 * strings (single-letter names lowercased, then multi-letter names),
 * incrementing *count for each entry. Caller must pass *count == 0
 * and free both the strings and the array (see riscv_isa_write_fdt).
 */
static char **riscv_isa_extensions_list(RISCVCPU *cpu, int *count)
{
    /* Upper bound: both arrays include a terminator/sentinel entry */
    int maxlen = ARRAY_SIZE(riscv_single_letter_exts) + ARRAY_SIZE(isa_edata_arr);
    char **extensions = g_new(char *, maxlen);

    for (int i = 0; i < sizeof(riscv_single_letter_exts) - 1; i++) {
        if (cpu->env.misa_ext & RV(riscv_single_letter_exts[i])) {
            extensions[*count] = g_new(char, 2);
            snprintf(extensions[*count], 2, "%c",
                     qemu_tolower(riscv_single_letter_exts[i]));
            (*count)++;
        }
    }

    for (const RISCVIsaExtData *edata = isa_edata_arr; edata->name; edata++) {
        if (isa_ext_is_enabled(cpu, edata->ext_enable_offset)) {
            extensions[*count] = g_strdup(edata->name);
            (*count)++;
        }
    }

    return extensions;
}

/*
 * Populate the device-tree CPU node with the "riscv,isa",
 * "riscv,isa-base" and "riscv,isa-extensions" properties.
 */
void riscv_isa_write_fdt(RISCVCPU *cpu, void *fdt, char *nodename)
{
    RISCVCPUClass *mcc = RISCV_CPU_GET_CLASS(cpu);
    const size_t maxlen = sizeof("rv128i");
    g_autofree char *isa_base = g_new(char, maxlen);
    g_autofree char *riscv_isa;
    char **isa_extensions;
    int count = 0;
    int xlen = riscv_cpu_max_xlen(mcc);

    riscv_isa = riscv_isa_string(cpu);
    qemu_fdt_setprop_string(fdt, nodename, "riscv,isa", riscv_isa);

    snprintf(isa_base, maxlen, "rv%di", xlen);
    qemu_fdt_setprop_string(fdt, nodename, "riscv,isa-base", isa_base);

    isa_extensions = riscv_isa_extensions_list(cpu, &count);
    qemu_fdt_setprop_string_array(fdt, nodename, "riscv,isa-extensions",
                                  isa_extensions, count);

    /* riscv_isa_extensions_list transferred ownership to us */
    for (int i = 0; i < count; i++) {
        g_free(isa_extensions[i]);
    }

    g_free(isa_extensions);
}
#endif

/*
 * Each DEFINE_*_CPU macro expands to a TypeInfo entry. They differ
 * only in the QOM parent type; 'misa_mxl_max' is smuggled through
 * .class_data into riscv_cpu_class_init().
 */
#define DEFINE_CPU(type_name, misa_mxl_max, initfn) \
    { \
        .name = (type_name), \
        .parent = TYPE_RISCV_CPU, \
        .instance_init = (initfn), \
        .class_init = riscv_cpu_class_init, \
        .class_data = (void *)(misa_mxl_max) \
    }

#define DEFINE_DYNAMIC_CPU(type_name, misa_mxl_max, initfn) \
    { \
        .name = (type_name), \
        .parent = TYPE_RISCV_DYNAMIC_CPU, \
        .instance_init = (initfn), \
        .class_init = riscv_cpu_class_init, \
        .class_data = (void *)(misa_mxl_max) \
    }

#define DEFINE_VENDOR_CPU(type_name, misa_mxl_max, initfn) \
    { \
        .name = (type_name), \
        .parent = TYPE_RISCV_VENDOR_CPU, \
        .instance_init = (initfn), \
        .class_init = riscv_cpu_class_init, \
        .class_data = (void *)(misa_mxl_max) \
    }

#define DEFINE_BARE_CPU(type_name, misa_mxl_max, initfn) \
    { \
        .name = (type_name), \
        .parent = TYPE_RISCV_BARE_CPU, \
        .instance_init = (initfn), \
        .class_init = riscv_cpu_class_init, \
        .class_data = (void *)(misa_mxl_max) \
    }

/* Profile CPUs share the bare-CPU parent type */
#define DEFINE_PROFILE_CPU(type_name, misa_mxl_max, initfn) \
    { \
        .name = (type_name), \
        .parent = TYPE_RISCV_BARE_CPU, \
        .instance_init = (initfn), \
        .class_init = riscv_cpu_class_init, \
        .class_data = (void *)(misa_mxl_max) \
    }

/*
 * Type table: the abstract base/dynamic/vendor/bare types first,
 * then the concrete models for the configured target XLEN.
 */
static const TypeInfo riscv_cpu_type_infos[] = {
    {
        .name = TYPE_RISCV_CPU,
        .parent = TYPE_CPU,
        .instance_size = sizeof(RISCVCPU),
        .instance_align = __alignof(RISCVCPU),
        .instance_init = riscv_cpu_init,
        .instance_post_init = riscv_cpu_post_init,
        .abstract = true,
        .class_size = sizeof(RISCVCPUClass),
        .class_init = riscv_cpu_common_class_init,
    },
    {
        .name = TYPE_RISCV_DYNAMIC_CPU,
        .parent = TYPE_RISCV_CPU,
        .abstract = true,
    },
    {
        .name = TYPE_RISCV_VENDOR_CPU,
        .parent = TYPE_RISCV_CPU,
        .abstract = true,
    },
    {
        .name = TYPE_RISCV_BARE_CPU,
        .parent = TYPE_RISCV_CPU,
        .abstract = true,
    },
#if defined(TARGET_RISCV32)
    DEFINE_DYNAMIC_CPU(TYPE_RISCV_CPU_ANY, MXL_RV32, riscv_any_cpu_init),
    DEFINE_DYNAMIC_CPU(TYPE_RISCV_CPU_MAX, MXL_RV32, riscv_max_cpu_init),
    DEFINE_DYNAMIC_CPU(TYPE_RISCV_CPU_BASE32, MXL_RV32, rv32_base_cpu_init),
    DEFINE_VENDOR_CPU(TYPE_RISCV_CPU_IBEX, MXL_RV32, rv32_ibex_cpu_init),
    DEFINE_VENDOR_CPU(TYPE_RISCV_CPU_SIFIVE_E31, MXL_RV32, rv32_sifive_e_cpu_init),
    DEFINE_VENDOR_CPU(TYPE_RISCV_CPU_SIFIVE_E34, MXL_RV32, rv32_imafcu_nommu_cpu_init),
    DEFINE_VENDOR_CPU(TYPE_RISCV_CPU_SIFIVE_U34, MXL_RV32, rv32_sifive_u_cpu_init),
#elif defined(TARGET_RISCV64)
    DEFINE_DYNAMIC_CPU(TYPE_RISCV_CPU_ANY, MXL_RV64, riscv_any_cpu_init),
    DEFINE_DYNAMIC_CPU(TYPE_RISCV_CPU_MAX, MXL_RV64, riscv_max_cpu_init),
    DEFINE_DYNAMIC_CPU(TYPE_RISCV_CPU_BASE64, MXL_RV64, rv64_base_cpu_init),
    DEFINE_VENDOR_CPU(TYPE_RISCV_CPU_SIFIVE_E51, MXL_RV64, rv64_sifive_e_cpu_init),
    DEFINE_VENDOR_CPU(TYPE_RISCV_CPU_SIFIVE_U54, MXL_RV64, rv64_sifive_u_cpu_init),
    DEFINE_VENDOR_CPU(TYPE_RISCV_CPU_SHAKTI_C, MXL_RV64, rv64_sifive_u_cpu_init),
    DEFINE_VENDOR_CPU(TYPE_RISCV_CPU_THEAD_C906, MXL_RV64, rv64_thead_c906_cpu_init),
    DEFINE_VENDOR_CPU(TYPE_RISCV_CPU_VEYRON_V1, MXL_RV64, rv64_veyron_v1_cpu_init),
    DEFINE_DYNAMIC_CPU(TYPE_RISCV_CPU_BASE128, MXL_RV128, rv128_base_cpu_init),
    DEFINE_BARE_CPU(TYPE_RISCV_CPU_RV64I, MXL_RV64, rv64i_bare_cpu_init),
    DEFINE_PROFILE_CPU(TYPE_RISCV_CPU_RVA22U64, MXL_RV64, rva22u64_profile_cpu_init),
    DEFINE_PROFILE_CPU(TYPE_RISCV_CPU_RVA22S64, MXL_RV64, rva22s64_profile_cpu_init),
#endif
};

DEFINE_TYPES(riscv_cpu_type_infos)