/*
 * QEMU RISC-V CPU
 *
 * Copyright (c) 2016-2017 Sagar Karandikar, sagark@eecs.berkeley.edu
 * Copyright (c) 2017-2018 SiFive, Inc.
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms and conditions of the GNU General Public License,
 * version 2 or later, as published by the Free Software Foundation.
 *
 * This program is distributed in the hope it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
 * more details.
 *
 * You should have received a copy of the GNU General Public License along with
 * this program. If not, see <http://www.gnu.org/licenses/>.
 */

#include "qemu/osdep.h"
#include "qemu/qemu-print.h"
#include "qemu/ctype.h"
#include "qemu/log.h"
#include "cpu.h"
#include "cpu_vendorid.h"
#include "internals.h"
#include "exec/exec-all.h"
#include "qapi/error.h"
#include "qapi/visitor.h"
#include "qemu/error-report.h"
#include "hw/qdev-properties.h"
#include "hw/core/qdev-prop-internal.h"
#include "migration/vmstate.h"
#include "fpu/softfloat-helpers.h"
#include "sysemu/device_tree.h"
#include "sysemu/kvm.h"
#include "sysemu/tcg.h"
#include "kvm/kvm_riscv.h"
#include "tcg/tcg-cpu.h"
#include "tcg/tcg.h"

/* RISC-V CPU definitions */
static const char riscv_single_letter_exts[] = "IEMAFDQCBPVH";
const uint32_t misa_bits[] = {RVI, RVE, RVM, RVA, RVF, RVD, RVV,
                              RVC, RVS, RVU, RVH, RVJ, RVG, RVB, 0};

/*
 * From vector_helper.c
 * Note that vector data is stored in host-endian 64-bit chunks,
 * so addressing bytes needs a host-endian fixup.
 */
#if HOST_BIG_ENDIAN
#define BYTE(x)   ((x) ^ 7)
#else
#define BYTE(x)   (x)
#endif

bool riscv_cpu_is_32bit(RISCVCPU *cpu)
{
    return riscv_cpu_mxl(&cpu->env) == MXL_RV32;
}

/* Hash that stores general user set numeric options */
static GHashTable *general_user_opts;

static void cpu_option_add_user_setting(const char *optname, uint32_t value)
{
    g_hash_table_insert(general_user_opts, (gpointer)optname,
                        GUINT_TO_POINTER(value));
}

bool riscv_cpu_option_set(const char *optname)
{
    return g_hash_table_contains(general_user_opts, optname);
}

#define ISA_EXT_DATA_ENTRY(_name, _min_ver, _prop) \
    {#_name, _min_ver, CPU_CFG_OFFSET(_prop)}

/*
 * Here are the ordering rules of extension naming defined by RISC-V
 * specification :
 * 1. All extensions should be separated from other multi-letter extensions
 *    by an underscore.
 * 2. The first letter following the 'Z' conventionally indicates the most
 *    closely related alphabetical extension category, IMAFDQLCBKJTPVH.
 *    If multiple 'Z' extensions are named, they should be ordered first
 *    by category, then alphabetically within a category.
 * 3. Standard supervisor-level extensions (starts with 'S') should be
 *    listed after standard unprivileged extensions. If multiple
 *    supervisor-level extensions are listed, they should be ordered
 *    alphabetically.
 * 4. Non-standard extensions (starts with 'X') must be listed after all
 *    standard extensions. They must be separated from other multi-letter
 *    extensions by an underscore.
 *
 * Single letter extensions are checked in riscv_cpu_validate_misa_priv()
 * instead.
 */
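/*
 * As an illustration of these rules, an ISA string built from the table
 * below would be laid out like:
 *
 *   rv64imafdc_zicsr_zifencei_zawrs_zba_zbb_ssaia_sstc_xtheadba
 *
 * i.e. single-letter extensions first, multi-letter 'Z' extensions ordered
 * by category and then alphabetically within a category, 'S' extensions
 * next, and vendor 'X' extensions last.
 */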
const RISCVIsaExtData isa_edata_arr[] = {
    ISA_EXT_DATA_ENTRY(zicbom, PRIV_VERSION_1_12_0, ext_zicbom),
    ISA_EXT_DATA_ENTRY(zicbop, PRIV_VERSION_1_12_0, ext_zicbop),
    ISA_EXT_DATA_ENTRY(zicboz, PRIV_VERSION_1_12_0, ext_zicboz),
    ISA_EXT_DATA_ENTRY(zicond, PRIV_VERSION_1_12_0, ext_zicond),
    ISA_EXT_DATA_ENTRY(zicntr, PRIV_VERSION_1_12_0, ext_zicntr),
    ISA_EXT_DATA_ENTRY(zicsr, PRIV_VERSION_1_10_0, ext_zicsr),
    ISA_EXT_DATA_ENTRY(zifencei, PRIV_VERSION_1_10_0, ext_zifencei),
    ISA_EXT_DATA_ENTRY(zihintntl, PRIV_VERSION_1_10_0, ext_zihintntl),
    ISA_EXT_DATA_ENTRY(zihintpause, PRIV_VERSION_1_10_0, ext_zihintpause),
    ISA_EXT_DATA_ENTRY(zihpm, PRIV_VERSION_1_12_0, ext_zihpm),
    ISA_EXT_DATA_ENTRY(zmmul, PRIV_VERSION_1_12_0, ext_zmmul),
    ISA_EXT_DATA_ENTRY(zaamo, PRIV_VERSION_1_12_0, ext_zaamo),
    ISA_EXT_DATA_ENTRY(zacas, PRIV_VERSION_1_12_0, ext_zacas),
    ISA_EXT_DATA_ENTRY(zalrsc, PRIV_VERSION_1_12_0, ext_zalrsc),
    ISA_EXT_DATA_ENTRY(zawrs, PRIV_VERSION_1_12_0, ext_zawrs),
    ISA_EXT_DATA_ENTRY(zfa, PRIV_VERSION_1_12_0, ext_zfa),
    ISA_EXT_DATA_ENTRY(zfbfmin, PRIV_VERSION_1_12_0, ext_zfbfmin),
    ISA_EXT_DATA_ENTRY(zfh, PRIV_VERSION_1_11_0, ext_zfh),
    ISA_EXT_DATA_ENTRY(zfhmin, PRIV_VERSION_1_11_0, ext_zfhmin),
    ISA_EXT_DATA_ENTRY(zfinx, PRIV_VERSION_1_12_0, ext_zfinx),
    ISA_EXT_DATA_ENTRY(zdinx, PRIV_VERSION_1_12_0, ext_zdinx),
    ISA_EXT_DATA_ENTRY(zca, PRIV_VERSION_1_12_0, ext_zca),
    ISA_EXT_DATA_ENTRY(zcb, PRIV_VERSION_1_12_0, ext_zcb),
    ISA_EXT_DATA_ENTRY(zcf, PRIV_VERSION_1_12_0, ext_zcf),
    ISA_EXT_DATA_ENTRY(zcd, PRIV_VERSION_1_12_0, ext_zcd),
    ISA_EXT_DATA_ENTRY(zce, PRIV_VERSION_1_12_0, ext_zce),
    ISA_EXT_DATA_ENTRY(zcmp, PRIV_VERSION_1_12_0, ext_zcmp),
    ISA_EXT_DATA_ENTRY(zcmt, PRIV_VERSION_1_12_0, ext_zcmt),
    ISA_EXT_DATA_ENTRY(zba, PRIV_VERSION_1_12_0, ext_zba),
    ISA_EXT_DATA_ENTRY(zbb, PRIV_VERSION_1_12_0, ext_zbb),
    ISA_EXT_DATA_ENTRY(zbc, PRIV_VERSION_1_12_0, ext_zbc),
    ISA_EXT_DATA_ENTRY(zbkb, PRIV_VERSION_1_12_0, ext_zbkb),
    ISA_EXT_DATA_ENTRY(zbkc, PRIV_VERSION_1_12_0, ext_zbkc),
    ISA_EXT_DATA_ENTRY(zbkx, PRIV_VERSION_1_12_0, ext_zbkx),
    ISA_EXT_DATA_ENTRY(zbs, PRIV_VERSION_1_12_0, ext_zbs),
    ISA_EXT_DATA_ENTRY(zk, PRIV_VERSION_1_12_0, ext_zk),
    ISA_EXT_DATA_ENTRY(zkn, PRIV_VERSION_1_12_0, ext_zkn),
    ISA_EXT_DATA_ENTRY(zknd, PRIV_VERSION_1_12_0, ext_zknd),
    ISA_EXT_DATA_ENTRY(zkne, PRIV_VERSION_1_12_0, ext_zkne),
    ISA_EXT_DATA_ENTRY(zknh, PRIV_VERSION_1_12_0, ext_zknh),
    ISA_EXT_DATA_ENTRY(zkr, PRIV_VERSION_1_12_0, ext_zkr),
    ISA_EXT_DATA_ENTRY(zks, PRIV_VERSION_1_12_0, ext_zks),
    ISA_EXT_DATA_ENTRY(zksed, PRIV_VERSION_1_12_0, ext_zksed),
    ISA_EXT_DATA_ENTRY(zksh, PRIV_VERSION_1_12_0, ext_zksh),
    ISA_EXT_DATA_ENTRY(zkt, PRIV_VERSION_1_12_0, ext_zkt),
    ISA_EXT_DATA_ENTRY(zvbb, PRIV_VERSION_1_12_0, ext_zvbb),
    ISA_EXT_DATA_ENTRY(zvbc, PRIV_VERSION_1_12_0, ext_zvbc),
    ISA_EXT_DATA_ENTRY(zve32f, PRIV_VERSION_1_10_0, ext_zve32f),
    ISA_EXT_DATA_ENTRY(zve64f, PRIV_VERSION_1_10_0, ext_zve64f),
    ISA_EXT_DATA_ENTRY(zve64d, PRIV_VERSION_1_10_0, ext_zve64d),
    ISA_EXT_DATA_ENTRY(zvfbfmin, PRIV_VERSION_1_12_0, ext_zvfbfmin),
    ISA_EXT_DATA_ENTRY(zvfbfwma, PRIV_VERSION_1_12_0, ext_zvfbfwma),
    ISA_EXT_DATA_ENTRY(zvfh, PRIV_VERSION_1_12_0, ext_zvfh),
    ISA_EXT_DATA_ENTRY(zvfhmin, PRIV_VERSION_1_12_0, ext_zvfhmin),
    ISA_EXT_DATA_ENTRY(zvkb, PRIV_VERSION_1_12_0, ext_zvkb),
    ISA_EXT_DATA_ENTRY(zvkg, PRIV_VERSION_1_12_0, ext_zvkg),
    ISA_EXT_DATA_ENTRY(zvkn, PRIV_VERSION_1_12_0, ext_zvkn),
    ISA_EXT_DATA_ENTRY(zvknc, PRIV_VERSION_1_12_0, ext_zvknc),
    ISA_EXT_DATA_ENTRY(zvkned, PRIV_VERSION_1_12_0, ext_zvkned),
    ISA_EXT_DATA_ENTRY(zvkng, PRIV_VERSION_1_12_0, ext_zvkng),
    ISA_EXT_DATA_ENTRY(zvknha, PRIV_VERSION_1_12_0, ext_zvknha),
    ISA_EXT_DATA_ENTRY(zvknhb, PRIV_VERSION_1_12_0, ext_zvknhb),
    ISA_EXT_DATA_ENTRY(zvks, PRIV_VERSION_1_12_0, ext_zvks),
    ISA_EXT_DATA_ENTRY(zvksc, PRIV_VERSION_1_12_0, ext_zvksc),
    ISA_EXT_DATA_ENTRY(zvksed, PRIV_VERSION_1_12_0, ext_zvksed),
    ISA_EXT_DATA_ENTRY(zvksg, PRIV_VERSION_1_12_0, ext_zvksg),
    ISA_EXT_DATA_ENTRY(zvksh, PRIV_VERSION_1_12_0, ext_zvksh),
    ISA_EXT_DATA_ENTRY(zvkt, PRIV_VERSION_1_12_0, ext_zvkt),
    ISA_EXT_DATA_ENTRY(zhinx, PRIV_VERSION_1_12_0, ext_zhinx),
    ISA_EXT_DATA_ENTRY(zhinxmin, PRIV_VERSION_1_12_0, ext_zhinxmin),
    ISA_EXT_DATA_ENTRY(smaia, PRIV_VERSION_1_12_0, ext_smaia),
    ISA_EXT_DATA_ENTRY(smepmp, PRIV_VERSION_1_12_0, ext_smepmp),
    ISA_EXT_DATA_ENTRY(smstateen, PRIV_VERSION_1_12_0, ext_smstateen),
    ISA_EXT_DATA_ENTRY(ssaia, PRIV_VERSION_1_12_0, ext_ssaia),
    ISA_EXT_DATA_ENTRY(sscofpmf, PRIV_VERSION_1_12_0, ext_sscofpmf),
    ISA_EXT_DATA_ENTRY(sstc, PRIV_VERSION_1_12_0, ext_sstc),
    ISA_EXT_DATA_ENTRY(svadu, PRIV_VERSION_1_12_0, ext_svadu),
    ISA_EXT_DATA_ENTRY(svinval, PRIV_VERSION_1_12_0, ext_svinval),
    ISA_EXT_DATA_ENTRY(svnapot, PRIV_VERSION_1_12_0, ext_svnapot),
    ISA_EXT_DATA_ENTRY(svpbmt, PRIV_VERSION_1_12_0, ext_svpbmt),
    ISA_EXT_DATA_ENTRY(xtheadba, PRIV_VERSION_1_11_0, ext_xtheadba),
    ISA_EXT_DATA_ENTRY(xtheadbb, PRIV_VERSION_1_11_0, ext_xtheadbb),
    ISA_EXT_DATA_ENTRY(xtheadbs, PRIV_VERSION_1_11_0, ext_xtheadbs),
    ISA_EXT_DATA_ENTRY(xtheadcmo, PRIV_VERSION_1_11_0, ext_xtheadcmo),
    ISA_EXT_DATA_ENTRY(xtheadcondmov, PRIV_VERSION_1_11_0, ext_xtheadcondmov),
    ISA_EXT_DATA_ENTRY(xtheadfmemidx, PRIV_VERSION_1_11_0, ext_xtheadfmemidx),
    ISA_EXT_DATA_ENTRY(xtheadfmv, PRIV_VERSION_1_11_0, ext_xtheadfmv),
    ISA_EXT_DATA_ENTRY(xtheadmac, PRIV_VERSION_1_11_0, ext_xtheadmac),
    ISA_EXT_DATA_ENTRY(xtheadmemidx, PRIV_VERSION_1_11_0, ext_xtheadmemidx),
    ISA_EXT_DATA_ENTRY(xtheadmempair, PRIV_VERSION_1_11_0, ext_xtheadmempair),
    ISA_EXT_DATA_ENTRY(xtheadsync, PRIV_VERSION_1_11_0, ext_xtheadsync),
    ISA_EXT_DATA_ENTRY(xventanacondops, PRIV_VERSION_1_12_0, ext_XVentanaCondOps),

    DEFINE_PROP_END_OF_LIST(),
};

bool isa_ext_is_enabled(RISCVCPU *cpu, uint32_t ext_offset)
{
    bool *ext_enabled = (void *)&cpu->cfg + ext_offset;

    return *ext_enabled;
}

void isa_ext_update_enabled(RISCVCPU *cpu, uint32_t ext_offset, bool en)
{
    bool *ext_enabled = (void *)&cpu->cfg + ext_offset;

    *ext_enabled = en;
}

bool riscv_cpu_is_vendor(Object *cpu_obj)
{
    return object_dynamic_cast(cpu_obj, TYPE_RISCV_VENDOR_CPU) != NULL;
}

const char * const riscv_int_regnames[] = {
    "x0/zero", "x1/ra",  "x2/sp",  "x3/gp",  "x4/tp",  "x5/t0",   "x6/t1",
    "x7/t2",   "x8/s0",  "x9/s1",  "x10/a0", "x11/a1", "x12/a2",  "x13/a3",
    "x14/a4",  "x15/a5", "x16/a6", "x17/a7", "x18/s2", "x19/s3",  "x20/s4",
    "x21/s5",  "x22/s6", "x23/s7", "x24/s8", "x25/s9", "x26/s10", "x27/s11",
    "x28/t3",  "x29/t4", "x30/t5", "x31/t6"
};

const char * const riscv_int_regnamesh[] = {
    "x0h/zeroh", "x1h/rah",  "x2h/sph",   "x3h/gph",   "x4h/tph",  "x5h/t0h",
    "x6h/t1h",   "x7h/t2h",  "x8h/s0h",   "x9h/s1h",   "x10h/a0h", "x11h/a1h",
    "x12h/a2h",  "x13h/a3h", "x14h/a4h",  "x15h/a5h",  "x16h/a6h", "x17h/a7h",
    "x18h/s2h",  "x19h/s3h", "x20h/s4h",  "x21h/s5h",  "x22h/s6h", "x23h/s7h",
    "x24h/s8h",  "x25h/s9h", "x26h/s10h", "x27h/s11h", "x28h/t3h", "x29h/t4h",
    "x30h/t5h",  "x31h/t6h"
};

const char * const riscv_fpr_regnames[] = {
    "f0/ft0",   "f1/ft1",  "f2/ft2",   "f3/ft3",   "f4/ft4",  "f5/ft5",
    "f6/ft6",   "f7/ft7",  "f8/fs0",   "f9/fs1",   "f10/fa0", "f11/fa1",
    "f12/fa2",  "f13/fa3", "f14/fa4",  "f15/fa5",  "f16/fa6", "f17/fa7",
    "f18/fs2",  "f19/fs3", "f20/fs4",  "f21/fs5",  "f22/fs6", "f23/fs7",
    "f24/fs8",  "f25/fs9", "f26/fs10", "f27/fs11", "f28/ft8", "f29/ft9",
    "f30/ft10", "f31/ft11"
};

const char * const riscv_rvv_regnames[] = {
    "v0",  "v1",  "v2",  "v3",  "v4",  "v5",  "v6",
    "v7",  "v8",  "v9",  "v10", "v11", "v12", "v13",
    "v14", "v15", "v16", "v17", "v18", "v19", "v20",
    "v21", "v22", "v23", "v24", "v25", "v26", "v27",
    "v28", "v29", "v30", "v31"
};

static const char * const riscv_excp_names[] = {
    "misaligned_fetch",
    "fault_fetch",
    "illegal_instruction",
    "breakpoint",
    "misaligned_load",
    "fault_load",
    "misaligned_store",
    "fault_store",
    "user_ecall",
    "supervisor_ecall",
    "hypervisor_ecall",
    "machine_ecall",
    "exec_page_fault",
    "load_page_fault",
    "reserved",
    "store_page_fault",
    "reserved",
    "reserved",
    "reserved",
    "reserved",
    "guest_exec_page_fault",
    "guest_load_page_fault",
    "reserved",
    "guest_store_page_fault",
};

static const char * const riscv_intr_names[] = {
    "u_software",
    "s_software",
    "vs_software",
    "m_software",
    "u_timer",
    "s_timer",
    "vs_timer",
    "m_timer",
    "u_external",
    "s_external",
    "vs_external",
    "m_external",
    "reserved",
    "reserved",
    "reserved",
    "reserved"
};

const char *riscv_cpu_get_trap_name(target_ulong cause, bool async)
{
    if (async) {
        return (cause < ARRAY_SIZE(riscv_intr_names)) ?
               riscv_intr_names[cause] : "(unknown)";
    } else {
        return (cause < ARRAY_SIZE(riscv_excp_names)) ?
               riscv_excp_names[cause] : "(unknown)";
    }
}

void riscv_cpu_set_misa_ext(CPURISCVState *env, uint32_t ext)
{
    env->misa_ext_mask = env->misa_ext = ext;
}

int riscv_cpu_max_xlen(RISCVCPUClass *mcc)
{
    return 16 << mcc->misa_mxl_max;
}

#ifndef CONFIG_USER_ONLY
static uint8_t satp_mode_from_str(const char *satp_mode_str)
{
    if (!strncmp(satp_mode_str, "mbare", 5)) {
        return VM_1_10_MBARE;
    }

    if (!strncmp(satp_mode_str, "sv32", 4)) {
        return VM_1_10_SV32;
    }

    if (!strncmp(satp_mode_str, "sv39", 4)) {
        return VM_1_10_SV39;
    }

    if (!strncmp(satp_mode_str, "sv48", 4)) {
        return VM_1_10_SV48;
    }

    if (!strncmp(satp_mode_str, "sv57", 4)) {
        return VM_1_10_SV57;
    }

    if (!strncmp(satp_mode_str, "sv64", 4)) {
        return VM_1_10_SV64;
    }

    g_assert_not_reached();
}

uint8_t satp_mode_max_from_map(uint32_t map)
{
    /*
     * 'map = 0' will make us return (31 - 32), which C will
     * happily overflow to UINT_MAX. There's no good result to
     * return if 'map = 0' (e.g. returning 0 will be ambiguous
     * with the result for 'map = 1').
     *
     * Assert out if map = 0. Callers will have to deal with
     * it outside of this function.
     */
    g_assert(map > 0);

    /* map here has at least one bit set, so no problem with clz */
    return 31 - __builtin_clz(map);
}

const char *satp_mode_str(uint8_t satp_mode, bool is_32_bit)
{
    if (is_32_bit) {
        switch (satp_mode) {
        case VM_1_10_SV32:
            return "sv32";
        case VM_1_10_MBARE:
            return "none";
        }
    } else {
        switch (satp_mode) {
        case VM_1_10_SV64:
            return "sv64";
        case VM_1_10_SV57:
            return "sv57";
        case VM_1_10_SV48:
            return "sv48";
        case VM_1_10_SV39:
            return "sv39";
        case VM_1_10_MBARE:
            return "none";
        }
    }

    g_assert_not_reached();
}

static void set_satp_mode_max_supported(RISCVCPU *cpu,
                                        uint8_t satp_mode)
{
    bool rv32 = riscv_cpu_mxl(&cpu->env) == MXL_RV32;
    const bool *valid_vm = rv32 ? valid_vm_1_10_32 : valid_vm_1_10_64;

    for (int i = 0; i <= satp_mode; ++i) {
        if (valid_vm[i]) {
            cpu->cfg.satp_mode.supported |= (1 << i);
        }
    }
}

/* Set the satp mode to the max supported */
static void set_satp_mode_default_map(RISCVCPU *cpu)
{
    /*
     * Bare CPUs do not default to the max available.
     * Users must set a valid satp_mode in the command
     * line.
     */
    if (object_dynamic_cast(OBJECT(cpu), TYPE_RISCV_BARE_CPU) != NULL) {
        warn_report("No satp mode set. Defaulting to 'bare'");
        cpu->cfg.satp_mode.map = (1 << VM_1_10_MBARE);
        return;
    }

    cpu->cfg.satp_mode.map = cpu->cfg.satp_mode.supported;
}
#endif

static void riscv_any_cpu_init(Object *obj)
{
    RISCVCPU *cpu = RISCV_CPU(obj);
    CPURISCVState *env = &cpu->env;
    riscv_cpu_set_misa_ext(env, RVI | RVM | RVA | RVF | RVD | RVC | RVU);

#ifndef CONFIG_USER_ONLY
    set_satp_mode_max_supported(RISCV_CPU(obj),
        riscv_cpu_mxl(&RISCV_CPU(obj)->env) == MXL_RV32 ?
        VM_1_10_SV32 : VM_1_10_SV57);
#endif

    env->priv_ver = PRIV_VERSION_LATEST;

    /* inherited from parent obj via riscv_cpu_init() */
    cpu->cfg.ext_zifencei = true;
    cpu->cfg.ext_zicsr = true;
    cpu->cfg.mmu = true;
    cpu->cfg.pmp = true;
}

static void riscv_max_cpu_init(Object *obj)
{
    RISCVCPU *cpu = RISCV_CPU(obj);
    CPURISCVState *env = &cpu->env;

    cpu->cfg.mmu = true;
    cpu->cfg.pmp = true;

    env->priv_ver = PRIV_VERSION_LATEST;
#ifndef CONFIG_USER_ONLY
#ifdef TARGET_RISCV32
    set_satp_mode_max_supported(cpu, VM_1_10_SV32);
#else
    set_satp_mode_max_supported(cpu, VM_1_10_SV57);
#endif
#endif
}

#if defined(TARGET_RISCV64)
static void rv64_base_cpu_init(Object *obj)
{
    RISCVCPU *cpu = RISCV_CPU(obj);
    CPURISCVState *env = &cpu->env;

    cpu->cfg.mmu = true;
    cpu->cfg.pmp = true;

    /* Set latest version of privileged specification */
    env->priv_ver = PRIV_VERSION_LATEST;
#ifndef CONFIG_USER_ONLY
    set_satp_mode_max_supported(RISCV_CPU(obj), VM_1_10_SV57);
#endif
}

static void rv64_sifive_u_cpu_init(Object *obj)
{
    RISCVCPU *cpu = RISCV_CPU(obj);
    CPURISCVState *env = &cpu->env;
    riscv_cpu_set_misa_ext(env, RVI | RVM | RVA | RVF | RVD | RVC | RVS | RVU);
    env->priv_ver = PRIV_VERSION_1_10_0;
#ifndef CONFIG_USER_ONLY
    set_satp_mode_max_supported(RISCV_CPU(obj), VM_1_10_SV39);
#endif

    /* inherited from parent obj via riscv_cpu_init() */
    cpu->cfg.ext_zifencei = true;
    cpu->cfg.ext_zicsr = true;
    cpu->cfg.mmu = true;
    cpu->cfg.pmp = true;
}

static void rv64_sifive_e_cpu_init(Object *obj)
{
    CPURISCVState *env = &RISCV_CPU(obj)->env;
    RISCVCPU *cpu = RISCV_CPU(obj);

    riscv_cpu_set_misa_ext(env, RVI | RVM | RVA | RVC | RVU);
    env->priv_ver = PRIV_VERSION_1_10_0;
#ifndef CONFIG_USER_ONLY
    set_satp_mode_max_supported(cpu, VM_1_10_MBARE);
#endif

    /* inherited from parent obj via riscv_cpu_init() */
    cpu->cfg.ext_zifencei = true;
    cpu->cfg.ext_zicsr = true;
    cpu->cfg.pmp = true;
}

static void rv64_thead_c906_cpu_init(Object *obj)
{
    CPURISCVState *env = &RISCV_CPU(obj)->env;
    RISCVCPU *cpu = RISCV_CPU(obj);

    riscv_cpu_set_misa_ext(env, RVG | RVC | RVS | RVU);
    env->priv_ver = PRIV_VERSION_1_11_0;

    cpu->cfg.ext_zfa = true;
    cpu->cfg.ext_zfh = true;
    cpu->cfg.mmu = true;
    cpu->cfg.ext_xtheadba = true;
    cpu->cfg.ext_xtheadbb = true;
    cpu->cfg.ext_xtheadbs = true;
    cpu->cfg.ext_xtheadcmo = true;
    cpu->cfg.ext_xtheadcondmov = true;
    cpu->cfg.ext_xtheadfmemidx = true;
    cpu->cfg.ext_xtheadmac = true;
    cpu->cfg.ext_xtheadmemidx = true;
    cpu->cfg.ext_xtheadmempair = true;
    cpu->cfg.ext_xtheadsync = true;

    cpu->cfg.mvendorid = THEAD_VENDOR_ID;
#ifndef CONFIG_USER_ONLY
    set_satp_mode_max_supported(cpu, VM_1_10_SV39);
#endif

    /* inherited from parent obj via riscv_cpu_init() */
    cpu->cfg.pmp = true;
}

static void rv64_veyron_v1_cpu_init(Object *obj)
{
    CPURISCVState *env = &RISCV_CPU(obj)->env;
    RISCVCPU *cpu = RISCV_CPU(obj);

    riscv_cpu_set_misa_ext(env, RVG | RVC | RVS | RVU | RVH);
    env->priv_ver = PRIV_VERSION_1_12_0;

    /* Enable ISA extensions */
    cpu->cfg.mmu = true;
    cpu->cfg.ext_zifencei = true;
    cpu->cfg.ext_zicsr = true;
    cpu->cfg.pmp = true;
    cpu->cfg.ext_zicbom = true;
    cpu->cfg.cbom_blocksize = 64;
    cpu->cfg.cboz_blocksize = 64;
    cpu->cfg.ext_zicboz = true;
    cpu->cfg.ext_smaia = true;
    cpu->cfg.ext_ssaia = true;
    cpu->cfg.ext_sscofpmf = true;
    cpu->cfg.ext_sstc = true;
    cpu->cfg.ext_svinval = true;
    cpu->cfg.ext_svnapot = true;
    cpu->cfg.ext_svpbmt = true;
    cpu->cfg.ext_smstateen = true;
    cpu->cfg.ext_zba = true;
    cpu->cfg.ext_zbb = true;
    cpu->cfg.ext_zbc = true;
    cpu->cfg.ext_zbs = true;
    cpu->cfg.ext_XVentanaCondOps = true;

    cpu->cfg.mvendorid = VEYRON_V1_MVENDORID;
    cpu->cfg.marchid = VEYRON_V1_MARCHID;
    cpu->cfg.mimpid = VEYRON_V1_MIMPID;

#ifndef CONFIG_USER_ONLY
    set_satp_mode_max_supported(cpu, VM_1_10_SV48);
#endif
}

static void rv128_base_cpu_init(Object *obj)
{
    RISCVCPU *cpu = RISCV_CPU(obj);
    CPURISCVState *env = &cpu->env;

    if (qemu_tcg_mttcg_enabled()) {
        /* Missing 128-bit aligned atomics */
        error_report("128-bit RISC-V currently does not work with Multi "
                     "Threaded TCG. Please use: -accel tcg,thread=single");
        exit(EXIT_FAILURE);
    }

    cpu->cfg.mmu = true;
    cpu->cfg.pmp = true;

    /* Set latest version of privileged specification */
    env->priv_ver = PRIV_VERSION_LATEST;
#ifndef CONFIG_USER_ONLY
    set_satp_mode_max_supported(RISCV_CPU(obj), VM_1_10_SV57);
#endif
}

static void rv64i_bare_cpu_init(Object *obj)
{
    CPURISCVState *env = &RISCV_CPU(obj)->env;
    riscv_cpu_set_misa_ext(env, RVI);
}
#else
static void rv32_base_cpu_init(Object *obj)
{
    RISCVCPU *cpu = RISCV_CPU(obj);
    CPURISCVState *env = &cpu->env;

    cpu->cfg.mmu = true;
    cpu->cfg.pmp = true;

    /* Set latest version of privileged specification */
    env->priv_ver = PRIV_VERSION_LATEST;
#ifndef CONFIG_USER_ONLY
    set_satp_mode_max_supported(RISCV_CPU(obj), VM_1_10_SV32);
#endif
}

static void rv32_sifive_u_cpu_init(Object *obj)
{
    RISCVCPU *cpu = RISCV_CPU(obj);
    CPURISCVState *env = &cpu->env;
    riscv_cpu_set_misa_ext(env, RVI | RVM | RVA | RVF | RVD | RVC | RVS | RVU);
    env->priv_ver = PRIV_VERSION_1_10_0;
#ifndef CONFIG_USER_ONLY
    set_satp_mode_max_supported(RISCV_CPU(obj), VM_1_10_SV32);
#endif

    /* inherited from parent obj via riscv_cpu_init() */
    cpu->cfg.ext_zifencei = true;
    cpu->cfg.ext_zicsr = true;
    cpu->cfg.mmu = true;
    cpu->cfg.pmp = true;
}

static void rv32_sifive_e_cpu_init(Object *obj)
{
    CPURISCVState *env = &RISCV_CPU(obj)->env;
    RISCVCPU *cpu = RISCV_CPU(obj);

    riscv_cpu_set_misa_ext(env, RVI | RVM | RVA | RVC | RVU);
    env->priv_ver = PRIV_VERSION_1_10_0;
#ifndef CONFIG_USER_ONLY
    set_satp_mode_max_supported(cpu, VM_1_10_MBARE);
#endif

    /* inherited from parent obj via riscv_cpu_init() */
    cpu->cfg.ext_zifencei = true;
    cpu->cfg.ext_zicsr = true;
    cpu->cfg.pmp = true;
}

static void rv32_ibex_cpu_init(Object *obj)
{
    CPURISCVState *env = &RISCV_CPU(obj)->env;
    RISCVCPU *cpu = RISCV_CPU(obj);

    riscv_cpu_set_misa_ext(env, RVI | RVM | RVC | RVU);
    env->priv_ver = PRIV_VERSION_1_12_0;
#ifndef CONFIG_USER_ONLY
    set_satp_mode_max_supported(cpu, VM_1_10_MBARE);
#endif
    /* inherited from parent obj via riscv_cpu_init() */
    cpu->cfg.ext_zifencei = true;
    cpu->cfg.ext_zicsr = true;
    cpu->cfg.pmp = true;
    cpu->cfg.ext_smepmp = true;
}

static void rv32_imafcu_nommu_cpu_init(Object *obj)
{
    CPURISCVState *env = &RISCV_CPU(obj)->env;
    RISCVCPU *cpu = RISCV_CPU(obj);

    riscv_cpu_set_misa_ext(env, RVI | RVM | RVA | RVF | RVC | RVU);
    env->priv_ver = PRIV_VERSION_1_10_0;
#ifndef CONFIG_USER_ONLY
    set_satp_mode_max_supported(cpu, VM_1_10_MBARE);
#endif

    /* inherited from parent obj via riscv_cpu_init() */
    cpu->cfg.ext_zifencei = true;
    cpu->cfg.ext_zicsr = true;
    cpu->cfg.pmp = true;
}
#endif

static ObjectClass *riscv_cpu_class_by_name(const char *cpu_model)
{
    ObjectClass *oc;
    char *typename;
    char **cpuname;

    cpuname = g_strsplit(cpu_model, ",", 1);
    typename = g_strdup_printf(RISCV_CPU_TYPE_NAME("%s"), cpuname[0]);
    oc = object_class_by_name(typename);
    g_strfreev(cpuname);
    g_free(typename);

    return oc;
}

char *riscv_cpu_get_name(RISCVCPU *cpu)
{
    RISCVCPUClass *rcc = RISCV_CPU_GET_CLASS(cpu);
    const char *typename = object_class_get_name(OBJECT_CLASS(rcc));

    g_assert(g_str_has_suffix(typename, RISCV_CPU_TYPE_SUFFIX));

    return cpu_model_from_type(typename);
}

static void riscv_cpu_dump_state(CPUState *cs, FILE *f, int flags)
{
    RISCVCPU *cpu = RISCV_CPU(cs);
    CPURISCVState *env = &cpu->env;
    int i, j;
    uint8_t *p;

#if !defined(CONFIG_USER_ONLY)
    if (riscv_has_ext(env, RVH)) {
        qemu_fprintf(f, " %s %d\n", "V = ", env->virt_enabled);
    }
#endif
    qemu_fprintf(f, " %s " TARGET_FMT_lx "\n", "pc ", env->pc);
#ifndef CONFIG_USER_ONLY
    {
        static const int dump_csrs[] = {
            CSR_MHARTID,
            CSR_MSTATUS,
            CSR_MSTATUSH,
            /*
             * CSR_SSTATUS is intentionally omitted here as its value
             * can be figured out by looking at CSR_MSTATUS
             */
            CSR_HSTATUS,
            CSR_VSSTATUS,
            CSR_MIP,
            CSR_MIE,
            CSR_MIDELEG,
            CSR_HIDELEG,
            CSR_MEDELEG,
            CSR_HEDELEG,
            CSR_MTVEC,
            CSR_STVEC,
            CSR_VSTVEC,
            CSR_MEPC,
            CSR_SEPC,
            CSR_VSEPC,
            CSR_MCAUSE,
            CSR_SCAUSE,
            CSR_VSCAUSE,
            CSR_MTVAL,
            CSR_STVAL,
            CSR_HTVAL,
            CSR_MTVAL2,
            CSR_MSCRATCH,
            CSR_SSCRATCH,
            CSR_SATP,
            CSR_MMTE,
            CSR_UPMBASE,
            CSR_UPMMASK,
            CSR_SPMBASE,
            CSR_SPMMASK,
            CSR_MPMBASE,
            CSR_MPMMASK,
        };

        for (i = 0; i < ARRAY_SIZE(dump_csrs); ++i) {
            int csrno = dump_csrs[i];
            target_ulong val = 0;
            RISCVException res = riscv_csrrw_debug(env, csrno, &val, 0, 0);

            /*
             * Rely on the smode, hmode, etc, predicates within csr.c
             * to do the filtering of the registers that are present.
             */
            if (res == RISCV_EXCP_NONE) {
                qemu_fprintf(f, " %-8s " TARGET_FMT_lx "\n",
                             csr_ops[csrno].name, val);
            }
        }
    }
#endif

    for (i = 0; i < 32; i++) {
        qemu_fprintf(f, " %-8s " TARGET_FMT_lx,
                     riscv_int_regnames[i], env->gpr[i]);
        if ((i & 3) == 3) {
            qemu_fprintf(f, "\n");
        }
    }
    if (flags & CPU_DUMP_FPU) {
        for (i = 0; i < 32; i++) {
            qemu_fprintf(f, " %-8s %016" PRIx64,
                         riscv_fpr_regnames[i], env->fpr[i]);
            if ((i & 3) == 3) {
                qemu_fprintf(f, "\n");
            }
        }
    }
    if (riscv_has_ext(env, RVV) && (flags & CPU_DUMP_VPU)) {
        static const int dump_rvv_csrs[] = {
            CSR_VSTART,
            CSR_VXSAT,
            CSR_VXRM,
            CSR_VCSR,
            CSR_VL,
            CSR_VTYPE,
            CSR_VLENB,
        };
        for (i = 0; i < ARRAY_SIZE(dump_rvv_csrs); ++i) {
            int csrno = dump_rvv_csrs[i];
            target_ulong val = 0;
            RISCVException res = riscv_csrrw_debug(env, csrno, &val, 0, 0);

            /*
             * Rely on the smode, hmode, etc, predicates within csr.c
             * to do the filtering of the registers that are present.
             */
            if (res == RISCV_EXCP_NONE) {
                qemu_fprintf(f, " %-8s " TARGET_FMT_lx "\n",
                             csr_ops[csrno].name, val);
            }
        }
        uint16_t vlenb = cpu->cfg.vlenb;

        for (i = 0; i < 32; i++) {
            qemu_fprintf(f, " %-8s ", riscv_rvv_regnames[i]);
            p = (uint8_t *)env->vreg;
            for (j = vlenb - 1 ; j >= 0; j--) {
                qemu_fprintf(f, "%02x", *(p + i * vlenb + BYTE(j)));
            }
            qemu_fprintf(f, "\n");
        }
    }
}

static void riscv_cpu_set_pc(CPUState *cs, vaddr value)
{
    RISCVCPU *cpu = RISCV_CPU(cs);
    CPURISCVState *env = &cpu->env;

    if (env->xl == MXL_RV32) {
        env->pc = (int32_t)value;
    } else {
        env->pc = value;
    }
}

static vaddr riscv_cpu_get_pc(CPUState *cs)
{
    RISCVCPU *cpu = RISCV_CPU(cs);
    CPURISCVState *env = &cpu->env;

    /* Match cpu_get_tb_cpu_state. */
    if (env->xl == MXL_RV32) {
        return env->pc & UINT32_MAX;
    }
    return env->pc;
}

static bool riscv_cpu_has_work(CPUState *cs)
{
#ifndef CONFIG_USER_ONLY
    RISCVCPU *cpu = RISCV_CPU(cs);
    CPURISCVState *env = &cpu->env;
    /*
     * Definition of the WFI instruction requires it to ignore the privilege
     * mode and delegation registers, but respect individual enables
     */
    return riscv_cpu_all_pending(env) != 0 ||
           riscv_cpu_sirq_pending(env) != RISCV_EXCP_NONE ||
           riscv_cpu_vsirq_pending(env) != RISCV_EXCP_NONE;
#else
    return true;
#endif
}

static int riscv_cpu_mmu_index(CPUState *cs, bool ifetch)
{
    return riscv_env_mmu_index(cpu_env(cs), ifetch);
}

static void riscv_cpu_reset_hold(Object *obj)
{
#ifndef CONFIG_USER_ONLY
    uint8_t iprio;
    int i, irq, rdzero;
#endif
    CPUState *cs = CPU(obj);
    RISCVCPU *cpu = RISCV_CPU(cs);
    RISCVCPUClass *mcc = RISCV_CPU_GET_CLASS(cpu);
    CPURISCVState *env = &cpu->env;

    if (mcc->parent_phases.hold) {
        mcc->parent_phases.hold(obj);
    }
#ifndef CONFIG_USER_ONLY
    env->misa_mxl = mcc->misa_mxl_max;
    env->priv = PRV_M;
    env->mstatus &= ~(MSTATUS_MIE | MSTATUS_MPRV);
    if (env->misa_mxl > MXL_RV32) {
        /*
         * The reset status of SXL/UXL is undefined, but mstatus is WARL
         * and we must ensure that the value after init is valid for read.
         */
        env->mstatus = set_field(env->mstatus, MSTATUS64_SXL, env->misa_mxl);
        env->mstatus = set_field(env->mstatus, MSTATUS64_UXL, env->misa_mxl);
        if (riscv_has_ext(env, RVH)) {
            env->vsstatus = set_field(env->vsstatus,
                                      MSTATUS64_SXL, env->misa_mxl);
            env->vsstatus = set_field(env->vsstatus,
                                      MSTATUS64_UXL, env->misa_mxl);
            env->mstatus_hs = set_field(env->mstatus_hs,
                                        MSTATUS64_SXL, env->misa_mxl);
            env->mstatus_hs = set_field(env->mstatus_hs,
                                        MSTATUS64_UXL, env->misa_mxl);
        }
    }
    env->mcause = 0;
    env->miclaim = MIP_SGEIP;
    env->pc = env->resetvec;
    env->bins = 0;
    env->two_stage_lookup = false;

    env->menvcfg = (cpu->cfg.ext_svpbmt ? MENVCFG_PBMTE : 0) |
                   (cpu->cfg.ext_svadu ? MENVCFG_ADUE : 0);
    env->henvcfg = (cpu->cfg.ext_svpbmt ? HENVCFG_PBMTE : 0) |
                   (cpu->cfg.ext_svadu ? HENVCFG_ADUE : 0);

    /* Initialized default priorities of local interrupts. */
    for (i = 0; i < ARRAY_SIZE(env->miprio); i++) {
        iprio = riscv_cpu_default_priority(i);
        env->miprio[i] = (i == IRQ_M_EXT) ? 0 : iprio;
        env->siprio[i] = (i == IRQ_S_EXT) ? 0 : iprio;
        env->hviprio[i] = 0;
    }
    i = 0;
    while (!riscv_cpu_hviprio_index2irq(i, &irq, &rdzero)) {
        if (!rdzero) {
            env->hviprio[irq] = env->miprio[irq];
        }
        i++;
    }
    /* mmte is supposed to have pm.current hardwired to 1 */
    env->mmte |= (EXT_STATUS_INITIAL | MMTE_M_PM_CURRENT);

    /*
     * Bits 10, 6, 2 and 12 of mideleg are read only 1 when the Hypervisor
     * extension is enabled.
     */
    if (riscv_has_ext(env, RVH)) {
        env->mideleg |= HS_MODE_INTERRUPTS;
    }

    /*
     * Clear mseccfg and unlock all the PMP entries upon reset.
     * This is allowed as per the priv and smepmp specifications
     * and is needed to clear stale entries across reboots.
     */
    if (riscv_cpu_cfg(env)->ext_smepmp) {
        env->mseccfg = 0;
    }

    pmp_unlock_entries(env);
#endif
    env->xl = riscv_cpu_mxl(env);
    riscv_cpu_update_mask(env);
    cs->exception_index = RISCV_EXCP_NONE;
    env->load_res = -1;
    set_default_nan_mode(1, &env->fp_status);

#ifndef CONFIG_USER_ONLY
    if (cpu->cfg.debug) {
        riscv_trigger_reset_hold(env);
    }

    if (kvm_enabled()) {
        kvm_riscv_reset_vcpu(cpu);
    }
#endif
}

static void riscv_cpu_disas_set_info(CPUState *s, disassemble_info *info)
{
    RISCVCPU *cpu = RISCV_CPU(s);
    CPURISCVState *env = &cpu->env;
    info->target_info = &cpu->cfg;

    switch (env->xl) {
    case MXL_RV32:
        info->print_insn = print_insn_riscv32;
        break;
    case MXL_RV64:
        info->print_insn = print_insn_riscv64;
        break;
    case MXL_RV128:
        info->print_insn = print_insn_riscv128;
        break;
    default:
        g_assert_not_reached();
    }
}

#ifndef CONFIG_USER_ONLY
static void riscv_cpu_satp_mode_finalize(RISCVCPU *cpu, Error **errp)
{
    bool rv32 = riscv_cpu_is_32bit(cpu);
    uint8_t satp_mode_map_max, satp_mode_supported_max;

    /* The CPU wants the OS to decide which satp mode to use */
    if (cpu->cfg.satp_mode.supported == 0) {
        return;
    }

    satp_mode_supported_max =
        satp_mode_max_from_map(cpu->cfg.satp_mode.supported);

    if (cpu->cfg.satp_mode.map == 0) {
        if (cpu->cfg.satp_mode.init == 0) {
            /* If unset by the user, we fallback to the default satp mode. */
            set_satp_mode_default_map(cpu);
        } else {
            /*
             * Find the lowest level that was disabled and then enable the
             * first valid level below which can be found in
             * valid_vm_1_10_32/64.
             */
            for (int i = 1; i < 16; ++i) {
                if ((cpu->cfg.satp_mode.init & (1 << i)) &&
                    (cpu->cfg.satp_mode.supported & (1 << i))) {
                    for (int j = i - 1; j >= 0; --j) {
                        if (cpu->cfg.satp_mode.supported & (1 << j)) {
                            cpu->cfg.satp_mode.map |= (1 << j);
                            break;
                        }
                    }
                    break;
                }
            }
        }
    }

    satp_mode_map_max = satp_mode_max_from_map(cpu->cfg.satp_mode.map);

    /* Make sure the user asked for a supported configuration (HW and qemu) */
    if (satp_mode_map_max > satp_mode_supported_max) {
        error_setg(errp, "satp_mode %s is higher than hw max capability %s",
                   satp_mode_str(satp_mode_map_max, rv32),
                   satp_mode_str(satp_mode_supported_max, rv32));
        return;
    }

    /*
     * Make sure the user did not ask for an invalid configuration as per
     * the specification.
     */
    if (!rv32) {
        for (int i = satp_mode_map_max - 1; i >= 0; --i) {
            if (!(cpu->cfg.satp_mode.map & (1 << i)) &&
                (cpu->cfg.satp_mode.init & (1 << i)) &&
                (cpu->cfg.satp_mode.supported & (1 << i))) {
                error_setg(errp, "cannot disable %s satp mode if %s "
                           "is enabled", satp_mode_str(i, false),
                           satp_mode_str(satp_mode_map_max, false));
                return;
            }
        }
    }

    /* Finally expand the map so that all valid modes are set */
    for (int i = satp_mode_map_max - 1; i >= 0; --i) {
        if (cpu->cfg.satp_mode.supported & (1 << i)) {
            cpu->cfg.satp_mode.map |= (1 << i);
        }
    }
}
#endif

void riscv_cpu_finalize_features(RISCVCPU *cpu, Error **errp)
{
    Error *local_err = NULL;

#ifndef CONFIG_USER_ONLY
    riscv_cpu_satp_mode_finalize(cpu, &local_err);
    if (local_err != NULL) {
        error_propagate(errp, local_err);
        return;
    }
#endif

    if (tcg_enabled()) {
        riscv_tcg_cpu_finalize_features(cpu, &local_err);
        if (local_err != NULL) {
            error_propagate(errp, local_err);
            return;
        }
    } else if (kvm_enabled()) {
        riscv_kvm_cpu_finalize_features(cpu, &local_err);
        if (local_err != NULL) {
            error_propagate(errp, local_err);
            return;
        }
    }
}

static void riscv_cpu_realize(DeviceState *dev, Error **errp)
{
    CPUState *cs = CPU(dev);
    RISCVCPU *cpu = RISCV_CPU(dev);
    RISCVCPUClass *mcc = RISCV_CPU_GET_CLASS(dev);
    Error *local_err = NULL;

    if (object_dynamic_cast(OBJECT(dev), TYPE_RISCV_CPU_ANY) != NULL) {
        warn_report("The 'any' CPU is deprecated and will be "
                    "removed in the future.");
    }

    cpu_exec_realizefn(cs, &local_err);
    if (local_err != NULL) {
        error_propagate(errp, local_err);
        return;
    }

    riscv_cpu_finalize_features(cpu, &local_err);
    if (local_err != NULL) {
        error_propagate(errp, local_err);
        return;
    }

    riscv_cpu_register_gdb_regs_for_features(cs);

#ifndef CONFIG_USER_ONLY
    if (cpu->cfg.debug) {
        riscv_trigger_realize(&cpu->env);
    }
#endif

    qemu_init_vcpu(cs);
    cpu_reset(cs);

    mcc->parent_realize(dev, errp);
}

bool riscv_cpu_accelerator_compatible(RISCVCPU *cpu)
{
    if (tcg_enabled()) {
        return riscv_cpu_tcg_compatible(cpu);
    }

    return true;
}

#ifndef CONFIG_USER_ONLY
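/*
 * The "sv32"/"sv39"/"sv48"/"sv57"/"sv64" properties registered below are
 * bool properties backed by cpu->cfg.satp_mode: setting one of them (e.g.
 * "-cpu rv64,sv48=on" on the command line) records the choice in both
 * 'map' and 'init', and riscv_cpu_satp_mode_finalize() later validates it
 * against the supported modes and expands 'map' accordingly.
 */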
static void cpu_riscv_get_satp(Object *obj, Visitor *v, const char *name,
                               void *opaque, Error **errp)
{
    RISCVSATPMap *satp_map = opaque;
    uint8_t satp = satp_mode_from_str(name);
    bool value;

    value = satp_map->map & (1 << satp);

    visit_type_bool(v, name, &value, errp);
}

static void cpu_riscv_set_satp(Object *obj, Visitor *v, const char *name,
                               void *opaque, Error **errp)
{
    RISCVSATPMap *satp_map = opaque;
    uint8_t satp = satp_mode_from_str(name);
    bool value;

    if (!visit_type_bool(v, name, &value, errp)) {
        return;
    }

    satp_map->map = deposit32(satp_map->map, satp, 1, value);
    satp_map->init |= 1 << satp;
}

void riscv_add_satp_mode_properties(Object *obj)
{
    RISCVCPU *cpu = RISCV_CPU(obj);

    if (cpu->env.misa_mxl == MXL_RV32) {
        object_property_add(obj, "sv32", "bool", cpu_riscv_get_satp,
                            cpu_riscv_set_satp, NULL, &cpu->cfg.satp_mode);
    } else {
        object_property_add(obj, "sv39", "bool", cpu_riscv_get_satp,
                            cpu_riscv_set_satp, NULL, &cpu->cfg.satp_mode);
        object_property_add(obj, "sv48", "bool", cpu_riscv_get_satp,
                            cpu_riscv_set_satp, NULL, &cpu->cfg.satp_mode);
        object_property_add(obj, "sv57", "bool", cpu_riscv_get_satp,
                            cpu_riscv_set_satp, NULL, &cpu->cfg.satp_mode);
        object_property_add(obj, "sv64", "bool", cpu_riscv_get_satp,
                            cpu_riscv_set_satp, NULL, &cpu->cfg.satp_mode);
    }
}

static void riscv_cpu_set_irq(void *opaque, int irq, int level)
{
    RISCVCPU *cpu = RISCV_CPU(opaque);
    CPURISCVState *env = &cpu->env;

    if (irq < IRQ_LOCAL_MAX) {
        switch (irq) {
        case IRQ_U_SOFT:
        case IRQ_S_SOFT:
        case IRQ_VS_SOFT:
        case IRQ_M_SOFT:
        case IRQ_U_TIMER:
        case IRQ_S_TIMER:
        case IRQ_VS_TIMER:
        case IRQ_M_TIMER:
        case IRQ_U_EXT:
        case IRQ_VS_EXT:
        case IRQ_M_EXT:
            if (kvm_enabled()) {
                kvm_riscv_set_irq(cpu, irq, level);
            } else {
                riscv_cpu_update_mip(env, 1 << irq, BOOL_TO_MASK(level));
            }
            break;
        case IRQ_S_EXT:
            if (kvm_enabled()) {
                kvm_riscv_set_irq(cpu, irq, level);
            } else {
                env->external_seip = level;
                riscv_cpu_update_mip(env, 1 << irq,
                                     BOOL_TO_MASK(level | env->software_seip));
            }
            break;
        default:
            g_assert_not_reached();
        }
    } else if (irq < (IRQ_LOCAL_MAX + IRQ_LOCAL_GUEST_MAX)) {
        /* Require H-extension for handling guest local interrupts */
        if (!riscv_has_ext(env, RVH)) {
            g_assert_not_reached();
        }

        /* Compute bit position in HGEIP CSR */
        irq = irq - IRQ_LOCAL_MAX + 1;
        if (env->geilen < irq) {
            g_assert_not_reached();
        }

        /* Update HGEIP CSR */
        env->hgeip &= ~((target_ulong)1 << irq);
        if (level) {
            env->hgeip |= (target_ulong)1 << irq;
        }

        /* Update mip.SGEIP bit */
        riscv_cpu_update_mip(env, MIP_SGEIP,
                             BOOL_TO_MASK(!!(env->hgeie & env->hgeip)));
    } else {
        g_assert_not_reached();
    }
}
#endif /* CONFIG_USER_ONLY */

static bool riscv_cpu_is_dynamic(Object *cpu_obj)
{
    return object_dynamic_cast(cpu_obj, TYPE_RISCV_DYNAMIC_CPU) != NULL;
}

static void riscv_cpu_post_init(Object *obj)
{
    accel_cpu_instance_init(CPU(obj));
}

static void riscv_cpu_init(Object *obj)
{
    RISCVCPUClass *mcc = RISCV_CPU_GET_CLASS(obj);
    RISCVCPU *cpu = RISCV_CPU(obj);
    CPURISCVState *env = &cpu->env;

    env->misa_mxl = mcc->misa_mxl_max;

#ifndef CONFIG_USER_ONLY
    qdev_init_gpio_in(DEVICE(obj), riscv_cpu_set_irq,
                      IRQ_LOCAL_MAX + IRQ_LOCAL_GUEST_MAX);
#endif /* CONFIG_USER_ONLY */

    general_user_opts = g_hash_table_new(g_str_hash, g_str_equal);

    /*
     * The timer and performance counters extensions were supported
     * in QEMU before they were added as discrete extensions in the
     * ISA. To keep compatibility we'll always default them to 'true'
     * for all CPUs. Each accelerator will decide what to do when
     * users disable them.
     */
    RISCV_CPU(obj)->cfg.ext_zicntr = true;
    RISCV_CPU(obj)->cfg.ext_zihpm = true;

    /* Default values for non-bool cpu properties */
    cpu->cfg.pmu_mask = MAKE_64BIT_MASK(3, 16);
    cpu->cfg.vlenb = 128 >> 3;
    cpu->cfg.elen = 64;
    cpu->cfg.cbom_blocksize = 64;
    cpu->cfg.cbop_blocksize = 64;
    cpu->cfg.cboz_blocksize = 64;
    cpu->env.vext_ver = VEXT_VERSION_1_00_0;
}

static void riscv_bare_cpu_init(Object *obj)
{
    RISCVCPU *cpu = RISCV_CPU(obj);

    /*
     * Bare CPUs do not inherit the timer and performance
     * counters from the parent class (see riscv_cpu_init()
     * for info on why the parent enables them).
     *
     * Users have to explicitly enable these counters for
     * bare CPUs.
     */
    cpu->cfg.ext_zicntr = false;
    cpu->cfg.ext_zihpm = false;

    /* Set to QEMU's first supported priv version */
    cpu->env.priv_ver = PRIV_VERSION_1_10_0;

    /*
     * Support all available satp_mode settings. The default
     * value will be set to MBARE if the user doesn't set
     * satp_mode manually (see set_satp_mode_default()).
     */
#ifndef CONFIG_USER_ONLY
    set_satp_mode_max_supported(cpu, VM_1_10_SV64);
#endif
}

typedef struct misa_ext_info {
    const char *name;
    const char *description;
} MISAExtInfo;

#define MISA_INFO_IDX(_bit) \
    __builtin_ctz(_bit)

#define MISA_EXT_INFO(_bit, _propname, _descr) \
    [MISA_INFO_IDX(_bit)] = {.name = _propname, .description = _descr}

static const MISAExtInfo misa_ext_info_arr[] = {
    MISA_EXT_INFO(RVA, "a", "Atomic instructions"),
    MISA_EXT_INFO(RVC, "c", "Compressed instructions"),
    MISA_EXT_INFO(RVD, "d", "Double-precision float point"),
    MISA_EXT_INFO(RVF, "f", "Single-precision float point"),
    MISA_EXT_INFO(RVI, "i", "Base integer instruction set"),
    MISA_EXT_INFO(RVE, "e", "Base integer instruction set (embedded)"),
    MISA_EXT_INFO(RVM, "m", "Integer multiplication and division"),
    MISA_EXT_INFO(RVS, "s", "Supervisor-level instructions"),
    MISA_EXT_INFO(RVU, "u", "User-level instructions"),
    MISA_EXT_INFO(RVH, "h", "Hypervisor"),
    MISA_EXT_INFO(RVJ, "x-j", "Dynamic translated languages"),
    MISA_EXT_INFO(RVV, "v", "Vector operations"),
    MISA_EXT_INFO(RVG, "g", "General purpose (IMAFD_Zicsr_Zifencei)"),
    MISA_EXT_INFO(RVB, "x-b", "Bit manipulation (Zba_Zbb_Zbs)")
};
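/*
 * The array above is indexed by misa bit position: RVC, for instance, is
 * misa bit 2 ('C' - 'A'), so MISA_EXT_INFO(RVC, "c", ...) fills index 2
 * and riscv_get_misa_ext_name(RVC) returns "c".
 */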
static void riscv_cpu_validate_misa_mxl(RISCVCPUClass *mcc)
{
    CPUClass *cc = CPU_CLASS(mcc);

    /* Validate that MISA_MXL is set properly. */
    switch (mcc->misa_mxl_max) {
#ifdef TARGET_RISCV64
    case MXL_RV64:
    case MXL_RV128:
        cc->gdb_core_xml_file = "riscv-64bit-cpu.xml";
        break;
#endif
    case MXL_RV32:
        cc->gdb_core_xml_file = "riscv-32bit-cpu.xml";
        break;
    default:
        g_assert_not_reached();
    }
}

static int riscv_validate_misa_info_idx(uint32_t bit)
{
    int idx;

    /*
     * Our lowest valid input (RVA) is 1 and
     * __builtin_ctz() is UB with zero.
     */
    g_assert(bit != 0);
    idx = MISA_INFO_IDX(bit);

    g_assert(idx < ARRAY_SIZE(misa_ext_info_arr));
    return idx;
}

const char *riscv_get_misa_ext_name(uint32_t bit)
{
    int idx = riscv_validate_misa_info_idx(bit);
    const char *val = misa_ext_info_arr[idx].name;

    g_assert(val != NULL);
    return val;
}

const char *riscv_get_misa_ext_description(uint32_t bit)
{
    int idx = riscv_validate_misa_info_idx(bit);
    const char *val = misa_ext_info_arr[idx].description;

    g_assert(val != NULL);
    return val;
}

#define MULTI_EXT_CFG_BOOL(_name, _prop, _defval) \
    {.name = _name, .offset = CPU_CFG_OFFSET(_prop), \
     .enabled = _defval}

const RISCVCPUMultiExtConfig riscv_cpu_extensions[] = {
    /* Defaults for standard extensions */
    MULTI_EXT_CFG_BOOL("sscofpmf", ext_sscofpmf, false),
    MULTI_EXT_CFG_BOOL("zifencei", ext_zifencei, true),
    MULTI_EXT_CFG_BOOL("zicsr", ext_zicsr, true),
    MULTI_EXT_CFG_BOOL("zihintntl", ext_zihintntl, true),
    MULTI_EXT_CFG_BOOL("zihintpause", ext_zihintpause, true),
    MULTI_EXT_CFG_BOOL("zacas", ext_zacas, false),
    MULTI_EXT_CFG_BOOL("zawrs", ext_zawrs, true),
    MULTI_EXT_CFG_BOOL("zfa", ext_zfa, true),
    MULTI_EXT_CFG_BOOL("zfh", ext_zfh, false),
    MULTI_EXT_CFG_BOOL("zfhmin", ext_zfhmin, false),
    MULTI_EXT_CFG_BOOL("zve32f", ext_zve32f, false),
    MULTI_EXT_CFG_BOOL("zve64f", ext_zve64f, false),
    MULTI_EXT_CFG_BOOL("zve64d", ext_zve64d, false),
    MULTI_EXT_CFG_BOOL("sstc", ext_sstc, true),

    MULTI_EXT_CFG_BOOL("smepmp", ext_smepmp, false),
    MULTI_EXT_CFG_BOOL("smstateen", ext_smstateen, false),
    MULTI_EXT_CFG_BOOL("svadu", ext_svadu, true),
    MULTI_EXT_CFG_BOOL("svinval", ext_svinval, false),
    MULTI_EXT_CFG_BOOL("svnapot", ext_svnapot, false),
    MULTI_EXT_CFG_BOOL("svpbmt", ext_svpbmt, false),

    MULTI_EXT_CFG_BOOL("zicntr", ext_zicntr, true),
    MULTI_EXT_CFG_BOOL("zihpm", ext_zihpm, true),

    MULTI_EXT_CFG_BOOL("zba", ext_zba, true),
    MULTI_EXT_CFG_BOOL("zbb", ext_zbb, true),
    MULTI_EXT_CFG_BOOL("zbc", ext_zbc, true),
    MULTI_EXT_CFG_BOOL("zbkb", ext_zbkb, false),
    MULTI_EXT_CFG_BOOL("zbkc", ext_zbkc, false),
    MULTI_EXT_CFG_BOOL("zbkx", ext_zbkx, false),
    MULTI_EXT_CFG_BOOL("zbs", ext_zbs, true),
    MULTI_EXT_CFG_BOOL("zk", ext_zk, false),
    MULTI_EXT_CFG_BOOL("zkn", ext_zkn, false),
    MULTI_EXT_CFG_BOOL("zknd", ext_zknd, false),
    MULTI_EXT_CFG_BOOL("zkne", ext_zkne, false),
    MULTI_EXT_CFG_BOOL("zknh", ext_zknh, false),
    MULTI_EXT_CFG_BOOL("zkr", ext_zkr, false),
    MULTI_EXT_CFG_BOOL("zks", ext_zks, false),
    MULTI_EXT_CFG_BOOL("zksed", ext_zksed, false),
    MULTI_EXT_CFG_BOOL("zksh", ext_zksh, false),
    MULTI_EXT_CFG_BOOL("zkt", ext_zkt, false),

    MULTI_EXT_CFG_BOOL("zdinx", ext_zdinx, false),
    MULTI_EXT_CFG_BOOL("zfinx", ext_zfinx, false),
    MULTI_EXT_CFG_BOOL("zhinx", ext_zhinx, false),
    MULTI_EXT_CFG_BOOL("zhinxmin", ext_zhinxmin, false),

    MULTI_EXT_CFG_BOOL("zicbom", ext_zicbom, true),
    MULTI_EXT_CFG_BOOL("zicbop", ext_zicbop, true),
    MULTI_EXT_CFG_BOOL("zicboz", ext_zicboz, true),

    MULTI_EXT_CFG_BOOL("zmmul", ext_zmmul, false),

    MULTI_EXT_CFG_BOOL("zca", ext_zca, false),
    MULTI_EXT_CFG_BOOL("zcb", ext_zcb, false),
    MULTI_EXT_CFG_BOOL("zcd", ext_zcd, false),
    MULTI_EXT_CFG_BOOL("zce", ext_zce, false),
    MULTI_EXT_CFG_BOOL("zcf", ext_zcf, false),
    MULTI_EXT_CFG_BOOL("zcmp", ext_zcmp, false),
    MULTI_EXT_CFG_BOOL("zcmt", ext_zcmt, false),
    MULTI_EXT_CFG_BOOL("zicond", ext_zicond, false),

    /* Vector cryptography extensions */
    MULTI_EXT_CFG_BOOL("zvbb", ext_zvbb, false),
    MULTI_EXT_CFG_BOOL("zvbc", ext_zvbc, false),
    MULTI_EXT_CFG_BOOL("zvkb", ext_zvkb, false),
    MULTI_EXT_CFG_BOOL("zvkg", ext_zvkg, false),
    MULTI_EXT_CFG_BOOL("zvkned", ext_zvkned, false),
    MULTI_EXT_CFG_BOOL("zvknha", ext_zvknha, false),
    MULTI_EXT_CFG_BOOL("zvknhb", ext_zvknhb, false),
    MULTI_EXT_CFG_BOOL("zvksed", ext_zvksed, false),
    MULTI_EXT_CFG_BOOL("zvksh", ext_zvksh, false),
    MULTI_EXT_CFG_BOOL("zvkt", ext_zvkt, false),
    MULTI_EXT_CFG_BOOL("zvkn", ext_zvkn, false),
    MULTI_EXT_CFG_BOOL("zvknc", ext_zvknc, false),
    MULTI_EXT_CFG_BOOL("zvkng", ext_zvkng, false),
    MULTI_EXT_CFG_BOOL("zvks", ext_zvks, false),
    MULTI_EXT_CFG_BOOL("zvksc", ext_zvksc, false),
    MULTI_EXT_CFG_BOOL("zvksg", ext_zvksg, false),

    DEFINE_PROP_END_OF_LIST(),
};

const RISCVCPUMultiExtConfig riscv_cpu_vendor_exts[] = {
    MULTI_EXT_CFG_BOOL("xtheadba", ext_xtheadba, false),
    MULTI_EXT_CFG_BOOL("xtheadbb", ext_xtheadbb, false),
    MULTI_EXT_CFG_BOOL("xtheadbs", ext_xtheadbs, false),
    MULTI_EXT_CFG_BOOL("xtheadcmo", ext_xtheadcmo, false),
    MULTI_EXT_CFG_BOOL("xtheadcondmov", ext_xtheadcondmov, false),
    MULTI_EXT_CFG_BOOL("xtheadfmemidx", ext_xtheadfmemidx, false),
    MULTI_EXT_CFG_BOOL("xtheadfmv", ext_xtheadfmv, false),
    MULTI_EXT_CFG_BOOL("xtheadmac", ext_xtheadmac, false),
    MULTI_EXT_CFG_BOOL("xtheadmemidx", ext_xtheadmemidx, false),
    MULTI_EXT_CFG_BOOL("xtheadmempair", ext_xtheadmempair, false),
    MULTI_EXT_CFG_BOOL("xtheadsync", ext_xtheadsync, false),
    MULTI_EXT_CFG_BOOL("xventanacondops", ext_XVentanaCondOps, false),

    DEFINE_PROP_END_OF_LIST(),
};

/* These are experimental so mark with 'x-' */
const RISCVCPUMultiExtConfig riscv_cpu_experimental_exts[] = {
    MULTI_EXT_CFG_BOOL("x-smaia", ext_smaia, false),
    MULTI_EXT_CFG_BOOL("x-ssaia", ext_ssaia, false),

    MULTI_EXT_CFG_BOOL("x-zaamo", ext_zaamo, false),
    MULTI_EXT_CFG_BOOL("x-zalrsc", ext_zalrsc, false),

    MULTI_EXT_CFG_BOOL("x-zvfh", ext_zvfh, false),
    MULTI_EXT_CFG_BOOL("x-zvfhmin", ext_zvfhmin, false),

    MULTI_EXT_CFG_BOOL("x-zfbfmin", ext_zfbfmin, false),
    MULTI_EXT_CFG_BOOL("x-zvfbfmin", ext_zvfbfmin, false),
    MULTI_EXT_CFG_BOOL("x-zvfbfwma", ext_zvfbfwma, false),

    DEFINE_PROP_END_OF_LIST(),
};

const RISCVCPUMultiExtConfig riscv_cpu_named_features[] = {
    MULTI_EXT_CFG_BOOL("svade", svade, true),
    MULTI_EXT_CFG_BOOL("zic64b", zic64b, true),

    DEFINE_PROP_END_OF_LIST(),
};

/* Deprecated entries marked for future removal */
const RISCVCPUMultiExtConfig riscv_cpu_deprecated_exts[] = {
    MULTI_EXT_CFG_BOOL("Zifencei", ext_zifencei, true),
    MULTI_EXT_CFG_BOOL("Zicsr", ext_zicsr, true),
    MULTI_EXT_CFG_BOOL("Zihintntl", ext_zihintntl, true),
    MULTI_EXT_CFG_BOOL("Zihintpause", ext_zihintpause, true),
    MULTI_EXT_CFG_BOOL("Zawrs", ext_zawrs, true),
    MULTI_EXT_CFG_BOOL("Zfa", ext_zfa, true),
    MULTI_EXT_CFG_BOOL("Zfh", ext_zfh, false),
    MULTI_EXT_CFG_BOOL("Zfhmin", ext_zfhmin, false),
    MULTI_EXT_CFG_BOOL("Zve32f", ext_zve32f, false),
    MULTI_EXT_CFG_BOOL("Zve64f", ext_zve64f, false),
    MULTI_EXT_CFG_BOOL("Zve64d", ext_zve64d, false),

    DEFINE_PROP_END_OF_LIST(),
};

static void cpu_set_prop_err(RISCVCPU *cpu, const char *propname,
                             Error **errp)
{
    g_autofree char *cpuname = riscv_cpu_get_name(cpu);
    error_setg(errp, "CPU '%s' does not allow changing the value of '%s'",
               cpuname, propname);
}

static void prop_pmu_num_set(Object *obj, Visitor *v, const char *name,
                             void *opaque, Error **errp)
{
    RISCVCPU *cpu = RISCV_CPU(obj);
    uint8_t pmu_num, curr_pmu_num;
    uint32_t pmu_mask;

    visit_type_uint8(v, name, &pmu_num, errp);

    curr_pmu_num = ctpop32(cpu->cfg.pmu_mask);

    if (pmu_num != curr_pmu_num && riscv_cpu_is_vendor(obj)) {
        cpu_set_prop_err(cpu, name, errp);
        error_append_hint(errp, "Current '%s' val: %u\n",
                          name, curr_pmu_num);
        return;
    }

    if (pmu_num > (RV_MAX_MHPMCOUNTERS - 3)) {
        error_setg(errp, "Number of counters exceeds maximum available");
        return;
    }

    if (pmu_num == 0) {
        pmu_mask = 0;
    } else {
        pmu_mask = MAKE_64BIT_MASK(3, pmu_num);
    }

    warn_report("\"pmu-num\" property is deprecated; use \"pmu-mask\"");
    cpu->cfg.pmu_mask = pmu_mask;
    cpu_option_add_user_setting("pmu-mask", pmu_mask);
}

static void prop_pmu_num_get(Object *obj, Visitor *v, const char *name,
                             void *opaque, Error **errp)
{
    RISCVCPU *cpu = RISCV_CPU(obj);
    uint8_t pmu_num = ctpop32(cpu->cfg.pmu_mask);

    visit_type_uint8(v, name, &pmu_num, errp);
}

static const PropertyInfo prop_pmu_num = {
    .name = "pmu-num",
    .get = prop_pmu_num_get,
    .set = prop_pmu_num_set,
};

static void prop_pmu_mask_set(Object *obj, Visitor *v, const char *name,
                              void *opaque, Error **errp)
{
    RISCVCPU *cpu = RISCV_CPU(obj);
    uint32_t value;
    uint8_t pmu_num;

    visit_type_uint32(v, name, &value, errp);

    if (value != cpu->cfg.pmu_mask && riscv_cpu_is_vendor(obj)) {
        cpu_set_prop_err(cpu, name, errp);
        error_append_hint(errp, "Current '%s' val: %x\n",
                          name, cpu->cfg.pmu_mask);
        return;
    }

    pmu_num = ctpop32(value);

    if (pmu_num > (RV_MAX_MHPMCOUNTERS - 3)) {
        error_setg(errp, "Number of counters exceeds maximum available");
        return;
    }

    cpu_option_add_user_setting(name, value);
    cpu->cfg.pmu_mask = value;
}

static void prop_pmu_mask_get(Object *obj, Visitor *v, const char *name,
                              void *opaque, Error **errp)
{
    uint8_t pmu_mask = RISCV_CPU(obj)->cfg.pmu_mask;

    visit_type_uint8(v, name, &pmu_mask, errp);
}

static const PropertyInfo prop_pmu_mask = {
    .name = "pmu-mask",
    .get = prop_pmu_mask_get,
    .set = prop_pmu_mask_set,
};
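/*
 * Both properties above describe the same underlying mask of programmable
 * counters: the deprecated "pmu-num" value N is translated into
 * MAKE_64BIT_MASK(3, N), e.g. pmu-num=6 is equivalent to pmu-mask=0x1f8
 * (mhpmcounter3..mhpmcounter8).
 */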

static void prop_mmu_set(Object *obj, Visitor *v, const char *name,
                         void *opaque, Error **errp)
{
    RISCVCPU *cpu = RISCV_CPU(obj);
    bool value;

    visit_type_bool(v, name, &value, errp);

    if (cpu->cfg.mmu != value && riscv_cpu_is_vendor(obj)) {
        cpu_set_prop_err(cpu, "mmu", errp);
        return;
    }

    cpu_option_add_user_setting(name, value);
    cpu->cfg.mmu = value;
}

static void prop_mmu_get(Object *obj, Visitor *v, const char *name,
                         void *opaque, Error **errp)
{
    bool value = RISCV_CPU(obj)->cfg.mmu;

    visit_type_bool(v, name, &value, errp);
}

static const PropertyInfo prop_mmu = {
    .name = "mmu",
    .get = prop_mmu_get,
    .set = prop_mmu_set,
};

static void prop_pmp_set(Object *obj, Visitor *v, const char *name,
                         void *opaque, Error **errp)
{
    RISCVCPU *cpu = RISCV_CPU(obj);
    bool value;

    visit_type_bool(v, name, &value, errp);

    if (cpu->cfg.pmp != value && riscv_cpu_is_vendor(obj)) {
        cpu_set_prop_err(cpu, name, errp);
        return;
    }

    cpu_option_add_user_setting(name, value);
    cpu->cfg.pmp = value;
}

static void prop_pmp_get(Object *obj, Visitor *v, const char *name,
                         void *opaque, Error **errp)
{
    bool value = RISCV_CPU(obj)->cfg.pmp;

    visit_type_bool(v, name, &value, errp);
}

static const PropertyInfo prop_pmp = {
    .name = "pmp",
    .get = prop_pmp_get,
    .set = prop_pmp_set,
};

static int priv_spec_from_str(const char *priv_spec_str)
{
    int priv_version = -1;

    if (!g_strcmp0(priv_spec_str, PRIV_VER_1_12_0_STR)) {
        priv_version = PRIV_VERSION_1_12_0;
    } else if (!g_strcmp0(priv_spec_str, PRIV_VER_1_11_0_STR)) {
        priv_version = PRIV_VERSION_1_11_0;
    } else if (!g_strcmp0(priv_spec_str, PRIV_VER_1_10_0_STR)) {
        priv_version = PRIV_VERSION_1_10_0;
    }

    return priv_version;
}

static const char *priv_spec_to_str(int priv_version)
{
    switch (priv_version) {
    case PRIV_VERSION_1_10_0:
        return PRIV_VER_1_10_0_STR;
    case PRIV_VERSION_1_11_0:
        return PRIV_VER_1_11_0_STR;
    case PRIV_VERSION_1_12_0:
        return PRIV_VER_1_12_0_STR;
    default:
        return NULL;
    }
}

static void prop_priv_spec_set(Object *obj, Visitor *v, const char *name,
                               void *opaque, Error **errp)
{
    RISCVCPU *cpu = RISCV_CPU(obj);
    g_autofree char *value = NULL;
    int priv_version = -1;

    visit_type_str(v, name, &value, errp);

    priv_version = priv_spec_from_str(value);
    if (priv_version < 0) {
        error_setg(errp, "Unsupported privilege spec version '%s'", value);
        return;
    }

    if (priv_version != cpu->env.priv_ver && riscv_cpu_is_vendor(obj)) {
        cpu_set_prop_err(cpu, name, errp);
        error_append_hint(errp, "Current '%s' val: %s\n", name,
                          object_property_get_str(obj, name, NULL));
        return;
    }

    cpu_option_add_user_setting(name, priv_version);
    cpu->env.priv_ver = priv_version;
}

static void prop_priv_spec_get(Object *obj, Visitor *v, const char *name,
                               void *opaque, Error **errp)
{
    RISCVCPU *cpu = RISCV_CPU(obj);
    const char *value = priv_spec_to_str(cpu->env.priv_ver);

    visit_type_str(v, name, (char **)&value, errp);
}

static const PropertyInfo prop_priv_spec = {
    .name = "priv_spec",
    .get = prop_priv_spec_get,
    .set = prop_priv_spec_set,
};
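/*
 * "priv_spec" is a string property taking one of the PRIV_VER_*_STR values
 * accepted by priv_spec_from_str() above, e.g. "priv_spec=v1.12.0" on the
 * -cpu command line. Vendor CPUs pin their priv_spec: prop_priv_spec_set()
 * rejects any value that differs from the one set at init time.
 */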

static void prop_vext_spec_set(Object *obj, Visitor *v, const char *name,
                               void *opaque, Error **errp)
{
    RISCVCPU *cpu = RISCV_CPU(obj);
    g_autofree char *value = NULL;

    if (!visit_type_str(v, name, &value, errp)) {
        return;
    }

    if (g_strcmp0(value, VEXT_VER_1_00_0_STR) != 0) {
        error_setg(errp, "Unsupported vector spec version '%s'", value);
        return;
    }

    cpu_option_add_user_setting(name, VEXT_VERSION_1_00_0);
    cpu->env.vext_ver = VEXT_VERSION_1_00_0;
}

static void prop_vext_spec_get(Object *obj, Visitor *v, const char *name,
                               void *opaque, Error **errp)
{
    const char *value = VEXT_VER_1_00_0_STR;

    visit_type_str(v, name, (char **)&value, errp);
}

static const PropertyInfo prop_vext_spec = {
    .name = "vext_spec",
    .get = prop_vext_spec_get,
    .set = prop_vext_spec_set,
};

static void prop_vlen_set(Object *obj, Visitor *v, const char *name,
                          void *opaque, Error **errp)
{
    RISCVCPU *cpu = RISCV_CPU(obj);
    uint16_t value;

    if (!visit_type_uint16(v, name, &value, errp)) {
        return;
    }

    if (!is_power_of_2(value)) {
        error_setg(errp, "Vector extension VLEN must be power of 2");
        return;
    }

    /* "vlen" is expressed in bits, cfg.vlenb in bytes */
    if (value != (cpu->cfg.vlenb << 3) && riscv_cpu_is_vendor(obj)) {
        cpu_set_prop_err(cpu, name, errp);
        error_append_hint(errp, "Current '%s' val: %u\n",
                          name, cpu->cfg.vlenb << 3);
        return;
    }

    cpu_option_add_user_setting(name, value);
    cpu->cfg.vlenb = value >> 3;
}

static void prop_vlen_get(Object *obj, Visitor *v, const char *name,
                          void *opaque, Error **errp)
{
    uint16_t value = RISCV_CPU(obj)->cfg.vlenb << 3;

    visit_type_uint16(v, name, &value, errp);
}

static const PropertyInfo prop_vlen = {
    .name = "vlen",
    .get = prop_vlen_get,
    .set = prop_vlen_set,
};

static void prop_elen_set(Object *obj, Visitor *v, const char *name,
                          void *opaque, Error **errp)
{
    RISCVCPU *cpu = RISCV_CPU(obj);
    uint16_t value;

    if (!visit_type_uint16(v, name, &value, errp)) {
        return;
    }

    if (!is_power_of_2(value)) {
        error_setg(errp, "Vector extension ELEN must be power of 2");
        return;
    }

    if (value != cpu->cfg.elen && riscv_cpu_is_vendor(obj)) {
        cpu_set_prop_err(cpu, name, errp);
        error_append_hint(errp, "Current '%s' val: %u\n",
                          name, cpu->cfg.elen);
        return;
    }

    cpu_option_add_user_setting(name, value);
    cpu->cfg.elen = value;
}

static void prop_elen_get(Object *obj, Visitor *v, const char *name,
                          void *opaque, Error **errp)
{
    uint16_t value = RISCV_CPU(obj)->cfg.elen;

    visit_type_uint16(v, name, &value, errp);
}

static const PropertyInfo prop_elen = {
    .name = "elen",
    .get = prop_elen_get,
    .set = prop_elen_set,
};
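
/*
 * Illustrative example (not part of the original file): "vlen" is exposed
 * to the user in bits but stored internally as vlenb (bytes), hence the
 * shifts by 3 in the setter and getter above. For instance:
 *
 *   -cpu rv64,v=true,vlen=256,elen=64   ->  cpu->cfg.vlenb == 32
 */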

static void prop_cbom_blksize_set(Object *obj, Visitor *v, const char *name,
                                  void *opaque, Error **errp)
{
    RISCVCPU *cpu = RISCV_CPU(obj);
    uint16_t value;

    if (!visit_type_uint16(v, name, &value, errp)) {
        return;
    }

    if (value != cpu->cfg.cbom_blocksize && riscv_cpu_is_vendor(obj)) {
        cpu_set_prop_err(cpu, name, errp);
        error_append_hint(errp, "Current '%s' val: %u\n",
                          name, cpu->cfg.cbom_blocksize);
        return;
    }

    cpu_option_add_user_setting(name, value);
    cpu->cfg.cbom_blocksize = value;
}

static void prop_cbom_blksize_get(Object *obj, Visitor *v, const char *name,
                                  void *opaque, Error **errp)
{
    uint16_t value = RISCV_CPU(obj)->cfg.cbom_blocksize;

    visit_type_uint16(v, name, &value, errp);
}

static const PropertyInfo prop_cbom_blksize = {
    .name = "cbom_blocksize",
    .get = prop_cbom_blksize_get,
    .set = prop_cbom_blksize_set,
};

static void prop_cbop_blksize_set(Object *obj, Visitor *v, const char *name,
                                  void *opaque, Error **errp)
{
    RISCVCPU *cpu = RISCV_CPU(obj);
    uint16_t value;

    if (!visit_type_uint16(v, name, &value, errp)) {
        return;
    }

    if (value != cpu->cfg.cbop_blocksize && riscv_cpu_is_vendor(obj)) {
        cpu_set_prop_err(cpu, name, errp);
        error_append_hint(errp, "Current '%s' val: %u\n",
                          name, cpu->cfg.cbop_blocksize);
        return;
    }

    cpu_option_add_user_setting(name, value);
    cpu->cfg.cbop_blocksize = value;
}

static void prop_cbop_blksize_get(Object *obj, Visitor *v, const char *name,
                                  void *opaque, Error **errp)
{
    uint16_t value = RISCV_CPU(obj)->cfg.cbop_blocksize;

    visit_type_uint16(v, name, &value, errp);
}

static const PropertyInfo prop_cbop_blksize = {
    .name = "cbop_blocksize",
    .get = prop_cbop_blksize_get,
    .set = prop_cbop_blksize_set,
};

static void prop_cboz_blksize_set(Object *obj, Visitor *v, const char *name,
                                  void *opaque, Error **errp)
{
    RISCVCPU *cpu = RISCV_CPU(obj);
    uint16_t value;

    if (!visit_type_uint16(v, name, &value, errp)) {
        return;
    }

    if (value != cpu->cfg.cboz_blocksize && riscv_cpu_is_vendor(obj)) {
        cpu_set_prop_err(cpu, name, errp);
        error_append_hint(errp, "Current '%s' val: %u\n",
                          name, cpu->cfg.cboz_blocksize);
        return;
    }

    cpu_option_add_user_setting(name, value);
    cpu->cfg.cboz_blocksize = value;
}

static void prop_cboz_blksize_get(Object *obj, Visitor *v, const char *name,
                                  void *opaque, Error **errp)
{
    uint16_t value = RISCV_CPU(obj)->cfg.cboz_blocksize;

    visit_type_uint16(v, name, &value, errp);
}

static const PropertyInfo prop_cboz_blksize = {
    .name = "cboz_blocksize",
    .get = prop_cboz_blksize_get,
    .set = prop_cboz_blksize_set,
};
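
/*
 * Usage note (illustrative, not part of the original file): the three
 * block-size properties above pair with the Zicbom/Zicbop/Zicboz extension
 * flags, e.g.:
 *
 *   -cpu rv64,zicboz=true,cboz_blocksize=128
 *
 * These setters only record the value; any further validation of the
 * chosen size is assumed to happen later, outside of this file.
 */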

static void prop_mvendorid_set(Object *obj, Visitor *v, const char *name,
                               void *opaque, Error **errp)
{
    bool dynamic_cpu = riscv_cpu_is_dynamic(obj);
    RISCVCPU *cpu = RISCV_CPU(obj);
    uint32_t prev_val = cpu->cfg.mvendorid;
    uint32_t value;

    if (!visit_type_uint32(v, name, &value, errp)) {
        return;
    }

    if (!dynamic_cpu && prev_val != value) {
        error_setg(errp, "Unable to change %s mvendorid (0x%x)",
                   object_get_typename(obj), prev_val);
        return;
    }

    cpu->cfg.mvendorid = value;
}

static void prop_mvendorid_get(Object *obj, Visitor *v, const char *name,
                               void *opaque, Error **errp)
{
    uint32_t value = RISCV_CPU(obj)->cfg.mvendorid;

    visit_type_uint32(v, name, &value, errp);
}

static const PropertyInfo prop_mvendorid = {
    .name = "mvendorid",
    .get = prop_mvendorid_get,
    .set = prop_mvendorid_set,
};

static void prop_mimpid_set(Object *obj, Visitor *v, const char *name,
                            void *opaque, Error **errp)
{
    bool dynamic_cpu = riscv_cpu_is_dynamic(obj);
    RISCVCPU *cpu = RISCV_CPU(obj);
    uint64_t prev_val = cpu->cfg.mimpid;
    uint64_t value;

    if (!visit_type_uint64(v, name, &value, errp)) {
        return;
    }

    if (!dynamic_cpu && prev_val != value) {
        /* use PRIx64: the message prints the value with a 0x prefix */
        error_setg(errp, "Unable to change %s mimpid (0x%" PRIx64 ")",
                   object_get_typename(obj), prev_val);
        return;
    }

    cpu->cfg.mimpid = value;
}

static void prop_mimpid_get(Object *obj, Visitor *v, const char *name,
                            void *opaque, Error **errp)
{
    uint64_t value = RISCV_CPU(obj)->cfg.mimpid;

    visit_type_uint64(v, name, &value, errp);
}

static const PropertyInfo prop_mimpid = {
    .name = "mimpid",
    .get = prop_mimpid_get,
    .set = prop_mimpid_set,
};

static void prop_marchid_set(Object *obj, Visitor *v, const char *name,
                             void *opaque, Error **errp)
{
    bool dynamic_cpu = riscv_cpu_is_dynamic(obj);
    RISCVCPU *cpu = RISCV_CPU(obj);
    uint64_t prev_val = cpu->cfg.marchid;
    uint64_t value, invalid_val;
    uint32_t mxlen = 0;

    if (!visit_type_uint64(v, name, &value, errp)) {
        return;
    }

    if (!dynamic_cpu && prev_val != value) {
        error_setg(errp, "Unable to change %s marchid (0x%" PRIx64 ")",
                   object_get_typename(obj), prev_val);
        return;
    }

    switch (riscv_cpu_mxl(&cpu->env)) {
    case MXL_RV32:
        mxlen = 32;
        break;
    case MXL_RV64:
    case MXL_RV128:
        mxlen = 64;
        break;
    default:
        g_assert_not_reached();
    }

    /* use an unsigned shift: 1LL << 63 would overflow a signed 64-bit int */
    invalid_val = 1ULL << (mxlen - 1);

    if (value == invalid_val) {
        error_setg(errp, "Unable to set marchid with MSB (%u) bit set "
                         "and the remaining bits zero", mxlen);
        return;
    }

    cpu->cfg.marchid = value;
}

static void prop_marchid_get(Object *obj, Visitor *v, const char *name,
                             void *opaque, Error **errp)
{
    uint64_t value = RISCV_CPU(obj)->cfg.marchid;

    visit_type_uint64(v, name, &value, errp);
}

static const PropertyInfo prop_marchid = {
    .name = "marchid",
    .get = prop_marchid_get,
    .set = prop_marchid_set,
};
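
/*
 * Illustrative example (not part of the original file): with MXLEN = 64 the
 * only value rejected by the MSB check above is 1ULL << 63, i.e.:
 *
 *   -cpu rv64,marchid=0x8000000000000000   rejected (MSB set, rest zero)
 *   -cpu rv64,marchid=0x8000000000000001   accepted
 *
 * (marchid can only be changed on dynamic CPUs such as 'rv64'.)
 */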

/*
 * RVA22U64 defines some 'named features' or 'synthetic extensions'
 * that are cache related: Za64rs, Zic64b, Ziccif, Ziccrse, Ziccamoa
 * and Zicclsm. We do not implement caching in QEMU so we'll consider
 * all these named features as always enabled.
 *
 * There's no riscv,isa update for them (nor for zic64b, despite it
 * having a cfg offset) at this moment.
 */
static RISCVCPUProfile RVA22U64 = {
    .parent = NULL,
    .name = "rva22u64",
    .misa_ext = RVI | RVM | RVA | RVF | RVD | RVC | RVU,
    .priv_spec = RISCV_PROFILE_ATTR_UNUSED,
    .satp_mode = RISCV_PROFILE_ATTR_UNUSED,
    .ext_offsets = {
        CPU_CFG_OFFSET(ext_zicsr), CPU_CFG_OFFSET(ext_zihintpause),
        CPU_CFG_OFFSET(ext_zba), CPU_CFG_OFFSET(ext_zbb),
        CPU_CFG_OFFSET(ext_zbs), CPU_CFG_OFFSET(ext_zfhmin),
        CPU_CFG_OFFSET(ext_zkt), CPU_CFG_OFFSET(ext_zicntr),
        CPU_CFG_OFFSET(ext_zihpm), CPU_CFG_OFFSET(ext_zicbom),
        CPU_CFG_OFFSET(ext_zicbop), CPU_CFG_OFFSET(ext_zicboz),

        /* mandatory named features for this profile */
        CPU_CFG_OFFSET(zic64b),

        RISCV_PROFILE_EXT_LIST_END
    }
};

/*
 * As with RVA22U64, RVA22S64 also defines 'named features'.
 *
 * Cache-related features that we consider enabled since we don't
 * implement cache: Ssccptr.
 *
 * Other named features that we already implement: Sstvecd, Sstvala,
 * Sscounterenw.
 *
 * Named features that we need to enable: svade.
 *
 * The remaining features/extensions come from RVA22U64.
 */
static RISCVCPUProfile RVA22S64 = {
    .parent = &RVA22U64,
    .name = "rva22s64",
    .misa_ext = RVS,
    .priv_spec = PRIV_VERSION_1_12_0,
    .satp_mode = VM_1_10_SV39,
    .ext_offsets = {
        /* rva22s64 exts */
        CPU_CFG_OFFSET(ext_zifencei), CPU_CFG_OFFSET(ext_svpbmt),
        CPU_CFG_OFFSET(ext_svinval),

        /* rva22s64 named features */
        CPU_CFG_OFFSET(svade),

        RISCV_PROFILE_EXT_LIST_END
    }
};

RISCVCPUProfile *riscv_profiles[] = {
    &RVA22U64,
    &RVA22S64,
    NULL,
};
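
/*
 * Usage note (illustrative, not part of the original file): the profiles
 * above can be enabled either through the profile CPU models registered at
 * the end of this file or, on bare CPUs, as boolean flags, e.g.:
 *
 *   -cpu rva22u64
 *   -cpu rv64i,rva22u64=true
 */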

static Property riscv_cpu_properties[] = {
    DEFINE_PROP_BOOL("debug", RISCVCPU, cfg.debug, true),

    {.name = "pmu-mask", .info = &prop_pmu_mask},
    {.name = "pmu-num", .info = &prop_pmu_num}, /* Deprecated */

    {.name = "mmu", .info = &prop_mmu},
    {.name = "pmp", .info = &prop_pmp},

    {.name = "priv_spec", .info = &prop_priv_spec},
    {.name = "vext_spec", .info = &prop_vext_spec},

    {.name = "vlen", .info = &prop_vlen},
    {.name = "elen", .info = &prop_elen},

    {.name = "cbom_blocksize", .info = &prop_cbom_blksize},
    {.name = "cbop_blocksize", .info = &prop_cbop_blksize},
    {.name = "cboz_blocksize", .info = &prop_cboz_blksize},

    {.name = "mvendorid", .info = &prop_mvendorid},
    {.name = "mimpid", .info = &prop_mimpid},
    {.name = "marchid", .info = &prop_marchid},

#ifndef CONFIG_USER_ONLY
    DEFINE_PROP_UINT64("resetvec", RISCVCPU, env.resetvec, DEFAULT_RSTVEC),
#endif

    DEFINE_PROP_BOOL("short-isa-string", RISCVCPU, cfg.short_isa_string, false),

    DEFINE_PROP_BOOL("rvv_ta_all_1s", RISCVCPU, cfg.rvv_ta_all_1s, false),
    DEFINE_PROP_BOOL("rvv_ma_all_1s", RISCVCPU, cfg.rvv_ma_all_1s, false),

    /*
     * write_misa() is marked as experimental for now so mark
     * it with -x and default to 'false'.
     */
    DEFINE_PROP_BOOL("x-misa-w", RISCVCPU, cfg.misa_w, false),
    DEFINE_PROP_END_OF_LIST(),
};

#if defined(TARGET_RISCV64)
static void rva22u64_profile_cpu_init(Object *obj)
{
    rv64i_bare_cpu_init(obj);

    RVA22U64.enabled = true;
}

static void rva22s64_profile_cpu_init(Object *obj)
{
    rv64i_bare_cpu_init(obj);

    RVA22S64.enabled = true;
}
#endif

static const gchar *riscv_gdb_arch_name(CPUState *cs)
{
    RISCVCPU *cpu = RISCV_CPU(cs);
    CPURISCVState *env = &cpu->env;

    switch (riscv_cpu_mxl(env)) {
    case MXL_RV32:
        return "riscv:rv32";
    case MXL_RV64:
    case MXL_RV128:
        return "riscv:rv64";
    default:
        g_assert_not_reached();
    }
}

static const char *riscv_gdb_get_dynamic_xml(CPUState *cs, const char *xmlname)
{
    RISCVCPU *cpu = RISCV_CPU(cs);

    if (strcmp(xmlname, "riscv-csr.xml") == 0) {
        return cpu->dyn_csr_xml;
    } else if (strcmp(xmlname, "riscv-vector.xml") == 0) {
        return cpu->dyn_vreg_xml;
    }

    return NULL;
}

#ifndef CONFIG_USER_ONLY
static int64_t riscv_get_arch_id(CPUState *cs)
{
    RISCVCPU *cpu = RISCV_CPU(cs);

    return cpu->env.mhartid;
}

#include "hw/core/sysemu-cpu-ops.h"

static const struct SysemuCPUOps riscv_sysemu_ops = {
    .get_phys_page_debug = riscv_cpu_get_phys_page_debug,
    .write_elf64_note = riscv_cpu_write_elf64_note,
    .write_elf32_note = riscv_cpu_write_elf32_note,
    .legacy_vmsd = &vmstate_riscv_cpu,
};
#endif

static void riscv_cpu_common_class_init(ObjectClass *c, void *data)
{
    RISCVCPUClass *mcc = RISCV_CPU_CLASS(c);
    CPUClass *cc = CPU_CLASS(c);
    DeviceClass *dc = DEVICE_CLASS(c);
    ResettableClass *rc = RESETTABLE_CLASS(c);

    device_class_set_parent_realize(dc, riscv_cpu_realize,
                                    &mcc->parent_realize);

    resettable_class_set_parent_phases(rc, NULL, riscv_cpu_reset_hold, NULL,
                                       &mcc->parent_phases);

    cc->class_by_name = riscv_cpu_class_by_name;
    cc->has_work = riscv_cpu_has_work;
    cc->mmu_index = riscv_cpu_mmu_index;
    cc->dump_state = riscv_cpu_dump_state;
    cc->set_pc = riscv_cpu_set_pc;
    cc->get_pc = riscv_cpu_get_pc;
    cc->gdb_read_register = riscv_cpu_gdb_read_register;
    cc->gdb_write_register = riscv_cpu_gdb_write_register;
    cc->gdb_num_core_regs = 33;
    cc->gdb_stop_before_watchpoint = true;
    cc->disas_set_info = riscv_cpu_disas_set_info;
#ifndef CONFIG_USER_ONLY
    cc->sysemu_ops = &riscv_sysemu_ops;
    cc->get_arch_id = riscv_get_arch_id;
#endif
    cc->gdb_arch_name = riscv_gdb_arch_name;
    cc->gdb_get_dynamic_xml = riscv_gdb_get_dynamic_xml;

    device_class_set_props(dc, riscv_cpu_properties);
}

static void riscv_cpu_class_init(ObjectClass *c, void *data)
{
    RISCVCPUClass *mcc = RISCV_CPU_CLASS(c);

    mcc->misa_mxl_max = (uint32_t)(uintptr_t)data;
    riscv_cpu_validate_misa_mxl(mcc);
}
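
/*
 * Note (descriptive, not part of the original file): 'data' here is the
 * misa_mxl_max value passed through .class_data by the DEFINE_*_CPU()
 * macros at the end of this file, e.g. (void *)MXL_RV64 for the rv64
 * models, so every concrete CPU class records and validates its maximum
 * MXLEN at class-init time.
 */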

static void riscv_isa_string_ext(RISCVCPU *cpu, char **isa_str,
                                 int max_str_len)
{
    const RISCVIsaExtData *edata;
    char *old = *isa_str;
    char *new = *isa_str;

    for (edata = isa_edata_arr; edata && edata->name; edata++) {
        if (isa_ext_is_enabled(cpu, edata->ext_enable_offset)) {
            new = g_strconcat(old, "_", edata->name, NULL);
            g_free(old);
            old = new;
        }
    }

    *isa_str = new;
}

char *riscv_isa_string(RISCVCPU *cpu)
{
    RISCVCPUClass *mcc = RISCV_CPU_GET_CLASS(cpu);
    int i;
    const size_t maxlen = sizeof("rv128") + sizeof(riscv_single_letter_exts);
    char *isa_str = g_new(char, maxlen);
    int xlen = riscv_cpu_max_xlen(mcc);
    char *p = isa_str + snprintf(isa_str, maxlen, "rv%d", xlen);

    for (i = 0; i < sizeof(riscv_single_letter_exts) - 1; i++) {
        if (cpu->env.misa_ext & RV(riscv_single_letter_exts[i])) {
            *p++ = qemu_tolower(riscv_single_letter_exts[i]);
        }
    }
    *p = '\0';
    if (!cpu->cfg.short_isa_string) {
        riscv_isa_string_ext(cpu, &isa_str, maxlen);
    }
    return isa_str;
}
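
/*
 * Illustrative example (not part of the original file): for a CPU with
 * misa_ext = I|M|A|F|D|C and only Zicsr and Zifencei enabled in cfg,
 * riscv_isa_string() above returns
 *
 *   "rv64imafdc_zicsr_zifencei"
 *
 * and just "rv64imafdc" when the "short-isa-string" property is set.
 */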

#ifndef CONFIG_USER_ONLY
static char **riscv_isa_extensions_list(RISCVCPU *cpu, int *count)
{
    int maxlen = ARRAY_SIZE(riscv_single_letter_exts) + ARRAY_SIZE(isa_edata_arr);
    char **extensions = g_new(char *, maxlen);

    for (int i = 0; i < sizeof(riscv_single_letter_exts) - 1; i++) {
        if (cpu->env.misa_ext & RV(riscv_single_letter_exts[i])) {
            extensions[*count] = g_new(char, 2);
            snprintf(extensions[*count], 2, "%c",
                     qemu_tolower(riscv_single_letter_exts[i]));
            (*count)++;
        }
    }

    for (const RISCVIsaExtData *edata = isa_edata_arr; edata->name; edata++) {
        if (isa_ext_is_enabled(cpu, edata->ext_enable_offset)) {
            extensions[*count] = g_strdup(edata->name);
            (*count)++;
        }
    }

    return extensions;
}

void riscv_isa_write_fdt(RISCVCPU *cpu, void *fdt, char *nodename)
{
    RISCVCPUClass *mcc = RISCV_CPU_GET_CLASS(cpu);
    const size_t maxlen = sizeof("rv128i");
    g_autofree char *isa_base = g_new(char, maxlen);
    g_autofree char *riscv_isa;
    char **isa_extensions;
    int count = 0;
    int xlen = riscv_cpu_max_xlen(mcc);

    riscv_isa = riscv_isa_string(cpu);
    qemu_fdt_setprop_string(fdt, nodename, "riscv,isa", riscv_isa);

    snprintf(isa_base, maxlen, "rv%di", xlen);
    qemu_fdt_setprop_string(fdt, nodename, "riscv,isa-base", isa_base);

    isa_extensions = riscv_isa_extensions_list(cpu, &count);
    qemu_fdt_setprop_string_array(fdt, nodename, "riscv,isa-extensions",
                                  isa_extensions, count);

    for (int i = 0; i < count; i++) {
        g_free(isa_extensions[i]);
    }

    g_free(isa_extensions);
}
#endif

#define DEFINE_CPU(type_name, misa_mxl_max, initfn)         \
    {                                                       \
        .name = (type_name),                                \
        .parent = TYPE_RISCV_CPU,                           \
        .instance_init = (initfn),                          \
        .class_init = riscv_cpu_class_init,                 \
        .class_data = (void *)(misa_mxl_max)                \
    }

#define DEFINE_DYNAMIC_CPU(type_name, misa_mxl_max, initfn) \
    {                                                       \
        .name = (type_name),                                \
        .parent = TYPE_RISCV_DYNAMIC_CPU,                   \
        .instance_init = (initfn),                          \
        .class_init = riscv_cpu_class_init,                 \
        .class_data = (void *)(misa_mxl_max)                \
    }

#define DEFINE_VENDOR_CPU(type_name, misa_mxl_max, initfn)  \
    {                                                       \
        .name = (type_name),                                \
        .parent = TYPE_RISCV_VENDOR_CPU,                    \
        .instance_init = (initfn),                          \
        .class_init = riscv_cpu_class_init,                 \
        .class_data = (void *)(misa_mxl_max)                \
    }

#define DEFINE_BARE_CPU(type_name, misa_mxl_max, initfn)    \
    {                                                       \
        .name = (type_name),                                \
        .parent = TYPE_RISCV_BARE_CPU,                      \
        .instance_init = (initfn),                          \
        .class_init = riscv_cpu_class_init,                 \
        .class_data = (void *)(misa_mxl_max)                \
    }

#define DEFINE_PROFILE_CPU(type_name, misa_mxl_max, initfn) \
    {                                                       \
        .name = (type_name),                                \
        .parent = TYPE_RISCV_BARE_CPU,                      \
        .instance_init = (initfn),                          \
        .class_init = riscv_cpu_class_init,                 \
        .class_data = (void *)(misa_mxl_max)                \
    }

static const TypeInfo riscv_cpu_type_infos[] = {
    {
        .name = TYPE_RISCV_CPU,
        .parent = TYPE_CPU,
        .instance_size = sizeof(RISCVCPU),
        .instance_align = __alignof(RISCVCPU),
        .instance_init = riscv_cpu_init,
        .instance_post_init = riscv_cpu_post_init,
        .abstract = true,
        .class_size = sizeof(RISCVCPUClass),
        .class_init = riscv_cpu_common_class_init,
    },
    {
        .name = TYPE_RISCV_DYNAMIC_CPU,
        .parent = TYPE_RISCV_CPU,
        .abstract = true,
    },
    {
        .name = TYPE_RISCV_VENDOR_CPU,
        .parent = TYPE_RISCV_CPU,
        .abstract = true,
    },
    {
        .name = TYPE_RISCV_BARE_CPU,
        .parent = TYPE_RISCV_CPU,
        .instance_init = riscv_bare_cpu_init,
        .abstract = true,
    },
#if defined(TARGET_RISCV32)
    DEFINE_DYNAMIC_CPU(TYPE_RISCV_CPU_ANY, MXL_RV32, riscv_any_cpu_init),
    DEFINE_DYNAMIC_CPU(TYPE_RISCV_CPU_MAX, MXL_RV32, riscv_max_cpu_init),
    DEFINE_DYNAMIC_CPU(TYPE_RISCV_CPU_BASE32, MXL_RV32, rv32_base_cpu_init),
    DEFINE_VENDOR_CPU(TYPE_RISCV_CPU_IBEX, MXL_RV32, rv32_ibex_cpu_init),
    DEFINE_VENDOR_CPU(TYPE_RISCV_CPU_SIFIVE_E31, MXL_RV32, rv32_sifive_e_cpu_init),
    DEFINE_VENDOR_CPU(TYPE_RISCV_CPU_SIFIVE_E34, MXL_RV32, rv32_imafcu_nommu_cpu_init),
    DEFINE_VENDOR_CPU(TYPE_RISCV_CPU_SIFIVE_U34, MXL_RV32, rv32_sifive_u_cpu_init),
#elif defined(TARGET_RISCV64)
    DEFINE_DYNAMIC_CPU(TYPE_RISCV_CPU_ANY, MXL_RV64, riscv_any_cpu_init),
    DEFINE_DYNAMIC_CPU(TYPE_RISCV_CPU_MAX, MXL_RV64, riscv_max_cpu_init),
    DEFINE_DYNAMIC_CPU(TYPE_RISCV_CPU_BASE64, MXL_RV64, rv64_base_cpu_init),
    DEFINE_VENDOR_CPU(TYPE_RISCV_CPU_SIFIVE_E51, MXL_RV64, rv64_sifive_e_cpu_init),
    DEFINE_VENDOR_CPU(TYPE_RISCV_CPU_SIFIVE_U54, MXL_RV64, rv64_sifive_u_cpu_init),
    DEFINE_VENDOR_CPU(TYPE_RISCV_CPU_SHAKTI_C, MXL_RV64, rv64_sifive_u_cpu_init),
    DEFINE_VENDOR_CPU(TYPE_RISCV_CPU_THEAD_C906, MXL_RV64, rv64_thead_c906_cpu_init),
    DEFINE_VENDOR_CPU(TYPE_RISCV_CPU_VEYRON_V1, MXL_RV64, rv64_veyron_v1_cpu_init),
    DEFINE_DYNAMIC_CPU(TYPE_RISCV_CPU_BASE128, MXL_RV128, rv128_base_cpu_init),
    DEFINE_BARE_CPU(TYPE_RISCV_CPU_RV64I, MXL_RV64, rv64i_bare_cpu_init),
    DEFINE_PROFILE_CPU(TYPE_RISCV_CPU_RVA22U64, MXL_RV64, rva22u64_profile_cpu_init),
    DEFINE_PROFILE_CPU(TYPE_RISCV_CPU_RVA22S64, MXL_RV64, rva22s64_profile_cpu_init),
#endif
};

DEFINE_TYPES(riscv_cpu_type_infos)
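
/*
 * Usage note (illustrative, not part of the original file): the CPU models
 * registered by DEFINE_TYPES() above can be listed from the command line,
 * e.g.:
 *
 *   qemu-system-riscv64 -cpu help
 */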