/*
 * QEMU RISC-V CPU
 *
 * Copyright (c) 2016-2017 Sagar Karandikar, sagark@eecs.berkeley.edu
 * Copyright (c) 2017-2018 SiFive, Inc.
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms and conditions of the GNU General Public License,
 * version 2 or later, as published by the Free Software Foundation.
 *
 * This program is distributed in the hope it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
 * more details.
 *
 * You should have received a copy of the GNU General Public License along with
 * this program. If not, see <http://www.gnu.org/licenses/>.
 */

#include "qemu/osdep.h"
#include "qemu/qemu-print.h"
#include "qemu/ctype.h"
#include "qemu/log.h"
#include "cpu.h"
#include "cpu_vendorid.h"
#include "internals.h"
#include "exec/exec-all.h"
#include "qapi/error.h"
#include "qapi/visitor.h"
#include "qemu/error-report.h"
#include "hw/qdev-properties.h"
#include "hw/core/qdev-prop-internal.h"
#include "migration/vmstate.h"
#include "fpu/softfloat-helpers.h"
#include "sysemu/device_tree.h"
#include "sysemu/kvm.h"
#include "sysemu/tcg.h"
#include "kvm/kvm_riscv.h"
#include "tcg/tcg-cpu.h"
#include "tcg/tcg.h"

/* RISC-V CPU definitions */
static const char riscv_single_letter_exts[] = "IEMAFDQCBPVH";
const uint32_t misa_bits[] = {RVI, RVE, RVM, RVA, RVF, RVD, RVV,
                              RVC, RVS, RVU, RVH, RVJ, RVG, RVB, 0};

/*
 * From vector_helper.c
 * Note that vector data is stored in host-endian 64-bit chunks,
 * so addressing bytes needs a host-endian fixup.
 */
#if HOST_BIG_ENDIAN
#define BYTE(x)   ((x) ^ 7)
#else
#define BYTE(x)   (x)
#endif

bool riscv_cpu_is_32bit(RISCVCPU *cpu)
{
    return riscv_cpu_mxl(&cpu->env) == MXL_RV32;
}

/* Hash that stores general user set numeric options */
static GHashTable *general_user_opts;

static void cpu_option_add_user_setting(const char *optname, uint32_t value)
{
    g_hash_table_insert(general_user_opts, (gpointer)optname,
                        GUINT_TO_POINTER(value));
}

bool riscv_cpu_option_set(const char *optname)
{
    return g_hash_table_contains(general_user_opts, optname);
}

#define ISA_EXT_DATA_ENTRY(_name, _min_ver, _prop) \
    {#_name, _min_ver, CPU_CFG_OFFSET(_prop)}

/*
 * Here are the ordering rules of extension naming defined by the RISC-V
 * specification:
 * 1. All extensions should be separated from other multi-letter extensions
 *    by an underscore.
 * 2. The first letter following the 'Z' conventionally indicates the most
 *    closely related alphabetical extension category, IMAFDQLCBKJTPVH.
 *    If multiple 'Z' extensions are named, they should be ordered first
 *    by category, then alphabetically within a category.
 * 3. Standard supervisor-level extensions (starts with 'S') should be
 *    listed after standard unprivileged extensions. If multiple
 *    supervisor-level extensions are listed, they should be ordered
 *    alphabetically.
 * 4. Non-standard extensions (starts with 'X') must be listed after all
 *    standard extensions. They must be separated from other multi-letter
 *    extensions by an underscore.
 *
 * Single letter extensions are checked in riscv_cpu_validate_misa_priv()
 * instead.
 */
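/*
 * Illustrative example only: following the rules above, a CPU with I, M, A,
 * F, D, C plus Zicsr, Zifencei, Zba, Zbb, Sstc and XTheadBa would produce a
 * riscv,isa string laid out as
 *   rv64imafdc_zicsr_zifencei_zba_zbb_sstc_xtheadba
 * i.e. single-letter extensions first, then 'Z' extensions ordered by
 * category and alphabetically, then 'S' extensions, then 'X' vendor
 * extensions.
 */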
const RISCVIsaExtData isa_edata_arr[] = {
    ISA_EXT_DATA_ENTRY(zic64b, PRIV_VERSION_1_12_0, ext_zic64b),
    ISA_EXT_DATA_ENTRY(zicbom, PRIV_VERSION_1_12_0, ext_zicbom),
    ISA_EXT_DATA_ENTRY(zicbop, PRIV_VERSION_1_12_0, ext_zicbop),
    ISA_EXT_DATA_ENTRY(zicboz, PRIV_VERSION_1_12_0, ext_zicboz),
    ISA_EXT_DATA_ENTRY(ziccamoa, PRIV_VERSION_1_11_0, has_priv_1_11),
    ISA_EXT_DATA_ENTRY(ziccif, PRIV_VERSION_1_11_0, has_priv_1_11),
    ISA_EXT_DATA_ENTRY(zicclsm, PRIV_VERSION_1_11_0, has_priv_1_11),
    ISA_EXT_DATA_ENTRY(ziccrse, PRIV_VERSION_1_11_0, has_priv_1_11),
    ISA_EXT_DATA_ENTRY(zicond, PRIV_VERSION_1_12_0, ext_zicond),
    ISA_EXT_DATA_ENTRY(zicntr, PRIV_VERSION_1_12_0, ext_zicntr),
    ISA_EXT_DATA_ENTRY(zicsr, PRIV_VERSION_1_10_0, ext_zicsr),
    ISA_EXT_DATA_ENTRY(zifencei, PRIV_VERSION_1_10_0, ext_zifencei),
    ISA_EXT_DATA_ENTRY(zihintntl, PRIV_VERSION_1_10_0, ext_zihintntl),
    ISA_EXT_DATA_ENTRY(zihintpause, PRIV_VERSION_1_10_0, ext_zihintpause),
    ISA_EXT_DATA_ENTRY(zihpm, PRIV_VERSION_1_12_0, ext_zihpm),
    ISA_EXT_DATA_ENTRY(zmmul, PRIV_VERSION_1_12_0, ext_zmmul),
    ISA_EXT_DATA_ENTRY(za64rs, PRIV_VERSION_1_12_0, has_priv_1_11),
    ISA_EXT_DATA_ENTRY(zaamo, PRIV_VERSION_1_12_0, ext_zaamo),
    ISA_EXT_DATA_ENTRY(zacas, PRIV_VERSION_1_12_0, ext_zacas),
    ISA_EXT_DATA_ENTRY(zalrsc, PRIV_VERSION_1_12_0, ext_zalrsc),
    ISA_EXT_DATA_ENTRY(zawrs, PRIV_VERSION_1_12_0, ext_zawrs),
    ISA_EXT_DATA_ENTRY(zfa, PRIV_VERSION_1_12_0, ext_zfa),
    ISA_EXT_DATA_ENTRY(zfbfmin, PRIV_VERSION_1_12_0, ext_zfbfmin),
    ISA_EXT_DATA_ENTRY(zfh, PRIV_VERSION_1_11_0, ext_zfh),
    ISA_EXT_DATA_ENTRY(zfhmin, PRIV_VERSION_1_11_0, ext_zfhmin),
    ISA_EXT_DATA_ENTRY(zfinx, PRIV_VERSION_1_12_0, ext_zfinx),
    ISA_EXT_DATA_ENTRY(zdinx, PRIV_VERSION_1_12_0, ext_zdinx),
    ISA_EXT_DATA_ENTRY(zca, PRIV_VERSION_1_12_0, ext_zca),
    ISA_EXT_DATA_ENTRY(zcb, PRIV_VERSION_1_12_0, ext_zcb),
    ISA_EXT_DATA_ENTRY(zcf, PRIV_VERSION_1_12_0, ext_zcf),
    ISA_EXT_DATA_ENTRY(zcd, PRIV_VERSION_1_12_0, ext_zcd),
    ISA_EXT_DATA_ENTRY(zce, PRIV_VERSION_1_12_0, ext_zce),
    ISA_EXT_DATA_ENTRY(zcmp, PRIV_VERSION_1_12_0, ext_zcmp),
    ISA_EXT_DATA_ENTRY(zcmt, PRIV_VERSION_1_12_0, ext_zcmt),
    ISA_EXT_DATA_ENTRY(zba, PRIV_VERSION_1_12_0, ext_zba),
    ISA_EXT_DATA_ENTRY(zbb, PRIV_VERSION_1_12_0, ext_zbb),
    ISA_EXT_DATA_ENTRY(zbc, PRIV_VERSION_1_12_0, ext_zbc),
    ISA_EXT_DATA_ENTRY(zbkb, PRIV_VERSION_1_12_0, ext_zbkb),
    ISA_EXT_DATA_ENTRY(zbkc, PRIV_VERSION_1_12_0, ext_zbkc),
    ISA_EXT_DATA_ENTRY(zbkx, PRIV_VERSION_1_12_0, ext_zbkx),
    ISA_EXT_DATA_ENTRY(zbs, PRIV_VERSION_1_12_0, ext_zbs),
    ISA_EXT_DATA_ENTRY(zk, PRIV_VERSION_1_12_0, ext_zk),
    ISA_EXT_DATA_ENTRY(zkn, PRIV_VERSION_1_12_0, ext_zkn),
    ISA_EXT_DATA_ENTRY(zknd, PRIV_VERSION_1_12_0, ext_zknd),
    ISA_EXT_DATA_ENTRY(zkne, PRIV_VERSION_1_12_0, ext_zkne),
    ISA_EXT_DATA_ENTRY(zknh, PRIV_VERSION_1_12_0, ext_zknh),
    ISA_EXT_DATA_ENTRY(zkr, PRIV_VERSION_1_12_0, ext_zkr),
    ISA_EXT_DATA_ENTRY(zks, PRIV_VERSION_1_12_0, ext_zks),
    ISA_EXT_DATA_ENTRY(zksed, PRIV_VERSION_1_12_0, ext_zksed),
    ISA_EXT_DATA_ENTRY(zksh, PRIV_VERSION_1_12_0, ext_zksh),
    ISA_EXT_DATA_ENTRY(zkt, PRIV_VERSION_1_12_0, ext_zkt),
    ISA_EXT_DATA_ENTRY(ztso, PRIV_VERSION_1_12_0, ext_ztso),
    ISA_EXT_DATA_ENTRY(zvbb, PRIV_VERSION_1_12_0, ext_zvbb),
    ISA_EXT_DATA_ENTRY(zvbc, PRIV_VERSION_1_12_0, ext_zvbc),
    ISA_EXT_DATA_ENTRY(zve32f, PRIV_VERSION_1_10_0, ext_zve32f),
    ISA_EXT_DATA_ENTRY(zve64f, PRIV_VERSION_1_10_0, ext_zve64f),
    ISA_EXT_DATA_ENTRY(zve64d, PRIV_VERSION_1_10_0, ext_zve64d),
    ISA_EXT_DATA_ENTRY(zvfbfmin, PRIV_VERSION_1_12_0, ext_zvfbfmin),
    ISA_EXT_DATA_ENTRY(zvfbfwma, PRIV_VERSION_1_12_0, ext_zvfbfwma),
    ISA_EXT_DATA_ENTRY(zvfh, PRIV_VERSION_1_12_0, ext_zvfh),
    ISA_EXT_DATA_ENTRY(zvfhmin, PRIV_VERSION_1_12_0, ext_zvfhmin),
    ISA_EXT_DATA_ENTRY(zvkb, PRIV_VERSION_1_12_0, ext_zvkb),
    ISA_EXT_DATA_ENTRY(zvkg, PRIV_VERSION_1_12_0, ext_zvkg),
    ISA_EXT_DATA_ENTRY(zvkn, PRIV_VERSION_1_12_0, ext_zvkn),
    ISA_EXT_DATA_ENTRY(zvknc, PRIV_VERSION_1_12_0, ext_zvknc),
    ISA_EXT_DATA_ENTRY(zvkned, PRIV_VERSION_1_12_0, ext_zvkned),
    ISA_EXT_DATA_ENTRY(zvkng, PRIV_VERSION_1_12_0, ext_zvkng),
    ISA_EXT_DATA_ENTRY(zvknha, PRIV_VERSION_1_12_0, ext_zvknha),
    ISA_EXT_DATA_ENTRY(zvknhb, PRIV_VERSION_1_12_0, ext_zvknhb),
    ISA_EXT_DATA_ENTRY(zvks, PRIV_VERSION_1_12_0, ext_zvks),
    ISA_EXT_DATA_ENTRY(zvksc, PRIV_VERSION_1_12_0, ext_zvksc),
    ISA_EXT_DATA_ENTRY(zvksed, PRIV_VERSION_1_12_0, ext_zvksed),
    ISA_EXT_DATA_ENTRY(zvksg, PRIV_VERSION_1_12_0, ext_zvksg),
    ISA_EXT_DATA_ENTRY(zvksh, PRIV_VERSION_1_12_0, ext_zvksh),
    ISA_EXT_DATA_ENTRY(zvkt, PRIV_VERSION_1_12_0, ext_zvkt),
    ISA_EXT_DATA_ENTRY(zhinx, PRIV_VERSION_1_12_0, ext_zhinx),
    ISA_EXT_DATA_ENTRY(zhinxmin, PRIV_VERSION_1_12_0, ext_zhinxmin),
    ISA_EXT_DATA_ENTRY(smaia, PRIV_VERSION_1_12_0, ext_smaia),
    ISA_EXT_DATA_ENTRY(smepmp, PRIV_VERSION_1_12_0, ext_smepmp),
    ISA_EXT_DATA_ENTRY(smstateen, PRIV_VERSION_1_12_0, ext_smstateen),
    ISA_EXT_DATA_ENTRY(ssaia, PRIV_VERSION_1_12_0, ext_ssaia),
    ISA_EXT_DATA_ENTRY(ssccptr, PRIV_VERSION_1_11_0, has_priv_1_11),
    ISA_EXT_DATA_ENTRY(sscofpmf, PRIV_VERSION_1_12_0, ext_sscofpmf),
    ISA_EXT_DATA_ENTRY(sscounterenw, PRIV_VERSION_1_12_0, has_priv_1_12),
    ISA_EXT_DATA_ENTRY(sstc, PRIV_VERSION_1_12_0, ext_sstc),
    ISA_EXT_DATA_ENTRY(sstvala, PRIV_VERSION_1_12_0, has_priv_1_12),
    ISA_EXT_DATA_ENTRY(sstvecd, PRIV_VERSION_1_12_0, has_priv_1_12),
    ISA_EXT_DATA_ENTRY(svade, PRIV_VERSION_1_11_0, ext_svade),
    ISA_EXT_DATA_ENTRY(svadu, PRIV_VERSION_1_12_0, ext_svadu),
    ISA_EXT_DATA_ENTRY(svinval, PRIV_VERSION_1_12_0, ext_svinval),
    ISA_EXT_DATA_ENTRY(svnapot, PRIV_VERSION_1_12_0, ext_svnapot),
    ISA_EXT_DATA_ENTRY(svpbmt, PRIV_VERSION_1_12_0, ext_svpbmt),
    ISA_EXT_DATA_ENTRY(xtheadba, PRIV_VERSION_1_11_0, ext_xtheadba),
    ISA_EXT_DATA_ENTRY(xtheadbb, PRIV_VERSION_1_11_0, ext_xtheadbb),
    ISA_EXT_DATA_ENTRY(xtheadbs, PRIV_VERSION_1_11_0, ext_xtheadbs),
    ISA_EXT_DATA_ENTRY(xtheadcmo, PRIV_VERSION_1_11_0, ext_xtheadcmo),
    ISA_EXT_DATA_ENTRY(xtheadcondmov, PRIV_VERSION_1_11_0, ext_xtheadcondmov),
    ISA_EXT_DATA_ENTRY(xtheadfmemidx, PRIV_VERSION_1_11_0, ext_xtheadfmemidx),
    ISA_EXT_DATA_ENTRY(xtheadfmv, PRIV_VERSION_1_11_0, ext_xtheadfmv),
    ISA_EXT_DATA_ENTRY(xtheadmac, PRIV_VERSION_1_11_0, ext_xtheadmac),
    ISA_EXT_DATA_ENTRY(xtheadmemidx, PRIV_VERSION_1_11_0, ext_xtheadmemidx),
    ISA_EXT_DATA_ENTRY(xtheadmempair, PRIV_VERSION_1_11_0, ext_xtheadmempair),
    ISA_EXT_DATA_ENTRY(xtheadsync, PRIV_VERSION_1_11_0, ext_xtheadsync),
    ISA_EXT_DATA_ENTRY(xventanacondops, PRIV_VERSION_1_12_0, ext_XVentanaCondOps),

    DEFINE_PROP_END_OF_LIST(),
};

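/*
 * The two helpers below address individual RISCVCPUConfig booleans by their
 * byte offset into the struct, as produced by CPU_CFG_OFFSET(). An
 * illustrative (hypothetical) call site:
 *
 *   isa_ext_update_enabled(cpu, CPU_CFG_OFFSET(ext_zba), true);
 *
 * is equivalent to 'cpu->cfg.ext_zba = true', but lets tables such as
 * isa_edata_arr[] above refer to config flags generically.
 */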
bool isa_ext_is_enabled(RISCVCPU *cpu, uint32_t ext_offset)
{
    bool *ext_enabled = (void *)&cpu->cfg + ext_offset;

    return *ext_enabled;
}

void isa_ext_update_enabled(RISCVCPU *cpu, uint32_t ext_offset, bool en)
{
    bool *ext_enabled = (void *)&cpu->cfg + ext_offset;

    *ext_enabled = en;
}

bool riscv_cpu_is_vendor(Object *cpu_obj)
{
    return object_dynamic_cast(cpu_obj, TYPE_RISCV_VENDOR_CPU) != NULL;
}

const char * const riscv_int_regnames[] = {
    "x0/zero", "x1/ra", "x2/sp", "x3/gp", "x4/tp", "x5/t0", "x6/t1",
    "x7/t2", "x8/s0", "x9/s1", "x10/a0", "x11/a1", "x12/a2", "x13/a3",
    "x14/a4", "x15/a5", "x16/a6", "x17/a7", "x18/s2", "x19/s3", "x20/s4",
    "x21/s5", "x22/s6", "x23/s7", "x24/s8", "x25/s9", "x26/s10", "x27/s11",
    "x28/t3", "x29/t4", "x30/t5", "x31/t6"
};

const char * const riscv_int_regnamesh[] = {
    "x0h/zeroh", "x1h/rah", "x2h/sph", "x3h/gph", "x4h/tph", "x5h/t0h",
    "x6h/t1h", "x7h/t2h", "x8h/s0h", "x9h/s1h", "x10h/a0h", "x11h/a1h",
    "x12h/a2h", "x13h/a3h", "x14h/a4h", "x15h/a5h", "x16h/a6h", "x17h/a7h",
    "x18h/s2h", "x19h/s3h", "x20h/s4h", "x21h/s5h", "x22h/s6h", "x23h/s7h",
    "x24h/s8h", "x25h/s9h", "x26h/s10h", "x27h/s11h", "x28h/t3h", "x29h/t4h",
    "x30h/t5h", "x31h/t6h"
};

const char * const riscv_fpr_regnames[] = {
    "f0/ft0", "f1/ft1", "f2/ft2", "f3/ft3", "f4/ft4", "f5/ft5",
    "f6/ft6", "f7/ft7", "f8/fs0", "f9/fs1", "f10/fa0", "f11/fa1",
    "f12/fa2", "f13/fa3", "f14/fa4", "f15/fa5", "f16/fa6", "f17/fa7",
    "f18/fs2", "f19/fs3", "f20/fs4", "f21/fs5", "f22/fs6", "f23/fs7",
    "f24/fs8", "f25/fs9", "f26/fs10", "f27/fs11", "f28/ft8", "f29/ft9",
    "f30/ft10", "f31/ft11"
};

const char * const riscv_rvv_regnames[] = {
    "v0", "v1", "v2", "v3", "v4", "v5", "v6",
    "v7", "v8", "v9", "v10", "v11", "v12", "v13",
    "v14", "v15", "v16", "v17", "v18", "v19", "v20",
    "v21", "v22", "v23", "v24", "v25", "v26", "v27",
    "v28", "v29", "v30", "v31"
};

static const char * const riscv_excp_names[] = {
    "misaligned_fetch",
    "fault_fetch",
    "illegal_instruction",
    "breakpoint",
    "misaligned_load",
    "fault_load",
    "misaligned_store",
    "fault_store",
    "user_ecall",
    "supervisor_ecall",
    "hypervisor_ecall",
    "machine_ecall",
    "exec_page_fault",
    "load_page_fault",
    "reserved",
    "store_page_fault",
    "reserved",
    "reserved",
    "reserved",
    "reserved",
    "guest_exec_page_fault",
    "guest_load_page_fault",
    "reserved",
    "guest_store_page_fault",
};

static const char * const riscv_intr_names[] = {
    "u_software",
    "s_software",
    "vs_software",
    "m_software",
    "u_timer",
    "s_timer",
    "vs_timer",
    "m_timer",
    "u_external",
    "s_external",
    "vs_external",
    "m_external",
    "reserved",
    "reserved",
    "reserved",
    "reserved"
};

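/*
 * Map a raw trap cause (without the interrupt bit) to a printable name.
 * 'async' selects the interrupt table instead of the exception table;
 * out-of-range causes come back as "(unknown)".
 */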
const char *riscv_cpu_get_trap_name(target_ulong cause, bool async)
{
    if (async) {
        return (cause < ARRAY_SIZE(riscv_intr_names)) ?
               riscv_intr_names[cause] : "(unknown)";
    } else {
        return (cause < ARRAY_SIZE(riscv_excp_names)) ?
               riscv_excp_names[cause] : "(unknown)";
    }
}

void riscv_cpu_set_misa_ext(CPURISCVState *env, uint32_t ext)
{
    env->misa_ext_mask = env->misa_ext = ext;
}

int riscv_cpu_max_xlen(RISCVCPUClass *mcc)
{
    return 16 << mcc->misa_mxl_max;
}

#ifndef CONFIG_USER_ONLY
static uint8_t satp_mode_from_str(const char *satp_mode_str)
{
    if (!strncmp(satp_mode_str, "mbare", 5)) {
        return VM_1_10_MBARE;
    }

    if (!strncmp(satp_mode_str, "sv32", 4)) {
        return VM_1_10_SV32;
    }

    if (!strncmp(satp_mode_str, "sv39", 4)) {
        return VM_1_10_SV39;
    }

    if (!strncmp(satp_mode_str, "sv48", 4)) {
        return VM_1_10_SV48;
    }

    if (!strncmp(satp_mode_str, "sv57", 4)) {
        return VM_1_10_SV57;
    }

    if (!strncmp(satp_mode_str, "sv64", 4)) {
        return VM_1_10_SV64;
    }

    g_assert_not_reached();
}

uint8_t satp_mode_max_from_map(uint32_t map)
{
    /*
     * 'map = 0' will make us return (31 - 32), which C will
     * happily overflow to UINT_MAX. There's no good result to
     * return if 'map = 0' (e.g. returning 0 will be ambiguous
     * with the result for 'map = 1').
     *
     * Assert out if map = 0. Callers will have to deal with
     * it outside of this function.
     */
    g_assert(map > 0);

    /* map here has at least one bit set, so no problem with clz */
    return 31 - __builtin_clz(map);
}

const char *satp_mode_str(uint8_t satp_mode, bool is_32_bit)
{
    if (is_32_bit) {
        switch (satp_mode) {
        case VM_1_10_SV32:
            return "sv32";
        case VM_1_10_MBARE:
            return "none";
        }
    } else {
        switch (satp_mode) {
        case VM_1_10_SV64:
            return "sv64";
        case VM_1_10_SV57:
            return "sv57";
        case VM_1_10_SV48:
            return "sv48";
        case VM_1_10_SV39:
            return "sv39";
        case VM_1_10_MBARE:
            return "none";
        }
    }

    g_assert_not_reached();
}

static void set_satp_mode_max_supported(RISCVCPU *cpu,
                                        uint8_t satp_mode)
{
    bool rv32 = riscv_cpu_mxl(&cpu->env) == MXL_RV32;
    const bool *valid_vm = rv32 ? valid_vm_1_10_32 : valid_vm_1_10_64;

    for (int i = 0; i <= satp_mode; ++i) {
        if (valid_vm[i]) {
            cpu->cfg.satp_mode.supported |= (1 << i);
        }
    }
}

/* Set the satp mode to the max supported */
static void set_satp_mode_default_map(RISCVCPU *cpu)
{
    /*
     * Bare CPUs do not default to the max available.
     * Users must set a valid satp_mode in the command
     * line.
     */
    if (object_dynamic_cast(OBJECT(cpu), TYPE_RISCV_BARE_CPU) != NULL) {
        warn_report("No satp mode set. Defaulting to 'bare'");
        cpu->cfg.satp_mode.map = (1 << VM_1_10_MBARE);
        return;
    }

    cpu->cfg.satp_mode.map = cpu->cfg.satp_mode.supported;
}
#endif

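/*
 * Note on the two bitmaps used above: set_satp_mode_max_supported() fills
 * in cfg.satp_mode.supported (everything the model can offer), while
 * cfg.satp_mode.map holds what will actually be advertised and is finalized
 * later in riscv_cpu_satp_mode_finalize(). The bit index is the satp mode
 * encoding itself, so e.g.
 * satp_mode_max_from_map((1 << VM_1_10_SV48) | (1 << VM_1_10_MBARE))
 * returns VM_1_10_SV48.
 */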
static void riscv_any_cpu_init(Object *obj)
{
    RISCVCPU *cpu = RISCV_CPU(obj);
    CPURISCVState *env = &cpu->env;
    riscv_cpu_set_misa_ext(env, RVI | RVM | RVA | RVF | RVD | RVC | RVU);

#ifndef CONFIG_USER_ONLY
    set_satp_mode_max_supported(RISCV_CPU(obj),
        riscv_cpu_mxl(&RISCV_CPU(obj)->env) == MXL_RV32 ?
        VM_1_10_SV32 : VM_1_10_SV57);
#endif

    env->priv_ver = PRIV_VERSION_LATEST;

    /* inherited from parent obj via riscv_cpu_init() */
    cpu->cfg.ext_zifencei = true;
    cpu->cfg.ext_zicsr = true;
    cpu->cfg.mmu = true;
    cpu->cfg.pmp = true;
}

static void riscv_max_cpu_init(Object *obj)
{
    RISCVCPU *cpu = RISCV_CPU(obj);
    CPURISCVState *env = &cpu->env;

    cpu->cfg.mmu = true;
    cpu->cfg.pmp = true;

    env->priv_ver = PRIV_VERSION_LATEST;
#ifndef CONFIG_USER_ONLY
#ifdef TARGET_RISCV32
    set_satp_mode_max_supported(cpu, VM_1_10_SV32);
#else
    set_satp_mode_max_supported(cpu, VM_1_10_SV57);
#endif
#endif
}

#if defined(TARGET_RISCV64)
static void rv64_base_cpu_init(Object *obj)
{
    RISCVCPU *cpu = RISCV_CPU(obj);
    CPURISCVState *env = &cpu->env;

    cpu->cfg.mmu = true;
    cpu->cfg.pmp = true;

    /* Set latest version of privileged specification */
    env->priv_ver = PRIV_VERSION_LATEST;
#ifndef CONFIG_USER_ONLY
    set_satp_mode_max_supported(RISCV_CPU(obj), VM_1_10_SV57);
#endif
}

static void rv64_sifive_u_cpu_init(Object *obj)
{
    RISCVCPU *cpu = RISCV_CPU(obj);
    CPURISCVState *env = &cpu->env;
    riscv_cpu_set_misa_ext(env, RVI | RVM | RVA | RVF | RVD | RVC | RVS | RVU);
    env->priv_ver = PRIV_VERSION_1_10_0;
#ifndef CONFIG_USER_ONLY
    set_satp_mode_max_supported(RISCV_CPU(obj), VM_1_10_SV39);
#endif

    /* inherited from parent obj via riscv_cpu_init() */
    cpu->cfg.ext_zifencei = true;
    cpu->cfg.ext_zicsr = true;
    cpu->cfg.mmu = true;
    cpu->cfg.pmp = true;
}

static void rv64_sifive_e_cpu_init(Object *obj)
{
    CPURISCVState *env = &RISCV_CPU(obj)->env;
    RISCVCPU *cpu = RISCV_CPU(obj);

    riscv_cpu_set_misa_ext(env, RVI | RVM | RVA | RVC | RVU);
    env->priv_ver = PRIV_VERSION_1_10_0;
#ifndef CONFIG_USER_ONLY
    set_satp_mode_max_supported(cpu, VM_1_10_MBARE);
#endif

    /* inherited from parent obj via riscv_cpu_init() */
    cpu->cfg.ext_zifencei = true;
    cpu->cfg.ext_zicsr = true;
    cpu->cfg.pmp = true;
}

static void rv64_thead_c906_cpu_init(Object *obj)
{
    CPURISCVState *env = &RISCV_CPU(obj)->env;
    RISCVCPU *cpu = RISCV_CPU(obj);

    riscv_cpu_set_misa_ext(env, RVG | RVC | RVS | RVU);
    env->priv_ver = PRIV_VERSION_1_11_0;

    cpu->cfg.ext_zfa = true;
    cpu->cfg.ext_zfh = true;
    cpu->cfg.mmu = true;
    cpu->cfg.ext_xtheadba = true;
    cpu->cfg.ext_xtheadbb = true;
    cpu->cfg.ext_xtheadbs = true;
    cpu->cfg.ext_xtheadcmo = true;
    cpu->cfg.ext_xtheadcondmov = true;
    cpu->cfg.ext_xtheadfmemidx = true;
    cpu->cfg.ext_xtheadmac = true;
    cpu->cfg.ext_xtheadmemidx = true;
    cpu->cfg.ext_xtheadmempair = true;
    cpu->cfg.ext_xtheadsync = true;

    cpu->cfg.mvendorid = THEAD_VENDOR_ID;
#ifndef CONFIG_USER_ONLY
    set_satp_mode_max_supported(cpu, VM_1_10_SV39);
#endif

    /* inherited from parent obj via riscv_cpu_init() */
    cpu->cfg.pmp = true;
}

static void rv64_veyron_v1_cpu_init(Object *obj)
{
    CPURISCVState *env = &RISCV_CPU(obj)->env;
    RISCVCPU *cpu = RISCV_CPU(obj);

    riscv_cpu_set_misa_ext(env, RVG | RVC | RVS | RVU | RVH);
    env->priv_ver = PRIV_VERSION_1_12_0;

    /* Enable ISA extensions */
    cpu->cfg.mmu = true;
    cpu->cfg.ext_zifencei = true;
    cpu->cfg.ext_zicsr = true;
    cpu->cfg.pmp = true;
    cpu->cfg.ext_zicbom = true;
    cpu->cfg.cbom_blocksize = 64;
    cpu->cfg.cboz_blocksize = 64;
    cpu->cfg.ext_zicboz = true;
    cpu->cfg.ext_smaia = true;
    cpu->cfg.ext_ssaia = true;
    cpu->cfg.ext_sscofpmf = true;
    cpu->cfg.ext_sstc = true;
    cpu->cfg.ext_svinval = true;
    cpu->cfg.ext_svnapot = true;
    cpu->cfg.ext_svpbmt = true;
    cpu->cfg.ext_smstateen = true;
    cpu->cfg.ext_zba = true;
    cpu->cfg.ext_zbb = true;
    cpu->cfg.ext_zbc = true;
    cpu->cfg.ext_zbs = true;
    cpu->cfg.ext_XVentanaCondOps = true;

    cpu->cfg.mvendorid = VEYRON_V1_MVENDORID;
    cpu->cfg.marchid = VEYRON_V1_MARCHID;
    cpu->cfg.mimpid = VEYRON_V1_MIMPID;

#ifndef CONFIG_USER_ONLY
    set_satp_mode_max_supported(cpu, VM_1_10_SV48);
#endif
}

#ifdef CONFIG_TCG
static void rv128_base_cpu_init(Object *obj)
{
    RISCVCPU *cpu = RISCV_CPU(obj);
    CPURISCVState *env = &cpu->env;

    if (qemu_tcg_mttcg_enabled()) {
        /* Missing 128-bit aligned atomics */
        error_report("128-bit RISC-V currently does not work with Multi "
                     "Threaded TCG. Please use: -accel tcg,thread=single");
        exit(EXIT_FAILURE);
    }

    cpu->cfg.mmu = true;
    cpu->cfg.pmp = true;

    /* Set latest version of privileged specification */
    env->priv_ver = PRIV_VERSION_LATEST;
#ifndef CONFIG_USER_ONLY
    set_satp_mode_max_supported(RISCV_CPU(obj), VM_1_10_SV57);
#endif
}
#endif /* CONFIG_TCG */

static void rv64i_bare_cpu_init(Object *obj)
{
    CPURISCVState *env = &RISCV_CPU(obj)->env;
    riscv_cpu_set_misa_ext(env, RVI);
}

static void rv64e_bare_cpu_init(Object *obj)
{
    CPURISCVState *env = &RISCV_CPU(obj)->env;
    riscv_cpu_set_misa_ext(env, RVE);
}

#else /* !TARGET_RISCV64 */

static void rv32_base_cpu_init(Object *obj)
{
    RISCVCPU *cpu = RISCV_CPU(obj);
    CPURISCVState *env = &cpu->env;

    cpu->cfg.mmu = true;
    cpu->cfg.pmp = true;

    /* Set latest version of privileged specification */
    env->priv_ver = PRIV_VERSION_LATEST;
#ifndef CONFIG_USER_ONLY
    set_satp_mode_max_supported(RISCV_CPU(obj), VM_1_10_SV32);
#endif
}

static void rv32_sifive_u_cpu_init(Object *obj)
{
    RISCVCPU *cpu = RISCV_CPU(obj);
    CPURISCVState *env = &cpu->env;
    riscv_cpu_set_misa_ext(env, RVI | RVM | RVA | RVF | RVD | RVC | RVS | RVU);
    env->priv_ver = PRIV_VERSION_1_10_0;
#ifndef CONFIG_USER_ONLY
    set_satp_mode_max_supported(RISCV_CPU(obj), VM_1_10_SV32);
#endif

    /* inherited from parent obj via riscv_cpu_init() */
    cpu->cfg.ext_zifencei = true;
    cpu->cfg.ext_zicsr = true;
    cpu->cfg.mmu = true;
    cpu->cfg.pmp = true;
}

static void rv32_sifive_e_cpu_init(Object *obj)
{
    CPURISCVState *env = &RISCV_CPU(obj)->env;
    RISCVCPU *cpu = RISCV_CPU(obj);

    riscv_cpu_set_misa_ext(env, RVI | RVM | RVA | RVC | RVU);
    env->priv_ver = PRIV_VERSION_1_10_0;
#ifndef CONFIG_USER_ONLY
    set_satp_mode_max_supported(cpu, VM_1_10_MBARE);
#endif

    /* inherited from parent obj via riscv_cpu_init() */
    cpu->cfg.ext_zifencei = true;
    cpu->cfg.ext_zicsr = true;
    cpu->cfg.pmp = true;
}

static void rv32_ibex_cpu_init(Object *obj)
{
    CPURISCVState *env = &RISCV_CPU(obj)->env;
    RISCVCPU *cpu = RISCV_CPU(obj);

    riscv_cpu_set_misa_ext(env, RVI | RVM | RVC | RVU);
    env->priv_ver = PRIV_VERSION_1_12_0;
#ifndef CONFIG_USER_ONLY
    set_satp_mode_max_supported(cpu, VM_1_10_MBARE);
#endif
    /* inherited from parent obj via riscv_cpu_init() */
    cpu->cfg.ext_zifencei = true;
    cpu->cfg.ext_zicsr = true;
    cpu->cfg.pmp = true;
    cpu->cfg.ext_smepmp = true;
}

static void rv32_imafcu_nommu_cpu_init(Object *obj)
{
    CPURISCVState *env = &RISCV_CPU(obj)->env;
    RISCVCPU *cpu = RISCV_CPU(obj);

    riscv_cpu_set_misa_ext(env, RVI | RVM | RVA | RVF | RVC | RVU);
    env->priv_ver = PRIV_VERSION_1_10_0;
#ifndef CONFIG_USER_ONLY
    set_satp_mode_max_supported(cpu, VM_1_10_MBARE);
#endif

    /* inherited from parent obj via riscv_cpu_init() */
    cpu->cfg.ext_zifencei = true;
    cpu->cfg.ext_zicsr = true;
    cpu->cfg.pmp = true;
}

static void rv32i_bare_cpu_init(Object *obj)
{
    CPURISCVState *env = &RISCV_CPU(obj)->env;
    riscv_cpu_set_misa_ext(env, RVI);
}

static void rv32e_bare_cpu_init(Object *obj)
{
    CPURISCVState *env = &RISCV_CPU(obj)->env;
    riscv_cpu_set_misa_ext(env, RVE);
}
#endif

static ObjectClass *riscv_cpu_class_by_name(const char *cpu_model)
{
    ObjectClass *oc;
    char *typename;
    char **cpuname;

    cpuname = g_strsplit(cpu_model, ",", 1);
    typename = g_strdup_printf(RISCV_CPU_TYPE_NAME("%s"), cpuname[0]);
    oc = object_class_by_name(typename);
    g_strfreev(cpuname);
    g_free(typename);

    return oc;
}

char *riscv_cpu_get_name(RISCVCPU *cpu)
{
    RISCVCPUClass *rcc = RISCV_CPU_GET_CLASS(cpu);
    const char *typename = object_class_get_name(OBJECT_CLASS(rcc));

    g_assert(g_str_has_suffix(typename, RISCV_CPU_TYPE_SUFFIX));

    return cpu_model_from_type(typename);
}

static void riscv_cpu_dump_state(CPUState *cs, FILE *f, int flags)
{
    RISCVCPU *cpu = RISCV_CPU(cs);
    CPURISCVState *env = &cpu->env;
    int i, j;
    uint8_t *p;

#if !defined(CONFIG_USER_ONLY)
    if (riscv_has_ext(env, RVH)) {
        qemu_fprintf(f, " %s %d\n", "V = ", env->virt_enabled);
    }
#endif
    qemu_fprintf(f, " %s " TARGET_FMT_lx "\n", "pc ", env->pc);
#ifndef CONFIG_USER_ONLY
    {
        static const int dump_csrs[] = {
            CSR_MHARTID,
            CSR_MSTATUS,
            CSR_MSTATUSH,
            /*
             * CSR_SSTATUS is intentionally omitted here as its value
             * can be figured out by looking at CSR_MSTATUS
             */
            CSR_HSTATUS,
            CSR_VSSTATUS,
            CSR_MIP,
            CSR_MIE,
            CSR_MIDELEG,
            CSR_HIDELEG,
            CSR_MEDELEG,
            CSR_HEDELEG,
            CSR_MTVEC,
            CSR_STVEC,
            CSR_VSTVEC,
            CSR_MEPC,
            CSR_SEPC,
            CSR_VSEPC,
            CSR_MCAUSE,
            CSR_SCAUSE,
            CSR_VSCAUSE,
            CSR_MTVAL,
            CSR_STVAL,
            CSR_HTVAL,
            CSR_MTVAL2,
            CSR_MSCRATCH,
            CSR_SSCRATCH,
            CSR_SATP,
            CSR_MMTE,
            CSR_UPMBASE,
            CSR_UPMMASK,
            CSR_SPMBASE,
            CSR_SPMMASK,
            CSR_MPMBASE,
            CSR_MPMMASK,
        };

        for (i = 0; i < ARRAY_SIZE(dump_csrs); ++i) {
            int csrno = dump_csrs[i];
            target_ulong val = 0;
            RISCVException res = riscv_csrrw_debug(env, csrno, &val, 0, 0);

            /*
             * Rely on the smode, hmode, etc, predicates within csr.c
             * to do the filtering of the registers that are present.
             */
            if (res == RISCV_EXCP_NONE) {
                qemu_fprintf(f, " %-8s " TARGET_FMT_lx "\n",
                             csr_ops[csrno].name, val);
            }
        }
    }
#endif

    for (i = 0; i < 32; i++) {
        qemu_fprintf(f, " %-8s " TARGET_FMT_lx,
                     riscv_int_regnames[i], env->gpr[i]);
        if ((i & 3) == 3) {
            qemu_fprintf(f, "\n");
        }
    }
    if (flags & CPU_DUMP_FPU) {
        for (i = 0; i < 32; i++) {
            qemu_fprintf(f, " %-8s %016" PRIx64,
                         riscv_fpr_regnames[i], env->fpr[i]);
            if ((i & 3) == 3) {
                qemu_fprintf(f, "\n");
            }
        }
    }
    if (riscv_has_ext(env, RVV) && (flags & CPU_DUMP_VPU)) {
        static const int dump_rvv_csrs[] = {
            CSR_VSTART,
            CSR_VXSAT,
            CSR_VXRM,
            CSR_VCSR,
            CSR_VL,
            CSR_VTYPE,
            CSR_VLENB,
        };
        for (i = 0; i < ARRAY_SIZE(dump_rvv_csrs); ++i) {
            int csrno = dump_rvv_csrs[i];
            target_ulong val = 0;
            RISCVException res = riscv_csrrw_debug(env, csrno, &val, 0, 0);

            /*
             * Rely on the smode, hmode, etc, predicates within csr.c
             * to do the filtering of the registers that are present.
             */
            if (res == RISCV_EXCP_NONE) {
                qemu_fprintf(f, " %-8s " TARGET_FMT_lx "\n",
                             csr_ops[csrno].name, val);
            }
        }
        uint16_t vlenb = cpu->cfg.vlenb;

        for (i = 0; i < 32; i++) {
            qemu_fprintf(f, " %-8s ", riscv_rvv_regnames[i]);
            p = (uint8_t *)env->vreg;
            for (j = vlenb - 1 ; j >= 0; j--) {
                qemu_fprintf(f, "%02x", *(p + i * vlenb + BYTE(j)));
            }
            qemu_fprintf(f, "\n");
        }
    }
}

static void riscv_cpu_set_pc(CPUState *cs, vaddr value)
{
    RISCVCPU *cpu = RISCV_CPU(cs);
    CPURISCVState *env = &cpu->env;

    if (env->xl == MXL_RV32) {
        env->pc = (int32_t)value;
    } else {
        env->pc = value;
    }
}

static vaddr riscv_cpu_get_pc(CPUState *cs)
{
    RISCVCPU *cpu = RISCV_CPU(cs);
    CPURISCVState *env = &cpu->env;

    /* Match cpu_get_tb_cpu_state. */
    if (env->xl == MXL_RV32) {
        return env->pc & UINT32_MAX;
    }
    return env->pc;
}

static bool riscv_cpu_has_work(CPUState *cs)
{
#ifndef CONFIG_USER_ONLY
    RISCVCPU *cpu = RISCV_CPU(cs);
    CPURISCVState *env = &cpu->env;
    /*
     * Definition of the WFI instruction requires it to ignore the privilege
     * mode and delegation registers, but respect individual enables
     */
    return riscv_cpu_all_pending(env) != 0 ||
           riscv_cpu_sirq_pending(env) != RISCV_EXCP_NONE ||
           riscv_cpu_vsirq_pending(env) != RISCV_EXCP_NONE;
#else
    return true;
#endif
}

static int riscv_cpu_mmu_index(CPUState *cs, bool ifetch)
{
    return riscv_env_mmu_index(cpu_env(cs), ifetch);
}

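/*
 * Hold-phase reset: the hart comes back up in M-mode at env->resetvec with
 * MSTATUS.MIE/MPRV cleared. When MXL is wider than RV32, SXL/UXL (and the
 * HS/VS copies if the H extension is present) are initialised to the
 * maximum XLEN because their architectural reset value is otherwise
 * undefined.
 */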
static void riscv_cpu_reset_hold(Object *obj, ResetType type)
{
#ifndef CONFIG_USER_ONLY
    uint8_t iprio;
    int i, irq, rdzero;
#endif
    CPUState *cs = CPU(obj);
    RISCVCPU *cpu = RISCV_CPU(cs);
    RISCVCPUClass *mcc = RISCV_CPU_GET_CLASS(obj);
    CPURISCVState *env = &cpu->env;

    if (mcc->parent_phases.hold) {
        mcc->parent_phases.hold(obj, type);
    }
#ifndef CONFIG_USER_ONLY
    env->misa_mxl = mcc->misa_mxl_max;
    env->priv = PRV_M;
    env->mstatus &= ~(MSTATUS_MIE | MSTATUS_MPRV);
    if (env->misa_mxl > MXL_RV32) {
        /*
         * The reset status of SXL/UXL is undefined, but mstatus is WARL
         * and we must ensure that the value after init is valid for read.
         */
        env->mstatus = set_field(env->mstatus, MSTATUS64_SXL, env->misa_mxl);
        env->mstatus = set_field(env->mstatus, MSTATUS64_UXL, env->misa_mxl);
        if (riscv_has_ext(env, RVH)) {
            env->vsstatus = set_field(env->vsstatus,
                                      MSTATUS64_SXL, env->misa_mxl);
            env->vsstatus = set_field(env->vsstatus,
                                      MSTATUS64_UXL, env->misa_mxl);
            env->mstatus_hs = set_field(env->mstatus_hs,
                                        MSTATUS64_SXL, env->misa_mxl);
            env->mstatus_hs = set_field(env->mstatus_hs,
                                        MSTATUS64_UXL, env->misa_mxl);
        }
    }
    env->mcause = 0;
    env->miclaim = MIP_SGEIP;
    env->pc = env->resetvec;
    env->bins = 0;
    env->two_stage_lookup = false;

    env->menvcfg = (cpu->cfg.ext_svpbmt ? MENVCFG_PBMTE : 0) |
                   (!cpu->cfg.ext_svade && cpu->cfg.ext_svadu ?
                    MENVCFG_ADUE : 0);
    env->henvcfg = 0;

    /* Initialize default priorities of local interrupts. */
    for (i = 0; i < ARRAY_SIZE(env->miprio); i++) {
        iprio = riscv_cpu_default_priority(i);
        env->miprio[i] = (i == IRQ_M_EXT) ? 0 : iprio;
        env->siprio[i] = (i == IRQ_S_EXT) ? 0 : iprio;
        env->hviprio[i] = 0;
    }
    i = 0;
    while (!riscv_cpu_hviprio_index2irq(i, &irq, &rdzero)) {
        if (!rdzero) {
            env->hviprio[irq] = env->miprio[irq];
        }
        i++;
    }
    /* mmte is supposed to have pm.current hardwired to 1 */
    env->mmte |= (EXT_STATUS_INITIAL | MMTE_M_PM_CURRENT);

    /*
     * Bits 10, 6, 2 and 12 of mideleg are read only 1 when the Hypervisor
     * extension is enabled.
     */
    if (riscv_has_ext(env, RVH)) {
        env->mideleg |= HS_MODE_INTERRUPTS;
    }

    /*
     * Clear mseccfg and unlock all the PMP entries upon reset.
     * This is allowed as per the priv and smepmp specifications
     * and is needed to clear stale entries across reboots.
     */
    if (riscv_cpu_cfg(env)->ext_smepmp) {
        env->mseccfg = 0;
    }

    pmp_unlock_entries(env);
#endif
    env->xl = riscv_cpu_mxl(env);
    riscv_cpu_update_mask(env);
    cs->exception_index = RISCV_EXCP_NONE;
    env->load_res = -1;
    set_default_nan_mode(1, &env->fp_status);

#ifndef CONFIG_USER_ONLY
    if (cpu->cfg.debug) {
        riscv_trigger_reset_hold(env);
    }

    if (kvm_enabled()) {
        kvm_riscv_reset_vcpu(cpu);
    }
#endif
}

static void riscv_cpu_disas_set_info(CPUState *s, disassemble_info *info)
{
    RISCVCPU *cpu = RISCV_CPU(s);
    CPURISCVState *env = &cpu->env;
    info->target_info = &cpu->cfg;

    switch (env->xl) {
    case MXL_RV32:
        info->print_insn = print_insn_riscv32;
        break;
    case MXL_RV64:
        info->print_insn = print_insn_riscv64;
        break;
    case MXL_RV128:
        info->print_insn = print_insn_riscv128;
        break;
    default:
        g_assert_not_reached();
    }
}

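/*
 * Worked example (illustrative only): with something like
 * '-cpu rv64,sv48=on' and no other satp options,
 * riscv_cpu_satp_mode_finalize() checks sv48 against the hardware maximum
 * and then expands the map with every supported mode below it (sv39 and
 * bare), whereas an explicit contradiction such as sv48=on,sv39=off is
 * rejected with "cannot disable ... satp mode".
 */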
#ifndef CONFIG_USER_ONLY
static void riscv_cpu_satp_mode_finalize(RISCVCPU *cpu, Error **errp)
{
    bool rv32 = riscv_cpu_is_32bit(cpu);
    uint8_t satp_mode_map_max, satp_mode_supported_max;

    /* The CPU wants the OS to decide which satp mode to use */
    if (cpu->cfg.satp_mode.supported == 0) {
        return;
    }

    satp_mode_supported_max =
                    satp_mode_max_from_map(cpu->cfg.satp_mode.supported);

    if (cpu->cfg.satp_mode.map == 0) {
        if (cpu->cfg.satp_mode.init == 0) {
            /* If unset by the user, we fall back to the default satp mode. */
            set_satp_mode_default_map(cpu);
        } else {
            /*
             * Find the lowest level that was disabled and then enable the
             * first valid level below which can be found in
             * valid_vm_1_10_32/64.
             */
            for (int i = 1; i < 16; ++i) {
                if ((cpu->cfg.satp_mode.init & (1 << i)) &&
                    (cpu->cfg.satp_mode.supported & (1 << i))) {
                    for (int j = i - 1; j >= 0; --j) {
                        if (cpu->cfg.satp_mode.supported & (1 << j)) {
                            cpu->cfg.satp_mode.map |= (1 << j);
                            break;
                        }
                    }
                    break;
                }
            }
        }
    }

    satp_mode_map_max = satp_mode_max_from_map(cpu->cfg.satp_mode.map);

    /* Make sure the user asked for a supported configuration (HW and qemu) */
    if (satp_mode_map_max > satp_mode_supported_max) {
        error_setg(errp, "satp_mode %s is higher than hw max capability %s",
                   satp_mode_str(satp_mode_map_max, rv32),
                   satp_mode_str(satp_mode_supported_max, rv32));
        return;
    }

    /*
     * Make sure the user did not ask for an invalid configuration as per
     * the specification.
     */
    if (!rv32) {
        for (int i = satp_mode_map_max - 1; i >= 0; --i) {
            if (!(cpu->cfg.satp_mode.map & (1 << i)) &&
                (cpu->cfg.satp_mode.init & (1 << i)) &&
                (cpu->cfg.satp_mode.supported & (1 << i))) {
                error_setg(errp, "cannot disable %s satp mode if %s "
                           "is enabled", satp_mode_str(i, false),
                           satp_mode_str(satp_mode_map_max, false));
                return;
            }
        }
    }

    /* Finally expand the map so that all valid modes are set */
    for (int i = satp_mode_map_max - 1; i >= 0; --i) {
        if (cpu->cfg.satp_mode.supported & (1 << i)) {
            cpu->cfg.satp_mode.map |= (1 << i);
        }
    }
}
#endif

void riscv_cpu_finalize_features(RISCVCPU *cpu, Error **errp)
{
    Error *local_err = NULL;

#ifndef CONFIG_USER_ONLY
    riscv_cpu_satp_mode_finalize(cpu, &local_err);
    if (local_err != NULL) {
        error_propagate(errp, local_err);
        return;
    }
#endif

    if (tcg_enabled()) {
        riscv_tcg_cpu_finalize_features(cpu, &local_err);
        if (local_err != NULL) {
            error_propagate(errp, local_err);
            return;
        }
    } else if (kvm_enabled()) {
        riscv_kvm_cpu_finalize_features(cpu, &local_err);
        if (local_err != NULL) {
            error_propagate(errp, local_err);
            return;
        }
    }
}

static void riscv_cpu_realize(DeviceState *dev, Error **errp)
{
    CPUState *cs = CPU(dev);
    RISCVCPU *cpu = RISCV_CPU(dev);
    RISCVCPUClass *mcc = RISCV_CPU_GET_CLASS(dev);
    Error *local_err = NULL;

    if (object_dynamic_cast(OBJECT(dev), TYPE_RISCV_CPU_ANY) != NULL) {
        warn_report("The 'any' CPU is deprecated and will be "
                    "removed in the future.");
    }

    cpu_exec_realizefn(cs, &local_err);
    if (local_err != NULL) {
        error_propagate(errp, local_err);
        return;
    }

    riscv_cpu_finalize_features(cpu, &local_err);
    if (local_err != NULL) {
        error_propagate(errp, local_err);
        return;
    }

    riscv_cpu_register_gdb_regs_for_features(cs);

#ifndef CONFIG_USER_ONLY
    if (cpu->cfg.debug) {
        riscv_trigger_realize(&cpu->env);
    }
#endif

    qemu_init_vcpu(cs);
    cpu_reset(cs);

    mcc->parent_realize(dev, errp);
}

bool riscv_cpu_accelerator_compatible(RISCVCPU *cpu)
{
    if (tcg_enabled()) {
        return riscv_cpu_tcg_compatible(cpu);
    }

    return true;
}

#ifndef CONFIG_USER_ONLY
static void cpu_riscv_get_satp(Object *obj, Visitor *v, const char *name,
                               void *opaque, Error **errp)
{
    RISCVSATPMap *satp_map = opaque;
    uint8_t satp = satp_mode_from_str(name);
    bool value;

    value = satp_map->map & (1 << satp);

    visit_type_bool(v, name, &value, errp);
}

static void cpu_riscv_set_satp(Object *obj, Visitor *v, const char *name,
                               void *opaque, Error **errp)
{
    RISCVSATPMap *satp_map = opaque;
    uint8_t satp = satp_mode_from_str(name);
    bool value;

    if (!visit_type_bool(v, name, &value, errp)) {
        return;
    }

    satp_map->map = deposit32(satp_map->map, satp, 1, value);
    satp_map->init |= 1 << satp;
}

void riscv_add_satp_mode_properties(Object *obj)
{
    RISCVCPU *cpu = RISCV_CPU(obj);

    if (cpu->env.misa_mxl == MXL_RV32) {
        object_property_add(obj, "sv32", "bool", cpu_riscv_get_satp,
                            cpu_riscv_set_satp, NULL, &cpu->cfg.satp_mode);
    } else {
        object_property_add(obj, "sv39", "bool", cpu_riscv_get_satp,
                            cpu_riscv_set_satp, NULL, &cpu->cfg.satp_mode);
        object_property_add(obj, "sv48", "bool", cpu_riscv_get_satp,
                            cpu_riscv_set_satp, NULL, &cpu->cfg.satp_mode);
        object_property_add(obj, "sv57", "bool", cpu_riscv_get_satp,
                            cpu_riscv_set_satp, NULL, &cpu->cfg.satp_mode);
        object_property_add(obj, "sv64", "bool", cpu_riscv_get_satp,
                            cpu_riscv_set_satp, NULL, &cpu->cfg.satp_mode);
    }
}

static void riscv_cpu_set_irq(void *opaque, int irq, int level)
{
    RISCVCPU *cpu = RISCV_CPU(opaque);
    CPURISCVState *env = &cpu->env;

    if (irq < IRQ_LOCAL_MAX) {
        switch (irq) {
        case IRQ_U_SOFT:
        case IRQ_S_SOFT:
        case IRQ_VS_SOFT:
        case IRQ_M_SOFT:
        case IRQ_U_TIMER:
        case IRQ_S_TIMER:
        case IRQ_VS_TIMER:
        case IRQ_M_TIMER:
        case IRQ_U_EXT:
        case IRQ_VS_EXT:
        case IRQ_M_EXT:
            if (kvm_enabled()) {
                kvm_riscv_set_irq(cpu, irq, level);
            } else {
                riscv_cpu_update_mip(env, 1 << irq, BOOL_TO_MASK(level));
            }
            break;
        case IRQ_S_EXT:
            if (kvm_enabled()) {
                kvm_riscv_set_irq(cpu, irq, level);
            } else {
                env->external_seip = level;
                riscv_cpu_update_mip(env, 1 << irq,
                                     BOOL_TO_MASK(level | env->software_seip));
            }
            break;
        default:
            g_assert_not_reached();
        }
    } else if (irq < (IRQ_LOCAL_MAX + IRQ_LOCAL_GUEST_MAX)) {
        /* Require H-extension for handling guest local interrupts */
        if (!riscv_has_ext(env, RVH)) {
            g_assert_not_reached();
        }

        /* Compute bit position in HGEIP CSR */
        irq = irq - IRQ_LOCAL_MAX + 1;
        if (env->geilen < irq) {
            g_assert_not_reached();
        }

        /* Update HGEIP CSR */
        env->hgeip &= ~((target_ulong)1 << irq);
        if (level) {
            env->hgeip |= (target_ulong)1 << irq;
        }

        /* Update mip.SGEIP bit */
        riscv_cpu_update_mip(env, MIP_SGEIP,
                             BOOL_TO_MASK(!!(env->hgeie & env->hgeip)));
    } else {
        g_assert_not_reached();
    }
}
#endif /* CONFIG_USER_ONLY */

static bool riscv_cpu_is_dynamic(Object *cpu_obj)
{
    return object_dynamic_cast(cpu_obj, TYPE_RISCV_DYNAMIC_CPU) != NULL;
}

static void riscv_cpu_post_init(Object *obj)
{
    accel_cpu_instance_init(CPU(obj));
}

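/*
 * Common instance_init for every RISC-V CPU model. QOM runs this before the
 * per-model instance_init hooks (the rv64_*/rv32_* init functions above),
 * so the defaults set here are what those hooks and user-set properties
 * later override.
 */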
static void riscv_cpu_init(Object *obj)
{
    RISCVCPUClass *mcc = RISCV_CPU_GET_CLASS(obj);
    RISCVCPU *cpu = RISCV_CPU(obj);
    CPURISCVState *env = &cpu->env;

    env->misa_mxl = mcc->misa_mxl_max;

#ifndef CONFIG_USER_ONLY
    qdev_init_gpio_in(DEVICE(obj), riscv_cpu_set_irq,
                      IRQ_LOCAL_MAX + IRQ_LOCAL_GUEST_MAX);
#endif /* CONFIG_USER_ONLY */

    general_user_opts = g_hash_table_new(g_str_hash, g_str_equal);

    /*
     * The timer and performance counters extensions were supported
     * in QEMU before they were added as discrete extensions in the
     * ISA. To keep compatibility we'll always default them to 'true'
     * for all CPUs. Each accelerator will decide what to do when
     * users disable them.
     */
    RISCV_CPU(obj)->cfg.ext_zicntr = true;
    RISCV_CPU(obj)->cfg.ext_zihpm = true;

    /* Default values for non-bool cpu properties */
    cpu->cfg.pmu_mask = MAKE_64BIT_MASK(3, 16);
    cpu->cfg.vlenb = 128 >> 3;
    cpu->cfg.elen = 64;
    cpu->cfg.cbom_blocksize = 64;
    cpu->cfg.cbop_blocksize = 64;
    cpu->cfg.cboz_blocksize = 64;
    cpu->env.vext_ver = VEXT_VERSION_1_00_0;
}

static void riscv_bare_cpu_init(Object *obj)
{
    RISCVCPU *cpu = RISCV_CPU(obj);

    /*
     * Bare CPUs do not inherit the timer and performance
     * counters from the parent class (see riscv_cpu_init()
     * for info on why the parent enables them).
     *
     * Users have to explicitly enable these counters for
     * bare CPUs.
     */
    cpu->cfg.ext_zicntr = false;
    cpu->cfg.ext_zihpm = false;

    /* Set to QEMU's first supported priv version */
    cpu->env.priv_ver = PRIV_VERSION_1_10_0;

    /*
     * Support all available satp_mode settings. The default
     * value will be set to MBARE if the user doesn't set
     * satp_mode manually (see set_satp_mode_default_map()).
     */
#ifndef CONFIG_USER_ONLY
    set_satp_mode_max_supported(cpu, VM_1_10_SV64);
#endif
}

typedef struct misa_ext_info {
    const char *name;
    const char *description;
} MISAExtInfo;

#define MISA_INFO_IDX(_bit) \
    __builtin_ctz(_bit)

#define MISA_EXT_INFO(_bit, _propname, _descr) \
    [MISA_INFO_IDX(_bit)] = {.name = _propname, .description = _descr}

static const MISAExtInfo misa_ext_info_arr[] = {
    MISA_EXT_INFO(RVA, "a", "Atomic instructions"),
    MISA_EXT_INFO(RVC, "c", "Compressed instructions"),
    MISA_EXT_INFO(RVD, "d", "Double-precision floating point"),
    MISA_EXT_INFO(RVF, "f", "Single-precision floating point"),
    MISA_EXT_INFO(RVI, "i", "Base integer instruction set"),
    MISA_EXT_INFO(RVE, "e", "Base integer instruction set (embedded)"),
    MISA_EXT_INFO(RVM, "m", "Integer multiplication and division"),
    MISA_EXT_INFO(RVS, "s", "Supervisor-level instructions"),
    MISA_EXT_INFO(RVU, "u", "User-level instructions"),
    MISA_EXT_INFO(RVH, "h", "Hypervisor"),
    MISA_EXT_INFO(RVJ, "x-j", "Dynamic translated languages"),
    MISA_EXT_INFO(RVV, "v", "Vector operations"),
    MISA_EXT_INFO(RVG, "g", "General purpose (IMAFD_Zicsr_Zifencei)"),
    MISA_EXT_INFO(RVB, "x-b", "Bit manipulation (Zba_Zbb_Zbs)")
};

static void riscv_cpu_validate_misa_mxl(RISCVCPUClass *mcc)
{
    CPUClass *cc = CPU_CLASS(mcc);

    /* Validate that MISA_MXL is set properly. */
    switch (mcc->misa_mxl_max) {
#ifdef TARGET_RISCV64
    case MXL_RV64:
    case MXL_RV128:
        cc->gdb_core_xml_file = "riscv-64bit-cpu.xml";
        break;
#endif
    case MXL_RV32:
        cc->gdb_core_xml_file = "riscv-32bit-cpu.xml";
        break;
    default:
        g_assert_not_reached();
    }
}

static int riscv_validate_misa_info_idx(uint32_t bit)
{
    int idx;

    /*
     * Our lowest valid input (RVA) is 1 and
     * __builtin_ctz() is UB with zero.
     */
    g_assert(bit != 0);
    idx = MISA_INFO_IDX(bit);

    g_assert(idx < ARRAY_SIZE(misa_ext_info_arr));
    return idx;
}

const char *riscv_get_misa_ext_name(uint32_t bit)
{
    int idx = riscv_validate_misa_info_idx(bit);
    const char *val = misa_ext_info_arr[idx].name;

    g_assert(val != NULL);
    return val;
}

const char *riscv_get_misa_ext_description(uint32_t bit)
{
    int idx = riscv_validate_misa_info_idx(bit);
    const char *val = misa_ext_info_arr[idx].description;

    g_assert(val != NULL);
    return val;
}

#define MULTI_EXT_CFG_BOOL(_name, _prop, _defval) \
    {.name = _name, .offset = CPU_CFG_OFFSET(_prop), \
     .enabled = _defval}

const RISCVCPUMultiExtConfig riscv_cpu_extensions[] = {
    /* Defaults for standard extensions */
    MULTI_EXT_CFG_BOOL("sscofpmf", ext_sscofpmf, false),
    MULTI_EXT_CFG_BOOL("zifencei", ext_zifencei, true),
    MULTI_EXT_CFG_BOOL("zicsr", ext_zicsr, true),
    MULTI_EXT_CFG_BOOL("zihintntl", ext_zihintntl, true),
    MULTI_EXT_CFG_BOOL("zihintpause", ext_zihintpause, true),
    MULTI_EXT_CFG_BOOL("zacas", ext_zacas, false),
    MULTI_EXT_CFG_BOOL("zaamo", ext_zaamo, false),
    MULTI_EXT_CFG_BOOL("zalrsc", ext_zalrsc, false),
    MULTI_EXT_CFG_BOOL("zawrs", ext_zawrs, true),
    MULTI_EXT_CFG_BOOL("zfa", ext_zfa, true),
    MULTI_EXT_CFG_BOOL("zfbfmin", ext_zfbfmin, false),
    MULTI_EXT_CFG_BOOL("zfh", ext_zfh, false),
    MULTI_EXT_CFG_BOOL("zfhmin", ext_zfhmin, false),
    MULTI_EXT_CFG_BOOL("zve32f", ext_zve32f, false),
    MULTI_EXT_CFG_BOOL("zve64f", ext_zve64f, false),
    MULTI_EXT_CFG_BOOL("zve64d", ext_zve64d, false),
    MULTI_EXT_CFG_BOOL("zvfbfmin", ext_zvfbfmin, false),
    MULTI_EXT_CFG_BOOL("zvfbfwma", ext_zvfbfwma, false),
    MULTI_EXT_CFG_BOOL("zvfh", ext_zvfh, false),
    MULTI_EXT_CFG_BOOL("zvfhmin", ext_zvfhmin, false),
    MULTI_EXT_CFG_BOOL("sstc", ext_sstc, true),

    MULTI_EXT_CFG_BOOL("smaia", ext_smaia, false),
    MULTI_EXT_CFG_BOOL("smepmp", ext_smepmp, false),
    MULTI_EXT_CFG_BOOL("smstateen", ext_smstateen, false),
    MULTI_EXT_CFG_BOOL("ssaia", ext_ssaia, false),
    MULTI_EXT_CFG_BOOL("svade", ext_svade, false),
    MULTI_EXT_CFG_BOOL("svadu", ext_svadu, true),
    MULTI_EXT_CFG_BOOL("svinval", ext_svinval, false),
    MULTI_EXT_CFG_BOOL("svnapot", ext_svnapot, false),
    MULTI_EXT_CFG_BOOL("svpbmt", ext_svpbmt, false),

    MULTI_EXT_CFG_BOOL("zicntr", ext_zicntr, true),
    MULTI_EXT_CFG_BOOL("zihpm", ext_zihpm, true),

    MULTI_EXT_CFG_BOOL("zba", ext_zba, true),
    MULTI_EXT_CFG_BOOL("zbb", ext_zbb, true),
    MULTI_EXT_CFG_BOOL("zbc", ext_zbc, true),
    MULTI_EXT_CFG_BOOL("zbkb", ext_zbkb, false),
    MULTI_EXT_CFG_BOOL("zbkc", ext_zbkc, false),
    MULTI_EXT_CFG_BOOL("zbkx", ext_zbkx, false),
    MULTI_EXT_CFG_BOOL("zbs", ext_zbs, true),
    MULTI_EXT_CFG_BOOL("zk", ext_zk, false),
    MULTI_EXT_CFG_BOOL("zkn", ext_zkn, false),
MULTI_EXT_CFG_BOOL("zknd", ext_zknd, false), 1510 MULTI_EXT_CFG_BOOL("zkne", ext_zkne, false), 1511 MULTI_EXT_CFG_BOOL("zknh", ext_zknh, false), 1512 MULTI_EXT_CFG_BOOL("zkr", ext_zkr, false), 1513 MULTI_EXT_CFG_BOOL("zks", ext_zks, false), 1514 MULTI_EXT_CFG_BOOL("zksed", ext_zksed, false), 1515 MULTI_EXT_CFG_BOOL("zksh", ext_zksh, false), 1516 MULTI_EXT_CFG_BOOL("zkt", ext_zkt, false), 1517 MULTI_EXT_CFG_BOOL("ztso", ext_ztso, false), 1518 1519 MULTI_EXT_CFG_BOOL("zdinx", ext_zdinx, false), 1520 MULTI_EXT_CFG_BOOL("zfinx", ext_zfinx, false), 1521 MULTI_EXT_CFG_BOOL("zhinx", ext_zhinx, false), 1522 MULTI_EXT_CFG_BOOL("zhinxmin", ext_zhinxmin, false), 1523 1524 MULTI_EXT_CFG_BOOL("zicbom", ext_zicbom, true), 1525 MULTI_EXT_CFG_BOOL("zicbop", ext_zicbop, true), 1526 MULTI_EXT_CFG_BOOL("zicboz", ext_zicboz, true), 1527 1528 MULTI_EXT_CFG_BOOL("zmmul", ext_zmmul, false), 1529 1530 MULTI_EXT_CFG_BOOL("zca", ext_zca, false), 1531 MULTI_EXT_CFG_BOOL("zcb", ext_zcb, false), 1532 MULTI_EXT_CFG_BOOL("zcd", ext_zcd, false), 1533 MULTI_EXT_CFG_BOOL("zce", ext_zce, false), 1534 MULTI_EXT_CFG_BOOL("zcf", ext_zcf, false), 1535 MULTI_EXT_CFG_BOOL("zcmp", ext_zcmp, false), 1536 MULTI_EXT_CFG_BOOL("zcmt", ext_zcmt, false), 1537 MULTI_EXT_CFG_BOOL("zicond", ext_zicond, false), 1538 1539 /* Vector cryptography extensions */ 1540 MULTI_EXT_CFG_BOOL("zvbb", ext_zvbb, false), 1541 MULTI_EXT_CFG_BOOL("zvbc", ext_zvbc, false), 1542 MULTI_EXT_CFG_BOOL("zvkb", ext_zvkg, false), 1543 MULTI_EXT_CFG_BOOL("zvkg", ext_zvkg, false), 1544 MULTI_EXT_CFG_BOOL("zvkned", ext_zvkned, false), 1545 MULTI_EXT_CFG_BOOL("zvknha", ext_zvknha, false), 1546 MULTI_EXT_CFG_BOOL("zvknhb", ext_zvknhb, false), 1547 MULTI_EXT_CFG_BOOL("zvksed", ext_zvksed, false), 1548 MULTI_EXT_CFG_BOOL("zvksh", ext_zvksh, false), 1549 MULTI_EXT_CFG_BOOL("zvkt", ext_zvkt, false), 1550 MULTI_EXT_CFG_BOOL("zvkn", ext_zvkn, false), 1551 MULTI_EXT_CFG_BOOL("zvknc", ext_zvknc, false), 1552 MULTI_EXT_CFG_BOOL("zvkng", ext_zvkng, false), 1553 MULTI_EXT_CFG_BOOL("zvks", ext_zvks, false), 1554 MULTI_EXT_CFG_BOOL("zvksc", ext_zvksc, false), 1555 MULTI_EXT_CFG_BOOL("zvksg", ext_zvksg, false), 1556 1557 DEFINE_PROP_END_OF_LIST(), 1558 }; 1559 1560 const RISCVCPUMultiExtConfig riscv_cpu_vendor_exts[] = { 1561 MULTI_EXT_CFG_BOOL("xtheadba", ext_xtheadba, false), 1562 MULTI_EXT_CFG_BOOL("xtheadbb", ext_xtheadbb, false), 1563 MULTI_EXT_CFG_BOOL("xtheadbs", ext_xtheadbs, false), 1564 MULTI_EXT_CFG_BOOL("xtheadcmo", ext_xtheadcmo, false), 1565 MULTI_EXT_CFG_BOOL("xtheadcondmov", ext_xtheadcondmov, false), 1566 MULTI_EXT_CFG_BOOL("xtheadfmemidx", ext_xtheadfmemidx, false), 1567 MULTI_EXT_CFG_BOOL("xtheadfmv", ext_xtheadfmv, false), 1568 MULTI_EXT_CFG_BOOL("xtheadmac", ext_xtheadmac, false), 1569 MULTI_EXT_CFG_BOOL("xtheadmemidx", ext_xtheadmemidx, false), 1570 MULTI_EXT_CFG_BOOL("xtheadmempair", ext_xtheadmempair, false), 1571 MULTI_EXT_CFG_BOOL("xtheadsync", ext_xtheadsync, false), 1572 MULTI_EXT_CFG_BOOL("xventanacondops", ext_XVentanaCondOps, false), 1573 1574 DEFINE_PROP_END_OF_LIST(), 1575 }; 1576 1577 /* These are experimental so mark with 'x-' */ 1578 const RISCVCPUMultiExtConfig riscv_cpu_experimental_exts[] = { 1579 DEFINE_PROP_END_OF_LIST(), 1580 }; 1581 1582 /* 1583 * 'Named features' is the name we give to extensions that we 1584 * don't want to expose to users. They are either immutable 1585 * (always enabled/disable) or they'll vary depending on 1586 * the resulting CPU state. They have riscv,isa strings 1587 * and priv_ver like regular extensions. 
 */
const RISCVCPUMultiExtConfig riscv_cpu_named_features[] = {
    MULTI_EXT_CFG_BOOL("zic64b", ext_zic64b, true),

    DEFINE_PROP_END_OF_LIST(),
};

/* Deprecated entries marked for future removal */
const RISCVCPUMultiExtConfig riscv_cpu_deprecated_exts[] = {
    MULTI_EXT_CFG_BOOL("Zifencei", ext_zifencei, true),
    MULTI_EXT_CFG_BOOL("Zicsr", ext_zicsr, true),
    MULTI_EXT_CFG_BOOL("Zihintntl", ext_zihintntl, true),
    MULTI_EXT_CFG_BOOL("Zihintpause", ext_zihintpause, true),
    MULTI_EXT_CFG_BOOL("Zawrs", ext_zawrs, true),
    MULTI_EXT_CFG_BOOL("Zfa", ext_zfa, true),
    MULTI_EXT_CFG_BOOL("Zfh", ext_zfh, false),
    MULTI_EXT_CFG_BOOL("Zfhmin", ext_zfhmin, false),
    MULTI_EXT_CFG_BOOL("Zve32f", ext_zve32f, false),
    MULTI_EXT_CFG_BOOL("Zve64f", ext_zve64f, false),
    MULTI_EXT_CFG_BOOL("Zve64d", ext_zve64d, false),

    DEFINE_PROP_END_OF_LIST(),
};

static void cpu_set_prop_err(RISCVCPU *cpu, const char *propname,
                             Error **errp)
{
    g_autofree char *cpuname = riscv_cpu_get_name(cpu);
    error_setg(errp, "CPU '%s' does not allow changing the value of '%s'",
               cpuname, propname);
}

static void prop_pmu_num_set(Object *obj, Visitor *v, const char *name,
                             void *opaque, Error **errp)
{
    RISCVCPU *cpu = RISCV_CPU(obj);
    uint8_t pmu_num, curr_pmu_num;
    uint32_t pmu_mask;

    visit_type_uint8(v, name, &pmu_num, errp);

    curr_pmu_num = ctpop32(cpu->cfg.pmu_mask);

    if (pmu_num != curr_pmu_num && riscv_cpu_is_vendor(obj)) {
        cpu_set_prop_err(cpu, name, errp);
        error_append_hint(errp, "Current '%s' val: %u\n",
                          name, curr_pmu_num);
        return;
    }

    if (pmu_num > (RV_MAX_MHPMCOUNTERS - 3)) {
        error_setg(errp, "Number of counters exceeds maximum available");
        return;
    }

    if (pmu_num == 0) {
        pmu_mask = 0;
    } else {
        pmu_mask = MAKE_64BIT_MASK(3, pmu_num);
    }

    warn_report("\"pmu-num\" property is deprecated; use \"pmu-mask\"");
    cpu->cfg.pmu_mask = pmu_mask;
    cpu_option_add_user_setting("pmu-mask", pmu_mask);
}

static void prop_pmu_num_get(Object *obj, Visitor *v, const char *name,
                             void *opaque, Error **errp)
{
    RISCVCPU *cpu = RISCV_CPU(obj);
    uint8_t pmu_num = ctpop32(cpu->cfg.pmu_mask);

    visit_type_uint8(v, name, &pmu_num, errp);
}

static const PropertyInfo prop_pmu_num = {
    .name = "pmu-num",
    .get = prop_pmu_num_get,
    .set = prop_pmu_num_set,
};

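/*
 * "pmu-mask" supersedes the deprecated "pmu-num" above: setting pmu-num=N
 * is translated into pmu-mask=MAKE_64BIT_MASK(3, N), i.e. N programmable
 * counters starting at mhpmcounter3, while the mask form also allows
 * non-contiguous counter selections.
 */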
&pmu_mask, errp); 1702 } 1703 1704 static const PropertyInfo prop_pmu_mask = { 1705 .name = "pmu-mask", 1706 .get = prop_pmu_mask_get, 1707 .set = prop_pmu_mask_set, 1708 }; 1709 1710 static void prop_mmu_set(Object *obj, Visitor *v, const char *name, 1711 void *opaque, Error **errp) 1712 { 1713 RISCVCPU *cpu = RISCV_CPU(obj); 1714 bool value; 1715 1716 visit_type_bool(v, name, &value, errp); 1717 1718 if (cpu->cfg.mmu != value && riscv_cpu_is_vendor(obj)) { 1719 cpu_set_prop_err(cpu, "mmu", errp); 1720 return; 1721 } 1722 1723 cpu_option_add_user_setting(name, value); 1724 cpu->cfg.mmu = value; 1725 } 1726 1727 static void prop_mmu_get(Object *obj, Visitor *v, const char *name, 1728 void *opaque, Error **errp) 1729 { 1730 bool value = RISCV_CPU(obj)->cfg.mmu; 1731 1732 visit_type_bool(v, name, &value, errp); 1733 } 1734 1735 static const PropertyInfo prop_mmu = { 1736 .name = "mmu", 1737 .get = prop_mmu_get, 1738 .set = prop_mmu_set, 1739 }; 1740 1741 static void prop_pmp_set(Object *obj, Visitor *v, const char *name, 1742 void *opaque, Error **errp) 1743 { 1744 RISCVCPU *cpu = RISCV_CPU(obj); 1745 bool value; 1746 1747 visit_type_bool(v, name, &value, errp); 1748 1749 if (cpu->cfg.pmp != value && riscv_cpu_is_vendor(obj)) { 1750 cpu_set_prop_err(cpu, name, errp); 1751 return; 1752 } 1753 1754 cpu_option_add_user_setting(name, value); 1755 cpu->cfg.pmp = value; 1756 } 1757 1758 static void prop_pmp_get(Object *obj, Visitor *v, const char *name, 1759 void *opaque, Error **errp) 1760 { 1761 bool value = RISCV_CPU(obj)->cfg.pmp; 1762 1763 visit_type_bool(v, name, &value, errp); 1764 } 1765 1766 static const PropertyInfo prop_pmp = { 1767 .name = "pmp", 1768 .get = prop_pmp_get, 1769 .set = prop_pmp_set, 1770 }; 1771 1772 static int priv_spec_from_str(const char *priv_spec_str) 1773 { 1774 int priv_version = -1; 1775 1776 if (!g_strcmp0(priv_spec_str, PRIV_VER_1_12_0_STR)) { 1777 priv_version = PRIV_VERSION_1_12_0; 1778 } else if (!g_strcmp0(priv_spec_str, PRIV_VER_1_11_0_STR)) { 1779 priv_version = PRIV_VERSION_1_11_0; 1780 } else if (!g_strcmp0(priv_spec_str, PRIV_VER_1_10_0_STR)) { 1781 priv_version = PRIV_VERSION_1_10_0; 1782 } 1783 1784 return priv_version; 1785 } 1786 1787 static const char *priv_spec_to_str(int priv_version) 1788 { 1789 switch (priv_version) { 1790 case PRIV_VERSION_1_10_0: 1791 return PRIV_VER_1_10_0_STR; 1792 case PRIV_VERSION_1_11_0: 1793 return PRIV_VER_1_11_0_STR; 1794 case PRIV_VERSION_1_12_0: 1795 return PRIV_VER_1_12_0_STR; 1796 default: 1797 return NULL; 1798 } 1799 } 1800 1801 static void prop_priv_spec_set(Object *obj, Visitor *v, const char *name, 1802 void *opaque, Error **errp) 1803 { 1804 RISCVCPU *cpu = RISCV_CPU(obj); 1805 g_autofree char *value = NULL; 1806 int priv_version = -1; 1807 1808 visit_type_str(v, name, &value, errp); 1809 1810 priv_version = priv_spec_from_str(value); 1811 if (priv_version < 0) { 1812 error_setg(errp, "Unsupported privilege spec version '%s'", value); 1813 return; 1814 } 1815 1816 if (priv_version != cpu->env.priv_ver && riscv_cpu_is_vendor(obj)) { 1817 cpu_set_prop_err(cpu, name, errp); 1818 error_append_hint(errp, "Current '%s' val: %s\n", name, 1819 object_property_get_str(obj, name, NULL)); 1820 return; 1821 } 1822 1823 cpu_option_add_user_setting(name, priv_version); 1824 cpu->env.priv_ver = priv_version; 1825 } 1826 1827 static void prop_priv_spec_get(Object *obj, Visitor *v, const char *name, 1828 void *opaque, Error **errp) 1829 { 1830 RISCVCPU *cpu = RISCV_CPU(obj); 1831 const char *value = 
static void prop_priv_spec_get(Object *obj, Visitor *v, const char *name,
                               void *opaque, Error **errp)
{
    RISCVCPU *cpu = RISCV_CPU(obj);
    const char *value = priv_spec_to_str(cpu->env.priv_ver);

    visit_type_str(v, name, (char **)&value, errp);
}

static const PropertyInfo prop_priv_spec = {
    .name = "priv_spec",
    .get = prop_priv_spec_get,
    .set = prop_priv_spec_set,
};

static void prop_vext_spec_set(Object *obj, Visitor *v, const char *name,
                               void *opaque, Error **errp)
{
    RISCVCPU *cpu = RISCV_CPU(obj);
    g_autofree char *value = NULL;

    if (!visit_type_str(v, name, &value, errp)) {
        return;
    }

    if (g_strcmp0(value, VEXT_VER_1_00_0_STR) != 0) {
        error_setg(errp, "Unsupported vector spec version '%s'", value);
        return;
    }

    cpu_option_add_user_setting(name, VEXT_VERSION_1_00_0);
    cpu->env.vext_ver = VEXT_VERSION_1_00_0;
}

static void prop_vext_spec_get(Object *obj, Visitor *v, const char *name,
                               void *opaque, Error **errp)
{
    const char *value = VEXT_VER_1_00_0_STR;

    visit_type_str(v, name, (char **)&value, errp);
}

static const PropertyInfo prop_vext_spec = {
    .name = "vext_spec",
    .get = prop_vext_spec_get,
    .set = prop_vext_spec_set,
};

static void prop_vlen_set(Object *obj, Visitor *v, const char *name,
                          void *opaque, Error **errp)
{
    RISCVCPU *cpu = RISCV_CPU(obj);
    uint16_t value;

    if (!visit_type_uint16(v, name, &value, errp)) {
        return;
    }

    if (!is_power_of_2(value)) {
        error_setg(errp, "Vector extension VLEN must be power of 2");
        return;
    }

    /* 'value' is VLEN in bits, cfg.vlenb stores it in bytes */
    if (value != cpu->cfg.vlenb << 3 && riscv_cpu_is_vendor(obj)) {
        cpu_set_prop_err(cpu, name, errp);
        error_append_hint(errp, "Current '%s' val: %u\n",
                          name, cpu->cfg.vlenb << 3);
        return;
    }

    cpu_option_add_user_setting(name, value);
    cpu->cfg.vlenb = value >> 3;
}

static void prop_vlen_get(Object *obj, Visitor *v, const char *name,
                          void *opaque, Error **errp)
{
    uint16_t value = RISCV_CPU(obj)->cfg.vlenb << 3;

    visit_type_uint16(v, name, &value, errp);
}

static const PropertyInfo prop_vlen = {
    .name = "vlen",
    .get = prop_vlen_get,
    .set = prop_vlen_set,
};
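/*
 * For example, setting vlen=256 on the command line is stored internally
 * as cfg.vlenb = 32: the property is exposed in bits while the field is
 * kept in bytes.
 */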
static void prop_elen_set(Object *obj, Visitor *v, const char *name,
                          void *opaque, Error **errp)
{
    RISCVCPU *cpu = RISCV_CPU(obj);
    uint16_t value;

    if (!visit_type_uint16(v, name, &value, errp)) {
        return;
    }

    if (!is_power_of_2(value)) {
        error_setg(errp, "Vector extension ELEN must be power of 2");
        return;
    }

    if (value != cpu->cfg.elen && riscv_cpu_is_vendor(obj)) {
        cpu_set_prop_err(cpu, name, errp);
        error_append_hint(errp, "Current '%s' val: %u\n",
                          name, cpu->cfg.elen);
        return;
    }

    cpu_option_add_user_setting(name, value);
    cpu->cfg.elen = value;
}

static void prop_elen_get(Object *obj, Visitor *v, const char *name,
                          void *opaque, Error **errp)
{
    uint16_t value = RISCV_CPU(obj)->cfg.elen;

    visit_type_uint16(v, name, &value, errp);
}

static const PropertyInfo prop_elen = {
    .name = "elen",
    .get = prop_elen_get,
    .set = prop_elen_set,
};

static void prop_cbom_blksize_set(Object *obj, Visitor *v, const char *name,
                                  void *opaque, Error **errp)
{
    RISCVCPU *cpu = RISCV_CPU(obj);
    uint16_t value;

    if (!visit_type_uint16(v, name, &value, errp)) {
        return;
    }

    if (value != cpu->cfg.cbom_blocksize && riscv_cpu_is_vendor(obj)) {
        cpu_set_prop_err(cpu, name, errp);
        error_append_hint(errp, "Current '%s' val: %u\n",
                          name, cpu->cfg.cbom_blocksize);
        return;
    }

    cpu_option_add_user_setting(name, value);
    cpu->cfg.cbom_blocksize = value;
}

static void prop_cbom_blksize_get(Object *obj, Visitor *v, const char *name,
                                  void *opaque, Error **errp)
{
    uint16_t value = RISCV_CPU(obj)->cfg.cbom_blocksize;

    visit_type_uint16(v, name, &value, errp);
}

static const PropertyInfo prop_cbom_blksize = {
    .name = "cbom_blocksize",
    .get = prop_cbom_blksize_get,
    .set = prop_cbom_blksize_set,
};

static void prop_cbop_blksize_set(Object *obj, Visitor *v, const char *name,
                                  void *opaque, Error **errp)
{
    RISCVCPU *cpu = RISCV_CPU(obj);
    uint16_t value;

    if (!visit_type_uint16(v, name, &value, errp)) {
        return;
    }

    if (value != cpu->cfg.cbop_blocksize && riscv_cpu_is_vendor(obj)) {
        cpu_set_prop_err(cpu, name, errp);
        error_append_hint(errp, "Current '%s' val: %u\n",
                          name, cpu->cfg.cbop_blocksize);
        return;
    }

    cpu_option_add_user_setting(name, value);
    cpu->cfg.cbop_blocksize = value;
}

static void prop_cbop_blksize_get(Object *obj, Visitor *v, const char *name,
                                  void *opaque, Error **errp)
{
    uint16_t value = RISCV_CPU(obj)->cfg.cbop_blocksize;

    visit_type_uint16(v, name, &value, errp);
}

static const PropertyInfo prop_cbop_blksize = {
    .name = "cbop_blocksize",
    .get = prop_cbop_blksize_get,
    .set = prop_cbop_blksize_set,
};

static void prop_cboz_blksize_set(Object *obj, Visitor *v, const char *name,
                                  void *opaque, Error **errp)
{
    RISCVCPU *cpu = RISCV_CPU(obj);
    uint16_t value;

    if (!visit_type_uint16(v, name, &value, errp)) {
        return;
    }

    if (value != cpu->cfg.cboz_blocksize && riscv_cpu_is_vendor(obj)) {
        cpu_set_prop_err(cpu, name, errp);
        error_append_hint(errp, "Current '%s' val: %u\n",
                          name, cpu->cfg.cboz_blocksize);
        return;
    }

    cpu_option_add_user_setting(name, value);
    cpu->cfg.cboz_blocksize = value;
}

static void prop_cboz_blksize_get(Object *obj, Visitor *v, const char *name,
                                  void *opaque, Error **errp)
{
    uint16_t value = RISCV_CPU(obj)->cfg.cboz_blocksize;

    visit_type_uint16(v, name, &value, errp);
}

static const PropertyInfo prop_cboz_blksize = {
    .name = "cboz_blocksize",
    .get = prop_cboz_blksize_get,
    .set = prop_cboz_blksize_set,
};

static void prop_mvendorid_set(Object *obj, Visitor *v, const char *name,
                               void *opaque, Error **errp)
{
    bool dynamic_cpu = riscv_cpu_is_dynamic(obj);
    RISCVCPU *cpu = RISCV_CPU(obj);
    uint32_t prev_val = cpu->cfg.mvendorid;
    uint32_t value;

    if (!visit_type_uint32(v, name, &value, errp)) {
        return;
    }

    if (!dynamic_cpu && prev_val != value) {
        error_setg(errp, "Unable to change %s mvendorid (0x%x)",
                   object_get_typename(obj), prev_val);
        return;
    }

    cpu->cfg.mvendorid = value;
}

static void prop_mvendorid_get(Object *obj, Visitor *v, const char *name,
                               void *opaque, Error **errp)
{
    uint32_t value = RISCV_CPU(obj)->cfg.mvendorid;

    visit_type_uint32(v, name, &value, errp);
}
static const PropertyInfo prop_mvendorid = {
    .name = "mvendorid",
    .get = prop_mvendorid_get,
    .set = prop_mvendorid_set,
};

static void prop_mimpid_set(Object *obj, Visitor *v, const char *name,
                            void *opaque, Error **errp)
{
    bool dynamic_cpu = riscv_cpu_is_dynamic(obj);
    RISCVCPU *cpu = RISCV_CPU(obj);
    uint64_t prev_val = cpu->cfg.mimpid;
    uint64_t value;

    if (!visit_type_uint64(v, name, &value, errp)) {
        return;
    }

    if (!dynamic_cpu && prev_val != value) {
        error_setg(errp, "Unable to change %s mimpid (0x%" PRIx64 ")",
                   object_get_typename(obj), prev_val);
        return;
    }

    cpu->cfg.mimpid = value;
}

static void prop_mimpid_get(Object *obj, Visitor *v, const char *name,
                            void *opaque, Error **errp)
{
    uint64_t value = RISCV_CPU(obj)->cfg.mimpid;

    visit_type_uint64(v, name, &value, errp);
}

static const PropertyInfo prop_mimpid = {
    .name = "mimpid",
    .get = prop_mimpid_get,
    .set = prop_mimpid_set,
};

static void prop_marchid_set(Object *obj, Visitor *v, const char *name,
                             void *opaque, Error **errp)
{
    bool dynamic_cpu = riscv_cpu_is_dynamic(obj);
    RISCVCPU *cpu = RISCV_CPU(obj);
    uint64_t prev_val = cpu->cfg.marchid;
    uint64_t value, invalid_val;
    uint32_t mxlen = 0;

    if (!visit_type_uint64(v, name, &value, errp)) {
        return;
    }

    if (!dynamic_cpu && prev_val != value) {
        error_setg(errp, "Unable to change %s marchid (0x%" PRIx64 ")",
                   object_get_typename(obj), prev_val);
        return;
    }

    switch (riscv_cpu_mxl(&cpu->env)) {
    case MXL_RV32:
        mxlen = 32;
        break;
    case MXL_RV64:
    case MXL_RV128:
        mxlen = 64;
        break;
    default:
        g_assert_not_reached();
    }

    invalid_val = 1ULL << (mxlen - 1);

    if (value == invalid_val) {
        error_setg(errp, "Unable to set marchid with MSB (%u) bit set "
                         "and the remaining bits zero", mxlen);
        return;
    }

    cpu->cfg.marchid = value;
}

static void prop_marchid_get(Object *obj, Visitor *v, const char *name,
                             void *opaque, Error **errp)
{
    uint64_t value = RISCV_CPU(obj)->cfg.marchid;

    visit_type_uint64(v, name, &value, errp);
}

static const PropertyInfo prop_marchid = {
    .name = "marchid",
    .get = prop_marchid_get,
    .set = prop_marchid_set,
};
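/*
 * For example, on an RV64 CPU the check above rejects
 * marchid=0x8000000000000000, i.e. a value with only the MSB set and
 * every remaining bit zero.
 */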
/*
 * RVA22U64 defines some 'named features' that are cache
 * related: Za64rs, Zic64b, Ziccif, Ziccrse, Ziccamoa
 * and Zicclsm. They are always implemented in TCG and
 * don't need to be manually enabled by the profile.
 */
static RISCVCPUProfile RVA22U64 = {
    .parent = NULL,
    .name = "rva22u64",
    .misa_ext = RVI | RVM | RVA | RVF | RVD | RVC | RVU,
    .priv_spec = RISCV_PROFILE_ATTR_UNUSED,
    .satp_mode = RISCV_PROFILE_ATTR_UNUSED,
    .ext_offsets = {
        CPU_CFG_OFFSET(ext_zicsr), CPU_CFG_OFFSET(ext_zihintpause),
        CPU_CFG_OFFSET(ext_zba), CPU_CFG_OFFSET(ext_zbb),
        CPU_CFG_OFFSET(ext_zbs), CPU_CFG_OFFSET(ext_zfhmin),
        CPU_CFG_OFFSET(ext_zkt), CPU_CFG_OFFSET(ext_zicntr),
        CPU_CFG_OFFSET(ext_zihpm), CPU_CFG_OFFSET(ext_zicbom),
        CPU_CFG_OFFSET(ext_zicbop), CPU_CFG_OFFSET(ext_zicboz),

        /* mandatory named features for this profile */
        CPU_CFG_OFFSET(ext_zic64b),

        RISCV_PROFILE_EXT_LIST_END
    }
};

/*
 * As with RVA22U64, RVA22S64 also defines 'named features'.
 *
 * Cache-related features that we consider enabled since we don't
 * implement a cache: Ssccptr
 *
 * Other named features that we already implement: Sstvecd, Sstvala,
 * Sscounterenw
 *
 * The remaining features/extensions come from RVA22U64.
 */
static RISCVCPUProfile RVA22S64 = {
    .parent = &RVA22U64,
    .name = "rva22s64",
    .misa_ext = RVS,
    .priv_spec = PRIV_VERSION_1_12_0,
    .satp_mode = VM_1_10_SV39,
    .ext_offsets = {
        /* rva22s64 exts */
        CPU_CFG_OFFSET(ext_zifencei), CPU_CFG_OFFSET(ext_svpbmt),
        CPU_CFG_OFFSET(ext_svinval), CPU_CFG_OFFSET(ext_svade),

        RISCV_PROFILE_EXT_LIST_END
    }
};

RISCVCPUProfile *riscv_profiles[] = {
    &RVA22U64,
    &RVA22S64,
    NULL,
};
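/*
 * The profile CPU types registered at the end of this file (e.g.
 * rva22s64) start from a bare rv64i CPU and flag their profile as
 * enabled; a child profile such as RVA22S64 is also expected to bring
 * in everything declared by its parent, RVA22U64.
 */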
static Property riscv_cpu_properties[] = {
    DEFINE_PROP_BOOL("debug", RISCVCPU, cfg.debug, true),

    {.name = "pmu-mask", .info = &prop_pmu_mask},
    {.name = "pmu-num", .info = &prop_pmu_num}, /* Deprecated */

    {.name = "mmu", .info = &prop_mmu},
    {.name = "pmp", .info = &prop_pmp},

    {.name = "priv_spec", .info = &prop_priv_spec},
    {.name = "vext_spec", .info = &prop_vext_spec},

    {.name = "vlen", .info = &prop_vlen},
    {.name = "elen", .info = &prop_elen},

    {.name = "cbom_blocksize", .info = &prop_cbom_blksize},
    {.name = "cbop_blocksize", .info = &prop_cbop_blksize},
    {.name = "cboz_blocksize", .info = &prop_cboz_blksize},

    {.name = "mvendorid", .info = &prop_mvendorid},
    {.name = "mimpid", .info = &prop_mimpid},
    {.name = "marchid", .info = &prop_marchid},

#ifndef CONFIG_USER_ONLY
    DEFINE_PROP_UINT64("resetvec", RISCVCPU, env.resetvec, DEFAULT_RSTVEC),
#endif

    DEFINE_PROP_BOOL("short-isa-string", RISCVCPU, cfg.short_isa_string, false),

    DEFINE_PROP_BOOL("rvv_ta_all_1s", RISCVCPU, cfg.rvv_ta_all_1s, false),
    DEFINE_PROP_BOOL("rvv_ma_all_1s", RISCVCPU, cfg.rvv_ma_all_1s, false),

    /*
     * write_misa() is marked as experimental for now, so mark
     * it with -x and default to 'false'.
     */
    DEFINE_PROP_BOOL("x-misa-w", RISCVCPU, cfg.misa_w, false),
    DEFINE_PROP_END_OF_LIST(),
};

#if defined(TARGET_RISCV64)
static void rva22u64_profile_cpu_init(Object *obj)
{
    rv64i_bare_cpu_init(obj);

    RVA22U64.enabled = true;
}

static void rva22s64_profile_cpu_init(Object *obj)
{
    rv64i_bare_cpu_init(obj);

    RVA22S64.enabled = true;
}
#endif

static const gchar *riscv_gdb_arch_name(CPUState *cs)
{
    RISCVCPU *cpu = RISCV_CPU(cs);
    CPURISCVState *env = &cpu->env;

    switch (riscv_cpu_mxl(env)) {
    case MXL_RV32:
        return "riscv:rv32";
    case MXL_RV64:
    case MXL_RV128:
        return "riscv:rv64";
    default:
        g_assert_not_reached();
    }
}

#ifndef CONFIG_USER_ONLY
static int64_t riscv_get_arch_id(CPUState *cs)
{
    RISCVCPU *cpu = RISCV_CPU(cs);

    return cpu->env.mhartid;
}

#include "hw/core/sysemu-cpu-ops.h"

static const struct SysemuCPUOps riscv_sysemu_ops = {
    .get_phys_page_debug = riscv_cpu_get_phys_page_debug,
    .write_elf64_note = riscv_cpu_write_elf64_note,
    .write_elf32_note = riscv_cpu_write_elf32_note,
    .legacy_vmsd = &vmstate_riscv_cpu,
};
#endif

static void riscv_cpu_common_class_init(ObjectClass *c, void *data)
{
    RISCVCPUClass *mcc = RISCV_CPU_CLASS(c);
    CPUClass *cc = CPU_CLASS(c);
    DeviceClass *dc = DEVICE_CLASS(c);
    ResettableClass *rc = RESETTABLE_CLASS(c);

    device_class_set_parent_realize(dc, riscv_cpu_realize,
                                    &mcc->parent_realize);

    resettable_class_set_parent_phases(rc, NULL, riscv_cpu_reset_hold, NULL,
                                       &mcc->parent_phases);

    cc->class_by_name = riscv_cpu_class_by_name;
    cc->has_work = riscv_cpu_has_work;
    cc->mmu_index = riscv_cpu_mmu_index;
    cc->dump_state = riscv_cpu_dump_state;
    cc->set_pc = riscv_cpu_set_pc;
    cc->get_pc = riscv_cpu_get_pc;
    cc->gdb_read_register = riscv_cpu_gdb_read_register;
    cc->gdb_write_register = riscv_cpu_gdb_write_register;
    cc->gdb_stop_before_watchpoint = true;
    cc->disas_set_info = riscv_cpu_disas_set_info;
#ifndef CONFIG_USER_ONLY
    cc->sysemu_ops = &riscv_sysemu_ops;
    cc->get_arch_id = riscv_get_arch_id;
#endif
    cc->gdb_arch_name = riscv_gdb_arch_name;

    device_class_set_props(dc, riscv_cpu_properties);
}

static void riscv_cpu_class_init(ObjectClass *c, void *data)
{
    RISCVCPUClass *mcc = RISCV_CPU_CLASS(c);

    mcc->misa_mxl_max = (uint32_t)(uintptr_t)data;
    riscv_cpu_validate_misa_mxl(mcc);
}

static void riscv_isa_string_ext(RISCVCPU *cpu, char **isa_str,
                                 int max_str_len)
{
    const RISCVIsaExtData *edata;
    char *old = *isa_str;
    char *new = *isa_str;

    for (edata = isa_edata_arr; edata && edata->name; edata++) {
        if (isa_ext_is_enabled(cpu, edata->ext_enable_offset)) {
            new = g_strconcat(old, "_", edata->name, NULL);
            g_free(old);
            old = new;
        }
    }

    *isa_str = new;
}
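/*
 * riscv_isa_string() below builds strings of the form
 * "rv64imafdc_zicsr_zifencei_...": the "rv<xlen>" base, followed by the
 * enabled single-letter extensions, then (unless short-isa-string is
 * set) the enabled multi-letter extensions separated by underscores.
 */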
char *riscv_isa_string(RISCVCPU *cpu)
{
    RISCVCPUClass *mcc = RISCV_CPU_GET_CLASS(cpu);
    int i;
    const size_t maxlen = sizeof("rv128") + sizeof(riscv_single_letter_exts);
    char *isa_str = g_new(char, maxlen);
    int xlen = riscv_cpu_max_xlen(mcc);
    char *p = isa_str + snprintf(isa_str, maxlen, "rv%d", xlen);

    for (i = 0; i < sizeof(riscv_single_letter_exts) - 1; i++) {
        if (cpu->env.misa_ext & RV(riscv_single_letter_exts[i])) {
            *p++ = qemu_tolower(riscv_single_letter_exts[i]);
        }
    }
    *p = '\0';
    if (!cpu->cfg.short_isa_string) {
        riscv_isa_string_ext(cpu, &isa_str, maxlen);
    }
    return isa_str;
}

#ifndef CONFIG_USER_ONLY
static char **riscv_isa_extensions_list(RISCVCPU *cpu, int *count)
{
    int maxlen = ARRAY_SIZE(riscv_single_letter_exts) + ARRAY_SIZE(isa_edata_arr);
    char **extensions = g_new(char *, maxlen);

    for (int i = 0; i < sizeof(riscv_single_letter_exts) - 1; i++) {
        if (cpu->env.misa_ext & RV(riscv_single_letter_exts[i])) {
            extensions[*count] = g_new(char, 2);
            snprintf(extensions[*count], 2, "%c",
                     qemu_tolower(riscv_single_letter_exts[i]));
            (*count)++;
        }
    }

    for (const RISCVIsaExtData *edata = isa_edata_arr; edata->name; edata++) {
        if (isa_ext_is_enabled(cpu, edata->ext_enable_offset)) {
            extensions[*count] = g_strdup(edata->name);
            (*count)++;
        }
    }

    return extensions;
}

void riscv_isa_write_fdt(RISCVCPU *cpu, void *fdt, char *nodename)
{
    RISCVCPUClass *mcc = RISCV_CPU_GET_CLASS(cpu);
    const size_t maxlen = sizeof("rv128i");
    g_autofree char *isa_base = g_new(char, maxlen);
    g_autofree char *riscv_isa;
    char **isa_extensions;
    int count = 0;
    int xlen = riscv_cpu_max_xlen(mcc);

    riscv_isa = riscv_isa_string(cpu);
    qemu_fdt_setprop_string(fdt, nodename, "riscv,isa", riscv_isa);

    snprintf(isa_base, maxlen, "rv%di", xlen);
    qemu_fdt_setprop_string(fdt, nodename, "riscv,isa-base", isa_base);

    isa_extensions = riscv_isa_extensions_list(cpu, &count);
    qemu_fdt_setprop_string_array(fdt, nodename, "riscv,isa-extensions",
                                  isa_extensions, count);

    for (int i = 0; i < count; i++) {
        g_free(isa_extensions[i]);
    }

    g_free(isa_extensions);
}
#endif

#define DEFINE_CPU(type_name, misa_mxl_max, initfn)     \
    {                                                   \
        .name = (type_name),                            \
        .parent = TYPE_RISCV_CPU,                       \
        .instance_init = (initfn),                      \
        .class_init = riscv_cpu_class_init,             \
        .class_data = (void *)(misa_mxl_max)            \
    }

#define DEFINE_DYNAMIC_CPU(type_name, misa_mxl_max, initfn)     \
    {                                                           \
        .name = (type_name),                                    \
        .parent = TYPE_RISCV_DYNAMIC_CPU,                       \
        .instance_init = (initfn),                              \
        .class_init = riscv_cpu_class_init,                     \
        .class_data = (void *)(misa_mxl_max)                    \
    }

#define DEFINE_VENDOR_CPU(type_name, misa_mxl_max, initfn)      \
    {                                                           \
        .name = (type_name),                                    \
        .parent = TYPE_RISCV_VENDOR_CPU,                        \
        .instance_init = (initfn),                              \
        .class_init = riscv_cpu_class_init,                     \
        .class_data = (void *)(misa_mxl_max)                    \
    }

#define DEFINE_BARE_CPU(type_name, misa_mxl_max, initfn)        \
    {                                                           \
        .name = (type_name),                                    \
        .parent = TYPE_RISCV_BARE_CPU,                          \
        .instance_init = (initfn),                              \
        .class_init = riscv_cpu_class_init,                     \
        .class_data = (void *)(misa_mxl_max)                    \
    }

#define DEFINE_PROFILE_CPU(type_name, misa_mxl_max, initfn)     \
    {                                                           \
        .name = (type_name),                                    \
        .parent = TYPE_RISCV_BARE_CPU,                          \
        .instance_init = (initfn),                              \
        .class_init = riscv_cpu_class_init,                     \
        .class_data = (void *)(misa_mxl_max)                    \
    }
static const TypeInfo riscv_cpu_type_infos[] = {
    {
        .name = TYPE_RISCV_CPU,
        .parent = TYPE_CPU,
        .instance_size = sizeof(RISCVCPU),
        .instance_align = __alignof(RISCVCPU),
        .instance_init = riscv_cpu_init,
        .instance_post_init = riscv_cpu_post_init,
        .abstract = true,
        .class_size = sizeof(RISCVCPUClass),
        .class_init = riscv_cpu_common_class_init,
    },
    {
        .name = TYPE_RISCV_DYNAMIC_CPU,
        .parent = TYPE_RISCV_CPU,
        .abstract = true,
    },
    {
        .name = TYPE_RISCV_VENDOR_CPU,
        .parent = TYPE_RISCV_CPU,
        .abstract = true,
    },
    {
        .name = TYPE_RISCV_BARE_CPU,
        .parent = TYPE_RISCV_CPU,
        .instance_init = riscv_bare_cpu_init,
        .abstract = true,
    },
#if defined(TARGET_RISCV32)
    DEFINE_DYNAMIC_CPU(TYPE_RISCV_CPU_ANY, MXL_RV32, riscv_any_cpu_init),
    DEFINE_DYNAMIC_CPU(TYPE_RISCV_CPU_MAX, MXL_RV32, riscv_max_cpu_init),
    DEFINE_DYNAMIC_CPU(TYPE_RISCV_CPU_BASE32, MXL_RV32, rv32_base_cpu_init),
    DEFINE_VENDOR_CPU(TYPE_RISCV_CPU_IBEX, MXL_RV32, rv32_ibex_cpu_init),
    DEFINE_VENDOR_CPU(TYPE_RISCV_CPU_SIFIVE_E31, MXL_RV32, rv32_sifive_e_cpu_init),
    DEFINE_VENDOR_CPU(TYPE_RISCV_CPU_SIFIVE_E34, MXL_RV32, rv32_imafcu_nommu_cpu_init),
    DEFINE_VENDOR_CPU(TYPE_RISCV_CPU_SIFIVE_U34, MXL_RV32, rv32_sifive_u_cpu_init),
    DEFINE_BARE_CPU(TYPE_RISCV_CPU_RV32I, MXL_RV32, rv32i_bare_cpu_init),
    DEFINE_BARE_CPU(TYPE_RISCV_CPU_RV32E, MXL_RV32, rv32e_bare_cpu_init),
#elif defined(TARGET_RISCV64)
    DEFINE_DYNAMIC_CPU(TYPE_RISCV_CPU_ANY, MXL_RV64, riscv_any_cpu_init),
    DEFINE_DYNAMIC_CPU(TYPE_RISCV_CPU_MAX, MXL_RV64, riscv_max_cpu_init),
    DEFINE_DYNAMIC_CPU(TYPE_RISCV_CPU_BASE64, MXL_RV64, rv64_base_cpu_init),
    DEFINE_VENDOR_CPU(TYPE_RISCV_CPU_SIFIVE_E51, MXL_RV64, rv64_sifive_e_cpu_init),
    DEFINE_VENDOR_CPU(TYPE_RISCV_CPU_SIFIVE_U54, MXL_RV64, rv64_sifive_u_cpu_init),
    DEFINE_VENDOR_CPU(TYPE_RISCV_CPU_SHAKTI_C, MXL_RV64, rv64_sifive_u_cpu_init),
    DEFINE_VENDOR_CPU(TYPE_RISCV_CPU_THEAD_C906, MXL_RV64, rv64_thead_c906_cpu_init),
    DEFINE_VENDOR_CPU(TYPE_RISCV_CPU_VEYRON_V1, MXL_RV64, rv64_veyron_v1_cpu_init),
#ifdef CONFIG_TCG
    DEFINE_DYNAMIC_CPU(TYPE_RISCV_CPU_BASE128, MXL_RV128, rv128_base_cpu_init),
#endif /* CONFIG_TCG */
    DEFINE_BARE_CPU(TYPE_RISCV_CPU_RV64I, MXL_RV64, rv64i_bare_cpu_init),
    DEFINE_BARE_CPU(TYPE_RISCV_CPU_RV64E, MXL_RV64, rv64e_bare_cpu_init),
    DEFINE_PROFILE_CPU(TYPE_RISCV_CPU_RVA22U64, MXL_RV64, rva22u64_profile_cpu_init),
    DEFINE_PROFILE_CPU(TYPE_RISCV_CPU_RVA22S64, MXL_RV64, rva22s64_profile_cpu_init),
#endif /* TARGET_RISCV64 */
};

DEFINE_TYPES(riscv_cpu_type_infos)