/*
 * QEMU RISC-V CPU
 *
 * Copyright (c) 2016-2017 Sagar Karandikar, sagark@eecs.berkeley.edu
 * Copyright (c) 2017-2018 SiFive, Inc.
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms and conditions of the GNU General Public License,
 * version 2 or later, as published by the Free Software Foundation.
 *
 * This program is distributed in the hope it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
 * more details.
 *
 * You should have received a copy of the GNU General Public License along with
 * this program. If not, see <http://www.gnu.org/licenses/>.
 */

#include "qemu/osdep.h"
#include "qemu/qemu-print.h"
#include "qemu/ctype.h"
#include "qemu/log.h"
#include "cpu.h"
#include "cpu_vendorid.h"
#include "internals.h"
#include "exec/exec-all.h"
#include "qapi/error.h"
#include "qapi/visitor.h"
#include "qemu/error-report.h"
#include "hw/qdev-properties.h"
#include "hw/core/qdev-prop-internal.h"
#include "migration/vmstate.h"
#include "fpu/softfloat-helpers.h"
#include "sysemu/device_tree.h"
#include "sysemu/kvm.h"
#include "sysemu/tcg.h"
#include "kvm/kvm_riscv.h"
#include "tcg/tcg-cpu.h"
#include "tcg/tcg.h"

/* RISC-V CPU definitions */
static const char riscv_single_letter_exts[] = "IEMAFDQCBPVH";
const uint32_t misa_bits[] = {RVI, RVE, RVM, RVA, RVF, RVD, RVV,
                              RVC, RVS, RVU, RVH, RVJ, RVG, RVB, 0};

/*
 * From vector_helper.c
 * Note that vector data is stored in host-endian 64-bit chunks,
 * so addressing bytes needs a host-endian fixup.
 */
#if HOST_BIG_ENDIAN
#define BYTE(x)  ((x) ^ 7)
#else
#define BYTE(x)  (x)
#endif

bool riscv_cpu_is_32bit(RISCVCPU *cpu)
{
    return riscv_cpu_mxl(&cpu->env) == MXL_RV32;
}

/* Hash that stores general user set numeric options */
static GHashTable *general_user_opts;

static void cpu_option_add_user_setting(const char *optname, uint32_t value)
{
    g_hash_table_insert(general_user_opts, (gpointer)optname,
                        GUINT_TO_POINTER(value));
}

bool riscv_cpu_option_set(const char *optname)
{
    return g_hash_table_contains(general_user_opts, optname);
}

#define ISA_EXT_DATA_ENTRY(_name, _min_ver, _prop) \
    {#_name, _min_ver, CPU_CFG_OFFSET(_prop)}

/*
 * Here are the ordering rules of extension naming defined by the RISC-V
 * specification:
 * 1. All extensions should be separated from other multi-letter extensions
 *    by an underscore.
 * 2. The first letter following the 'Z' conventionally indicates the most
 *    closely related alphabetical extension category, IMAFDQLCBKJTPVH.
 *    If multiple 'Z' extensions are named, they should be ordered first
 *    by category, then alphabetically within a category.
 * 3. Standard supervisor-level extensions (starting with 'S') should be
 *    listed after standard unprivileged extensions. If multiple
 *    supervisor-level extensions are listed, they should be ordered
 *    alphabetically.
 * 4. Non-standard extensions (starting with 'X') must be listed after all
 *    standard extensions. They must be separated from other multi-letter
 *    extensions by an underscore.
 *
 * Single letter extensions are checked in riscv_cpu_validate_misa_priv()
 * instead.
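 *
 * For example (purely illustrative, not a real CPU description), an ISA
 * string ordered according to the rules above would look like:
 *   rv64imafdc_zicsr_zifencei_zba_zbb_sstc_svinval_xtheadba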
 */
const RISCVIsaExtData isa_edata_arr[] = {
    ISA_EXT_DATA_ENTRY(zic64b, PRIV_VERSION_1_12_0, ext_zic64b),
    ISA_EXT_DATA_ENTRY(zicbom, PRIV_VERSION_1_12_0, ext_zicbom),
    ISA_EXT_DATA_ENTRY(zicbop, PRIV_VERSION_1_12_0, ext_zicbop),
    ISA_EXT_DATA_ENTRY(zicboz, PRIV_VERSION_1_12_0, ext_zicboz),
    ISA_EXT_DATA_ENTRY(ziccamoa, PRIV_VERSION_1_11_0, has_priv_1_11),
    ISA_EXT_DATA_ENTRY(ziccif, PRIV_VERSION_1_11_0, has_priv_1_11),
    ISA_EXT_DATA_ENTRY(zicclsm, PRIV_VERSION_1_11_0, has_priv_1_11),
    ISA_EXT_DATA_ENTRY(ziccrse, PRIV_VERSION_1_11_0, has_priv_1_11),
    ISA_EXT_DATA_ENTRY(zicond, PRIV_VERSION_1_12_0, ext_zicond),
    ISA_EXT_DATA_ENTRY(zicntr, PRIV_VERSION_1_12_0, ext_zicntr),
    ISA_EXT_DATA_ENTRY(zicsr, PRIV_VERSION_1_10_0, ext_zicsr),
    ISA_EXT_DATA_ENTRY(zifencei, PRIV_VERSION_1_10_0, ext_zifencei),
    ISA_EXT_DATA_ENTRY(zihintntl, PRIV_VERSION_1_10_0, ext_zihintntl),
    ISA_EXT_DATA_ENTRY(zihintpause, PRIV_VERSION_1_10_0, ext_zihintpause),
    ISA_EXT_DATA_ENTRY(zihpm, PRIV_VERSION_1_12_0, ext_zihpm),
    ISA_EXT_DATA_ENTRY(zimop, PRIV_VERSION_1_13_0, ext_zimop),
    ISA_EXT_DATA_ENTRY(zmmul, PRIV_VERSION_1_12_0, ext_zmmul),
    ISA_EXT_DATA_ENTRY(za64rs, PRIV_VERSION_1_12_0, has_priv_1_11),
    ISA_EXT_DATA_ENTRY(zaamo, PRIV_VERSION_1_12_0, ext_zaamo),
    ISA_EXT_DATA_ENTRY(zabha, PRIV_VERSION_1_13_0, ext_zabha),
    ISA_EXT_DATA_ENTRY(zacas, PRIV_VERSION_1_12_0, ext_zacas),
    ISA_EXT_DATA_ENTRY(zama16b, PRIV_VERSION_1_13_0, ext_zama16b),
    ISA_EXT_DATA_ENTRY(zalrsc, PRIV_VERSION_1_12_0, ext_zalrsc),
    ISA_EXT_DATA_ENTRY(zawrs, PRIV_VERSION_1_12_0, ext_zawrs),
    ISA_EXT_DATA_ENTRY(zfa, PRIV_VERSION_1_12_0, ext_zfa),
    ISA_EXT_DATA_ENTRY(zfbfmin, PRIV_VERSION_1_12_0, ext_zfbfmin),
    ISA_EXT_DATA_ENTRY(zfh, PRIV_VERSION_1_11_0, ext_zfh),
    ISA_EXT_DATA_ENTRY(zfhmin, PRIV_VERSION_1_11_0, ext_zfhmin),
    ISA_EXT_DATA_ENTRY(zfinx, PRIV_VERSION_1_12_0, ext_zfinx),
    ISA_EXT_DATA_ENTRY(zdinx, PRIV_VERSION_1_12_0, ext_zdinx),
    ISA_EXT_DATA_ENTRY(zca, PRIV_VERSION_1_12_0, ext_zca),
    ISA_EXT_DATA_ENTRY(zcb, PRIV_VERSION_1_12_0, ext_zcb),
    ISA_EXT_DATA_ENTRY(zcf, PRIV_VERSION_1_12_0, ext_zcf),
    ISA_EXT_DATA_ENTRY(zcd, PRIV_VERSION_1_12_0, ext_zcd),
    ISA_EXT_DATA_ENTRY(zce, PRIV_VERSION_1_12_0, ext_zce),
    ISA_EXT_DATA_ENTRY(zcmop, PRIV_VERSION_1_13_0, ext_zcmop),
    ISA_EXT_DATA_ENTRY(zcmp, PRIV_VERSION_1_12_0, ext_zcmp),
    ISA_EXT_DATA_ENTRY(zcmt, PRIV_VERSION_1_12_0, ext_zcmt),
    ISA_EXT_DATA_ENTRY(zba, PRIV_VERSION_1_12_0, ext_zba),
    ISA_EXT_DATA_ENTRY(zbb, PRIV_VERSION_1_12_0, ext_zbb),
    ISA_EXT_DATA_ENTRY(zbc, PRIV_VERSION_1_12_0, ext_zbc),
    ISA_EXT_DATA_ENTRY(zbkb, PRIV_VERSION_1_12_0, ext_zbkb),
    ISA_EXT_DATA_ENTRY(zbkc, PRIV_VERSION_1_12_0, ext_zbkc),
    ISA_EXT_DATA_ENTRY(zbkx, PRIV_VERSION_1_12_0, ext_zbkx),
    ISA_EXT_DATA_ENTRY(zbs, PRIV_VERSION_1_12_0, ext_zbs),
    ISA_EXT_DATA_ENTRY(zk, PRIV_VERSION_1_12_0, ext_zk),
    ISA_EXT_DATA_ENTRY(zkn, PRIV_VERSION_1_12_0, ext_zkn),
    ISA_EXT_DATA_ENTRY(zknd, PRIV_VERSION_1_12_0, ext_zknd),
    ISA_EXT_DATA_ENTRY(zkne, PRIV_VERSION_1_12_0, ext_zkne),
    ISA_EXT_DATA_ENTRY(zknh, PRIV_VERSION_1_12_0, ext_zknh),
    ISA_EXT_DATA_ENTRY(zkr, PRIV_VERSION_1_12_0, ext_zkr),
    ISA_EXT_DATA_ENTRY(zks, PRIV_VERSION_1_12_0, ext_zks),
    ISA_EXT_DATA_ENTRY(zksed, PRIV_VERSION_1_12_0, ext_zksed),
    ISA_EXT_DATA_ENTRY(zksh, PRIV_VERSION_1_12_0, ext_zksh),
    ISA_EXT_DATA_ENTRY(zkt, PRIV_VERSION_1_12_0, ext_zkt),
    ISA_EXT_DATA_ENTRY(ztso,
                       PRIV_VERSION_1_12_0, ext_ztso),
    ISA_EXT_DATA_ENTRY(zvbb, PRIV_VERSION_1_12_0, ext_zvbb),
    ISA_EXT_DATA_ENTRY(zvbc, PRIV_VERSION_1_12_0, ext_zvbc),
    ISA_EXT_DATA_ENTRY(zve32f, PRIV_VERSION_1_10_0, ext_zve32f),
    ISA_EXT_DATA_ENTRY(zve32x, PRIV_VERSION_1_10_0, ext_zve32x),
    ISA_EXT_DATA_ENTRY(zve64f, PRIV_VERSION_1_10_0, ext_zve64f),
    ISA_EXT_DATA_ENTRY(zve64d, PRIV_VERSION_1_10_0, ext_zve64d),
    ISA_EXT_DATA_ENTRY(zve64x, PRIV_VERSION_1_10_0, ext_zve64x),
    ISA_EXT_DATA_ENTRY(zvfbfmin, PRIV_VERSION_1_12_0, ext_zvfbfmin),
    ISA_EXT_DATA_ENTRY(zvfbfwma, PRIV_VERSION_1_12_0, ext_zvfbfwma),
    ISA_EXT_DATA_ENTRY(zvfh, PRIV_VERSION_1_12_0, ext_zvfh),
    ISA_EXT_DATA_ENTRY(zvfhmin, PRIV_VERSION_1_12_0, ext_zvfhmin),
    ISA_EXT_DATA_ENTRY(zvkb, PRIV_VERSION_1_12_0, ext_zvkb),
    ISA_EXT_DATA_ENTRY(zvkg, PRIV_VERSION_1_12_0, ext_zvkg),
    ISA_EXT_DATA_ENTRY(zvkn, PRIV_VERSION_1_12_0, ext_zvkn),
    ISA_EXT_DATA_ENTRY(zvknc, PRIV_VERSION_1_12_0, ext_zvknc),
    ISA_EXT_DATA_ENTRY(zvkned, PRIV_VERSION_1_12_0, ext_zvkned),
    ISA_EXT_DATA_ENTRY(zvkng, PRIV_VERSION_1_12_0, ext_zvkng),
    ISA_EXT_DATA_ENTRY(zvknha, PRIV_VERSION_1_12_0, ext_zvknha),
    ISA_EXT_DATA_ENTRY(zvknhb, PRIV_VERSION_1_12_0, ext_zvknhb),
    ISA_EXT_DATA_ENTRY(zvks, PRIV_VERSION_1_12_0, ext_zvks),
    ISA_EXT_DATA_ENTRY(zvksc, PRIV_VERSION_1_12_0, ext_zvksc),
    ISA_EXT_DATA_ENTRY(zvksed, PRIV_VERSION_1_12_0, ext_zvksed),
    ISA_EXT_DATA_ENTRY(zvksg, PRIV_VERSION_1_12_0, ext_zvksg),
    ISA_EXT_DATA_ENTRY(zvksh, PRIV_VERSION_1_12_0, ext_zvksh),
    ISA_EXT_DATA_ENTRY(zvkt, PRIV_VERSION_1_12_0, ext_zvkt),
    ISA_EXT_DATA_ENTRY(zhinx, PRIV_VERSION_1_12_0, ext_zhinx),
    ISA_EXT_DATA_ENTRY(zhinxmin, PRIV_VERSION_1_12_0, ext_zhinxmin),
    ISA_EXT_DATA_ENTRY(smaia, PRIV_VERSION_1_12_0, ext_smaia),
    ISA_EXT_DATA_ENTRY(smcntrpmf, PRIV_VERSION_1_12_0, ext_smcntrpmf),
    ISA_EXT_DATA_ENTRY(smepmp, PRIV_VERSION_1_12_0, ext_smepmp),
    ISA_EXT_DATA_ENTRY(smstateen, PRIV_VERSION_1_12_0, ext_smstateen),
    ISA_EXT_DATA_ENTRY(ssaia, PRIV_VERSION_1_12_0, ext_ssaia),
    ISA_EXT_DATA_ENTRY(ssccptr, PRIV_VERSION_1_11_0, has_priv_1_11),
    ISA_EXT_DATA_ENTRY(sscofpmf, PRIV_VERSION_1_12_0, ext_sscofpmf),
    ISA_EXT_DATA_ENTRY(sscounterenw, PRIV_VERSION_1_12_0, has_priv_1_12),
    ISA_EXT_DATA_ENTRY(sstc, PRIV_VERSION_1_12_0, ext_sstc),
    ISA_EXT_DATA_ENTRY(sstvala, PRIV_VERSION_1_12_0, has_priv_1_12),
    ISA_EXT_DATA_ENTRY(sstvecd, PRIV_VERSION_1_12_0, has_priv_1_12),
    ISA_EXT_DATA_ENTRY(svade, PRIV_VERSION_1_11_0, ext_svade),
    ISA_EXT_DATA_ENTRY(svadu, PRIV_VERSION_1_12_0, ext_svadu),
    ISA_EXT_DATA_ENTRY(svinval, PRIV_VERSION_1_12_0, ext_svinval),
    ISA_EXT_DATA_ENTRY(svnapot, PRIV_VERSION_1_12_0, ext_svnapot),
    ISA_EXT_DATA_ENTRY(svpbmt, PRIV_VERSION_1_12_0, ext_svpbmt),
    ISA_EXT_DATA_ENTRY(xtheadba, PRIV_VERSION_1_11_0, ext_xtheadba),
    ISA_EXT_DATA_ENTRY(xtheadbb, PRIV_VERSION_1_11_0, ext_xtheadbb),
    ISA_EXT_DATA_ENTRY(xtheadbs, PRIV_VERSION_1_11_0, ext_xtheadbs),
    ISA_EXT_DATA_ENTRY(xtheadcmo, PRIV_VERSION_1_11_0, ext_xtheadcmo),
    ISA_EXT_DATA_ENTRY(xtheadcondmov, PRIV_VERSION_1_11_0, ext_xtheadcondmov),
    ISA_EXT_DATA_ENTRY(xtheadfmemidx, PRIV_VERSION_1_11_0, ext_xtheadfmemidx),
    ISA_EXT_DATA_ENTRY(xtheadfmv, PRIV_VERSION_1_11_0, ext_xtheadfmv),
    ISA_EXT_DATA_ENTRY(xtheadmac, PRIV_VERSION_1_11_0, ext_xtheadmac),
    ISA_EXT_DATA_ENTRY(xtheadmemidx, PRIV_VERSION_1_11_0, ext_xtheadmemidx),
    ISA_EXT_DATA_ENTRY(xtheadmempair, PRIV_VERSION_1_11_0,
                       ext_xtheadmempair),
    ISA_EXT_DATA_ENTRY(xtheadsync, PRIV_VERSION_1_11_0, ext_xtheadsync),
    ISA_EXT_DATA_ENTRY(xventanacondops, PRIV_VERSION_1_12_0, ext_XVentanaCondOps),

    DEFINE_PROP_END_OF_LIST(),
};

bool isa_ext_is_enabled(RISCVCPU *cpu, uint32_t ext_offset)
{
    bool *ext_enabled = (void *)&cpu->cfg + ext_offset;

    return *ext_enabled;
}

void isa_ext_update_enabled(RISCVCPU *cpu, uint32_t ext_offset, bool en)
{
    bool *ext_enabled = (void *)&cpu->cfg + ext_offset;

    *ext_enabled = en;
}

bool riscv_cpu_is_vendor(Object *cpu_obj)
{
    return object_dynamic_cast(cpu_obj, TYPE_RISCV_VENDOR_CPU) != NULL;
}

const char * const riscv_int_regnames[] = {
    "x0/zero", "x1/ra",  "x2/sp",  "x3/gp",  "x4/tp",  "x5/t0",   "x6/t1",
    "x7/t2",   "x8/s0",  "x9/s1",  "x10/a0", "x11/a1", "x12/a2",  "x13/a3",
    "x14/a4",  "x15/a5", "x16/a6", "x17/a7", "x18/s2", "x19/s3",  "x20/s4",
    "x21/s5",  "x22/s6", "x23/s7", "x24/s8", "x25/s9", "x26/s10", "x27/s11",
    "x28/t3",  "x29/t4", "x30/t5", "x31/t6"
};

const char * const riscv_int_regnamesh[] = {
    "x0h/zeroh", "x1h/rah",  "x2h/sph",   "x3h/gph",   "x4h/tph",  "x5h/t0h",
    "x6h/t1h",   "x7h/t2h",  "x8h/s0h",   "x9h/s1h",   "x10h/a0h", "x11h/a1h",
    "x12h/a2h",  "x13h/a3h", "x14h/a4h",  "x15h/a5h",  "x16h/a6h", "x17h/a7h",
    "x18h/s2h",  "x19h/s3h", "x20h/s4h",  "x21h/s5h",  "x22h/s6h", "x23h/s7h",
    "x24h/s8h",  "x25h/s9h", "x26h/s10h", "x27h/s11h", "x28h/t3h", "x29h/t4h",
    "x30h/t5h",  "x31h/t6h"
};

const char * const riscv_fpr_regnames[] = {
    "f0/ft0",   "f1/ft1",  "f2/ft2",   "f3/ft3",   "f4/ft4",  "f5/ft5",
    "f6/ft6",   "f7/ft7",  "f8/fs0",   "f9/fs1",   "f10/fa0", "f11/fa1",
    "f12/fa2",  "f13/fa3", "f14/fa4",  "f15/fa5",  "f16/fa6", "f17/fa7",
    "f18/fs2",  "f19/fs3", "f20/fs4",  "f21/fs5",  "f22/fs6", "f23/fs7",
    "f24/fs8",  "f25/fs9", "f26/fs10", "f27/fs11", "f28/ft8", "f29/ft9",
    "f30/ft10", "f31/ft11"
};

const char * const riscv_rvv_regnames[] = {
    "v0",  "v1",  "v2",  "v3",  "v4",  "v5",  "v6",
    "v7",  "v8",  "v9",  "v10", "v11", "v12", "v13",
    "v14", "v15", "v16", "v17", "v18", "v19", "v20",
    "v21", "v22", "v23", "v24", "v25", "v26", "v27",
    "v28", "v29", "v30", "v31"
};

static const char * const riscv_excp_names[] = {
    "misaligned_fetch",
    "fault_fetch",
    "illegal_instruction",
    "breakpoint",
    "misaligned_load",
    "fault_load",
    "misaligned_store",
    "fault_store",
    "user_ecall",
    "supervisor_ecall",
    "hypervisor_ecall",
    "machine_ecall",
    "exec_page_fault",
    "load_page_fault",
    "reserved",
    "store_page_fault",
    "reserved",
    "reserved",
    "reserved",
    "reserved",
    "guest_exec_page_fault",
    "guest_load_page_fault",
    "reserved",
    "guest_store_page_fault",
};

static const char * const riscv_intr_names[] = {
    "u_software",
    "s_software",
    "vs_software",
    "m_software",
    "u_timer",
    "s_timer",
    "vs_timer",
    "m_timer",
    "u_external",
    "s_external",
    "vs_external",
    "m_external",
    "reserved",
    "reserved",
    "reserved",
    "reserved"
};

const char *riscv_cpu_get_trap_name(target_ulong cause, bool async)
{
    if (async) {
        return (cause < ARRAY_SIZE(riscv_intr_names)) ?
               riscv_intr_names[cause] : "(unknown)";
    } else {
        return (cause < ARRAY_SIZE(riscv_excp_names)) ?
322 riscv_excp_names[cause] : "(unknown)"; 323 } 324 } 325 326 void riscv_cpu_set_misa_ext(CPURISCVState *env, uint32_t ext) 327 { 328 env->misa_ext_mask = env->misa_ext = ext; 329 } 330 331 int riscv_cpu_max_xlen(RISCVCPUClass *mcc) 332 { 333 return 16 << mcc->misa_mxl_max; 334 } 335 336 #ifndef CONFIG_USER_ONLY 337 static uint8_t satp_mode_from_str(const char *satp_mode_str) 338 { 339 if (!strncmp(satp_mode_str, "mbare", 5)) { 340 return VM_1_10_MBARE; 341 } 342 343 if (!strncmp(satp_mode_str, "sv32", 4)) { 344 return VM_1_10_SV32; 345 } 346 347 if (!strncmp(satp_mode_str, "sv39", 4)) { 348 return VM_1_10_SV39; 349 } 350 351 if (!strncmp(satp_mode_str, "sv48", 4)) { 352 return VM_1_10_SV48; 353 } 354 355 if (!strncmp(satp_mode_str, "sv57", 4)) { 356 return VM_1_10_SV57; 357 } 358 359 if (!strncmp(satp_mode_str, "sv64", 4)) { 360 return VM_1_10_SV64; 361 } 362 363 g_assert_not_reached(); 364 } 365 366 uint8_t satp_mode_max_from_map(uint32_t map) 367 { 368 /* 369 * 'map = 0' will make us return (31 - 32), which C will 370 * happily overflow to UINT_MAX. There's no good result to 371 * return if 'map = 0' (e.g. returning 0 will be ambiguous 372 * with the result for 'map = 1'). 373 * 374 * Assert out if map = 0. Callers will have to deal with 375 * it outside of this function. 376 */ 377 g_assert(map > 0); 378 379 /* map here has at least one bit set, so no problem with clz */ 380 return 31 - __builtin_clz(map); 381 } 382 383 const char *satp_mode_str(uint8_t satp_mode, bool is_32_bit) 384 { 385 if (is_32_bit) { 386 switch (satp_mode) { 387 case VM_1_10_SV32: 388 return "sv32"; 389 case VM_1_10_MBARE: 390 return "none"; 391 } 392 } else { 393 switch (satp_mode) { 394 case VM_1_10_SV64: 395 return "sv64"; 396 case VM_1_10_SV57: 397 return "sv57"; 398 case VM_1_10_SV48: 399 return "sv48"; 400 case VM_1_10_SV39: 401 return "sv39"; 402 case VM_1_10_MBARE: 403 return "none"; 404 } 405 } 406 407 g_assert_not_reached(); 408 } 409 410 static void set_satp_mode_max_supported(RISCVCPU *cpu, 411 uint8_t satp_mode) 412 { 413 bool rv32 = riscv_cpu_mxl(&cpu->env) == MXL_RV32; 414 const bool *valid_vm = rv32 ? valid_vm_1_10_32 : valid_vm_1_10_64; 415 416 for (int i = 0; i <= satp_mode; ++i) { 417 if (valid_vm[i]) { 418 cpu->cfg.satp_mode.supported |= (1 << i); 419 } 420 } 421 } 422 423 /* Set the satp mode to the max supported */ 424 static void set_satp_mode_default_map(RISCVCPU *cpu) 425 { 426 /* 427 * Bare CPUs do not default to the max available. 428 * Users must set a valid satp_mode in the command 429 * line. 430 */ 431 if (object_dynamic_cast(OBJECT(cpu), TYPE_RISCV_BARE_CPU) != NULL) { 432 warn_report("No satp mode set. Defaulting to 'bare'"); 433 cpu->cfg.satp_mode.map = (1 << VM_1_10_MBARE); 434 return; 435 } 436 437 cpu->cfg.satp_mode.map = cpu->cfg.satp_mode.supported; 438 } 439 #endif 440 441 static void riscv_any_cpu_init(Object *obj) 442 { 443 RISCVCPU *cpu = RISCV_CPU(obj); 444 CPURISCVState *env = &cpu->env; 445 riscv_cpu_set_misa_ext(env, RVI | RVM | RVA | RVF | RVD | RVC | RVU); 446 447 #ifndef CONFIG_USER_ONLY 448 set_satp_mode_max_supported(RISCV_CPU(obj), 449 riscv_cpu_mxl(&RISCV_CPU(obj)->env) == MXL_RV32 ? 
450 VM_1_10_SV32 : VM_1_10_SV57); 451 #endif 452 453 env->priv_ver = PRIV_VERSION_LATEST; 454 455 /* inherited from parent obj via riscv_cpu_init() */ 456 cpu->cfg.ext_zifencei = true; 457 cpu->cfg.ext_zicsr = true; 458 cpu->cfg.mmu = true; 459 cpu->cfg.pmp = true; 460 } 461 462 static void riscv_max_cpu_init(Object *obj) 463 { 464 RISCVCPU *cpu = RISCV_CPU(obj); 465 CPURISCVState *env = &cpu->env; 466 467 cpu->cfg.mmu = true; 468 cpu->cfg.pmp = true; 469 470 env->priv_ver = PRIV_VERSION_LATEST; 471 #ifndef CONFIG_USER_ONLY 472 #ifdef TARGET_RISCV32 473 set_satp_mode_max_supported(cpu, VM_1_10_SV32); 474 #else 475 set_satp_mode_max_supported(cpu, VM_1_10_SV57); 476 #endif 477 #endif 478 } 479 480 #if defined(TARGET_RISCV64) 481 static void rv64_base_cpu_init(Object *obj) 482 { 483 RISCVCPU *cpu = RISCV_CPU(obj); 484 CPURISCVState *env = &cpu->env; 485 486 cpu->cfg.mmu = true; 487 cpu->cfg.pmp = true; 488 489 /* Set latest version of privileged specification */ 490 env->priv_ver = PRIV_VERSION_LATEST; 491 #ifndef CONFIG_USER_ONLY 492 set_satp_mode_max_supported(RISCV_CPU(obj), VM_1_10_SV57); 493 #endif 494 } 495 496 static void rv64_sifive_u_cpu_init(Object *obj) 497 { 498 RISCVCPU *cpu = RISCV_CPU(obj); 499 CPURISCVState *env = &cpu->env; 500 riscv_cpu_set_misa_ext(env, RVI | RVM | RVA | RVF | RVD | RVC | RVS | RVU); 501 env->priv_ver = PRIV_VERSION_1_10_0; 502 #ifndef CONFIG_USER_ONLY 503 set_satp_mode_max_supported(RISCV_CPU(obj), VM_1_10_SV39); 504 #endif 505 506 /* inherited from parent obj via riscv_cpu_init() */ 507 cpu->cfg.ext_zifencei = true; 508 cpu->cfg.ext_zicsr = true; 509 cpu->cfg.mmu = true; 510 cpu->cfg.pmp = true; 511 } 512 513 static void rv64_sifive_e_cpu_init(Object *obj) 514 { 515 CPURISCVState *env = &RISCV_CPU(obj)->env; 516 RISCVCPU *cpu = RISCV_CPU(obj); 517 518 riscv_cpu_set_misa_ext(env, RVI | RVM | RVA | RVC | RVU); 519 env->priv_ver = PRIV_VERSION_1_10_0; 520 #ifndef CONFIG_USER_ONLY 521 set_satp_mode_max_supported(cpu, VM_1_10_MBARE); 522 #endif 523 524 /* inherited from parent obj via riscv_cpu_init() */ 525 cpu->cfg.ext_zifencei = true; 526 cpu->cfg.ext_zicsr = true; 527 cpu->cfg.pmp = true; 528 } 529 530 static void rv64_thead_c906_cpu_init(Object *obj) 531 { 532 CPURISCVState *env = &RISCV_CPU(obj)->env; 533 RISCVCPU *cpu = RISCV_CPU(obj); 534 535 riscv_cpu_set_misa_ext(env, RVG | RVC | RVS | RVU); 536 env->priv_ver = PRIV_VERSION_1_11_0; 537 538 cpu->cfg.ext_zfa = true; 539 cpu->cfg.ext_zfh = true; 540 cpu->cfg.mmu = true; 541 cpu->cfg.ext_xtheadba = true; 542 cpu->cfg.ext_xtheadbb = true; 543 cpu->cfg.ext_xtheadbs = true; 544 cpu->cfg.ext_xtheadcmo = true; 545 cpu->cfg.ext_xtheadcondmov = true; 546 cpu->cfg.ext_xtheadfmemidx = true; 547 cpu->cfg.ext_xtheadmac = true; 548 cpu->cfg.ext_xtheadmemidx = true; 549 cpu->cfg.ext_xtheadmempair = true; 550 cpu->cfg.ext_xtheadsync = true; 551 552 cpu->cfg.mvendorid = THEAD_VENDOR_ID; 553 #ifndef CONFIG_USER_ONLY 554 set_satp_mode_max_supported(cpu, VM_1_10_SV39); 555 th_register_custom_csrs(cpu); 556 #endif 557 558 /* inherited from parent obj via riscv_cpu_init() */ 559 cpu->cfg.pmp = true; 560 } 561 562 static void rv64_veyron_v1_cpu_init(Object *obj) 563 { 564 CPURISCVState *env = &RISCV_CPU(obj)->env; 565 RISCVCPU *cpu = RISCV_CPU(obj); 566 567 riscv_cpu_set_misa_ext(env, RVG | RVC | RVS | RVU | RVH); 568 env->priv_ver = PRIV_VERSION_1_12_0; 569 570 /* Enable ISA extensions */ 571 cpu->cfg.mmu = true; 572 cpu->cfg.ext_zifencei = true; 573 cpu->cfg.ext_zicsr = true; 574 cpu->cfg.pmp = true; 575 
cpu->cfg.ext_zicbom = true; 576 cpu->cfg.cbom_blocksize = 64; 577 cpu->cfg.cboz_blocksize = 64; 578 cpu->cfg.ext_zicboz = true; 579 cpu->cfg.ext_smaia = true; 580 cpu->cfg.ext_ssaia = true; 581 cpu->cfg.ext_sscofpmf = true; 582 cpu->cfg.ext_sstc = true; 583 cpu->cfg.ext_svinval = true; 584 cpu->cfg.ext_svnapot = true; 585 cpu->cfg.ext_svpbmt = true; 586 cpu->cfg.ext_smstateen = true; 587 cpu->cfg.ext_zba = true; 588 cpu->cfg.ext_zbb = true; 589 cpu->cfg.ext_zbc = true; 590 cpu->cfg.ext_zbs = true; 591 cpu->cfg.ext_XVentanaCondOps = true; 592 593 cpu->cfg.mvendorid = VEYRON_V1_MVENDORID; 594 cpu->cfg.marchid = VEYRON_V1_MARCHID; 595 cpu->cfg.mimpid = VEYRON_V1_MIMPID; 596 597 #ifndef CONFIG_USER_ONLY 598 set_satp_mode_max_supported(cpu, VM_1_10_SV48); 599 #endif 600 } 601 602 #ifdef CONFIG_TCG 603 static void rv128_base_cpu_init(Object *obj) 604 { 605 RISCVCPU *cpu = RISCV_CPU(obj); 606 CPURISCVState *env = &cpu->env; 607 608 if (qemu_tcg_mttcg_enabled()) { 609 /* Missing 128-bit aligned atomics */ 610 error_report("128-bit RISC-V currently does not work with Multi " 611 "Threaded TCG. Please use: -accel tcg,thread=single"); 612 exit(EXIT_FAILURE); 613 } 614 615 cpu->cfg.mmu = true; 616 cpu->cfg.pmp = true; 617 618 /* Set latest version of privileged specification */ 619 env->priv_ver = PRIV_VERSION_LATEST; 620 #ifndef CONFIG_USER_ONLY 621 set_satp_mode_max_supported(RISCV_CPU(obj), VM_1_10_SV57); 622 #endif 623 } 624 #endif /* CONFIG_TCG */ 625 626 static void rv64i_bare_cpu_init(Object *obj) 627 { 628 CPURISCVState *env = &RISCV_CPU(obj)->env; 629 riscv_cpu_set_misa_ext(env, RVI); 630 } 631 632 static void rv64e_bare_cpu_init(Object *obj) 633 { 634 CPURISCVState *env = &RISCV_CPU(obj)->env; 635 riscv_cpu_set_misa_ext(env, RVE); 636 } 637 638 #else /* !TARGET_RISCV64 */ 639 640 static void rv32_base_cpu_init(Object *obj) 641 { 642 RISCVCPU *cpu = RISCV_CPU(obj); 643 CPURISCVState *env = &cpu->env; 644 645 cpu->cfg.mmu = true; 646 cpu->cfg.pmp = true; 647 648 /* Set latest version of privileged specification */ 649 env->priv_ver = PRIV_VERSION_LATEST; 650 #ifndef CONFIG_USER_ONLY 651 set_satp_mode_max_supported(RISCV_CPU(obj), VM_1_10_SV32); 652 #endif 653 } 654 655 static void rv32_sifive_u_cpu_init(Object *obj) 656 { 657 RISCVCPU *cpu = RISCV_CPU(obj); 658 CPURISCVState *env = &cpu->env; 659 riscv_cpu_set_misa_ext(env, RVI | RVM | RVA | RVF | RVD | RVC | RVS | RVU); 660 env->priv_ver = PRIV_VERSION_1_10_0; 661 #ifndef CONFIG_USER_ONLY 662 set_satp_mode_max_supported(RISCV_CPU(obj), VM_1_10_SV32); 663 #endif 664 665 /* inherited from parent obj via riscv_cpu_init() */ 666 cpu->cfg.ext_zifencei = true; 667 cpu->cfg.ext_zicsr = true; 668 cpu->cfg.mmu = true; 669 cpu->cfg.pmp = true; 670 } 671 672 static void rv32_sifive_e_cpu_init(Object *obj) 673 { 674 CPURISCVState *env = &RISCV_CPU(obj)->env; 675 RISCVCPU *cpu = RISCV_CPU(obj); 676 677 riscv_cpu_set_misa_ext(env, RVI | RVM | RVA | RVC | RVU); 678 env->priv_ver = PRIV_VERSION_1_10_0; 679 #ifndef CONFIG_USER_ONLY 680 set_satp_mode_max_supported(cpu, VM_1_10_MBARE); 681 #endif 682 683 /* inherited from parent obj via riscv_cpu_init() */ 684 cpu->cfg.ext_zifencei = true; 685 cpu->cfg.ext_zicsr = true; 686 cpu->cfg.pmp = true; 687 } 688 689 static void rv32_ibex_cpu_init(Object *obj) 690 { 691 CPURISCVState *env = &RISCV_CPU(obj)->env; 692 RISCVCPU *cpu = RISCV_CPU(obj); 693 694 riscv_cpu_set_misa_ext(env, RVI | RVM | RVC | RVU); 695 env->priv_ver = PRIV_VERSION_1_12_0; 696 #ifndef CONFIG_USER_ONLY 697 set_satp_mode_max_supported(cpu, 
VM_1_10_MBARE); 698 #endif 699 /* inherited from parent obj via riscv_cpu_init() */ 700 cpu->cfg.ext_zifencei = true; 701 cpu->cfg.ext_zicsr = true; 702 cpu->cfg.pmp = true; 703 cpu->cfg.ext_smepmp = true; 704 } 705 706 static void rv32_imafcu_nommu_cpu_init(Object *obj) 707 { 708 CPURISCVState *env = &RISCV_CPU(obj)->env; 709 RISCVCPU *cpu = RISCV_CPU(obj); 710 711 riscv_cpu_set_misa_ext(env, RVI | RVM | RVA | RVF | RVC | RVU); 712 env->priv_ver = PRIV_VERSION_1_10_0; 713 #ifndef CONFIG_USER_ONLY 714 set_satp_mode_max_supported(cpu, VM_1_10_MBARE); 715 #endif 716 717 /* inherited from parent obj via riscv_cpu_init() */ 718 cpu->cfg.ext_zifencei = true; 719 cpu->cfg.ext_zicsr = true; 720 cpu->cfg.pmp = true; 721 } 722 723 static void rv32i_bare_cpu_init(Object *obj) 724 { 725 CPURISCVState *env = &RISCV_CPU(obj)->env; 726 riscv_cpu_set_misa_ext(env, RVI); 727 } 728 729 static void rv32e_bare_cpu_init(Object *obj) 730 { 731 CPURISCVState *env = &RISCV_CPU(obj)->env; 732 riscv_cpu_set_misa_ext(env, RVE); 733 } 734 #endif 735 736 static ObjectClass *riscv_cpu_class_by_name(const char *cpu_model) 737 { 738 ObjectClass *oc; 739 char *typename; 740 char **cpuname; 741 742 cpuname = g_strsplit(cpu_model, ",", 1); 743 typename = g_strdup_printf(RISCV_CPU_TYPE_NAME("%s"), cpuname[0]); 744 oc = object_class_by_name(typename); 745 g_strfreev(cpuname); 746 g_free(typename); 747 748 return oc; 749 } 750 751 char *riscv_cpu_get_name(RISCVCPU *cpu) 752 { 753 RISCVCPUClass *rcc = RISCV_CPU_GET_CLASS(cpu); 754 const char *typename = object_class_get_name(OBJECT_CLASS(rcc)); 755 756 g_assert(g_str_has_suffix(typename, RISCV_CPU_TYPE_SUFFIX)); 757 758 return cpu_model_from_type(typename); 759 } 760 761 static void riscv_cpu_dump_state(CPUState *cs, FILE *f, int flags) 762 { 763 RISCVCPU *cpu = RISCV_CPU(cs); 764 CPURISCVState *env = &cpu->env; 765 int i, j; 766 uint8_t *p; 767 768 #if !defined(CONFIG_USER_ONLY) 769 if (riscv_has_ext(env, RVH)) { 770 qemu_fprintf(f, " %s %d\n", "V = ", env->virt_enabled); 771 } 772 #endif 773 qemu_fprintf(f, " %s " TARGET_FMT_lx "\n", "pc ", env->pc); 774 #ifndef CONFIG_USER_ONLY 775 { 776 static const int dump_csrs[] = { 777 CSR_MHARTID, 778 CSR_MSTATUS, 779 CSR_MSTATUSH, 780 /* 781 * CSR_SSTATUS is intentionally omitted here as its value 782 * can be figured out by looking at CSR_MSTATUS 783 */ 784 CSR_HSTATUS, 785 CSR_VSSTATUS, 786 CSR_MIP, 787 CSR_MIE, 788 CSR_MIDELEG, 789 CSR_HIDELEG, 790 CSR_MEDELEG, 791 CSR_HEDELEG, 792 CSR_MTVEC, 793 CSR_STVEC, 794 CSR_VSTVEC, 795 CSR_MEPC, 796 CSR_SEPC, 797 CSR_VSEPC, 798 CSR_MCAUSE, 799 CSR_SCAUSE, 800 CSR_VSCAUSE, 801 CSR_MTVAL, 802 CSR_STVAL, 803 CSR_HTVAL, 804 CSR_MTVAL2, 805 CSR_MSCRATCH, 806 CSR_SSCRATCH, 807 CSR_SATP, 808 CSR_MMTE, 809 CSR_UPMBASE, 810 CSR_UPMMASK, 811 CSR_SPMBASE, 812 CSR_SPMMASK, 813 CSR_MPMBASE, 814 CSR_MPMMASK, 815 }; 816 817 for (i = 0; i < ARRAY_SIZE(dump_csrs); ++i) { 818 int csrno = dump_csrs[i]; 819 target_ulong val = 0; 820 RISCVException res = riscv_csrrw_debug(env, csrno, &val, 0, 0); 821 822 /* 823 * Rely on the smode, hmode, etc, predicates within csr.c 824 * to do the filtering of the registers that are present. 
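 *    (riscv_csrrw_debug() returns an error for CSRs that the current
 *    configuration does not implement, so those entries are skipped.)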
825 */ 826 if (res == RISCV_EXCP_NONE) { 827 qemu_fprintf(f, " %-8s " TARGET_FMT_lx "\n", 828 csr_ops[csrno].name, val); 829 } 830 } 831 } 832 #endif 833 834 for (i = 0; i < 32; i++) { 835 qemu_fprintf(f, " %-8s " TARGET_FMT_lx, 836 riscv_int_regnames[i], env->gpr[i]); 837 if ((i & 3) == 3) { 838 qemu_fprintf(f, "\n"); 839 } 840 } 841 if (flags & CPU_DUMP_FPU) { 842 for (i = 0; i < 32; i++) { 843 qemu_fprintf(f, " %-8s %016" PRIx64, 844 riscv_fpr_regnames[i], env->fpr[i]); 845 if ((i & 3) == 3) { 846 qemu_fprintf(f, "\n"); 847 } 848 } 849 } 850 if (riscv_has_ext(env, RVV) && (flags & CPU_DUMP_VPU)) { 851 static const int dump_rvv_csrs[] = { 852 CSR_VSTART, 853 CSR_VXSAT, 854 CSR_VXRM, 855 CSR_VCSR, 856 CSR_VL, 857 CSR_VTYPE, 858 CSR_VLENB, 859 }; 860 for (i = 0; i < ARRAY_SIZE(dump_rvv_csrs); ++i) { 861 int csrno = dump_rvv_csrs[i]; 862 target_ulong val = 0; 863 RISCVException res = riscv_csrrw_debug(env, csrno, &val, 0, 0); 864 865 /* 866 * Rely on the smode, hmode, etc, predicates within csr.c 867 * to do the filtering of the registers that are present. 868 */ 869 if (res == RISCV_EXCP_NONE) { 870 qemu_fprintf(f, " %-8s " TARGET_FMT_lx "\n", 871 csr_ops[csrno].name, val); 872 } 873 } 874 uint16_t vlenb = cpu->cfg.vlenb; 875 876 for (i = 0; i < 32; i++) { 877 qemu_fprintf(f, " %-8s ", riscv_rvv_regnames[i]); 878 p = (uint8_t *)env->vreg; 879 for (j = vlenb - 1 ; j >= 0; j--) { 880 qemu_fprintf(f, "%02x", *(p + i * vlenb + BYTE(j))); 881 } 882 qemu_fprintf(f, "\n"); 883 } 884 } 885 } 886 887 static void riscv_cpu_set_pc(CPUState *cs, vaddr value) 888 { 889 RISCVCPU *cpu = RISCV_CPU(cs); 890 CPURISCVState *env = &cpu->env; 891 892 if (env->xl == MXL_RV32) { 893 env->pc = (int32_t)value; 894 } else { 895 env->pc = value; 896 } 897 } 898 899 static vaddr riscv_cpu_get_pc(CPUState *cs) 900 { 901 RISCVCPU *cpu = RISCV_CPU(cs); 902 CPURISCVState *env = &cpu->env; 903 904 /* Match cpu_get_tb_cpu_state. */ 905 if (env->xl == MXL_RV32) { 906 return env->pc & UINT32_MAX; 907 } 908 return env->pc; 909 } 910 911 bool riscv_cpu_has_work(CPUState *cs) 912 { 913 #ifndef CONFIG_USER_ONLY 914 RISCVCPU *cpu = RISCV_CPU(cs); 915 CPURISCVState *env = &cpu->env; 916 /* 917 * Definition of the WFI instruction requires it to ignore the privilege 918 * mode and delegation registers, but respect individual enables 919 */ 920 return riscv_cpu_all_pending(env) != 0 || 921 riscv_cpu_sirq_pending(env) != RISCV_EXCP_NONE || 922 riscv_cpu_vsirq_pending(env) != RISCV_EXCP_NONE; 923 #else 924 return true; 925 #endif 926 } 927 928 static int riscv_cpu_mmu_index(CPUState *cs, bool ifetch) 929 { 930 return riscv_env_mmu_index(cpu_env(cs), ifetch); 931 } 932 933 static void riscv_cpu_reset_hold(Object *obj, ResetType type) 934 { 935 #ifndef CONFIG_USER_ONLY 936 uint8_t iprio; 937 int i, irq, rdzero; 938 #endif 939 CPUState *cs = CPU(obj); 940 RISCVCPU *cpu = RISCV_CPU(cs); 941 RISCVCPUClass *mcc = RISCV_CPU_GET_CLASS(obj); 942 CPURISCVState *env = &cpu->env; 943 944 if (mcc->parent_phases.hold) { 945 mcc->parent_phases.hold(obj, type); 946 } 947 #ifndef CONFIG_USER_ONLY 948 env->misa_mxl = mcc->misa_mxl_max; 949 env->priv = PRV_M; 950 env->mstatus &= ~(MSTATUS_MIE | MSTATUS_MPRV); 951 if (env->misa_mxl > MXL_RV32) { 952 /* 953 * The reset status of SXL/UXL is undefined, but mstatus is WARL 954 * and we must ensure that the value after init is valid for read. 
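 * (Both SXL and UXL are initialised here to the machine XLEN, misa_mxl,
 * so every privilege level starts out with the same XLEN after reset.)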
955 */ 956 env->mstatus = set_field(env->mstatus, MSTATUS64_SXL, env->misa_mxl); 957 env->mstatus = set_field(env->mstatus, MSTATUS64_UXL, env->misa_mxl); 958 if (riscv_has_ext(env, RVH)) { 959 env->vsstatus = set_field(env->vsstatus, 960 MSTATUS64_SXL, env->misa_mxl); 961 env->vsstatus = set_field(env->vsstatus, 962 MSTATUS64_UXL, env->misa_mxl); 963 env->mstatus_hs = set_field(env->mstatus_hs, 964 MSTATUS64_SXL, env->misa_mxl); 965 env->mstatus_hs = set_field(env->mstatus_hs, 966 MSTATUS64_UXL, env->misa_mxl); 967 } 968 } 969 env->mcause = 0; 970 env->miclaim = MIP_SGEIP; 971 env->pc = env->resetvec; 972 env->bins = 0; 973 env->two_stage_lookup = false; 974 975 env->menvcfg = (cpu->cfg.ext_svpbmt ? MENVCFG_PBMTE : 0) | 976 (!cpu->cfg.ext_svade && cpu->cfg.ext_svadu ? 977 MENVCFG_ADUE : 0); 978 env->henvcfg = 0; 979 980 /* Initialized default priorities of local interrupts. */ 981 for (i = 0; i < ARRAY_SIZE(env->miprio); i++) { 982 iprio = riscv_cpu_default_priority(i); 983 env->miprio[i] = (i == IRQ_M_EXT) ? 0 : iprio; 984 env->siprio[i] = (i == IRQ_S_EXT) ? 0 : iprio; 985 env->hviprio[i] = 0; 986 } 987 i = 0; 988 while (!riscv_cpu_hviprio_index2irq(i, &irq, &rdzero)) { 989 if (!rdzero) { 990 env->hviprio[irq] = env->miprio[irq]; 991 } 992 i++; 993 } 994 /* mmte is supposed to have pm.current hardwired to 1 */ 995 env->mmte |= (EXT_STATUS_INITIAL | MMTE_M_PM_CURRENT); 996 997 /* 998 * Bits 10, 6, 2 and 12 of mideleg are read only 1 when the Hypervisor 999 * extension is enabled. 1000 */ 1001 if (riscv_has_ext(env, RVH)) { 1002 env->mideleg |= HS_MODE_INTERRUPTS; 1003 } 1004 1005 /* 1006 * Clear mseccfg and unlock all the PMP entries upon reset. 1007 * This is allowed as per the priv and smepmp specifications 1008 * and is needed to clear stale entries across reboots. 1009 */ 1010 if (riscv_cpu_cfg(env)->ext_smepmp) { 1011 env->mseccfg = 0; 1012 } 1013 1014 pmp_unlock_entries(env); 1015 #endif 1016 env->xl = riscv_cpu_mxl(env); 1017 riscv_cpu_update_mask(env); 1018 cs->exception_index = RISCV_EXCP_NONE; 1019 env->load_res = -1; 1020 set_default_nan_mode(1, &env->fp_status); 1021 1022 #ifndef CONFIG_USER_ONLY 1023 if (cpu->cfg.debug) { 1024 riscv_trigger_reset_hold(env); 1025 } 1026 1027 if (kvm_enabled()) { 1028 kvm_riscv_reset_vcpu(cpu); 1029 } 1030 #endif 1031 } 1032 1033 static void riscv_cpu_disas_set_info(CPUState *s, disassemble_info *info) 1034 { 1035 RISCVCPU *cpu = RISCV_CPU(s); 1036 CPURISCVState *env = &cpu->env; 1037 info->target_info = &cpu->cfg; 1038 1039 switch (env->xl) { 1040 case MXL_RV32: 1041 info->print_insn = print_insn_riscv32; 1042 break; 1043 case MXL_RV64: 1044 info->print_insn = print_insn_riscv64; 1045 break; 1046 case MXL_RV128: 1047 info->print_insn = print_insn_riscv128; 1048 break; 1049 default: 1050 g_assert_not_reached(); 1051 } 1052 } 1053 1054 #ifndef CONFIG_USER_ONLY 1055 static void riscv_cpu_satp_mode_finalize(RISCVCPU *cpu, Error **errp) 1056 { 1057 bool rv32 = riscv_cpu_is_32bit(cpu); 1058 uint8_t satp_mode_map_max, satp_mode_supported_max; 1059 1060 /* The CPU wants the OS to decide which satp mode to use */ 1061 if (cpu->cfg.satp_mode.supported == 0) { 1062 return; 1063 } 1064 1065 satp_mode_supported_max = 1066 satp_mode_max_from_map(cpu->cfg.satp_mode.supported); 1067 1068 if (cpu->cfg.satp_mode.map == 0) { 1069 if (cpu->cfg.satp_mode.init == 0) { 1070 /* If unset by the user, we fallback to the default satp mode. 
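             * (For non-bare CPUs this is every satp mode the CPU supports;
             * see set_satp_mode_default_map().)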
*/ 1071 set_satp_mode_default_map(cpu); 1072 } else { 1073 /* 1074 * Find the lowest level that was disabled and then enable the 1075 * first valid level below which can be found in 1076 * valid_vm_1_10_32/64. 1077 */ 1078 for (int i = 1; i < 16; ++i) { 1079 if ((cpu->cfg.satp_mode.init & (1 << i)) && 1080 (cpu->cfg.satp_mode.supported & (1 << i))) { 1081 for (int j = i - 1; j >= 0; --j) { 1082 if (cpu->cfg.satp_mode.supported & (1 << j)) { 1083 cpu->cfg.satp_mode.map |= (1 << j); 1084 break; 1085 } 1086 } 1087 break; 1088 } 1089 } 1090 } 1091 } 1092 1093 satp_mode_map_max = satp_mode_max_from_map(cpu->cfg.satp_mode.map); 1094 1095 /* Make sure the user asked for a supported configuration (HW and qemu) */ 1096 if (satp_mode_map_max > satp_mode_supported_max) { 1097 error_setg(errp, "satp_mode %s is higher than hw max capability %s", 1098 satp_mode_str(satp_mode_map_max, rv32), 1099 satp_mode_str(satp_mode_supported_max, rv32)); 1100 return; 1101 } 1102 1103 /* 1104 * Make sure the user did not ask for an invalid configuration as per 1105 * the specification. 1106 */ 1107 if (!rv32) { 1108 for (int i = satp_mode_map_max - 1; i >= 0; --i) { 1109 if (!(cpu->cfg.satp_mode.map & (1 << i)) && 1110 (cpu->cfg.satp_mode.init & (1 << i)) && 1111 (cpu->cfg.satp_mode.supported & (1 << i))) { 1112 error_setg(errp, "cannot disable %s satp mode if %s " 1113 "is enabled", satp_mode_str(i, false), 1114 satp_mode_str(satp_mode_map_max, false)); 1115 return; 1116 } 1117 } 1118 } 1119 1120 /* Finally expand the map so that all valid modes are set */ 1121 for (int i = satp_mode_map_max - 1; i >= 0; --i) { 1122 if (cpu->cfg.satp_mode.supported & (1 << i)) { 1123 cpu->cfg.satp_mode.map |= (1 << i); 1124 } 1125 } 1126 } 1127 #endif 1128 1129 void riscv_cpu_finalize_features(RISCVCPU *cpu, Error **errp) 1130 { 1131 Error *local_err = NULL; 1132 1133 #ifndef CONFIG_USER_ONLY 1134 riscv_cpu_satp_mode_finalize(cpu, &local_err); 1135 if (local_err != NULL) { 1136 error_propagate(errp, local_err); 1137 return; 1138 } 1139 #endif 1140 1141 if (tcg_enabled()) { 1142 riscv_tcg_cpu_finalize_features(cpu, &local_err); 1143 if (local_err != NULL) { 1144 error_propagate(errp, local_err); 1145 return; 1146 } 1147 riscv_tcg_cpu_finalize_dynamic_decoder(cpu); 1148 } else if (kvm_enabled()) { 1149 riscv_kvm_cpu_finalize_features(cpu, &local_err); 1150 if (local_err != NULL) { 1151 error_propagate(errp, local_err); 1152 return; 1153 } 1154 } 1155 } 1156 1157 static void riscv_cpu_realize(DeviceState *dev, Error **errp) 1158 { 1159 CPUState *cs = CPU(dev); 1160 RISCVCPU *cpu = RISCV_CPU(dev); 1161 RISCVCPUClass *mcc = RISCV_CPU_GET_CLASS(dev); 1162 Error *local_err = NULL; 1163 1164 if (object_dynamic_cast(OBJECT(dev), TYPE_RISCV_CPU_ANY) != NULL) { 1165 warn_report("The 'any' CPU is deprecated and will be " 1166 "removed in the future."); 1167 } 1168 1169 cpu_exec_realizefn(cs, &local_err); 1170 if (local_err != NULL) { 1171 error_propagate(errp, local_err); 1172 return; 1173 } 1174 1175 riscv_cpu_finalize_features(cpu, &local_err); 1176 if (local_err != NULL) { 1177 error_propagate(errp, local_err); 1178 return; 1179 } 1180 1181 riscv_cpu_register_gdb_regs_for_features(cs); 1182 1183 #ifndef CONFIG_USER_ONLY 1184 if (cpu->cfg.debug) { 1185 riscv_trigger_realize(&cpu->env); 1186 } 1187 #endif 1188 1189 qemu_init_vcpu(cs); 1190 cpu_reset(cs); 1191 1192 mcc->parent_realize(dev, errp); 1193 } 1194 1195 bool riscv_cpu_accelerator_compatible(RISCVCPU *cpu) 1196 { 1197 if (tcg_enabled()) { 1198 return riscv_cpu_tcg_compatible(cpu); 1199 
} 1200 1201 return true; 1202 } 1203 1204 #ifndef CONFIG_USER_ONLY 1205 static void cpu_riscv_get_satp(Object *obj, Visitor *v, const char *name, 1206 void *opaque, Error **errp) 1207 { 1208 RISCVSATPMap *satp_map = opaque; 1209 uint8_t satp = satp_mode_from_str(name); 1210 bool value; 1211 1212 value = satp_map->map & (1 << satp); 1213 1214 visit_type_bool(v, name, &value, errp); 1215 } 1216 1217 static void cpu_riscv_set_satp(Object *obj, Visitor *v, const char *name, 1218 void *opaque, Error **errp) 1219 { 1220 RISCVSATPMap *satp_map = opaque; 1221 uint8_t satp = satp_mode_from_str(name); 1222 bool value; 1223 1224 if (!visit_type_bool(v, name, &value, errp)) { 1225 return; 1226 } 1227 1228 satp_map->map = deposit32(satp_map->map, satp, 1, value); 1229 satp_map->init |= 1 << satp; 1230 } 1231 1232 void riscv_add_satp_mode_properties(Object *obj) 1233 { 1234 RISCVCPU *cpu = RISCV_CPU(obj); 1235 1236 if (cpu->env.misa_mxl == MXL_RV32) { 1237 object_property_add(obj, "sv32", "bool", cpu_riscv_get_satp, 1238 cpu_riscv_set_satp, NULL, &cpu->cfg.satp_mode); 1239 } else { 1240 object_property_add(obj, "sv39", "bool", cpu_riscv_get_satp, 1241 cpu_riscv_set_satp, NULL, &cpu->cfg.satp_mode); 1242 object_property_add(obj, "sv48", "bool", cpu_riscv_get_satp, 1243 cpu_riscv_set_satp, NULL, &cpu->cfg.satp_mode); 1244 object_property_add(obj, "sv57", "bool", cpu_riscv_get_satp, 1245 cpu_riscv_set_satp, NULL, &cpu->cfg.satp_mode); 1246 object_property_add(obj, "sv64", "bool", cpu_riscv_get_satp, 1247 cpu_riscv_set_satp, NULL, &cpu->cfg.satp_mode); 1248 } 1249 } 1250 1251 static void riscv_cpu_set_irq(void *opaque, int irq, int level) 1252 { 1253 RISCVCPU *cpu = RISCV_CPU(opaque); 1254 CPURISCVState *env = &cpu->env; 1255 1256 if (irq < IRQ_LOCAL_MAX) { 1257 switch (irq) { 1258 case IRQ_U_SOFT: 1259 case IRQ_S_SOFT: 1260 case IRQ_VS_SOFT: 1261 case IRQ_M_SOFT: 1262 case IRQ_U_TIMER: 1263 case IRQ_S_TIMER: 1264 case IRQ_VS_TIMER: 1265 case IRQ_M_TIMER: 1266 case IRQ_U_EXT: 1267 case IRQ_VS_EXT: 1268 case IRQ_M_EXT: 1269 if (kvm_enabled()) { 1270 kvm_riscv_set_irq(cpu, irq, level); 1271 } else { 1272 riscv_cpu_update_mip(env, 1 << irq, BOOL_TO_MASK(level)); 1273 } 1274 break; 1275 case IRQ_S_EXT: 1276 if (kvm_enabled()) { 1277 kvm_riscv_set_irq(cpu, irq, level); 1278 } else { 1279 env->external_seip = level; 1280 riscv_cpu_update_mip(env, 1 << irq, 1281 BOOL_TO_MASK(level | env->software_seip)); 1282 } 1283 break; 1284 default: 1285 g_assert_not_reached(); 1286 } 1287 } else if (irq < (IRQ_LOCAL_MAX + IRQ_LOCAL_GUEST_MAX)) { 1288 /* Require H-extension for handling guest local interrupts */ 1289 if (!riscv_has_ext(env, RVH)) { 1290 g_assert_not_reached(); 1291 } 1292 1293 /* Compute bit position in HGEIP CSR */ 1294 irq = irq - IRQ_LOCAL_MAX + 1; 1295 if (env->geilen < irq) { 1296 g_assert_not_reached(); 1297 } 1298 1299 /* Update HGEIP CSR */ 1300 env->hgeip &= ~((target_ulong)1 << irq); 1301 if (level) { 1302 env->hgeip |= (target_ulong)1 << irq; 1303 } 1304 1305 /* Update mip.SGEIP bit */ 1306 riscv_cpu_update_mip(env, MIP_SGEIP, 1307 BOOL_TO_MASK(!!(env->hgeie & env->hgeip))); 1308 } else { 1309 g_assert_not_reached(); 1310 } 1311 } 1312 #endif /* CONFIG_USER_ONLY */ 1313 1314 static bool riscv_cpu_is_dynamic(Object *cpu_obj) 1315 { 1316 return object_dynamic_cast(cpu_obj, TYPE_RISCV_DYNAMIC_CPU) != NULL; 1317 } 1318 1319 static void riscv_cpu_post_init(Object *obj) 1320 { 1321 accel_cpu_instance_init(CPU(obj)); 1322 } 1323 1324 static void riscv_cpu_init(Object *obj) 1325 { 1326 RISCVCPUClass *mcc = 
                                         RISCV_CPU_GET_CLASS(obj);
    RISCVCPU *cpu = RISCV_CPU(obj);
    CPURISCVState *env = &cpu->env;

    env->misa_mxl = mcc->misa_mxl_max;

#ifndef CONFIG_USER_ONLY
    qdev_init_gpio_in(DEVICE(obj), riscv_cpu_set_irq,
                      IRQ_LOCAL_MAX + IRQ_LOCAL_GUEST_MAX);
#endif /* CONFIG_USER_ONLY */

    general_user_opts = g_hash_table_new(g_str_hash, g_str_equal);

    /*
     * The timer and performance counters extensions were supported
     * in QEMU before they were added as discrete extensions in the
     * ISA. To keep compatibility we'll always default them to 'true'
     * for all CPUs. Each accelerator will decide what to do when
     * users disable them.
     */
    RISCV_CPU(obj)->cfg.ext_zicntr = true;
    RISCV_CPU(obj)->cfg.ext_zihpm = true;

    /* Default values for non-bool cpu properties */
    cpu->cfg.pmu_mask = MAKE_64BIT_MASK(3, 16);
    cpu->cfg.vlenb = 128 >> 3;
    cpu->cfg.elen = 64;
    cpu->cfg.cbom_blocksize = 64;
    cpu->cfg.cbop_blocksize = 64;
    cpu->cfg.cboz_blocksize = 64;
    cpu->env.vext_ver = VEXT_VERSION_1_00_0;
}

static void riscv_bare_cpu_init(Object *obj)
{
    RISCVCPU *cpu = RISCV_CPU(obj);

    /*
     * Bare CPUs do not inherit the timer and performance
     * counters from the parent class (see riscv_cpu_init()
     * for info on why the parent enables them).
     *
     * Users have to explicitly enable these counters for
     * bare CPUs.
     */
    cpu->cfg.ext_zicntr = false;
    cpu->cfg.ext_zihpm = false;

    /* Set to QEMU's first supported priv version */
    cpu->env.priv_ver = PRIV_VERSION_1_10_0;

    /*
     * Support all available satp_mode settings. The default
     * value will be set to MBARE if the user doesn't set
     * satp_mode manually (see set_satp_mode_default_map()).
     */
#ifndef CONFIG_USER_ONLY
    set_satp_mode_max_supported(cpu, VM_1_10_SV64);
#endif
}

typedef struct misa_ext_info {
    const char *name;
    const char *description;
} MISAExtInfo;

#define MISA_INFO_IDX(_bit) \
    __builtin_ctz(_bit)

#define MISA_EXT_INFO(_bit, _propname, _descr) \
    [MISA_INFO_IDX(_bit)] = {.name = _propname, .description = _descr}

static const MISAExtInfo misa_ext_info_arr[] = {
    MISA_EXT_INFO(RVA, "a", "Atomic instructions"),
    MISA_EXT_INFO(RVC, "c", "Compressed instructions"),
    MISA_EXT_INFO(RVD, "d", "Double-precision floating point"),
    MISA_EXT_INFO(RVF, "f", "Single-precision floating point"),
    MISA_EXT_INFO(RVI, "i", "Base integer instruction set"),
    MISA_EXT_INFO(RVE, "e", "Base integer instruction set (embedded)"),
    MISA_EXT_INFO(RVM, "m", "Integer multiplication and division"),
    MISA_EXT_INFO(RVS, "s", "Supervisor-level instructions"),
    MISA_EXT_INFO(RVU, "u", "User-level instructions"),
    MISA_EXT_INFO(RVH, "h", "Hypervisor"),
    MISA_EXT_INFO(RVJ, "x-j", "Dynamically translated languages"),
    MISA_EXT_INFO(RVV, "v", "Vector operations"),
    MISA_EXT_INFO(RVG, "g", "General purpose (IMAFD_Zicsr_Zifencei)"),
    MISA_EXT_INFO(RVB, "b", "Bit manipulation (Zba_Zbb_Zbs)")
};

static void riscv_cpu_validate_misa_mxl(RISCVCPUClass *mcc)
{
    CPUClass *cc = CPU_CLASS(mcc);

    /* Validate that MISA_MXL is set properly.
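     * This also selects the GDB core XML file matching the CPU's XLEN.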
*/ 1420 switch (mcc->misa_mxl_max) { 1421 #ifdef TARGET_RISCV64 1422 case MXL_RV64: 1423 case MXL_RV128: 1424 cc->gdb_core_xml_file = "riscv-64bit-cpu.xml"; 1425 break; 1426 #endif 1427 case MXL_RV32: 1428 cc->gdb_core_xml_file = "riscv-32bit-cpu.xml"; 1429 break; 1430 default: 1431 g_assert_not_reached(); 1432 } 1433 } 1434 1435 static int riscv_validate_misa_info_idx(uint32_t bit) 1436 { 1437 int idx; 1438 1439 /* 1440 * Our lowest valid input (RVA) is 1 and 1441 * __builtin_ctz() is UB with zero. 1442 */ 1443 g_assert(bit != 0); 1444 idx = MISA_INFO_IDX(bit); 1445 1446 g_assert(idx < ARRAY_SIZE(misa_ext_info_arr)); 1447 return idx; 1448 } 1449 1450 const char *riscv_get_misa_ext_name(uint32_t bit) 1451 { 1452 int idx = riscv_validate_misa_info_idx(bit); 1453 const char *val = misa_ext_info_arr[idx].name; 1454 1455 g_assert(val != NULL); 1456 return val; 1457 } 1458 1459 const char *riscv_get_misa_ext_description(uint32_t bit) 1460 { 1461 int idx = riscv_validate_misa_info_idx(bit); 1462 const char *val = misa_ext_info_arr[idx].description; 1463 1464 g_assert(val != NULL); 1465 return val; 1466 } 1467 1468 #define MULTI_EXT_CFG_BOOL(_name, _prop, _defval) \ 1469 {.name = _name, .offset = CPU_CFG_OFFSET(_prop), \ 1470 .enabled = _defval} 1471 1472 const RISCVCPUMultiExtConfig riscv_cpu_extensions[] = { 1473 /* Defaults for standard extensions */ 1474 MULTI_EXT_CFG_BOOL("sscofpmf", ext_sscofpmf, false), 1475 MULTI_EXT_CFG_BOOL("zifencei", ext_zifencei, true), 1476 MULTI_EXT_CFG_BOOL("zicsr", ext_zicsr, true), 1477 MULTI_EXT_CFG_BOOL("zihintntl", ext_zihintntl, true), 1478 MULTI_EXT_CFG_BOOL("zihintpause", ext_zihintpause, true), 1479 MULTI_EXT_CFG_BOOL("zimop", ext_zimop, false), 1480 MULTI_EXT_CFG_BOOL("zcmop", ext_zcmop, false), 1481 MULTI_EXT_CFG_BOOL("zacas", ext_zacas, false), 1482 MULTI_EXT_CFG_BOOL("zama16b", ext_zama16b, false), 1483 MULTI_EXT_CFG_BOOL("zabha", ext_zabha, false), 1484 MULTI_EXT_CFG_BOOL("zaamo", ext_zaamo, false), 1485 MULTI_EXT_CFG_BOOL("zalrsc", ext_zalrsc, false), 1486 MULTI_EXT_CFG_BOOL("zawrs", ext_zawrs, true), 1487 MULTI_EXT_CFG_BOOL("zfa", ext_zfa, true), 1488 MULTI_EXT_CFG_BOOL("zfbfmin", ext_zfbfmin, false), 1489 MULTI_EXT_CFG_BOOL("zfh", ext_zfh, false), 1490 MULTI_EXT_CFG_BOOL("zfhmin", ext_zfhmin, false), 1491 MULTI_EXT_CFG_BOOL("zve32f", ext_zve32f, false), 1492 MULTI_EXT_CFG_BOOL("zve32x", ext_zve32x, false), 1493 MULTI_EXT_CFG_BOOL("zve64f", ext_zve64f, false), 1494 MULTI_EXT_CFG_BOOL("zve64d", ext_zve64d, false), 1495 MULTI_EXT_CFG_BOOL("zve64x", ext_zve64x, false), 1496 MULTI_EXT_CFG_BOOL("zvfbfmin", ext_zvfbfmin, false), 1497 MULTI_EXT_CFG_BOOL("zvfbfwma", ext_zvfbfwma, false), 1498 MULTI_EXT_CFG_BOOL("zvfh", ext_zvfh, false), 1499 MULTI_EXT_CFG_BOOL("zvfhmin", ext_zvfhmin, false), 1500 MULTI_EXT_CFG_BOOL("sstc", ext_sstc, true), 1501 1502 MULTI_EXT_CFG_BOOL("smaia", ext_smaia, false), 1503 MULTI_EXT_CFG_BOOL("smepmp", ext_smepmp, false), 1504 MULTI_EXT_CFG_BOOL("smstateen", ext_smstateen, false), 1505 MULTI_EXT_CFG_BOOL("ssaia", ext_ssaia, false), 1506 MULTI_EXT_CFG_BOOL("svade", ext_svade, false), 1507 MULTI_EXT_CFG_BOOL("svadu", ext_svadu, true), 1508 MULTI_EXT_CFG_BOOL("svinval", ext_svinval, false), 1509 MULTI_EXT_CFG_BOOL("svnapot", ext_svnapot, false), 1510 MULTI_EXT_CFG_BOOL("svpbmt", ext_svpbmt, false), 1511 1512 MULTI_EXT_CFG_BOOL("zicntr", ext_zicntr, true), 1513 MULTI_EXT_CFG_BOOL("zihpm", ext_zihpm, true), 1514 1515 MULTI_EXT_CFG_BOOL("zba", ext_zba, true), 1516 MULTI_EXT_CFG_BOOL("zbb", ext_zbb, true), 1517 MULTI_EXT_CFG_BOOL("zbc", 
ext_zbc, true), 1518 MULTI_EXT_CFG_BOOL("zbkb", ext_zbkb, false), 1519 MULTI_EXT_CFG_BOOL("zbkc", ext_zbkc, false), 1520 MULTI_EXT_CFG_BOOL("zbkx", ext_zbkx, false), 1521 MULTI_EXT_CFG_BOOL("zbs", ext_zbs, true), 1522 MULTI_EXT_CFG_BOOL("zk", ext_zk, false), 1523 MULTI_EXT_CFG_BOOL("zkn", ext_zkn, false), 1524 MULTI_EXT_CFG_BOOL("zknd", ext_zknd, false), 1525 MULTI_EXT_CFG_BOOL("zkne", ext_zkne, false), 1526 MULTI_EXT_CFG_BOOL("zknh", ext_zknh, false), 1527 MULTI_EXT_CFG_BOOL("zkr", ext_zkr, false), 1528 MULTI_EXT_CFG_BOOL("zks", ext_zks, false), 1529 MULTI_EXT_CFG_BOOL("zksed", ext_zksed, false), 1530 MULTI_EXT_CFG_BOOL("zksh", ext_zksh, false), 1531 MULTI_EXT_CFG_BOOL("zkt", ext_zkt, false), 1532 MULTI_EXT_CFG_BOOL("ztso", ext_ztso, false), 1533 1534 MULTI_EXT_CFG_BOOL("zdinx", ext_zdinx, false), 1535 MULTI_EXT_CFG_BOOL("zfinx", ext_zfinx, false), 1536 MULTI_EXT_CFG_BOOL("zhinx", ext_zhinx, false), 1537 MULTI_EXT_CFG_BOOL("zhinxmin", ext_zhinxmin, false), 1538 1539 MULTI_EXT_CFG_BOOL("zicbom", ext_zicbom, true), 1540 MULTI_EXT_CFG_BOOL("zicbop", ext_zicbop, true), 1541 MULTI_EXT_CFG_BOOL("zicboz", ext_zicboz, true), 1542 1543 MULTI_EXT_CFG_BOOL("zmmul", ext_zmmul, false), 1544 1545 MULTI_EXT_CFG_BOOL("zca", ext_zca, false), 1546 MULTI_EXT_CFG_BOOL("zcb", ext_zcb, false), 1547 MULTI_EXT_CFG_BOOL("zcd", ext_zcd, false), 1548 MULTI_EXT_CFG_BOOL("zce", ext_zce, false), 1549 MULTI_EXT_CFG_BOOL("zcf", ext_zcf, false), 1550 MULTI_EXT_CFG_BOOL("zcmp", ext_zcmp, false), 1551 MULTI_EXT_CFG_BOOL("zcmt", ext_zcmt, false), 1552 MULTI_EXT_CFG_BOOL("zicond", ext_zicond, false), 1553 1554 /* Vector cryptography extensions */ 1555 MULTI_EXT_CFG_BOOL("zvbb", ext_zvbb, false), 1556 MULTI_EXT_CFG_BOOL("zvbc", ext_zvbc, false), 1557 MULTI_EXT_CFG_BOOL("zvkb", ext_zvkb, false), 1558 MULTI_EXT_CFG_BOOL("zvkg", ext_zvkg, false), 1559 MULTI_EXT_CFG_BOOL("zvkned", ext_zvkned, false), 1560 MULTI_EXT_CFG_BOOL("zvknha", ext_zvknha, false), 1561 MULTI_EXT_CFG_BOOL("zvknhb", ext_zvknhb, false), 1562 MULTI_EXT_CFG_BOOL("zvksed", ext_zvksed, false), 1563 MULTI_EXT_CFG_BOOL("zvksh", ext_zvksh, false), 1564 MULTI_EXT_CFG_BOOL("zvkt", ext_zvkt, false), 1565 MULTI_EXT_CFG_BOOL("zvkn", ext_zvkn, false), 1566 MULTI_EXT_CFG_BOOL("zvknc", ext_zvknc, false), 1567 MULTI_EXT_CFG_BOOL("zvkng", ext_zvkng, false), 1568 MULTI_EXT_CFG_BOOL("zvks", ext_zvks, false), 1569 MULTI_EXT_CFG_BOOL("zvksc", ext_zvksc, false), 1570 MULTI_EXT_CFG_BOOL("zvksg", ext_zvksg, false), 1571 1572 DEFINE_PROP_END_OF_LIST(), 1573 }; 1574 1575 const RISCVCPUMultiExtConfig riscv_cpu_vendor_exts[] = { 1576 MULTI_EXT_CFG_BOOL("xtheadba", ext_xtheadba, false), 1577 MULTI_EXT_CFG_BOOL("xtheadbb", ext_xtheadbb, false), 1578 MULTI_EXT_CFG_BOOL("xtheadbs", ext_xtheadbs, false), 1579 MULTI_EXT_CFG_BOOL("xtheadcmo", ext_xtheadcmo, false), 1580 MULTI_EXT_CFG_BOOL("xtheadcondmov", ext_xtheadcondmov, false), 1581 MULTI_EXT_CFG_BOOL("xtheadfmemidx", ext_xtheadfmemidx, false), 1582 MULTI_EXT_CFG_BOOL("xtheadfmv", ext_xtheadfmv, false), 1583 MULTI_EXT_CFG_BOOL("xtheadmac", ext_xtheadmac, false), 1584 MULTI_EXT_CFG_BOOL("xtheadmemidx", ext_xtheadmemidx, false), 1585 MULTI_EXT_CFG_BOOL("xtheadmempair", ext_xtheadmempair, false), 1586 MULTI_EXT_CFG_BOOL("xtheadsync", ext_xtheadsync, false), 1587 MULTI_EXT_CFG_BOOL("xventanacondops", ext_XVentanaCondOps, false), 1588 1589 DEFINE_PROP_END_OF_LIST(), 1590 }; 1591 1592 /* These are experimental so mark with 'x-' */ 1593 const RISCVCPUMultiExtConfig riscv_cpu_experimental_exts[] = { 1594 DEFINE_PROP_END_OF_LIST(), 1595 }; 1596 1597 
/* 1598 * 'Named features' is the name we give to extensions that we 1599 * don't want to expose to users. They are either immutable 1600 * (always enabled/disable) or they'll vary depending on 1601 * the resulting CPU state. They have riscv,isa strings 1602 * and priv_ver like regular extensions. 1603 */ 1604 const RISCVCPUMultiExtConfig riscv_cpu_named_features[] = { 1605 MULTI_EXT_CFG_BOOL("zic64b", ext_zic64b, true), 1606 1607 DEFINE_PROP_END_OF_LIST(), 1608 }; 1609 1610 /* Deprecated entries marked for future removal */ 1611 const RISCVCPUMultiExtConfig riscv_cpu_deprecated_exts[] = { 1612 MULTI_EXT_CFG_BOOL("Zifencei", ext_zifencei, true), 1613 MULTI_EXT_CFG_BOOL("Zicsr", ext_zicsr, true), 1614 MULTI_EXT_CFG_BOOL("Zihintntl", ext_zihintntl, true), 1615 MULTI_EXT_CFG_BOOL("Zihintpause", ext_zihintpause, true), 1616 MULTI_EXT_CFG_BOOL("Zawrs", ext_zawrs, true), 1617 MULTI_EXT_CFG_BOOL("Zfa", ext_zfa, true), 1618 MULTI_EXT_CFG_BOOL("Zfh", ext_zfh, false), 1619 MULTI_EXT_CFG_BOOL("Zfhmin", ext_zfhmin, false), 1620 MULTI_EXT_CFG_BOOL("Zve32f", ext_zve32f, false), 1621 MULTI_EXT_CFG_BOOL("Zve64f", ext_zve64f, false), 1622 MULTI_EXT_CFG_BOOL("Zve64d", ext_zve64d, false), 1623 1624 DEFINE_PROP_END_OF_LIST(), 1625 }; 1626 1627 static void cpu_set_prop_err(RISCVCPU *cpu, const char *propname, 1628 Error **errp) 1629 { 1630 g_autofree char *cpuname = riscv_cpu_get_name(cpu); 1631 error_setg(errp, "CPU '%s' does not allow changing the value of '%s'", 1632 cpuname, propname); 1633 } 1634 1635 static void prop_pmu_num_set(Object *obj, Visitor *v, const char *name, 1636 void *opaque, Error **errp) 1637 { 1638 RISCVCPU *cpu = RISCV_CPU(obj); 1639 uint8_t pmu_num, curr_pmu_num; 1640 uint32_t pmu_mask; 1641 1642 visit_type_uint8(v, name, &pmu_num, errp); 1643 1644 curr_pmu_num = ctpop32(cpu->cfg.pmu_mask); 1645 1646 if (pmu_num != curr_pmu_num && riscv_cpu_is_vendor(obj)) { 1647 cpu_set_prop_err(cpu, name, errp); 1648 error_append_hint(errp, "Current '%s' val: %u\n", 1649 name, curr_pmu_num); 1650 return; 1651 } 1652 1653 if (pmu_num > (RV_MAX_MHPMCOUNTERS - 3)) { 1654 error_setg(errp, "Number of counters exceeds maximum available"); 1655 return; 1656 } 1657 1658 if (pmu_num == 0) { 1659 pmu_mask = 0; 1660 } else { 1661 pmu_mask = MAKE_64BIT_MASK(3, pmu_num); 1662 } 1663 1664 warn_report("\"pmu-num\" property is deprecated; use \"pmu-mask\""); 1665 cpu->cfg.pmu_mask = pmu_mask; 1666 cpu_option_add_user_setting("pmu-mask", pmu_mask); 1667 } 1668 1669 static void prop_pmu_num_get(Object *obj, Visitor *v, const char *name, 1670 void *opaque, Error **errp) 1671 { 1672 RISCVCPU *cpu = RISCV_CPU(obj); 1673 uint8_t pmu_num = ctpop32(cpu->cfg.pmu_mask); 1674 1675 visit_type_uint8(v, name, &pmu_num, errp); 1676 } 1677 1678 static const PropertyInfo prop_pmu_num = { 1679 .name = "pmu-num", 1680 .get = prop_pmu_num_get, 1681 .set = prop_pmu_num_set, 1682 }; 1683 1684 static void prop_pmu_mask_set(Object *obj, Visitor *v, const char *name, 1685 void *opaque, Error **errp) 1686 { 1687 RISCVCPU *cpu = RISCV_CPU(obj); 1688 uint32_t value; 1689 uint8_t pmu_num; 1690 1691 visit_type_uint32(v, name, &value, errp); 1692 1693 if (value != cpu->cfg.pmu_mask && riscv_cpu_is_vendor(obj)) { 1694 cpu_set_prop_err(cpu, name, errp); 1695 error_append_hint(errp, "Current '%s' val: %x\n", 1696 name, cpu->cfg.pmu_mask); 1697 return; 1698 } 1699 1700 pmu_num = ctpop32(value); 1701 1702 if (pmu_num > (RV_MAX_MHPMCOUNTERS - 3)) { 1703 error_setg(errp, "Number of counters exceeds maximum available"); 1704 return; 1705 } 1706 1707 
cpu_option_add_user_setting(name, value); 1708 cpu->cfg.pmu_mask = value; 1709 } 1710 1711 static void prop_pmu_mask_get(Object *obj, Visitor *v, const char *name, 1712 void *opaque, Error **errp) 1713 { 1714 uint8_t pmu_mask = RISCV_CPU(obj)->cfg.pmu_mask; 1715 1716 visit_type_uint8(v, name, &pmu_mask, errp); 1717 } 1718 1719 static const PropertyInfo prop_pmu_mask = { 1720 .name = "pmu-mask", 1721 .get = prop_pmu_mask_get, 1722 .set = prop_pmu_mask_set, 1723 }; 1724 1725 static void prop_mmu_set(Object *obj, Visitor *v, const char *name, 1726 void *opaque, Error **errp) 1727 { 1728 RISCVCPU *cpu = RISCV_CPU(obj); 1729 bool value; 1730 1731 visit_type_bool(v, name, &value, errp); 1732 1733 if (cpu->cfg.mmu != value && riscv_cpu_is_vendor(obj)) { 1734 cpu_set_prop_err(cpu, "mmu", errp); 1735 return; 1736 } 1737 1738 cpu_option_add_user_setting(name, value); 1739 cpu->cfg.mmu = value; 1740 } 1741 1742 static void prop_mmu_get(Object *obj, Visitor *v, const char *name, 1743 void *opaque, Error **errp) 1744 { 1745 bool value = RISCV_CPU(obj)->cfg.mmu; 1746 1747 visit_type_bool(v, name, &value, errp); 1748 } 1749 1750 static const PropertyInfo prop_mmu = { 1751 .name = "mmu", 1752 .get = prop_mmu_get, 1753 .set = prop_mmu_set, 1754 }; 1755 1756 static void prop_pmp_set(Object *obj, Visitor *v, const char *name, 1757 void *opaque, Error **errp) 1758 { 1759 RISCVCPU *cpu = RISCV_CPU(obj); 1760 bool value; 1761 1762 visit_type_bool(v, name, &value, errp); 1763 1764 if (cpu->cfg.pmp != value && riscv_cpu_is_vendor(obj)) { 1765 cpu_set_prop_err(cpu, name, errp); 1766 return; 1767 } 1768 1769 cpu_option_add_user_setting(name, value); 1770 cpu->cfg.pmp = value; 1771 } 1772 1773 static void prop_pmp_get(Object *obj, Visitor *v, const char *name, 1774 void *opaque, Error **errp) 1775 { 1776 bool value = RISCV_CPU(obj)->cfg.pmp; 1777 1778 visit_type_bool(v, name, &value, errp); 1779 } 1780 1781 static const PropertyInfo prop_pmp = { 1782 .name = "pmp", 1783 .get = prop_pmp_get, 1784 .set = prop_pmp_set, 1785 }; 1786 1787 static int priv_spec_from_str(const char *priv_spec_str) 1788 { 1789 int priv_version = -1; 1790 1791 if (!g_strcmp0(priv_spec_str, PRIV_VER_1_13_0_STR)) { 1792 priv_version = PRIV_VERSION_1_13_0; 1793 } else if (!g_strcmp0(priv_spec_str, PRIV_VER_1_12_0_STR)) { 1794 priv_version = PRIV_VERSION_1_12_0; 1795 } else if (!g_strcmp0(priv_spec_str, PRIV_VER_1_11_0_STR)) { 1796 priv_version = PRIV_VERSION_1_11_0; 1797 } else if (!g_strcmp0(priv_spec_str, PRIV_VER_1_10_0_STR)) { 1798 priv_version = PRIV_VERSION_1_10_0; 1799 } 1800 1801 return priv_version; 1802 } 1803 1804 const char *priv_spec_to_str(int priv_version) 1805 { 1806 switch (priv_version) { 1807 case PRIV_VERSION_1_10_0: 1808 return PRIV_VER_1_10_0_STR; 1809 case PRIV_VERSION_1_11_0: 1810 return PRIV_VER_1_11_0_STR; 1811 case PRIV_VERSION_1_12_0: 1812 return PRIV_VER_1_12_0_STR; 1813 case PRIV_VERSION_1_13_0: 1814 return PRIV_VER_1_13_0_STR; 1815 default: 1816 return NULL; 1817 } 1818 } 1819 1820 static void prop_priv_spec_set(Object *obj, Visitor *v, const char *name, 1821 void *opaque, Error **errp) 1822 { 1823 RISCVCPU *cpu = RISCV_CPU(obj); 1824 g_autofree char *value = NULL; 1825 int priv_version = -1; 1826 1827 visit_type_str(v, name, &value, errp); 1828 1829 priv_version = priv_spec_from_str(value); 1830 if (priv_version < 0) { 1831 error_setg(errp, "Unsupported privilege spec version '%s'", value); 1832 return; 1833 } 1834 1835 if (priv_version != cpu->env.priv_ver && riscv_cpu_is_vendor(obj)) { 1836 cpu_set_prop_err(cpu, 
static void prop_priv_spec_set(Object *obj, Visitor *v, const char *name,
                               void *opaque, Error **errp)
{
    RISCVCPU *cpu = RISCV_CPU(obj);
    g_autofree char *value = NULL;
    int priv_version = -1;

    if (!visit_type_str(v, name, &value, errp)) {
        return;
    }

    priv_version = priv_spec_from_str(value);
    if (priv_version < 0) {
        error_setg(errp, "Unsupported privilege spec version '%s'", value);
        return;
    }

    if (priv_version != cpu->env.priv_ver && riscv_cpu_is_vendor(obj)) {
        cpu_set_prop_err(cpu, name, errp);
        error_append_hint(errp, "Current '%s' val: %s\n", name,
                          object_property_get_str(obj, name, NULL));
        return;
    }

    cpu_option_add_user_setting(name, priv_version);
    cpu->env.priv_ver = priv_version;
}

static void prop_priv_spec_get(Object *obj, Visitor *v, const char *name,
                               void *opaque, Error **errp)
{
    RISCVCPU *cpu = RISCV_CPU(obj);
    const char *value = priv_spec_to_str(cpu->env.priv_ver);

    visit_type_str(v, name, (char **)&value, errp);
}

static const PropertyInfo prop_priv_spec = {
    .name = "priv_spec",
    .get = prop_priv_spec_get,
    .set = prop_priv_spec_set,
};

static void prop_vext_spec_set(Object *obj, Visitor *v, const char *name,
                               void *opaque, Error **errp)
{
    RISCVCPU *cpu = RISCV_CPU(obj);
    g_autofree char *value = NULL;

    if (!visit_type_str(v, name, &value, errp)) {
        return;
    }

    if (g_strcmp0(value, VEXT_VER_1_00_0_STR) != 0) {
        error_setg(errp, "Unsupported vector spec version '%s'", value);
        return;
    }

    cpu_option_add_user_setting(name, VEXT_VERSION_1_00_0);
    cpu->env.vext_ver = VEXT_VERSION_1_00_0;
}

static void prop_vext_spec_get(Object *obj, Visitor *v, const char *name,
                               void *opaque, Error **errp)
{
    const char *value = VEXT_VER_1_00_0_STR;

    visit_type_str(v, name, (char **)&value, errp);
}

static const PropertyInfo prop_vext_spec = {
    .name = "vext_spec",
    .get = prop_vext_spec_get,
    .set = prop_vext_spec_set,
};
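/*
 * The "vlen" and "elen" properties below take their value in bits, but
 * VLEN is stored internally as cfg.vlenb (bytes).  For example, a
 * hypothetical "-cpu rv64,v=true,vlen=256,elen=64" ends up as
 * cfg.vlenb = 32 (256 >> 3) and cfg.elen = 64.  Both values must be
 * powers of 2, and vendor CPUs reject any change from their defaults.
 */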
static void prop_vlen_set(Object *obj, Visitor *v, const char *name,
                          void *opaque, Error **errp)
{
    RISCVCPU *cpu = RISCV_CPU(obj);
    uint16_t value;

    if (!visit_type_uint16(v, name, &value, errp)) {
        return;
    }

    if (!is_power_of_2(value)) {
        error_setg(errp, "Vector extension VLEN must be a power of 2");
        return;
    }

    if (value != (cpu->cfg.vlenb << 3) && riscv_cpu_is_vendor(obj)) {
        cpu_set_prop_err(cpu, name, errp);
        error_append_hint(errp, "Current '%s' val: %u\n",
                          name, cpu->cfg.vlenb << 3);
        return;
    }

    cpu_option_add_user_setting(name, value);
    cpu->cfg.vlenb = value >> 3;
}

static void prop_vlen_get(Object *obj, Visitor *v, const char *name,
                          void *opaque, Error **errp)
{
    uint16_t value = RISCV_CPU(obj)->cfg.vlenb << 3;

    visit_type_uint16(v, name, &value, errp);
}

static const PropertyInfo prop_vlen = {
    .name = "vlen",
    .get = prop_vlen_get,
    .set = prop_vlen_set,
};

static void prop_elen_set(Object *obj, Visitor *v, const char *name,
                          void *opaque, Error **errp)
{
    RISCVCPU *cpu = RISCV_CPU(obj);
    uint16_t value;

    if (!visit_type_uint16(v, name, &value, errp)) {
        return;
    }

    if (!is_power_of_2(value)) {
        error_setg(errp, "Vector extension ELEN must be a power of 2");
        return;
    }

    if (value != cpu->cfg.elen && riscv_cpu_is_vendor(obj)) {
        cpu_set_prop_err(cpu, name, errp);
        error_append_hint(errp, "Current '%s' val: %u\n",
                          name, cpu->cfg.elen);
        return;
    }

    cpu_option_add_user_setting(name, value);
    cpu->cfg.elen = value;
}

static void prop_elen_get(Object *obj, Visitor *v, const char *name,
                          void *opaque, Error **errp)
{
    uint16_t value = RISCV_CPU(obj)->cfg.elen;

    visit_type_uint16(v, name, &value, errp);
}

static const PropertyInfo prop_elen = {
    .name = "elen",
    .get = prop_elen_get,
    .set = prop_elen_set,
};
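/*
 * Cache block size properties for the Zicbom, Zicbop and Zicboz
 * extensions.  The setters below only record the requested size (and
 * refuse changes on vendor CPUs); any further sanity checking of the
 * block size is assumed to happen later, at realize time.
 */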
static void prop_cbom_blksize_set(Object *obj, Visitor *v, const char *name,
                                  void *opaque, Error **errp)
{
    RISCVCPU *cpu = RISCV_CPU(obj);
    uint16_t value;

    if (!visit_type_uint16(v, name, &value, errp)) {
        return;
    }

    if (value != cpu->cfg.cbom_blocksize && riscv_cpu_is_vendor(obj)) {
        cpu_set_prop_err(cpu, name, errp);
        error_append_hint(errp, "Current '%s' val: %u\n",
                          name, cpu->cfg.cbom_blocksize);
        return;
    }

    cpu_option_add_user_setting(name, value);
    cpu->cfg.cbom_blocksize = value;
}

static void prop_cbom_blksize_get(Object *obj, Visitor *v, const char *name,
                                  void *opaque, Error **errp)
{
    uint16_t value = RISCV_CPU(obj)->cfg.cbom_blocksize;

    visit_type_uint16(v, name, &value, errp);
}

static const PropertyInfo prop_cbom_blksize = {
    .name = "cbom_blocksize",
    .get = prop_cbom_blksize_get,
    .set = prop_cbom_blksize_set,
};

static void prop_cbop_blksize_set(Object *obj, Visitor *v, const char *name,
                                  void *opaque, Error **errp)
{
    RISCVCPU *cpu = RISCV_CPU(obj);
    uint16_t value;

    if (!visit_type_uint16(v, name, &value, errp)) {
        return;
    }

    if (value != cpu->cfg.cbop_blocksize && riscv_cpu_is_vendor(obj)) {
        cpu_set_prop_err(cpu, name, errp);
        error_append_hint(errp, "Current '%s' val: %u\n",
                          name, cpu->cfg.cbop_blocksize);
        return;
    }

    cpu_option_add_user_setting(name, value);
    cpu->cfg.cbop_blocksize = value;
}

static void prop_cbop_blksize_get(Object *obj, Visitor *v, const char *name,
                                  void *opaque, Error **errp)
{
    uint16_t value = RISCV_CPU(obj)->cfg.cbop_blocksize;

    visit_type_uint16(v, name, &value, errp);
}

static const PropertyInfo prop_cbop_blksize = {
    .name = "cbop_blocksize",
    .get = prop_cbop_blksize_get,
    .set = prop_cbop_blksize_set,
};

static void prop_cboz_blksize_set(Object *obj, Visitor *v, const char *name,
                                  void *opaque, Error **errp)
{
    RISCVCPU *cpu = RISCV_CPU(obj);
    uint16_t value;

    if (!visit_type_uint16(v, name, &value, errp)) {
        return;
    }

    if (value != cpu->cfg.cboz_blocksize && riscv_cpu_is_vendor(obj)) {
        cpu_set_prop_err(cpu, name, errp);
        error_append_hint(errp, "Current '%s' val: %u\n",
                          name, cpu->cfg.cboz_blocksize);
        return;
    }

    cpu_option_add_user_setting(name, value);
    cpu->cfg.cboz_blocksize = value;
}

static void prop_cboz_blksize_get(Object *obj, Visitor *v, const char *name,
                                  void *opaque, Error **errp)
{
    uint16_t value = RISCV_CPU(obj)->cfg.cboz_blocksize;

    visit_type_uint16(v, name, &value, errp);
}

static const PropertyInfo prop_cboz_blksize = {
    .name = "cboz_blocksize",
    .get = prop_cboz_blksize_get,
    .set = prop_cboz_blksize_set,
};
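/*
 * Machine ID registers (mvendorid, mimpid, marchid).  Unlike the
 * options above, these are not recorded in general_user_opts and can
 * only be changed on dynamic CPUs; vendor and bare CPUs keep the IDs
 * they were created with.
 */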
static void prop_mvendorid_set(Object *obj, Visitor *v, const char *name,
                               void *opaque, Error **errp)
{
    bool dynamic_cpu = riscv_cpu_is_dynamic(obj);
    RISCVCPU *cpu = RISCV_CPU(obj);
    uint32_t prev_val = cpu->cfg.mvendorid;
    uint32_t value;

    if (!visit_type_uint32(v, name, &value, errp)) {
        return;
    }

    if (!dynamic_cpu && prev_val != value) {
        error_setg(errp, "Unable to change %s mvendorid (0x%x)",
                   object_get_typename(obj), prev_val);
        return;
    }

    cpu->cfg.mvendorid = value;
}

static void prop_mvendorid_get(Object *obj, Visitor *v, const char *name,
                               void *opaque, Error **errp)
{
    uint32_t value = RISCV_CPU(obj)->cfg.mvendorid;

    visit_type_uint32(v, name, &value, errp);
}

static const PropertyInfo prop_mvendorid = {
    .name = "mvendorid",
    .get = prop_mvendorid_get,
    .set = prop_mvendorid_set,
};

static void prop_mimpid_set(Object *obj, Visitor *v, const char *name,
                            void *opaque, Error **errp)
{
    bool dynamic_cpu = riscv_cpu_is_dynamic(obj);
    RISCVCPU *cpu = RISCV_CPU(obj);
    uint64_t prev_val = cpu->cfg.mimpid;
    uint64_t value;

    if (!visit_type_uint64(v, name, &value, errp)) {
        return;
    }

    if (!dynamic_cpu && prev_val != value) {
        error_setg(errp, "Unable to change %s mimpid (0x%" PRIx64 ")",
                   object_get_typename(obj), prev_val);
        return;
    }

    cpu->cfg.mimpid = value;
}

static void prop_mimpid_get(Object *obj, Visitor *v, const char *name,
                            void *opaque, Error **errp)
{
    uint64_t value = RISCV_CPU(obj)->cfg.mimpid;

    visit_type_uint64(v, name, &value, errp);
}

static const PropertyInfo prop_mimpid = {
    .name = "mimpid",
    .get = prop_mimpid_get,
    .set = prop_mimpid_set,
};

static void prop_marchid_set(Object *obj, Visitor *v, const char *name,
                             void *opaque, Error **errp)
{
    bool dynamic_cpu = riscv_cpu_is_dynamic(obj);
    RISCVCPU *cpu = RISCV_CPU(obj);
    uint64_t prev_val = cpu->cfg.marchid;
    uint64_t value, invalid_val;
    uint32_t mxlen = 0;

    if (!visit_type_uint64(v, name, &value, errp)) {
        return;
    }

    if (!dynamic_cpu && prev_val != value) {
        error_setg(errp, "Unable to change %s marchid (0x%" PRIx64 ")",
                   object_get_typename(obj), prev_val);
        return;
    }

    switch (riscv_cpu_mxl(&cpu->env)) {
    case MXL_RV32:
        mxlen = 32;
        break;
    case MXL_RV64:
    case MXL_RV128:
        mxlen = 64;
        break;
    default:
        g_assert_not_reached();
    }

    invalid_val = 1ULL << (mxlen - 1);

    if (value == invalid_val) {
        error_setg(errp, "Unable to set marchid with MSB (%u) bit set "
                         "and the remaining bits zero", mxlen);
        return;
    }

    cpu->cfg.marchid = value;
}

static void prop_marchid_get(Object *obj, Visitor *v, const char *name,
                             void *opaque, Error **errp)
{
    uint64_t value = RISCV_CPU(obj)->cfg.marchid;

    visit_type_uint64(v, name, &value, errp);
}

static const PropertyInfo prop_marchid = {
    .name = "marchid",
    .get = prop_marchid_get,
    .set = prop_marchid_set,
};
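/*
 * RVA22 profile definitions.  Profiles are surfaced to the user as
 * boolean CPU options named after the profile (e.g., assuming the
 * usual spelling, "-cpu rv64,rva22u64=true"; the option registration
 * itself lives elsewhere).  Enabling a profile turns on the MISA bits
 * in .misa_ext and every extension listed in .ext_offsets[].
 */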
/*
 * RVA22U64 defines some 'named features' that are cache-related:
 * Za64rs, Zic64b, Ziccif, Ziccrse, Ziccamoa and Zicclsm. They are
 * always implemented in TCG and don't need to be manually enabled
 * by the profile.
 */
static RISCVCPUProfile RVA22U64 = {
    .parent = NULL,
    .name = "rva22u64",
    .misa_ext = RVI | RVM | RVA | RVF | RVD | RVC | RVU,
    .priv_spec = RISCV_PROFILE_ATTR_UNUSED,
    .satp_mode = RISCV_PROFILE_ATTR_UNUSED,
    .ext_offsets = {
        CPU_CFG_OFFSET(ext_zicsr), CPU_CFG_OFFSET(ext_zihintpause),
        CPU_CFG_OFFSET(ext_zba), CPU_CFG_OFFSET(ext_zbb),
        CPU_CFG_OFFSET(ext_zbs), CPU_CFG_OFFSET(ext_zfhmin),
        CPU_CFG_OFFSET(ext_zkt), CPU_CFG_OFFSET(ext_zicntr),
        CPU_CFG_OFFSET(ext_zihpm), CPU_CFG_OFFSET(ext_zicbom),
        CPU_CFG_OFFSET(ext_zicbop), CPU_CFG_OFFSET(ext_zicboz),

        /* mandatory named features for this profile */
        CPU_CFG_OFFSET(ext_zic64b),

        RISCV_PROFILE_EXT_LIST_END
    }
};

/*
 * As with RVA22U64, RVA22S64 also defines 'named features'.
 *
 * Cache-related features that we consider enabled since we don't
 * implement a cache: Ssccptr
 *
 * Other named features that we already implement: Sstvecd, Sstvala,
 * Sscounterenw
 *
 * The remaining features/extensions come from RVA22U64.
 */
static RISCVCPUProfile RVA22S64 = {
    .parent = &RVA22U64,
    .name = "rva22s64",
    .misa_ext = RVS,
    .priv_spec = PRIV_VERSION_1_12_0,
    .satp_mode = VM_1_10_SV39,
    .ext_offsets = {
        /* rva22s64 exts */
        CPU_CFG_OFFSET(ext_zifencei), CPU_CFG_OFFSET(ext_svpbmt),
        CPU_CFG_OFFSET(ext_svinval), CPU_CFG_OFFSET(ext_svade),

        RISCV_PROFILE_EXT_LIST_END
    }
};

RISCVCPUProfile *riscv_profiles[] = {
    &RVA22U64,
    &RVA22S64,
    NULL,
};
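/*
 * Implied extension rules.  Each rule ties a MISA bit or a multi-letter
 * extension to the extensions it implies: whenever the former is
 * enabled, the latter are expected to be enabled as well by whatever
 * code walks riscv_misa_ext_implied_rules[] and
 * riscv_multi_ext_implied_rules[] (presumably at realize time).
 */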
static RISCVCPUImpliedExtsRule RVA_IMPLIED = {
    .is_misa = true,
    .ext = RVA,
    .implied_multi_exts = {
        CPU_CFG_OFFSET(ext_zalrsc), CPU_CFG_OFFSET(ext_zaamo),

        RISCV_IMPLIED_EXTS_RULE_END
    },
};

static RISCVCPUImpliedExtsRule RVD_IMPLIED = {
    .is_misa = true,
    .ext = RVD,
    .implied_misa_exts = RVF,
    .implied_multi_exts = { RISCV_IMPLIED_EXTS_RULE_END },
};

static RISCVCPUImpliedExtsRule RVF_IMPLIED = {
    .is_misa = true,
    .ext = RVF,
    .implied_multi_exts = {
        CPU_CFG_OFFSET(ext_zicsr),

        RISCV_IMPLIED_EXTS_RULE_END
    },
};

static RISCVCPUImpliedExtsRule RVM_IMPLIED = {
    .is_misa = true,
    .ext = RVM,
    .implied_multi_exts = {
        CPU_CFG_OFFSET(ext_zmmul),

        RISCV_IMPLIED_EXTS_RULE_END
    },
};

static RISCVCPUImpliedExtsRule RVV_IMPLIED = {
    .is_misa = true,
    .ext = RVV,
    .implied_multi_exts = {
        CPU_CFG_OFFSET(ext_zve64d),

        RISCV_IMPLIED_EXTS_RULE_END
    },
};

static RISCVCPUImpliedExtsRule ZCB_IMPLIED = {
    .ext = CPU_CFG_OFFSET(ext_zcb),
    .implied_multi_exts = {
        CPU_CFG_OFFSET(ext_zca),

        RISCV_IMPLIED_EXTS_RULE_END
    },
};

static RISCVCPUImpliedExtsRule ZCD_IMPLIED = {
    .ext = CPU_CFG_OFFSET(ext_zcd),
    .implied_misa_exts = RVD,
    .implied_multi_exts = {
        CPU_CFG_OFFSET(ext_zca),

        RISCV_IMPLIED_EXTS_RULE_END
    },
};

static RISCVCPUImpliedExtsRule ZCE_IMPLIED = {
    .ext = CPU_CFG_OFFSET(ext_zce),
    .implied_multi_exts = {
        CPU_CFG_OFFSET(ext_zcb), CPU_CFG_OFFSET(ext_zcmp),
        CPU_CFG_OFFSET(ext_zcmt),

        RISCV_IMPLIED_EXTS_RULE_END
    },
};

static RISCVCPUImpliedExtsRule ZCF_IMPLIED = {
    .ext = CPU_CFG_OFFSET(ext_zcf),
    .implied_misa_exts = RVF,
    .implied_multi_exts = {
        CPU_CFG_OFFSET(ext_zca),

        RISCV_IMPLIED_EXTS_RULE_END
    },
};

static RISCVCPUImpliedExtsRule ZCMP_IMPLIED = {
    .ext = CPU_CFG_OFFSET(ext_zcmp),
    .implied_multi_exts = {
        CPU_CFG_OFFSET(ext_zca),

        RISCV_IMPLIED_EXTS_RULE_END
    },
};

static RISCVCPUImpliedExtsRule ZCMT_IMPLIED = {
    .ext = CPU_CFG_OFFSET(ext_zcmt),
    .implied_multi_exts = {
        CPU_CFG_OFFSET(ext_zca), CPU_CFG_OFFSET(ext_zicsr),

        RISCV_IMPLIED_EXTS_RULE_END
    },
};

static RISCVCPUImpliedExtsRule ZDINX_IMPLIED = {
    .ext = CPU_CFG_OFFSET(ext_zdinx),
    .implied_multi_exts = {
        CPU_CFG_OFFSET(ext_zfinx),

        RISCV_IMPLIED_EXTS_RULE_END
    },
};

static RISCVCPUImpliedExtsRule ZFA_IMPLIED = {
    .ext = CPU_CFG_OFFSET(ext_zfa),
    .implied_misa_exts = RVF,
    .implied_multi_exts = { RISCV_IMPLIED_EXTS_RULE_END },
};

static RISCVCPUImpliedExtsRule ZFBFMIN_IMPLIED = {
    .ext = CPU_CFG_OFFSET(ext_zfbfmin),
    .implied_misa_exts = RVF,
    .implied_multi_exts = { RISCV_IMPLIED_EXTS_RULE_END },
};

static RISCVCPUImpliedExtsRule ZFH_IMPLIED = {
    .ext = CPU_CFG_OFFSET(ext_zfh),
    .implied_multi_exts = {
        CPU_CFG_OFFSET(ext_zfhmin),

        RISCV_IMPLIED_EXTS_RULE_END
    },
};

static RISCVCPUImpliedExtsRule ZFHMIN_IMPLIED = {
    .ext = CPU_CFG_OFFSET(ext_zfhmin),
    .implied_misa_exts = RVF,
    .implied_multi_exts = { RISCV_IMPLIED_EXTS_RULE_END },
};

static RISCVCPUImpliedExtsRule ZFINX_IMPLIED = {
    .ext = CPU_CFG_OFFSET(ext_zfinx),
    .implied_multi_exts = {
        CPU_CFG_OFFSET(ext_zicsr),

        RISCV_IMPLIED_EXTS_RULE_END
    },
};

static RISCVCPUImpliedExtsRule ZHINX_IMPLIED = {
    .ext = CPU_CFG_OFFSET(ext_zhinx),
    .implied_multi_exts = {
        CPU_CFG_OFFSET(ext_zhinxmin),

        RISCV_IMPLIED_EXTS_RULE_END
    },
};

static RISCVCPUImpliedExtsRule ZHINXMIN_IMPLIED = {
    .ext = CPU_CFG_OFFSET(ext_zhinxmin),
    .implied_multi_exts = {
        CPU_CFG_OFFSET(ext_zfinx),

        RISCV_IMPLIED_EXTS_RULE_END
    },
};

static RISCVCPUImpliedExtsRule ZICNTR_IMPLIED = {
    .ext = CPU_CFG_OFFSET(ext_zicntr),
    .implied_multi_exts = {
        CPU_CFG_OFFSET(ext_zicsr),

        RISCV_IMPLIED_EXTS_RULE_END
    },
};

static RISCVCPUImpliedExtsRule ZIHPM_IMPLIED = {
    .ext = CPU_CFG_OFFSET(ext_zihpm),
    .implied_multi_exts = {
        CPU_CFG_OFFSET(ext_zicsr),

        RISCV_IMPLIED_EXTS_RULE_END
    },
};

static RISCVCPUImpliedExtsRule ZK_IMPLIED = {
    .ext = CPU_CFG_OFFSET(ext_zk),
    .implied_multi_exts = {
        CPU_CFG_OFFSET(ext_zkn), CPU_CFG_OFFSET(ext_zkr),
        CPU_CFG_OFFSET(ext_zkt),

        RISCV_IMPLIED_EXTS_RULE_END
    },
};

static RISCVCPUImpliedExtsRule ZKN_IMPLIED = {
    .ext = CPU_CFG_OFFSET(ext_zkn),
    .implied_multi_exts = {
        CPU_CFG_OFFSET(ext_zbkb), CPU_CFG_OFFSET(ext_zbkc),
        CPU_CFG_OFFSET(ext_zbkx), CPU_CFG_OFFSET(ext_zkne),
        CPU_CFG_OFFSET(ext_zknd), CPU_CFG_OFFSET(ext_zknh),

        RISCV_IMPLIED_EXTS_RULE_END
    },
};

static RISCVCPUImpliedExtsRule ZKS_IMPLIED = {
    .ext = CPU_CFG_OFFSET(ext_zks),
    .implied_multi_exts = {
        CPU_CFG_OFFSET(ext_zbkb), CPU_CFG_OFFSET(ext_zbkc),
        CPU_CFG_OFFSET(ext_zbkx), CPU_CFG_OFFSET(ext_zksed),
        CPU_CFG_OFFSET(ext_zksh),

        RISCV_IMPLIED_EXTS_RULE_END
    },
};
static RISCVCPUImpliedExtsRule ZVBB_IMPLIED = {
    .ext = CPU_CFG_OFFSET(ext_zvbb),
    .implied_multi_exts = {
        CPU_CFG_OFFSET(ext_zvkb),

        RISCV_IMPLIED_EXTS_RULE_END
    },
};

static RISCVCPUImpliedExtsRule ZVE32F_IMPLIED = {
    .ext = CPU_CFG_OFFSET(ext_zve32f),
    .implied_misa_exts = RVF,
    .implied_multi_exts = {
        CPU_CFG_OFFSET(ext_zve32x),

        RISCV_IMPLIED_EXTS_RULE_END
    },
};

static RISCVCPUImpliedExtsRule ZVE32X_IMPLIED = {
    .ext = CPU_CFG_OFFSET(ext_zve32x),
    .implied_multi_exts = {
        CPU_CFG_OFFSET(ext_zicsr),

        RISCV_IMPLIED_EXTS_RULE_END
    },
};

static RISCVCPUImpliedExtsRule ZVE64D_IMPLIED = {
    .ext = CPU_CFG_OFFSET(ext_zve64d),
    .implied_misa_exts = RVD,
    .implied_multi_exts = {
        CPU_CFG_OFFSET(ext_zve64f),

        RISCV_IMPLIED_EXTS_RULE_END
    },
};

static RISCVCPUImpliedExtsRule ZVE64F_IMPLIED = {
    .ext = CPU_CFG_OFFSET(ext_zve64f),
    .implied_misa_exts = RVF,
    .implied_multi_exts = {
        CPU_CFG_OFFSET(ext_zve32f), CPU_CFG_OFFSET(ext_zve64x),

        RISCV_IMPLIED_EXTS_RULE_END
    },
};

static RISCVCPUImpliedExtsRule ZVE64X_IMPLIED = {
    .ext = CPU_CFG_OFFSET(ext_zve64x),
    .implied_multi_exts = {
        CPU_CFG_OFFSET(ext_zve32x),

        RISCV_IMPLIED_EXTS_RULE_END
    },
};

static RISCVCPUImpliedExtsRule ZVFBFMIN_IMPLIED = {
    .ext = CPU_CFG_OFFSET(ext_zvfbfmin),
    .implied_multi_exts = {
        CPU_CFG_OFFSET(ext_zve32f),

        RISCV_IMPLIED_EXTS_RULE_END
    },
};

static RISCVCPUImpliedExtsRule ZVFBFWMA_IMPLIED = {
    .ext = CPU_CFG_OFFSET(ext_zvfbfwma),
    .implied_multi_exts = {
        CPU_CFG_OFFSET(ext_zvfbfmin), CPU_CFG_OFFSET(ext_zfbfmin),

        RISCV_IMPLIED_EXTS_RULE_END
    },
};

static RISCVCPUImpliedExtsRule ZVFH_IMPLIED = {
    .ext = CPU_CFG_OFFSET(ext_zvfh),
    .implied_multi_exts = {
        CPU_CFG_OFFSET(ext_zvfhmin), CPU_CFG_OFFSET(ext_zfhmin),

        RISCV_IMPLIED_EXTS_RULE_END
    },
};

static RISCVCPUImpliedExtsRule ZVFHMIN_IMPLIED = {
    .ext = CPU_CFG_OFFSET(ext_zvfhmin),
    .implied_multi_exts = {
        CPU_CFG_OFFSET(ext_zve32f),

        RISCV_IMPLIED_EXTS_RULE_END
    },
};

static RISCVCPUImpliedExtsRule ZVKN_IMPLIED = {
    .ext = CPU_CFG_OFFSET(ext_zvkn),
    .implied_multi_exts = {
        CPU_CFG_OFFSET(ext_zvkned), CPU_CFG_OFFSET(ext_zvknhb),
        CPU_CFG_OFFSET(ext_zvkb), CPU_CFG_OFFSET(ext_zvkt),

        RISCV_IMPLIED_EXTS_RULE_END
    },
};

static RISCVCPUImpliedExtsRule ZVKNC_IMPLIED = {
    .ext = CPU_CFG_OFFSET(ext_zvknc),
    .implied_multi_exts = {
        CPU_CFG_OFFSET(ext_zvkn), CPU_CFG_OFFSET(ext_zvbc),

        RISCV_IMPLIED_EXTS_RULE_END
    },
};

static RISCVCPUImpliedExtsRule ZVKNG_IMPLIED = {
    .ext = CPU_CFG_OFFSET(ext_zvkng),
    .implied_multi_exts = {
        CPU_CFG_OFFSET(ext_zvkn), CPU_CFG_OFFSET(ext_zvkg),

        RISCV_IMPLIED_EXTS_RULE_END
    },
};

static RISCVCPUImpliedExtsRule ZVKNHB_IMPLIED = {
    .ext = CPU_CFG_OFFSET(ext_zvknhb),
    .implied_multi_exts = {
        CPU_CFG_OFFSET(ext_zve64x),

        RISCV_IMPLIED_EXTS_RULE_END
    },
};

static RISCVCPUImpliedExtsRule ZVKS_IMPLIED = {
    .ext = CPU_CFG_OFFSET(ext_zvks),
    .implied_multi_exts = {
        CPU_CFG_OFFSET(ext_zvksed), CPU_CFG_OFFSET(ext_zvksh),
        CPU_CFG_OFFSET(ext_zvkb), CPU_CFG_OFFSET(ext_zvkt),

        RISCV_IMPLIED_EXTS_RULE_END
    },
};

static RISCVCPUImpliedExtsRule ZVKSC_IMPLIED = {
    .ext = CPU_CFG_OFFSET(ext_zvksc),
    .implied_multi_exts = {
        CPU_CFG_OFFSET(ext_zvks), CPU_CFG_OFFSET(ext_zvbc),

        RISCV_IMPLIED_EXTS_RULE_END
    },
};

static RISCVCPUImpliedExtsRule ZVKSG_IMPLIED = {
    .ext = CPU_CFG_OFFSET(ext_zvksg),
    .implied_multi_exts = {
        CPU_CFG_OFFSET(ext_zvks), CPU_CFG_OFFSET(ext_zvkg),

        RISCV_IMPLIED_EXTS_RULE_END
    },
};

RISCVCPUImpliedExtsRule *riscv_misa_ext_implied_rules[] = {
    &RVA_IMPLIED, &RVD_IMPLIED, &RVF_IMPLIED,
    &RVM_IMPLIED, &RVV_IMPLIED, NULL
};

RISCVCPUImpliedExtsRule *riscv_multi_ext_implied_rules[] = {
    &ZCB_IMPLIED, &ZCD_IMPLIED, &ZCE_IMPLIED,
    &ZCF_IMPLIED, &ZCMP_IMPLIED, &ZCMT_IMPLIED,
    &ZDINX_IMPLIED, &ZFA_IMPLIED, &ZFBFMIN_IMPLIED,
    &ZFH_IMPLIED, &ZFHMIN_IMPLIED, &ZFINX_IMPLIED,
    &ZHINX_IMPLIED, &ZHINXMIN_IMPLIED, &ZICNTR_IMPLIED,
    &ZIHPM_IMPLIED, &ZK_IMPLIED, &ZKN_IMPLIED,
    &ZKS_IMPLIED, &ZVBB_IMPLIED, &ZVE32F_IMPLIED,
    &ZVE32X_IMPLIED, &ZVE64D_IMPLIED, &ZVE64F_IMPLIED,
    &ZVE64X_IMPLIED, &ZVFBFMIN_IMPLIED, &ZVFBFWMA_IMPLIED,
    &ZVFH_IMPLIED, &ZVFHMIN_IMPLIED, &ZVKN_IMPLIED,
    &ZVKNC_IMPLIED, &ZVKNG_IMPLIED, &ZVKNHB_IMPLIED,
    &ZVKS_IMPLIED, &ZVKSC_IMPLIED, &ZVKSG_IMPLIED,
    NULL
};

static Property riscv_cpu_properties[] = {
    DEFINE_PROP_BOOL("debug", RISCVCPU, cfg.debug, true),

    {.name = "pmu-mask", .info = &prop_pmu_mask},
    {.name = "pmu-num", .info = &prop_pmu_num}, /* Deprecated */

    {.name = "mmu", .info = &prop_mmu},
    {.name = "pmp", .info = &prop_pmp},

    {.name = "priv_spec", .info = &prop_priv_spec},
    {.name = "vext_spec", .info = &prop_vext_spec},

    {.name = "vlen", .info = &prop_vlen},
    {.name = "elen", .info = &prop_elen},

    {.name = "cbom_blocksize", .info = &prop_cbom_blksize},
    {.name = "cbop_blocksize", .info = &prop_cbop_blksize},
    {.name = "cboz_blocksize", .info = &prop_cboz_blksize},

    {.name = "mvendorid", .info = &prop_mvendorid},
    {.name = "mimpid", .info = &prop_mimpid},
    {.name = "marchid", .info = &prop_marchid},

#ifndef CONFIG_USER_ONLY
    DEFINE_PROP_UINT64("resetvec", RISCVCPU, env.resetvec, DEFAULT_RSTVEC),
#endif

    DEFINE_PROP_BOOL("short-isa-string", RISCVCPU, cfg.short_isa_string, false),

    DEFINE_PROP_BOOL("rvv_ta_all_1s", RISCVCPU, cfg.rvv_ta_all_1s, false),
    DEFINE_PROP_BOOL("rvv_ma_all_1s", RISCVCPU, cfg.rvv_ma_all_1s, false),
    /*
     * write_misa() is marked as experimental for now so mark
     * it with -x and default to 'false'.
     */
    DEFINE_PROP_BOOL("x-misa-w", RISCVCPU, cfg.misa_w, false),
    DEFINE_PROP_END_OF_LIST(),
};

#if defined(TARGET_RISCV64)
static void rva22u64_profile_cpu_init(Object *obj)
{
    rv64i_bare_cpu_init(obj);

    RVA22U64.enabled = true;
}

static void rva22s64_profile_cpu_init(Object *obj)
{
    rv64i_bare_cpu_init(obj);

    RVA22S64.enabled = true;
}
#endif

static const gchar *riscv_gdb_arch_name(CPUState *cs)
{
    RISCVCPU *cpu = RISCV_CPU(cs);
    CPURISCVState *env = &cpu->env;

    switch (riscv_cpu_mxl(env)) {
    case MXL_RV32:
        return "riscv:rv32";
    case MXL_RV64:
    case MXL_RV128:
        return "riscv:rv64";
    default:
        g_assert_not_reached();
    }
}

#ifndef CONFIG_USER_ONLY
static int64_t riscv_get_arch_id(CPUState *cs)
{
    RISCVCPU *cpu = RISCV_CPU(cs);

    return cpu->env.mhartid;
}

#include "hw/core/sysemu-cpu-ops.h"

static const struct SysemuCPUOps riscv_sysemu_ops = {
    .get_phys_page_debug = riscv_cpu_get_phys_page_debug,
    .write_elf64_note = riscv_cpu_write_elf64_note,
    .write_elf32_note = riscv_cpu_write_elf32_note,
    .legacy_vmsd = &vmstate_riscv_cpu,
};
#endif

static void riscv_cpu_common_class_init(ObjectClass *c, void *data)
{
    RISCVCPUClass *mcc = RISCV_CPU_CLASS(c);
    CPUClass *cc = CPU_CLASS(c);
    DeviceClass *dc = DEVICE_CLASS(c);
    ResettableClass *rc = RESETTABLE_CLASS(c);

    device_class_set_parent_realize(dc, riscv_cpu_realize,
                                    &mcc->parent_realize);

    resettable_class_set_parent_phases(rc, NULL, riscv_cpu_reset_hold, NULL,
                                       &mcc->parent_phases);

    cc->class_by_name = riscv_cpu_class_by_name;
    cc->has_work = riscv_cpu_has_work;
    cc->mmu_index = riscv_cpu_mmu_index;
    cc->dump_state = riscv_cpu_dump_state;
    cc->set_pc = riscv_cpu_set_pc;
    cc->get_pc = riscv_cpu_get_pc;
    cc->gdb_read_register = riscv_cpu_gdb_read_register;
    cc->gdb_write_register = riscv_cpu_gdb_write_register;
    cc->gdb_stop_before_watchpoint = true;
    cc->disas_set_info = riscv_cpu_disas_set_info;
#ifndef CONFIG_USER_ONLY
    cc->sysemu_ops = &riscv_sysemu_ops;
    cc->get_arch_id = riscv_get_arch_id;
#endif
    cc->gdb_arch_name = riscv_gdb_arch_name;

    device_class_set_props(dc, riscv_cpu_properties);
}

static void riscv_cpu_class_init(ObjectClass *c, void *data)
{
    RISCVCPUClass *mcc = RISCV_CPU_CLASS(c);

    mcc->misa_mxl_max = (uint32_t)(uintptr_t)data;
    riscv_cpu_validate_misa_mxl(mcc);
}
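/*
 * ISA string generation.  riscv_isa_string() emits the base ("rv64",
 * etc.) plus the enabled single-letter extensions, and then, unless
 * short-isa-string is set, riscv_isa_string_ext() appends every
 * enabled multi-letter extension separated by underscores, yielding
 * something like "rv64imafdc_zicsr_zifencei" (the exact contents
 * depend on the CPU configuration).
 */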
static void riscv_isa_string_ext(RISCVCPU *cpu, char **isa_str,
                                 int max_str_len)
{
    const RISCVIsaExtData *edata;
    char *old = *isa_str;
    char *new = *isa_str;

    for (edata = isa_edata_arr; edata && edata->name; edata++) {
        if (isa_ext_is_enabled(cpu, edata->ext_enable_offset)) {
            new = g_strconcat(old, "_", edata->name, NULL);
            g_free(old);
            old = new;
        }
    }

    *isa_str = new;
}

char *riscv_isa_string(RISCVCPU *cpu)
{
    RISCVCPUClass *mcc = RISCV_CPU_GET_CLASS(cpu);
    int i;
    const size_t maxlen = sizeof("rv128") + sizeof(riscv_single_letter_exts);
    char *isa_str = g_new(char, maxlen);
    int xlen = riscv_cpu_max_xlen(mcc);
    char *p = isa_str + snprintf(isa_str, maxlen, "rv%d", xlen);

    for (i = 0; i < sizeof(riscv_single_letter_exts) - 1; i++) {
        if (cpu->env.misa_ext & RV(riscv_single_letter_exts[i])) {
            *p++ = qemu_tolower(riscv_single_letter_exts[i]);
        }
    }
    *p = '\0';
    if (!cpu->cfg.short_isa_string) {
        riscv_isa_string_ext(cpu, &isa_str, maxlen);
    }
    return isa_str;
}

#ifndef CONFIG_USER_ONLY
static char **riscv_isa_extensions_list(RISCVCPU *cpu, int *count)
{
    int maxlen = ARRAY_SIZE(riscv_single_letter_exts) + ARRAY_SIZE(isa_edata_arr);
    char **extensions = g_new(char *, maxlen);

    for (int i = 0; i < sizeof(riscv_single_letter_exts) - 1; i++) {
        if (cpu->env.misa_ext & RV(riscv_single_letter_exts[i])) {
            extensions[*count] = g_new(char, 2);
            snprintf(extensions[*count], 2, "%c",
                     qemu_tolower(riscv_single_letter_exts[i]));
            (*count)++;
        }
    }

    for (const RISCVIsaExtData *edata = isa_edata_arr; edata->name; edata++) {
        if (isa_ext_is_enabled(cpu, edata->ext_enable_offset)) {
            extensions[*count] = g_strdup(edata->name);
            (*count)++;
        }
    }

    return extensions;
}
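/*
 * riscv_isa_write_fdt() below fills in the ISA-related properties of a
 * CPU's device tree node.  With illustrative values only, the result
 * looks like:
 *
 *     riscv,isa            = "rv64imafdc_zicsr_zifencei...";
 *     riscv,isa-base       = "rv64i";
 *     riscv,isa-extensions = "i", "m", "a", ..., "zicsr", ...;
 */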
void riscv_isa_write_fdt(RISCVCPU *cpu, void *fdt, char *nodename)
{
    RISCVCPUClass *mcc = RISCV_CPU_GET_CLASS(cpu);
    const size_t maxlen = sizeof("rv128i");
    g_autofree char *isa_base = g_new(char, maxlen);
    g_autofree char *riscv_isa;
    char **isa_extensions;
    int count = 0;
    int xlen = riscv_cpu_max_xlen(mcc);

    riscv_isa = riscv_isa_string(cpu);
    qemu_fdt_setprop_string(fdt, nodename, "riscv,isa", riscv_isa);

    snprintf(isa_base, maxlen, "rv%di", xlen);
    qemu_fdt_setprop_string(fdt, nodename, "riscv,isa-base", isa_base);

    isa_extensions = riscv_isa_extensions_list(cpu, &count);
    qemu_fdt_setprop_string_array(fdt, nodename, "riscv,isa-extensions",
                                  isa_extensions, count);

    for (int i = 0; i < count; i++) {
        g_free(isa_extensions[i]);
    }

    g_free(isa_extensions);
}
#endif

#define DEFINE_CPU(type_name, misa_mxl_max, initfn)         \
    {                                                       \
        .name = (type_name),                                \
        .parent = TYPE_RISCV_CPU,                           \
        .instance_init = (initfn),                          \
        .class_init = riscv_cpu_class_init,                 \
        .class_data = (void *)(misa_mxl_max)                \
    }

#define DEFINE_DYNAMIC_CPU(type_name, misa_mxl_max, initfn) \
    {                                                       \
        .name = (type_name),                                \
        .parent = TYPE_RISCV_DYNAMIC_CPU,                   \
        .instance_init = (initfn),                          \
        .class_init = riscv_cpu_class_init,                 \
        .class_data = (void *)(misa_mxl_max)                \
    }

#define DEFINE_VENDOR_CPU(type_name, misa_mxl_max, initfn)  \
    {                                                       \
        .name = (type_name),                                \
        .parent = TYPE_RISCV_VENDOR_CPU,                    \
        .instance_init = (initfn),                          \
        .class_init = riscv_cpu_class_init,                 \
        .class_data = (void *)(misa_mxl_max)                \
    }

#define DEFINE_BARE_CPU(type_name, misa_mxl_max, initfn)    \
    {                                                       \
        .name = (type_name),                                \
        .parent = TYPE_RISCV_BARE_CPU,                      \
        .instance_init = (initfn),                          \
        .class_init = riscv_cpu_class_init,                 \
        .class_data = (void *)(misa_mxl_max)                \
    }

#define DEFINE_PROFILE_CPU(type_name, misa_mxl_max, initfn) \
    {                                                       \
        .name = (type_name),                                \
        .parent = TYPE_RISCV_BARE_CPU,                      \
        .instance_init = (initfn),                          \
        .class_init = riscv_cpu_class_init,                 \
        .class_data = (void *)(misa_mxl_max)                \
    }

static const TypeInfo riscv_cpu_type_infos[] = {
    {
        .name = TYPE_RISCV_CPU,
        .parent = TYPE_CPU,
        .instance_size = sizeof(RISCVCPU),
        .instance_align = __alignof(RISCVCPU),
        .instance_init = riscv_cpu_init,
        .instance_post_init = riscv_cpu_post_init,
        .abstract = true,
        .class_size = sizeof(RISCVCPUClass),
        .class_init = riscv_cpu_common_class_init,
    },
    {
        .name = TYPE_RISCV_DYNAMIC_CPU,
        .parent = TYPE_RISCV_CPU,
        .abstract = true,
    },
    {
        .name = TYPE_RISCV_VENDOR_CPU,
        .parent = TYPE_RISCV_CPU,
        .abstract = true,
    },
    {
        .name = TYPE_RISCV_BARE_CPU,
        .parent = TYPE_RISCV_CPU,
        .instance_init = riscv_bare_cpu_init,
        .abstract = true,
    },
#if defined(TARGET_RISCV32)
    DEFINE_DYNAMIC_CPU(TYPE_RISCV_CPU_ANY,       MXL_RV32,  riscv_any_cpu_init),
    DEFINE_DYNAMIC_CPU(TYPE_RISCV_CPU_MAX,       MXL_RV32,  riscv_max_cpu_init),
    DEFINE_DYNAMIC_CPU(TYPE_RISCV_CPU_BASE32,    MXL_RV32,  rv32_base_cpu_init),
    DEFINE_VENDOR_CPU(TYPE_RISCV_CPU_IBEX,       MXL_RV32,  rv32_ibex_cpu_init),
    DEFINE_VENDOR_CPU(TYPE_RISCV_CPU_SIFIVE_E31, MXL_RV32,  rv32_sifive_e_cpu_init),
    DEFINE_VENDOR_CPU(TYPE_RISCV_CPU_SIFIVE_E34, MXL_RV32,  rv32_imafcu_nommu_cpu_init),
    DEFINE_VENDOR_CPU(TYPE_RISCV_CPU_SIFIVE_U34, MXL_RV32,  rv32_sifive_u_cpu_init),
    DEFINE_BARE_CPU(TYPE_RISCV_CPU_RV32I,        MXL_RV32,  rv32i_bare_cpu_init),
    DEFINE_BARE_CPU(TYPE_RISCV_CPU_RV32E,        MXL_RV32,  rv32e_bare_cpu_init),
#elif defined(TARGET_RISCV64)
    DEFINE_DYNAMIC_CPU(TYPE_RISCV_CPU_ANY,       MXL_RV64,  riscv_any_cpu_init),
    DEFINE_DYNAMIC_CPU(TYPE_RISCV_CPU_MAX,       MXL_RV64,  riscv_max_cpu_init),
    DEFINE_DYNAMIC_CPU(TYPE_RISCV_CPU_BASE64,    MXL_RV64,  rv64_base_cpu_init),
    DEFINE_VENDOR_CPU(TYPE_RISCV_CPU_SIFIVE_E51, MXL_RV64,  rv64_sifive_e_cpu_init),
    DEFINE_VENDOR_CPU(TYPE_RISCV_CPU_SIFIVE_U54, MXL_RV64,  rv64_sifive_u_cpu_init),
    DEFINE_VENDOR_CPU(TYPE_RISCV_CPU_SHAKTI_C,   MXL_RV64,  rv64_sifive_u_cpu_init),
    DEFINE_VENDOR_CPU(TYPE_RISCV_CPU_THEAD_C906, MXL_RV64,  rv64_thead_c906_cpu_init),
    DEFINE_VENDOR_CPU(TYPE_RISCV_CPU_VEYRON_V1,  MXL_RV64,  rv64_veyron_v1_cpu_init),
#ifdef CONFIG_TCG
    DEFINE_DYNAMIC_CPU(TYPE_RISCV_CPU_BASE128,   MXL_RV128, rv128_base_cpu_init),
#endif /* CONFIG_TCG */
    DEFINE_BARE_CPU(TYPE_RISCV_CPU_RV64I,        MXL_RV64,  rv64i_bare_cpu_init),
    DEFINE_BARE_CPU(TYPE_RISCV_CPU_RV64E,        MXL_RV64,  rv64e_bare_cpu_init),
    DEFINE_PROFILE_CPU(TYPE_RISCV_CPU_RVA22U64,  MXL_RV64,  rva22u64_profile_cpu_init),
    DEFINE_PROFILE_CPU(TYPE_RISCV_CPU_RVA22S64,  MXL_RV64,  rva22s64_profile_cpu_init),
#endif /* TARGET_RISCV64 */
};

DEFINE_TYPES(riscv_cpu_type_infos)