/*
 * QEMU RISC-V CPU
 *
 * Copyright (c) 2016-2017 Sagar Karandikar, sagark@eecs.berkeley.edu
 * Copyright (c) 2017-2018 SiFive, Inc.
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms and conditions of the GNU General Public License,
 * version 2 or later, as published by the Free Software Foundation.
 *
 * This program is distributed in the hope it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
 * more details.
 *
 * You should have received a copy of the GNU General Public License along with
 * this program. If not, see <http://www.gnu.org/licenses/>.
 */

#include "qemu/osdep.h"
#include "qemu/qemu-print.h"
#include "qemu/ctype.h"
#include "qemu/log.h"
#include "cpu.h"
#include "cpu_vendorid.h"
#include "internals.h"
#include "exec/exec-all.h"
#include "qapi/error.h"
#include "qapi/visitor.h"
#include "qemu/error-report.h"
#include "hw/qdev-properties.h"
#include "hw/core/qdev-prop-internal.h"
#include "migration/vmstate.h"
#include "fpu/softfloat-helpers.h"
#include "sysemu/device_tree.h"
#include "sysemu/kvm.h"
#include "sysemu/tcg.h"
#include "kvm/kvm_riscv.h"
#include "tcg/tcg-cpu.h"
#include "tcg/tcg.h"

/* RISC-V CPU definitions */
static const char riscv_single_letter_exts[] = "IEMAFDQCBPVH";
const uint32_t misa_bits[] = {RVI, RVE, RVM, RVA, RVF, RVD, RVV,
                              RVC, RVS, RVU, RVH, RVJ, RVG, RVB, 0};

/*
 * From vector_helper.c
 * Note that vector data is stored in host-endian 64-bit chunks,
 * so addressing bytes needs a host-endian fixup.
 */
#if HOST_BIG_ENDIAN
#define BYTE(x)   ((x) ^ 7)
#else
#define BYTE(x)   (x)
#endif

bool riscv_cpu_is_32bit(RISCVCPU *cpu)
{
    return riscv_cpu_mxl(&cpu->env) == MXL_RV32;
}

/* Hash that stores general user set numeric options */
static GHashTable *general_user_opts;

static void cpu_option_add_user_setting(const char *optname, uint32_t value)
{
    g_hash_table_insert(general_user_opts, (gpointer)optname,
                        GUINT_TO_POINTER(value));
}

bool riscv_cpu_option_set(const char *optname)
{
    return g_hash_table_contains(general_user_opts, optname);
}

#define ISA_EXT_DATA_ENTRY(_name, _min_ver, _prop) \
    {#_name, _min_ver, CPU_CFG_OFFSET(_prop)}

/*
 * Here are the ordering rules of extension naming defined by the RISC-V
 * specification:
 * 1. All extensions should be separated from other multi-letter extensions
 *    by an underscore.
 * 2. The first letter following the 'Z' conventionally indicates the most
 *    closely related alphabetical extension category, IMAFDQLCBKJTPVH.
 *    If multiple 'Z' extensions are named, they should be ordered first
 *    by category, then alphabetically within a category.
 * 3. Standard supervisor-level extensions (starting with 'S') should be
 *    listed after standard unprivileged extensions. If multiple
 *    supervisor-level extensions are listed, they should be ordered
 *    alphabetically.
 * 4. Non-standard extensions (starting with 'X') must be listed after all
 *    standard extensions. They must be separated from other multi-letter
 *    extensions by an underscore.
 *
 * Single-letter extensions are checked in riscv_cpu_validate_misa_priv()
 * instead.
 */
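/*
 * Illustrative example (not from the original source): a riscv,isa string
 * that follows the rules above would look like
 * "rv64imafdc_zicsr_zifencei_zba_zbb_sstc_svadu_xtheadba", i.e. multi-letter
 * 'Z' extensions ordered by category and then alphabetically, followed by
 * 'S' extensions, followed by 'X' vendor extensions, all separated by
 * underscores.
 */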
const RISCVIsaExtData isa_edata_arr[] = {
    ISA_EXT_DATA_ENTRY(zic64b, PRIV_VERSION_1_12_0, ext_zic64b),
    ISA_EXT_DATA_ENTRY(zicbom, PRIV_VERSION_1_12_0, ext_zicbom),
    ISA_EXT_DATA_ENTRY(zicbop, PRIV_VERSION_1_12_0, ext_zicbop),
    ISA_EXT_DATA_ENTRY(zicboz, PRIV_VERSION_1_12_0, ext_zicboz),
    ISA_EXT_DATA_ENTRY(ziccamoa, PRIV_VERSION_1_11_0, ext_always_enabled),
    ISA_EXT_DATA_ENTRY(ziccif, PRIV_VERSION_1_11_0, ext_always_enabled),
    ISA_EXT_DATA_ENTRY(zicclsm, PRIV_VERSION_1_11_0, ext_always_enabled),
    ISA_EXT_DATA_ENTRY(ziccrse, PRIV_VERSION_1_11_0, ext_always_enabled),
    ISA_EXT_DATA_ENTRY(zicond, PRIV_VERSION_1_12_0, ext_zicond),
    ISA_EXT_DATA_ENTRY(zicntr, PRIV_VERSION_1_12_0, ext_zicntr),
    ISA_EXT_DATA_ENTRY(zicsr, PRIV_VERSION_1_10_0, ext_zicsr),
    ISA_EXT_DATA_ENTRY(zifencei, PRIV_VERSION_1_10_0, ext_zifencei),
    ISA_EXT_DATA_ENTRY(zihintntl, PRIV_VERSION_1_10_0, ext_zihintntl),
    ISA_EXT_DATA_ENTRY(zihintpause, PRIV_VERSION_1_10_0, ext_zihintpause),
    ISA_EXT_DATA_ENTRY(zihpm, PRIV_VERSION_1_12_0, ext_zihpm),
    ISA_EXT_DATA_ENTRY(zmmul, PRIV_VERSION_1_12_0, ext_zmmul),
    ISA_EXT_DATA_ENTRY(za64rs, PRIV_VERSION_1_12_0, ext_always_enabled),
    ISA_EXT_DATA_ENTRY(zaamo, PRIV_VERSION_1_12_0, ext_zaamo),
    ISA_EXT_DATA_ENTRY(zacas, PRIV_VERSION_1_12_0, ext_zacas),
    ISA_EXT_DATA_ENTRY(zalrsc, PRIV_VERSION_1_12_0, ext_zalrsc),
    ISA_EXT_DATA_ENTRY(zawrs, PRIV_VERSION_1_12_0, ext_zawrs),
    ISA_EXT_DATA_ENTRY(zfa, PRIV_VERSION_1_12_0, ext_zfa),
    ISA_EXT_DATA_ENTRY(zfbfmin, PRIV_VERSION_1_12_0, ext_zfbfmin),
    ISA_EXT_DATA_ENTRY(zfh, PRIV_VERSION_1_11_0, ext_zfh),
    ISA_EXT_DATA_ENTRY(zfhmin, PRIV_VERSION_1_11_0, ext_zfhmin),
    ISA_EXT_DATA_ENTRY(zfinx, PRIV_VERSION_1_12_0, ext_zfinx),
    ISA_EXT_DATA_ENTRY(zdinx, PRIV_VERSION_1_12_0, ext_zdinx),
    ISA_EXT_DATA_ENTRY(zca, PRIV_VERSION_1_12_0, ext_zca),
    ISA_EXT_DATA_ENTRY(zcb, PRIV_VERSION_1_12_0, ext_zcb),
    ISA_EXT_DATA_ENTRY(zcf, PRIV_VERSION_1_12_0, ext_zcf),
    ISA_EXT_DATA_ENTRY(zcd, PRIV_VERSION_1_12_0, ext_zcd),
    ISA_EXT_DATA_ENTRY(zce, PRIV_VERSION_1_12_0, ext_zce),
    ISA_EXT_DATA_ENTRY(zcmp, PRIV_VERSION_1_12_0, ext_zcmp),
    ISA_EXT_DATA_ENTRY(zcmt, PRIV_VERSION_1_12_0, ext_zcmt),
    ISA_EXT_DATA_ENTRY(zba, PRIV_VERSION_1_12_0, ext_zba),
    ISA_EXT_DATA_ENTRY(zbb, PRIV_VERSION_1_12_0, ext_zbb),
    ISA_EXT_DATA_ENTRY(zbc, PRIV_VERSION_1_12_0, ext_zbc),
    ISA_EXT_DATA_ENTRY(zbkb, PRIV_VERSION_1_12_0, ext_zbkb),
    ISA_EXT_DATA_ENTRY(zbkc, PRIV_VERSION_1_12_0, ext_zbkc),
    ISA_EXT_DATA_ENTRY(zbkx, PRIV_VERSION_1_12_0, ext_zbkx),
    ISA_EXT_DATA_ENTRY(zbs, PRIV_VERSION_1_12_0, ext_zbs),
    ISA_EXT_DATA_ENTRY(zk, PRIV_VERSION_1_12_0, ext_zk),
    ISA_EXT_DATA_ENTRY(zkn, PRIV_VERSION_1_12_0, ext_zkn),
    ISA_EXT_DATA_ENTRY(zknd, PRIV_VERSION_1_12_0, ext_zknd),
    ISA_EXT_DATA_ENTRY(zkne, PRIV_VERSION_1_12_0, ext_zkne),
    ISA_EXT_DATA_ENTRY(zknh, PRIV_VERSION_1_12_0, ext_zknh),
    ISA_EXT_DATA_ENTRY(zkr, PRIV_VERSION_1_12_0, ext_zkr),
    ISA_EXT_DATA_ENTRY(zks, PRIV_VERSION_1_12_0, ext_zks),
    ISA_EXT_DATA_ENTRY(zksed, PRIV_VERSION_1_12_0, ext_zksed),
    ISA_EXT_DATA_ENTRY(zksh, PRIV_VERSION_1_12_0, ext_zksh),
    ISA_EXT_DATA_ENTRY(zkt, PRIV_VERSION_1_12_0, ext_zkt),
    ISA_EXT_DATA_ENTRY(ztso, PRIV_VERSION_1_12_0, ext_ztso),
    ISA_EXT_DATA_ENTRY(zvbb, PRIV_VERSION_1_12_0, ext_zvbb),
    ISA_EXT_DATA_ENTRY(zvbc, PRIV_VERSION_1_12_0, ext_zvbc),
    ISA_EXT_DATA_ENTRY(zve32f, PRIV_VERSION_1_10_0, ext_zve32f),
    ISA_EXT_DATA_ENTRY(zve64f, PRIV_VERSION_1_10_0, ext_zve64f),
    ISA_EXT_DATA_ENTRY(zve64d, PRIV_VERSION_1_10_0, ext_zve64d),
    ISA_EXT_DATA_ENTRY(zvfbfmin, PRIV_VERSION_1_12_0, ext_zvfbfmin),
    ISA_EXT_DATA_ENTRY(zvfbfwma, PRIV_VERSION_1_12_0, ext_zvfbfwma),
    ISA_EXT_DATA_ENTRY(zvfh, PRIV_VERSION_1_12_0, ext_zvfh),
    ISA_EXT_DATA_ENTRY(zvfhmin, PRIV_VERSION_1_12_0, ext_zvfhmin),
    ISA_EXT_DATA_ENTRY(zvkb, PRIV_VERSION_1_12_0, ext_zvkb),
    ISA_EXT_DATA_ENTRY(zvkg, PRIV_VERSION_1_12_0, ext_zvkg),
    ISA_EXT_DATA_ENTRY(zvkn, PRIV_VERSION_1_12_0, ext_zvkn),
    ISA_EXT_DATA_ENTRY(zvknc, PRIV_VERSION_1_12_0, ext_zvknc),
    ISA_EXT_DATA_ENTRY(zvkned, PRIV_VERSION_1_12_0, ext_zvkned),
    ISA_EXT_DATA_ENTRY(zvkng, PRIV_VERSION_1_12_0, ext_zvkng),
    ISA_EXT_DATA_ENTRY(zvknha, PRIV_VERSION_1_12_0, ext_zvknha),
    ISA_EXT_DATA_ENTRY(zvknhb, PRIV_VERSION_1_12_0, ext_zvknhb),
    ISA_EXT_DATA_ENTRY(zvks, PRIV_VERSION_1_12_0, ext_zvks),
    ISA_EXT_DATA_ENTRY(zvksc, PRIV_VERSION_1_12_0, ext_zvksc),
    ISA_EXT_DATA_ENTRY(zvksed, PRIV_VERSION_1_12_0, ext_zvksed),
    ISA_EXT_DATA_ENTRY(zvksg, PRIV_VERSION_1_12_0, ext_zvksg),
    ISA_EXT_DATA_ENTRY(zvksh, PRIV_VERSION_1_12_0, ext_zvksh),
    ISA_EXT_DATA_ENTRY(zvkt, PRIV_VERSION_1_12_0, ext_zvkt),
    ISA_EXT_DATA_ENTRY(zhinx, PRIV_VERSION_1_12_0, ext_zhinx),
    ISA_EXT_DATA_ENTRY(zhinxmin, PRIV_VERSION_1_12_0, ext_zhinxmin),
    ISA_EXT_DATA_ENTRY(smaia, PRIV_VERSION_1_12_0, ext_smaia),
    ISA_EXT_DATA_ENTRY(smepmp, PRIV_VERSION_1_12_0, ext_smepmp),
    ISA_EXT_DATA_ENTRY(smstateen, PRIV_VERSION_1_12_0, ext_smstateen),
    ISA_EXT_DATA_ENTRY(ssaia, PRIV_VERSION_1_12_0, ext_ssaia),
    ISA_EXT_DATA_ENTRY(ssccptr, PRIV_VERSION_1_11_0, ext_always_enabled),
    ISA_EXT_DATA_ENTRY(sscofpmf, PRIV_VERSION_1_12_0, ext_sscofpmf),
    ISA_EXT_DATA_ENTRY(sscounterenw, PRIV_VERSION_1_12_0, ext_always_enabled),
    ISA_EXT_DATA_ENTRY(sstc, PRIV_VERSION_1_12_0, ext_sstc),
    ISA_EXT_DATA_ENTRY(sstvala, PRIV_VERSION_1_12_0, ext_always_enabled),
    ISA_EXT_DATA_ENTRY(sstvecd, PRIV_VERSION_1_12_0, ext_always_enabled),
    ISA_EXT_DATA_ENTRY(svade, PRIV_VERSION_1_11_0, ext_svade),
    ISA_EXT_DATA_ENTRY(svadu, PRIV_VERSION_1_12_0, ext_svadu),
    ISA_EXT_DATA_ENTRY(svinval, PRIV_VERSION_1_12_0, ext_svinval),
    ISA_EXT_DATA_ENTRY(svnapot, PRIV_VERSION_1_12_0, ext_svnapot),
    ISA_EXT_DATA_ENTRY(svpbmt, PRIV_VERSION_1_12_0, ext_svpbmt),
    ISA_EXT_DATA_ENTRY(xtheadba, PRIV_VERSION_1_11_0, ext_xtheadba),
    ISA_EXT_DATA_ENTRY(xtheadbb, PRIV_VERSION_1_11_0, ext_xtheadbb),
    ISA_EXT_DATA_ENTRY(xtheadbs, PRIV_VERSION_1_11_0, ext_xtheadbs),
    ISA_EXT_DATA_ENTRY(xtheadcmo, PRIV_VERSION_1_11_0, ext_xtheadcmo),
    ISA_EXT_DATA_ENTRY(xtheadcondmov, PRIV_VERSION_1_11_0, ext_xtheadcondmov),
    ISA_EXT_DATA_ENTRY(xtheadfmemidx, PRIV_VERSION_1_11_0, ext_xtheadfmemidx),
    ISA_EXT_DATA_ENTRY(xtheadfmv, PRIV_VERSION_1_11_0, ext_xtheadfmv),
    ISA_EXT_DATA_ENTRY(xtheadmac, PRIV_VERSION_1_11_0, ext_xtheadmac),
    ISA_EXT_DATA_ENTRY(xtheadmemidx, PRIV_VERSION_1_11_0, ext_xtheadmemidx),
    ISA_EXT_DATA_ENTRY(xtheadmempair, PRIV_VERSION_1_11_0, ext_xtheadmempair),
    ISA_EXT_DATA_ENTRY(xtheadsync, PRIV_VERSION_1_11_0, ext_xtheadsync),
    ISA_EXT_DATA_ENTRY(xventanacondops, PRIV_VERSION_1_12_0, ext_XVentanaCondOps),

    DEFINE_PROP_END_OF_LIST(),
};

bool isa_ext_is_enabled(RISCVCPU *cpu, uint32_t ext_offset)
{
    bool *ext_enabled = (void *)&cpu->cfg + ext_offset;

    return *ext_enabled;
}
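/*
 * Illustrative note (editor's assumption): ext_offset in the accessors above
 * and below is a byte offset into RISCVCPUConfig, as produced by
 * CPU_CFG_OFFSET(), e.g.
 *
 *     isa_ext_update_enabled(cpu, CPU_CFG_OFFSET(ext_zba), true);
 *     g_assert(isa_ext_is_enabled(cpu, CPU_CFG_OFFSET(ext_zba)));
 */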
void isa_ext_update_enabled(RISCVCPU *cpu, uint32_t ext_offset, bool en)
{
    bool *ext_enabled = (void *)&cpu->cfg + ext_offset;

    *ext_enabled = en;
}

bool riscv_cpu_is_vendor(Object *cpu_obj)
{
    return object_dynamic_cast(cpu_obj, TYPE_RISCV_VENDOR_CPU) != NULL;
}

const char * const riscv_int_regnames[] = {
    "x0/zero", "x1/ra", "x2/sp", "x3/gp", "x4/tp", "x5/t0", "x6/t1",
    "x7/t2", "x8/s0", "x9/s1", "x10/a0", "x11/a1", "x12/a2", "x13/a3",
    "x14/a4", "x15/a5", "x16/a6", "x17/a7", "x18/s2", "x19/s3", "x20/s4",
    "x21/s5", "x22/s6", "x23/s7", "x24/s8", "x25/s9", "x26/s10", "x27/s11",
    "x28/t3", "x29/t4", "x30/t5", "x31/t6"
};

const char * const riscv_int_regnamesh[] = {
    "x0h/zeroh", "x1h/rah", "x2h/sph", "x3h/gph", "x4h/tph", "x5h/t0h",
    "x6h/t1h", "x7h/t2h", "x8h/s0h", "x9h/s1h", "x10h/a0h", "x11h/a1h",
    "x12h/a2h", "x13h/a3h", "x14h/a4h", "x15h/a5h", "x16h/a6h", "x17h/a7h",
    "x18h/s2h", "x19h/s3h", "x20h/s4h", "x21h/s5h", "x22h/s6h", "x23h/s7h",
    "x24h/s8h", "x25h/s9h", "x26h/s10h", "x27h/s11h", "x28h/t3h", "x29h/t4h",
    "x30h/t5h", "x31h/t6h"
};

const char * const riscv_fpr_regnames[] = {
    "f0/ft0", "f1/ft1", "f2/ft2", "f3/ft3", "f4/ft4", "f5/ft5",
    "f6/ft6", "f7/ft7", "f8/fs0", "f9/fs1", "f10/fa0", "f11/fa1",
    "f12/fa2", "f13/fa3", "f14/fa4", "f15/fa5", "f16/fa6", "f17/fa7",
    "f18/fs2", "f19/fs3", "f20/fs4", "f21/fs5", "f22/fs6", "f23/fs7",
    "f24/fs8", "f25/fs9", "f26/fs10", "f27/fs11", "f28/ft8", "f29/ft9",
    "f30/ft10", "f31/ft11"
};

const char * const riscv_rvv_regnames[] = {
    "v0", "v1", "v2", "v3", "v4", "v5", "v6",
    "v7", "v8", "v9", "v10", "v11", "v12", "v13",
    "v14", "v15", "v16", "v17", "v18", "v19", "v20",
    "v21", "v22", "v23", "v24", "v25", "v26", "v27",
    "v28", "v29", "v30", "v31"
};

static const char * const riscv_excp_names[] = {
    "misaligned_fetch",
    "fault_fetch",
    "illegal_instruction",
    "breakpoint",
    "misaligned_load",
    "fault_load",
    "misaligned_store",
    "fault_store",
    "user_ecall",
    "supervisor_ecall",
    "hypervisor_ecall",
    "machine_ecall",
    "exec_page_fault",
    "load_page_fault",
    "reserved",
    "store_page_fault",
    "reserved",
    "reserved",
    "reserved",
    "reserved",
    "guest_exec_page_fault",
    "guest_load_page_fault",
    "reserved",
    "guest_store_page_fault",
};

static const char * const riscv_intr_names[] = {
    "u_software",
    "s_software",
    "vs_software",
    "m_software",
    "u_timer",
    "s_timer",
    "vs_timer",
    "m_timer",
    "u_external",
    "s_external",
    "vs_external",
    "m_external",
    "reserved",
    "reserved",
    "reserved",
    "reserved"
};

const char *riscv_cpu_get_trap_name(target_ulong cause, bool async)
{
    if (async) {
        return (cause < ARRAY_SIZE(riscv_intr_names)) ?
               riscv_intr_names[cause] : "(unknown)";
    } else {
        return (cause < ARRAY_SIZE(riscv_excp_names)) ?
               riscv_excp_names[cause] : "(unknown)";
    }
}
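/*
 * Illustrative example (editor's note): riscv_cpu_get_trap_name(3, false)
 * resolves to "breakpoint" via riscv_excp_names[], while
 * riscv_cpu_get_trap_name(3, true) resolves to "m_software" via
 * riscv_intr_names[]; out-of-range causes yield "(unknown)".
 */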
void riscv_cpu_set_misa_ext(CPURISCVState *env, uint32_t ext)
{
    env->misa_ext_mask = env->misa_ext = ext;
}

int riscv_cpu_max_xlen(RISCVCPUClass *mcc)
{
    return 16 << mcc->misa_mxl_max;
}

#ifndef CONFIG_USER_ONLY
static uint8_t satp_mode_from_str(const char *satp_mode_str)
{
    if (!strncmp(satp_mode_str, "mbare", 5)) {
        return VM_1_10_MBARE;
    }

    if (!strncmp(satp_mode_str, "sv32", 4)) {
        return VM_1_10_SV32;
    }

    if (!strncmp(satp_mode_str, "sv39", 4)) {
        return VM_1_10_SV39;
    }

    if (!strncmp(satp_mode_str, "sv48", 4)) {
        return VM_1_10_SV48;
    }

    if (!strncmp(satp_mode_str, "sv57", 4)) {
        return VM_1_10_SV57;
    }

    if (!strncmp(satp_mode_str, "sv64", 4)) {
        return VM_1_10_SV64;
    }

    g_assert_not_reached();
}

uint8_t satp_mode_max_from_map(uint32_t map)
{
    /*
     * 'map = 0' will make us return (31 - 32), which C will
     * happily overflow to UINT_MAX. There's no good result to
     * return if 'map = 0' (e.g. returning 0 will be ambiguous
     * with the result for 'map = 1').
     *
     * Assert out if map = 0. Callers will have to deal with
     * it outside of this function.
     */
    g_assert(map > 0);

    /* map here has at least one bit set, so no problem with clz */
    return 31 - __builtin_clz(map);
}
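/*
 * Illustrative example (editor's note): satp_mode_max_from_map() returns the
 * index of the highest bit set, so a map of
 * (1 << VM_1_10_SV39) | (1 << VM_1_10_MBARE) yields VM_1_10_SV39.
 */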
const char *satp_mode_str(uint8_t satp_mode, bool is_32_bit)
{
    if (is_32_bit) {
        switch (satp_mode) {
        case VM_1_10_SV32:
            return "sv32";
        case VM_1_10_MBARE:
            return "none";
        }
    } else {
        switch (satp_mode) {
        case VM_1_10_SV64:
            return "sv64";
        case VM_1_10_SV57:
            return "sv57";
        case VM_1_10_SV48:
            return "sv48";
        case VM_1_10_SV39:
            return "sv39";
        case VM_1_10_MBARE:
            return "none";
        }
    }

    g_assert_not_reached();
}

static void set_satp_mode_max_supported(RISCVCPU *cpu,
                                        uint8_t satp_mode)
{
    bool rv32 = riscv_cpu_mxl(&cpu->env) == MXL_RV32;
    const bool *valid_vm = rv32 ? valid_vm_1_10_32 : valid_vm_1_10_64;

    for (int i = 0; i <= satp_mode; ++i) {
        if (valid_vm[i]) {
            cpu->cfg.satp_mode.supported |= (1 << i);
        }
    }
}

/* Set the satp mode to the max supported */
static void set_satp_mode_default_map(RISCVCPU *cpu)
{
    /*
     * Bare CPUs do not default to the max available.
     * Users must set a valid satp_mode in the command
     * line.
     */
    if (object_dynamic_cast(OBJECT(cpu), TYPE_RISCV_BARE_CPU) != NULL) {
        warn_report("No satp mode set. Defaulting to 'bare'");
        cpu->cfg.satp_mode.map = (1 << VM_1_10_MBARE);
        return;
    }

    cpu->cfg.satp_mode.map = cpu->cfg.satp_mode.supported;
}
#endif

static void riscv_any_cpu_init(Object *obj)
{
    RISCVCPU *cpu = RISCV_CPU(obj);
    CPURISCVState *env = &cpu->env;
    riscv_cpu_set_misa_ext(env, RVI | RVM | RVA | RVF | RVD | RVC | RVU);

#ifndef CONFIG_USER_ONLY
    set_satp_mode_max_supported(RISCV_CPU(obj),
        riscv_cpu_mxl(&RISCV_CPU(obj)->env) == MXL_RV32 ?
        VM_1_10_SV32 : VM_1_10_SV57);
#endif

    env->priv_ver = PRIV_VERSION_LATEST;

    /* inherited from parent obj via riscv_cpu_init() */
    cpu->cfg.ext_zifencei = true;
    cpu->cfg.ext_zicsr = true;
    cpu->cfg.mmu = true;
    cpu->cfg.pmp = true;
}

static void riscv_max_cpu_init(Object *obj)
{
    RISCVCPU *cpu = RISCV_CPU(obj);
    CPURISCVState *env = &cpu->env;

    cpu->cfg.mmu = true;
    cpu->cfg.pmp = true;

    env->priv_ver = PRIV_VERSION_LATEST;
#ifndef CONFIG_USER_ONLY
#ifdef TARGET_RISCV32
    set_satp_mode_max_supported(cpu, VM_1_10_SV32);
#else
    set_satp_mode_max_supported(cpu, VM_1_10_SV57);
#endif
#endif
}

#if defined(TARGET_RISCV64)
static void rv64_base_cpu_init(Object *obj)
{
    RISCVCPU *cpu = RISCV_CPU(obj);
    CPURISCVState *env = &cpu->env;

    cpu->cfg.mmu = true;
    cpu->cfg.pmp = true;

    /* Set latest version of privileged specification */
    env->priv_ver = PRIV_VERSION_LATEST;
#ifndef CONFIG_USER_ONLY
    set_satp_mode_max_supported(RISCV_CPU(obj), VM_1_10_SV57);
#endif
}

static void rv64_sifive_u_cpu_init(Object *obj)
{
    RISCVCPU *cpu = RISCV_CPU(obj);
    CPURISCVState *env = &cpu->env;
    riscv_cpu_set_misa_ext(env, RVI | RVM | RVA | RVF | RVD | RVC | RVS | RVU);
    env->priv_ver = PRIV_VERSION_1_10_0;
#ifndef CONFIG_USER_ONLY
    set_satp_mode_max_supported(RISCV_CPU(obj), VM_1_10_SV39);
#endif

    /* inherited from parent obj via riscv_cpu_init() */
    cpu->cfg.ext_zifencei = true;
    cpu->cfg.ext_zicsr = true;
    cpu->cfg.mmu = true;
    cpu->cfg.pmp = true;
}

static void rv64_sifive_e_cpu_init(Object *obj)
{
    CPURISCVState *env = &RISCV_CPU(obj)->env;
    RISCVCPU *cpu = RISCV_CPU(obj);

    riscv_cpu_set_misa_ext(env, RVI | RVM | RVA | RVC | RVU);
    env->priv_ver = PRIV_VERSION_1_10_0;
#ifndef CONFIG_USER_ONLY
    set_satp_mode_max_supported(cpu, VM_1_10_MBARE);
#endif

    /* inherited from parent obj via riscv_cpu_init() */
    cpu->cfg.ext_zifencei = true;
    cpu->cfg.ext_zicsr = true;
    cpu->cfg.pmp = true;
}

static void rv64_thead_c906_cpu_init(Object *obj)
{
    CPURISCVState *env = &RISCV_CPU(obj)->env;
    RISCVCPU *cpu = RISCV_CPU(obj);

    riscv_cpu_set_misa_ext(env, RVG | RVC | RVS | RVU);
    env->priv_ver = PRIV_VERSION_1_11_0;

    cpu->cfg.ext_zfa = true;
    cpu->cfg.ext_zfh = true;
    cpu->cfg.mmu = true;
    cpu->cfg.ext_xtheadba = true;
    cpu->cfg.ext_xtheadbb = true;
    cpu->cfg.ext_xtheadbs = true;
    cpu->cfg.ext_xtheadcmo = true;
    cpu->cfg.ext_xtheadcondmov = true;
    cpu->cfg.ext_xtheadfmemidx = true;
    cpu->cfg.ext_xtheadmac = true;
    cpu->cfg.ext_xtheadmemidx = true;
    cpu->cfg.ext_xtheadmempair = true;
    cpu->cfg.ext_xtheadsync = true;

    cpu->cfg.mvendorid = THEAD_VENDOR_ID;
#ifndef CONFIG_USER_ONLY
    set_satp_mode_max_supported(cpu, VM_1_10_SV39);
#endif

    /* inherited from parent obj via riscv_cpu_init() */
    cpu->cfg.pmp = true;
}

static void rv64_veyron_v1_cpu_init(Object *obj)
{
    CPURISCVState *env = &RISCV_CPU(obj)->env;
    RISCVCPU *cpu = RISCV_CPU(obj);

    riscv_cpu_set_misa_ext(env, RVG | RVC | RVS | RVU | RVH);
    env->priv_ver = PRIV_VERSION_1_12_0;

    /* Enable ISA extensions */
    cpu->cfg.mmu = true;
    cpu->cfg.ext_zifencei = true;
    cpu->cfg.ext_zicsr = true;
    cpu->cfg.pmp = true;
    cpu->cfg.ext_zicbom = true;
    cpu->cfg.cbom_blocksize = 64;
    cpu->cfg.cboz_blocksize = 64;
    cpu->cfg.ext_zicboz = true;
    cpu->cfg.ext_smaia = true;
    cpu->cfg.ext_ssaia = true;
    cpu->cfg.ext_sscofpmf = true;
    cpu->cfg.ext_sstc = true;
    cpu->cfg.ext_svinval = true;
    cpu->cfg.ext_svnapot = true;
    cpu->cfg.ext_svpbmt = true;
    cpu->cfg.ext_smstateen = true;
    cpu->cfg.ext_zba = true;
    cpu->cfg.ext_zbb = true;
    cpu->cfg.ext_zbc = true;
    cpu->cfg.ext_zbs = true;
    cpu->cfg.ext_XVentanaCondOps = true;

    cpu->cfg.mvendorid = VEYRON_V1_MVENDORID;
    cpu->cfg.marchid = VEYRON_V1_MARCHID;
    cpu->cfg.mimpid = VEYRON_V1_MIMPID;

#ifndef CONFIG_USER_ONLY
    set_satp_mode_max_supported(cpu, VM_1_10_SV48);
#endif
}

static void rv128_base_cpu_init(Object *obj)
{
    RISCVCPU *cpu = RISCV_CPU(obj);
    CPURISCVState *env = &cpu->env;

    if (qemu_tcg_mttcg_enabled()) {
        /* Missing 128-bit aligned atomics */
        error_report("128-bit RISC-V currently does not work with Multi "
                     "Threaded TCG. Please use: -accel tcg,thread=single");
        exit(EXIT_FAILURE);
    }

    cpu->cfg.mmu = true;
    cpu->cfg.pmp = true;

    /* Set latest version of privileged specification */
    env->priv_ver = PRIV_VERSION_LATEST;
#ifndef CONFIG_USER_ONLY
    set_satp_mode_max_supported(RISCV_CPU(obj), VM_1_10_SV57);
#endif
}

static void rv64i_bare_cpu_init(Object *obj)
{
    CPURISCVState *env = &RISCV_CPU(obj)->env;
    riscv_cpu_set_misa_ext(env, RVI);
}

static void rv64e_bare_cpu_init(Object *obj)
{
    CPURISCVState *env = &RISCV_CPU(obj)->env;
    riscv_cpu_set_misa_ext(env, RVE);
}
#else
static void rv32_base_cpu_init(Object *obj)
{
    RISCVCPU *cpu = RISCV_CPU(obj);
    CPURISCVState *env = &cpu->env;

    cpu->cfg.mmu = true;
    cpu->cfg.pmp = true;

    /* Set latest version of privileged specification */
    env->priv_ver = PRIV_VERSION_LATEST;
#ifndef CONFIG_USER_ONLY
    set_satp_mode_max_supported(RISCV_CPU(obj), VM_1_10_SV32);
#endif
}

static void rv32_sifive_u_cpu_init(Object *obj)
{
    RISCVCPU *cpu = RISCV_CPU(obj);
    CPURISCVState *env = &cpu->env;
    riscv_cpu_set_misa_ext(env, RVI | RVM | RVA | RVF | RVD | RVC | RVS | RVU);
    env->priv_ver = PRIV_VERSION_1_10_0;
#ifndef CONFIG_USER_ONLY
    set_satp_mode_max_supported(RISCV_CPU(obj), VM_1_10_SV32);
#endif

    /* inherited from parent obj via riscv_cpu_init() */
    cpu->cfg.ext_zifencei = true;
    cpu->cfg.ext_zicsr = true;
    cpu->cfg.mmu = true;
    cpu->cfg.pmp = true;
}

static void rv32_sifive_e_cpu_init(Object *obj)
{
    CPURISCVState *env = &RISCV_CPU(obj)->env;
    RISCVCPU *cpu = RISCV_CPU(obj);

    riscv_cpu_set_misa_ext(env, RVI | RVM | RVA | RVC | RVU);
    env->priv_ver = PRIV_VERSION_1_10_0;
#ifndef CONFIG_USER_ONLY
    set_satp_mode_max_supported(cpu, VM_1_10_MBARE);
#endif

    /* inherited from parent obj via riscv_cpu_init() */
    cpu->cfg.ext_zifencei = true;
    cpu->cfg.ext_zicsr = true;
    cpu->cfg.pmp = true;
}

static void rv32_ibex_cpu_init(Object *obj)
{
    CPURISCVState *env = &RISCV_CPU(obj)->env;
    RISCVCPU *cpu = RISCV_CPU(obj);

    riscv_cpu_set_misa_ext(env, RVI | RVM | RVC | RVU);
    env->priv_ver = PRIV_VERSION_1_12_0;
#ifndef CONFIG_USER_ONLY
    set_satp_mode_max_supported(cpu, VM_1_10_MBARE);
#endif
    /* inherited from parent obj via riscv_cpu_init() */
    cpu->cfg.ext_zifencei = true;
    cpu->cfg.ext_zicsr = true;
    cpu->cfg.pmp = true;
    cpu->cfg.ext_smepmp = true;
}

static void rv32_imafcu_nommu_cpu_init(Object *obj)
{
    CPURISCVState *env = &RISCV_CPU(obj)->env;
    RISCVCPU *cpu = RISCV_CPU(obj);

    riscv_cpu_set_misa_ext(env, RVI | RVM | RVA | RVF | RVC | RVU);
    env->priv_ver = PRIV_VERSION_1_10_0;
#ifndef CONFIG_USER_ONLY
    set_satp_mode_max_supported(cpu, VM_1_10_MBARE);
#endif

    /* inherited from parent obj via riscv_cpu_init() */
    cpu->cfg.ext_zifencei = true;
    cpu->cfg.ext_zicsr = true;
    cpu->cfg.pmp = true;
}

static void rv32i_bare_cpu_init(Object *obj)
{
    CPURISCVState *env = &RISCV_CPU(obj)->env;
    riscv_cpu_set_misa_ext(env, RVI);
}

static void rv32e_bare_cpu_init(Object *obj)
{
    CPURISCVState *env = &RISCV_CPU(obj)->env;
    riscv_cpu_set_misa_ext(env, RVE);
}
#endif

static ObjectClass *riscv_cpu_class_by_name(const char *cpu_model)
{
    ObjectClass *oc;
    char *typename;
    char **cpuname;

    cpuname = g_strsplit(cpu_model, ",", 1);
    typename = g_strdup_printf(RISCV_CPU_TYPE_NAME("%s"), cpuname[0]);
    oc = object_class_by_name(typename);
    g_strfreev(cpuname);
    g_free(typename);

    return oc;
}

char *riscv_cpu_get_name(RISCVCPU *cpu)
{
    RISCVCPUClass *rcc = RISCV_CPU_GET_CLASS(cpu);
    const char *typename = object_class_get_name(OBJECT_CLASS(rcc));

    g_assert(g_str_has_suffix(typename, RISCV_CPU_TYPE_SUFFIX));

    return cpu_model_from_type(typename);
}

static void riscv_cpu_dump_state(CPUState *cs, FILE *f, int flags)
{
    RISCVCPU *cpu = RISCV_CPU(cs);
    CPURISCVState *env = &cpu->env;
    int i, j;
    uint8_t *p;

#if !defined(CONFIG_USER_ONLY)
    if (riscv_has_ext(env, RVH)) {
        qemu_fprintf(f, " %s %d\n", "V = ", env->virt_enabled);
    }
#endif
    qemu_fprintf(f, " %s " TARGET_FMT_lx "\n", "pc ", env->pc);
#ifndef CONFIG_USER_ONLY
    {
        static const int dump_csrs[] = {
            CSR_MHARTID,
            CSR_MSTATUS,
            CSR_MSTATUSH,
            /*
             * CSR_SSTATUS is intentionally omitted here as its value
             * can be figured out by looking at CSR_MSTATUS
             */
            CSR_HSTATUS,
            CSR_VSSTATUS,
            CSR_MIP,
            CSR_MIE,
            CSR_MIDELEG,
            CSR_HIDELEG,
            CSR_MEDELEG,
            CSR_HEDELEG,
            CSR_MTVEC,
            CSR_STVEC,
            CSR_VSTVEC,
            CSR_MEPC,
            CSR_SEPC,
            CSR_VSEPC,
            CSR_MCAUSE,
            CSR_SCAUSE,
            CSR_VSCAUSE,
            CSR_MTVAL,
            CSR_STVAL,
            CSR_HTVAL,
            CSR_MTVAL2,
            CSR_MSCRATCH,
            CSR_SSCRATCH,
            CSR_SATP,
            CSR_MMTE,
            CSR_UPMBASE,
            CSR_UPMMASK,
            CSR_SPMBASE,
            CSR_SPMMASK,
            CSR_MPMBASE,
            CSR_MPMMASK,
        };

        for (i = 0; i < ARRAY_SIZE(dump_csrs); ++i) {
            int csrno = dump_csrs[i];
            target_ulong val = 0;
            RISCVException res = riscv_csrrw_debug(env, csrno, &val, 0, 0);

            /*
             * Rely on the smode, hmode, etc, predicates within csr.c
             * to do the filtering of the registers that are present.
             */
            if (res == RISCV_EXCP_NONE) {
                qemu_fprintf(f, " %-8s " TARGET_FMT_lx "\n",
                             csr_ops[csrno].name, val);
            }
        }
    }
#endif

    for (i = 0; i < 32; i++) {
        qemu_fprintf(f, " %-8s " TARGET_FMT_lx,
                     riscv_int_regnames[i], env->gpr[i]);
        if ((i & 3) == 3) {
            qemu_fprintf(f, "\n");
        }
    }
    if (flags & CPU_DUMP_FPU) {
        for (i = 0; i < 32; i++) {
            qemu_fprintf(f, " %-8s %016" PRIx64,
                         riscv_fpr_regnames[i], env->fpr[i]);
            if ((i & 3) == 3) {
                qemu_fprintf(f, "\n");
            }
        }
    }
    if (riscv_has_ext(env, RVV) && (flags & CPU_DUMP_VPU)) {
        static const int dump_rvv_csrs[] = {
            CSR_VSTART,
            CSR_VXSAT,
            CSR_VXRM,
            CSR_VCSR,
            CSR_VL,
            CSR_VTYPE,
            CSR_VLENB,
        };
        for (i = 0; i < ARRAY_SIZE(dump_rvv_csrs); ++i) {
            int csrno = dump_rvv_csrs[i];
            target_ulong val = 0;
            RISCVException res = riscv_csrrw_debug(env, csrno, &val, 0, 0);

            /*
             * Rely on the smode, hmode, etc, predicates within csr.c
             * to do the filtering of the registers that are present.
             */
            if (res == RISCV_EXCP_NONE) {
                qemu_fprintf(f, " %-8s " TARGET_FMT_lx "\n",
                             csr_ops[csrno].name, val);
            }
        }
        uint16_t vlenb = cpu->cfg.vlenb;

        for (i = 0; i < 32; i++) {
            qemu_fprintf(f, " %-8s ", riscv_rvv_regnames[i]);
            p = (uint8_t *)env->vreg;
            for (j = vlenb - 1; j >= 0; j--) {
                qemu_fprintf(f, "%02x", *(p + i * vlenb + BYTE(j)));
            }
            qemu_fprintf(f, "\n");
        }
    }
}

static void riscv_cpu_set_pc(CPUState *cs, vaddr value)
{
    RISCVCPU *cpu = RISCV_CPU(cs);
    CPURISCVState *env = &cpu->env;

    if (env->xl == MXL_RV32) {
        env->pc = (int32_t)value;
    } else {
        env->pc = value;
    }
}

static vaddr riscv_cpu_get_pc(CPUState *cs)
{
    RISCVCPU *cpu = RISCV_CPU(cs);
    CPURISCVState *env = &cpu->env;

    /* Match cpu_get_tb_cpu_state. */
    if (env->xl == MXL_RV32) {
        return env->pc & UINT32_MAX;
    }
    return env->pc;
}

static bool riscv_cpu_has_work(CPUState *cs)
{
#ifndef CONFIG_USER_ONLY
    RISCVCPU *cpu = RISCV_CPU(cs);
    CPURISCVState *env = &cpu->env;
    /*
     * Definition of the WFI instruction requires it to ignore the privilege
     * mode and delegation registers, but respect individual enables
     */
    return riscv_cpu_all_pending(env) != 0 ||
        riscv_cpu_sirq_pending(env) != RISCV_EXCP_NONE ||
        riscv_cpu_vsirq_pending(env) != RISCV_EXCP_NONE;
#else
    return true;
#endif
}

static int riscv_cpu_mmu_index(CPUState *cs, bool ifetch)
{
    return riscv_env_mmu_index(cpu_env(cs), ifetch);
}

static void riscv_cpu_reset_hold(Object *obj)
{
#ifndef CONFIG_USER_ONLY
    uint8_t iprio;
    int i, irq, rdzero;
#endif
    CPUState *cs = CPU(obj);
    RISCVCPU *cpu = RISCV_CPU(cs);
    RISCVCPUClass *mcc = RISCV_CPU_GET_CLASS(obj);
    CPURISCVState *env = &cpu->env;

    if (mcc->parent_phases.hold) {
        mcc->parent_phases.hold(obj);
    }
#ifndef CONFIG_USER_ONLY
    env->misa_mxl = mcc->misa_mxl_max;
    env->priv = PRV_M;
    env->mstatus &= ~(MSTATUS_MIE | MSTATUS_MPRV);
    if (env->misa_mxl > MXL_RV32) {
        /*
         * The reset status of SXL/UXL is undefined, but mstatus is WARL
         * and we must ensure that the value after init is valid for read.
         */
        env->mstatus = set_field(env->mstatus, MSTATUS64_SXL, env->misa_mxl);
        env->mstatus = set_field(env->mstatus, MSTATUS64_UXL, env->misa_mxl);
        if (riscv_has_ext(env, RVH)) {
            env->vsstatus = set_field(env->vsstatus,
                                      MSTATUS64_SXL, env->misa_mxl);
            env->vsstatus = set_field(env->vsstatus,
                                      MSTATUS64_UXL, env->misa_mxl);
            env->mstatus_hs = set_field(env->mstatus_hs,
                                        MSTATUS64_SXL, env->misa_mxl);
            env->mstatus_hs = set_field(env->mstatus_hs,
                                        MSTATUS64_UXL, env->misa_mxl);
        }
    }
    env->mcause = 0;
    env->miclaim = MIP_SGEIP;
    env->pc = env->resetvec;
    env->bins = 0;
    env->two_stage_lookup = false;

    env->menvcfg = (cpu->cfg.ext_svpbmt ? MENVCFG_PBMTE : 0) |
                   (!cpu->cfg.ext_svade && cpu->cfg.ext_svadu ?
                    MENVCFG_ADUE : 0);
    env->henvcfg = 0;

    /* Initialize default priorities of local interrupts. */
    for (i = 0; i < ARRAY_SIZE(env->miprio); i++) {
        iprio = riscv_cpu_default_priority(i);
        env->miprio[i] = (i == IRQ_M_EXT) ? 0 : iprio;
        env->siprio[i] = (i == IRQ_S_EXT) ? 0 : iprio;
        env->hviprio[i] = 0;
    }
    i = 0;
    while (!riscv_cpu_hviprio_index2irq(i, &irq, &rdzero)) {
        if (!rdzero) {
            env->hviprio[irq] = env->miprio[irq];
        }
        i++;
    }
    /* mmte is supposed to have pm.current hardwired to 1 */
    env->mmte |= (EXT_STATUS_INITIAL | MMTE_M_PM_CURRENT);

    /*
     * Bits 10, 6, 2 and 12 of mideleg are read only 1 when the Hypervisor
     * extension is enabled.
     */
    if (riscv_has_ext(env, RVH)) {
        env->mideleg |= HS_MODE_INTERRUPTS;
    }

    /*
     * Clear mseccfg and unlock all the PMP entries upon reset.
     * This is allowed as per the priv and smepmp specifications
     * and is needed to clear stale entries across reboots.
     */
    if (riscv_cpu_cfg(env)->ext_smepmp) {
        env->mseccfg = 0;
    }

    pmp_unlock_entries(env);
#endif
    env->xl = riscv_cpu_mxl(env);
    riscv_cpu_update_mask(env);
    cs->exception_index = RISCV_EXCP_NONE;
    env->load_res = -1;
    set_default_nan_mode(1, &env->fp_status);

#ifndef CONFIG_USER_ONLY
    if (cpu->cfg.debug) {
        riscv_trigger_reset_hold(env);
    }

    if (kvm_enabled()) {
        kvm_riscv_reset_vcpu(cpu);
    }
#endif
}

static void riscv_cpu_disas_set_info(CPUState *s, disassemble_info *info)
{
    RISCVCPU *cpu = RISCV_CPU(s);
    CPURISCVState *env = &cpu->env;
    info->target_info = &cpu->cfg;

    switch (env->xl) {
    case MXL_RV32:
        info->print_insn = print_insn_riscv32;
        break;
    case MXL_RV64:
        info->print_insn = print_insn_riscv64;
        break;
    case MXL_RV128:
        info->print_insn = print_insn_riscv128;
        break;
    default:
        g_assert_not_reached();
    }
}

#ifndef CONFIG_USER_ONLY
static void riscv_cpu_satp_mode_finalize(RISCVCPU *cpu, Error **errp)
{
    bool rv32 = riscv_cpu_is_32bit(cpu);
    uint8_t satp_mode_map_max, satp_mode_supported_max;

    /* The CPU wants the OS to decide which satp mode to use */
    if (cpu->cfg.satp_mode.supported == 0) {
        return;
    }

    satp_mode_supported_max =
        satp_mode_max_from_map(cpu->cfg.satp_mode.supported);

    if (cpu->cfg.satp_mode.map == 0) {
        if (cpu->cfg.satp_mode.init == 0) {
            /* If unset by the user, we fall back to the default satp mode. */
            set_satp_mode_default_map(cpu);
        } else {
            /*
             * Find the lowest level that was disabled and then enable the
             * first valid level below it that can be found in
             * valid_vm_1_10_32/64.
             */
            for (int i = 1; i < 16; ++i) {
                if ((cpu->cfg.satp_mode.init & (1 << i)) &&
                    (cpu->cfg.satp_mode.supported & (1 << i))) {
                    for (int j = i - 1; j >= 0; --j) {
                        if (cpu->cfg.satp_mode.supported & (1 << j)) {
                            cpu->cfg.satp_mode.map |= (1 << j);
                            break;
                        }
                    }
                    break;
                }
            }
        }
    }

    satp_mode_map_max = satp_mode_max_from_map(cpu->cfg.satp_mode.map);

    /* Make sure the user asked for a supported configuration (HW and qemu) */
    if (satp_mode_map_max > satp_mode_supported_max) {
        error_setg(errp, "satp_mode %s is higher than hw max capability %s",
                   satp_mode_str(satp_mode_map_max, rv32),
                   satp_mode_str(satp_mode_supported_max, rv32));
        return;
    }

    /*
     * Make sure the user did not ask for an invalid configuration as per
     * the specification.
     */
    if (!rv32) {
        for (int i = satp_mode_map_max - 1; i >= 0; --i) {
            if (!(cpu->cfg.satp_mode.map & (1 << i)) &&
                (cpu->cfg.satp_mode.init & (1 << i)) &&
                (cpu->cfg.satp_mode.supported & (1 << i))) {
                error_setg(errp, "cannot disable %s satp mode if %s "
                           "is enabled", satp_mode_str(i, false),
                           satp_mode_str(satp_mode_map_max, false));
                return;
            }
        }
    }

    /* Finally expand the map so that all valid modes are set */
    for (int i = satp_mode_map_max - 1; i >= 0; --i) {
        if (cpu->cfg.satp_mode.supported & (1 << i)) {
            cpu->cfg.satp_mode.map |= (1 << i);
        }
    }
}
#endif

void riscv_cpu_finalize_features(RISCVCPU *cpu, Error **errp)
{
    Error *local_err = NULL;

#ifndef CONFIG_USER_ONLY
    riscv_cpu_satp_mode_finalize(cpu, &local_err);
    if (local_err != NULL) {
        error_propagate(errp, local_err);
        return;
    }
#endif

    if (tcg_enabled()) {
        riscv_tcg_cpu_finalize_features(cpu, &local_err);
        if (local_err != NULL) {
            error_propagate(errp, local_err);
            return;
        }
    } else if (kvm_enabled()) {
        riscv_kvm_cpu_finalize_features(cpu, &local_err);
        if (local_err != NULL) {
            error_propagate(errp, local_err);
            return;
        }
    }
}

static void riscv_cpu_realize(DeviceState *dev, Error **errp)
{
    CPUState *cs = CPU(dev);
    RISCVCPU *cpu = RISCV_CPU(dev);
    RISCVCPUClass *mcc = RISCV_CPU_GET_CLASS(dev);
    Error *local_err = NULL;

    if (object_dynamic_cast(OBJECT(dev), TYPE_RISCV_CPU_ANY) != NULL) {
        warn_report("The 'any' CPU is deprecated and will be "
                    "removed in the future.");
    }

    cpu_exec_realizefn(cs, &local_err);
    if (local_err != NULL) {
        error_propagate(errp, local_err);
        return;
    }

    riscv_cpu_finalize_features(cpu, &local_err);
    if (local_err != NULL) {
        error_propagate(errp, local_err);
        return;
    }

    riscv_cpu_register_gdb_regs_for_features(cs);

#ifndef CONFIG_USER_ONLY
    if (cpu->cfg.debug) {
        riscv_trigger_realize(&cpu->env);
    }
#endif

    qemu_init_vcpu(cs);
    cpu_reset(cs);

    mcc->parent_realize(dev, errp);
}

bool riscv_cpu_accelerator_compatible(RISCVCPU *cpu)
{
    if (tcg_enabled()) {
        return riscv_cpu_tcg_compatible(cpu);
    }

    return true;
}
#ifndef CONFIG_USER_ONLY
static void cpu_riscv_get_satp(Object *obj, Visitor *v, const char *name,
                               void *opaque, Error **errp)
{
    RISCVSATPMap *satp_map = opaque;
    uint8_t satp = satp_mode_from_str(name);
    bool value;

    value = satp_map->map & (1 << satp);

    visit_type_bool(v, name, &value, errp);
}

static void cpu_riscv_set_satp(Object *obj, Visitor *v, const char *name,
                               void *opaque, Error **errp)
{
    RISCVSATPMap *satp_map = opaque;
    uint8_t satp = satp_mode_from_str(name);
    bool value;

    if (!visit_type_bool(v, name, &value, errp)) {
        return;
    }

    satp_map->map = deposit32(satp_map->map, satp, 1, value);
    satp_map->init |= 1 << satp;
}

void riscv_add_satp_mode_properties(Object *obj)
{
    RISCVCPU *cpu = RISCV_CPU(obj);

    if (cpu->env.misa_mxl == MXL_RV32) {
        object_property_add(obj, "sv32", "bool", cpu_riscv_get_satp,
                            cpu_riscv_set_satp, NULL, &cpu->cfg.satp_mode);
    } else {
        object_property_add(obj, "sv39", "bool", cpu_riscv_get_satp,
                            cpu_riscv_set_satp, NULL, &cpu->cfg.satp_mode);
        object_property_add(obj, "sv48", "bool", cpu_riscv_get_satp,
                            cpu_riscv_set_satp, NULL, &cpu->cfg.satp_mode);
        object_property_add(obj, "sv57", "bool", cpu_riscv_get_satp,
                            cpu_riscv_set_satp, NULL, &cpu->cfg.satp_mode);
        object_property_add(obj, "sv64", "bool", cpu_riscv_get_satp,
                            cpu_riscv_set_satp, NULL, &cpu->cfg.satp_mode);
    }
}
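/*
 * Illustrative usage (editor's assumption): the boolean properties added
 * above can be set on the command line, e.g. "-cpu rv64,sv39=on,sv48=off",
 * and are folded into cfg.satp_mode.map/init by cpu_riscv_set_satp() before
 * riscv_cpu_satp_mode_finalize() validates and expands the map.
 */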
static void riscv_cpu_set_irq(void *opaque, int irq, int level)
{
    RISCVCPU *cpu = RISCV_CPU(opaque);
    CPURISCVState *env = &cpu->env;

    if (irq < IRQ_LOCAL_MAX) {
        switch (irq) {
        case IRQ_U_SOFT:
        case IRQ_S_SOFT:
        case IRQ_VS_SOFT:
        case IRQ_M_SOFT:
        case IRQ_U_TIMER:
        case IRQ_S_TIMER:
        case IRQ_VS_TIMER:
        case IRQ_M_TIMER:
        case IRQ_U_EXT:
        case IRQ_VS_EXT:
        case IRQ_M_EXT:
            if (kvm_enabled()) {
                kvm_riscv_set_irq(cpu, irq, level);
            } else {
                riscv_cpu_update_mip(env, 1 << irq, BOOL_TO_MASK(level));
            }
            break;
        case IRQ_S_EXT:
            if (kvm_enabled()) {
                kvm_riscv_set_irq(cpu, irq, level);
            } else {
                env->external_seip = level;
                riscv_cpu_update_mip(env, 1 << irq,
                                     BOOL_TO_MASK(level | env->software_seip));
            }
            break;
        default:
            g_assert_not_reached();
        }
    } else if (irq < (IRQ_LOCAL_MAX + IRQ_LOCAL_GUEST_MAX)) {
        /* Require H-extension for handling guest local interrupts */
        if (!riscv_has_ext(env, RVH)) {
            g_assert_not_reached();
        }

        /* Compute bit position in HGEIP CSR */
        irq = irq - IRQ_LOCAL_MAX + 1;
        if (env->geilen < irq) {
            g_assert_not_reached();
        }

        /* Update HGEIP CSR */
        env->hgeip &= ~((target_ulong)1 << irq);
        if (level) {
            env->hgeip |= (target_ulong)1 << irq;
        }

        /* Update mip.SGEIP bit */
        riscv_cpu_update_mip(env, MIP_SGEIP,
                             BOOL_TO_MASK(!!(env->hgeie & env->hgeip)));
    } else {
        g_assert_not_reached();
    }
}
#endif /* CONFIG_USER_ONLY */

static bool riscv_cpu_is_dynamic(Object *cpu_obj)
{
    return object_dynamic_cast(cpu_obj, TYPE_RISCV_DYNAMIC_CPU) != NULL;
}

static void riscv_cpu_post_init(Object *obj)
{
    accel_cpu_instance_init(CPU(obj));
}

static void riscv_cpu_init(Object *obj)
{
    RISCVCPUClass *mcc = RISCV_CPU_GET_CLASS(obj);
    RISCVCPU *cpu = RISCV_CPU(obj);
    CPURISCVState *env = &cpu->env;

    env->misa_mxl = mcc->misa_mxl_max;

#ifndef CONFIG_USER_ONLY
    qdev_init_gpio_in(DEVICE(obj), riscv_cpu_set_irq,
                      IRQ_LOCAL_MAX + IRQ_LOCAL_GUEST_MAX);
#endif /* CONFIG_USER_ONLY */

    general_user_opts = g_hash_table_new(g_str_hash, g_str_equal);

    /*
     * The timer and performance counters extensions were supported
     * in QEMU before they were added as discrete extensions in the
     * ISA. To keep compatibility we'll always default them to 'true'
     * for all CPUs. Each accelerator will decide what to do when
     * users disable them.
     */
    RISCV_CPU(obj)->cfg.ext_zicntr = true;
    RISCV_CPU(obj)->cfg.ext_zihpm = true;

    /* Default values for non-bool cpu properties */
    cpu->cfg.pmu_mask = MAKE_64BIT_MASK(3, 16);
    cpu->cfg.vlenb = 128 >> 3;
    cpu->cfg.elen = 64;
    cpu->cfg.cbom_blocksize = 64;
    cpu->cfg.cbop_blocksize = 64;
    cpu->cfg.cboz_blocksize = 64;
    cpu->env.vext_ver = VEXT_VERSION_1_00_0;
}

static void riscv_bare_cpu_init(Object *obj)
{
    RISCVCPU *cpu = RISCV_CPU(obj);

    /*
     * Bare CPUs do not inherit the timer and performance
     * counters from the parent class (see riscv_cpu_init()
     * for info on why the parent enables them).
     *
     * Users have to explicitly enable these counters for
     * bare CPUs.
     */
    cpu->cfg.ext_zicntr = false;
    cpu->cfg.ext_zihpm = false;

    /* Set to QEMU's first supported priv version */
    cpu->env.priv_ver = PRIV_VERSION_1_10_0;

    /*
     * Support all available satp_mode settings. The default
     * value will be set to MBARE if the user doesn't set
     * satp_mode manually (see set_satp_mode_default_map()).
     */
#ifndef CONFIG_USER_ONLY
    set_satp_mode_max_supported(cpu, VM_1_10_SV64);
#endif
}

typedef struct misa_ext_info {
    const char *name;
    const char *description;
} MISAExtInfo;

#define MISA_INFO_IDX(_bit) \
    __builtin_ctz(_bit)

#define MISA_EXT_INFO(_bit, _propname, _descr) \
    [MISA_INFO_IDX(_bit)] = {.name = _propname, .description = _descr}

static const MISAExtInfo misa_ext_info_arr[] = {
    MISA_EXT_INFO(RVA, "a", "Atomic instructions"),
    MISA_EXT_INFO(RVC, "c", "Compressed instructions"),
    MISA_EXT_INFO(RVD, "d", "Double-precision floating point"),
    MISA_EXT_INFO(RVF, "f", "Single-precision floating point"),
    MISA_EXT_INFO(RVI, "i", "Base integer instruction set"),
    MISA_EXT_INFO(RVE, "e", "Base integer instruction set (embedded)"),
    MISA_EXT_INFO(RVM, "m", "Integer multiplication and division"),
    MISA_EXT_INFO(RVS, "s", "Supervisor-level instructions"),
    MISA_EXT_INFO(RVU, "u", "User-level instructions"),
    MISA_EXT_INFO(RVH, "h", "Hypervisor"),
    MISA_EXT_INFO(RVJ, "x-j", "Dynamic translated languages"),
    MISA_EXT_INFO(RVV, "v", "Vector operations"),
    MISA_EXT_INFO(RVG, "g", "General purpose (IMAFD_Zicsr_Zifencei)"),
    MISA_EXT_INFO(RVB, "x-b", "Bit manipulation (Zba_Zbb_Zbs)")
};

static void riscv_cpu_validate_misa_mxl(RISCVCPUClass *mcc)
{
    CPUClass *cc = CPU_CLASS(mcc);

    /* Validate that MISA_MXL is set properly. */
    switch (mcc->misa_mxl_max) {
#ifdef TARGET_RISCV64
    case MXL_RV64:
    case MXL_RV128:
        cc->gdb_core_xml_file = "riscv-64bit-cpu.xml";
        break;
#endif
    case MXL_RV32:
        cc->gdb_core_xml_file = "riscv-32bit-cpu.xml";
        break;
    default:
        g_assert_not_reached();
    }
}

static int riscv_validate_misa_info_idx(uint32_t bit)
{
    int idx;

    /*
     * Our lowest valid input (RVA) is 1 and
     * __builtin_ctz() is UB with zero.
     */
    g_assert(bit != 0);
    idx = MISA_INFO_IDX(bit);

    g_assert(idx < ARRAY_SIZE(misa_ext_info_arr));
    return idx;
}

const char *riscv_get_misa_ext_name(uint32_t bit)
{
    int idx = riscv_validate_misa_info_idx(bit);
    const char *val = misa_ext_info_arr[idx].name;

    g_assert(val != NULL);
    return val;
}

const char *riscv_get_misa_ext_description(uint32_t bit)
{
    int idx = riscv_validate_misa_info_idx(bit);
    const char *val = misa_ext_info_arr[idx].description;

    g_assert(val != NULL);
    return val;
}

#define MULTI_EXT_CFG_BOOL(_name, _prop, _defval) \
    {.name = _name, .offset = CPU_CFG_OFFSET(_prop), \
     .enabled = _defval}

const RISCVCPUMultiExtConfig riscv_cpu_extensions[] = {
    /* Defaults for standard extensions */
    MULTI_EXT_CFG_BOOL("sscofpmf", ext_sscofpmf, false),
    MULTI_EXT_CFG_BOOL("zifencei", ext_zifencei, true),
    MULTI_EXT_CFG_BOOL("zicsr", ext_zicsr, true),
    MULTI_EXT_CFG_BOOL("zihintntl", ext_zihintntl, true),
    MULTI_EXT_CFG_BOOL("zihintpause", ext_zihintpause, true),
    MULTI_EXT_CFG_BOOL("zacas", ext_zacas, false),
    MULTI_EXT_CFG_BOOL("zaamo", ext_zaamo, false),
    MULTI_EXT_CFG_BOOL("zalrsc", ext_zalrsc, false),
    MULTI_EXT_CFG_BOOL("zawrs", ext_zawrs, true),
    MULTI_EXT_CFG_BOOL("zfa", ext_zfa, true),
    MULTI_EXT_CFG_BOOL("zfbfmin", ext_zfbfmin, false),
    MULTI_EXT_CFG_BOOL("zfh", ext_zfh, false),
    MULTI_EXT_CFG_BOOL("zfhmin", ext_zfhmin, false),
    MULTI_EXT_CFG_BOOL("zve32f", ext_zve32f, false),
    MULTI_EXT_CFG_BOOL("zve64f", ext_zve64f, false),
    MULTI_EXT_CFG_BOOL("zve64d", ext_zve64d, false),
    MULTI_EXT_CFG_BOOL("zvfbfmin", ext_zvfbfmin, false),
    MULTI_EXT_CFG_BOOL("zvfbfwma", ext_zvfbfwma, false),
    MULTI_EXT_CFG_BOOL("zvfh", ext_zvfh, false),
    MULTI_EXT_CFG_BOOL("zvfhmin", ext_zvfhmin, false),
    MULTI_EXT_CFG_BOOL("sstc", ext_sstc, true),

    MULTI_EXT_CFG_BOOL("smaia", ext_smaia, false),
    MULTI_EXT_CFG_BOOL("smepmp", ext_smepmp, false),
    MULTI_EXT_CFG_BOOL("smstateen", ext_smstateen, false),
    MULTI_EXT_CFG_BOOL("ssaia", ext_ssaia, false),
    MULTI_EXT_CFG_BOOL("svade", ext_svade, false),
    MULTI_EXT_CFG_BOOL("svadu", ext_svadu, true),
    MULTI_EXT_CFG_BOOL("svinval", ext_svinval, false),
    MULTI_EXT_CFG_BOOL("svnapot", ext_svnapot, false),
    MULTI_EXT_CFG_BOOL("svpbmt", ext_svpbmt, false),

    MULTI_EXT_CFG_BOOL("zicntr", ext_zicntr, true),
    MULTI_EXT_CFG_BOOL("zihpm", ext_zihpm, true),

    MULTI_EXT_CFG_BOOL("zba", ext_zba, true),
    MULTI_EXT_CFG_BOOL("zbb", ext_zbb, true),
    MULTI_EXT_CFG_BOOL("zbc", ext_zbc, true),
    MULTI_EXT_CFG_BOOL("zbkb", ext_zbkb, false),
    MULTI_EXT_CFG_BOOL("zbkc", ext_zbkc, false),
    MULTI_EXT_CFG_BOOL("zbkx", ext_zbkx, false),
    MULTI_EXT_CFG_BOOL("zbs", ext_zbs, true),
    MULTI_EXT_CFG_BOOL("zk", ext_zk, false),
    MULTI_EXT_CFG_BOOL("zkn", ext_zkn, false),
    MULTI_EXT_CFG_BOOL("zknd", ext_zknd, false),
    MULTI_EXT_CFG_BOOL("zkne", ext_zkne, false),
    MULTI_EXT_CFG_BOOL("zknh", ext_zknh, false),
    MULTI_EXT_CFG_BOOL("zkr", ext_zkr, false),
    MULTI_EXT_CFG_BOOL("zks", ext_zks, false),
    MULTI_EXT_CFG_BOOL("zksed", ext_zksed, false),
    MULTI_EXT_CFG_BOOL("zksh", ext_zksh, false),
    MULTI_EXT_CFG_BOOL("zkt", ext_zkt, false),
    MULTI_EXT_CFG_BOOL("ztso", ext_ztso, false),

    MULTI_EXT_CFG_BOOL("zdinx", ext_zdinx, false),
    MULTI_EXT_CFG_BOOL("zfinx", ext_zfinx, false),
    MULTI_EXT_CFG_BOOL("zhinx", ext_zhinx, false),
    MULTI_EXT_CFG_BOOL("zhinxmin", ext_zhinxmin, false),

    MULTI_EXT_CFG_BOOL("zicbom", ext_zicbom, true),
    MULTI_EXT_CFG_BOOL("zicbop", ext_zicbop, true),
    MULTI_EXT_CFG_BOOL("zicboz", ext_zicboz, true),

    MULTI_EXT_CFG_BOOL("zmmul", ext_zmmul, false),

    MULTI_EXT_CFG_BOOL("zca", ext_zca, false),
    MULTI_EXT_CFG_BOOL("zcb", ext_zcb, false),
    MULTI_EXT_CFG_BOOL("zcd", ext_zcd, false),
    MULTI_EXT_CFG_BOOL("zce", ext_zce, false),
    MULTI_EXT_CFG_BOOL("zcf", ext_zcf, false),
    MULTI_EXT_CFG_BOOL("zcmp", ext_zcmp, false),
    MULTI_EXT_CFG_BOOL("zcmt", ext_zcmt, false),
    MULTI_EXT_CFG_BOOL("zicond", ext_zicond, false),

    /* Vector cryptography extensions */
    MULTI_EXT_CFG_BOOL("zvbb", ext_zvbb, false),
    MULTI_EXT_CFG_BOOL("zvbc", ext_zvbc, false),
    MULTI_EXT_CFG_BOOL("zvkb", ext_zvkb, false),
    MULTI_EXT_CFG_BOOL("zvkg", ext_zvkg, false),
    MULTI_EXT_CFG_BOOL("zvkned", ext_zvkned, false),
    MULTI_EXT_CFG_BOOL("zvknha", ext_zvknha, false),
    MULTI_EXT_CFG_BOOL("zvknhb", ext_zvknhb, false),
    MULTI_EXT_CFG_BOOL("zvksed", ext_zvksed, false),
    MULTI_EXT_CFG_BOOL("zvksh", ext_zvksh, false),
    MULTI_EXT_CFG_BOOL("zvkt", ext_zvkt, false),
    MULTI_EXT_CFG_BOOL("zvkn", ext_zvkn, false),
    MULTI_EXT_CFG_BOOL("zvknc", ext_zvknc, false),
    MULTI_EXT_CFG_BOOL("zvkng", ext_zvkng, false),
    MULTI_EXT_CFG_BOOL("zvks", ext_zvks, false),
    MULTI_EXT_CFG_BOOL("zvksc", ext_zvksc, false),
    MULTI_EXT_CFG_BOOL("zvksg", ext_zvksg, false),

    DEFINE_PROP_END_OF_LIST(),
};

const RISCVCPUMultiExtConfig riscv_cpu_vendor_exts[] = {
    MULTI_EXT_CFG_BOOL("xtheadba", ext_xtheadba, false),
    MULTI_EXT_CFG_BOOL("xtheadbb", ext_xtheadbb, false),
    MULTI_EXT_CFG_BOOL("xtheadbs", ext_xtheadbs, false),
    MULTI_EXT_CFG_BOOL("xtheadcmo", ext_xtheadcmo, false),
    MULTI_EXT_CFG_BOOL("xtheadcondmov", ext_xtheadcondmov, false),
    MULTI_EXT_CFG_BOOL("xtheadfmemidx", ext_xtheadfmemidx, false),
    MULTI_EXT_CFG_BOOL("xtheadfmv", ext_xtheadfmv, false),
    MULTI_EXT_CFG_BOOL("xtheadmac", ext_xtheadmac, false),
    MULTI_EXT_CFG_BOOL("xtheadmemidx", ext_xtheadmemidx, false),
    MULTI_EXT_CFG_BOOL("xtheadmempair", ext_xtheadmempair, false),
    MULTI_EXT_CFG_BOOL("xtheadsync", ext_xtheadsync, false),
    MULTI_EXT_CFG_BOOL("xventanacondops", ext_XVentanaCondOps, false),

    DEFINE_PROP_END_OF_LIST(),
};

/* These are experimental so mark with 'x-' */
const RISCVCPUMultiExtConfig riscv_cpu_experimental_exts[] = {
    DEFINE_PROP_END_OF_LIST(),
};

#define ALWAYS_ENABLED_FEATURE(_name) \
    {.name = _name, \
     .offset = CPU_CFG_OFFSET(ext_always_enabled), \
     .enabled = true}

/*
 * 'Named features' is the name we give to extensions that we
 * don't want to expose to users. They are either immutable
 * (always enabled/disabled) or they'll vary depending on
 * the resulting CPU state. They have riscv,isa strings
 * and priv_ver like regular extensions.
 */
const RISCVCPUMultiExtConfig riscv_cpu_named_features[] = {
    MULTI_EXT_CFG_BOOL("zic64b", ext_zic64b, true),

    /*
     * cache-related extensions that are always enabled
     * in TCG since QEMU RISC-V does not have a cache
     * model.
     */
    ALWAYS_ENABLED_FEATURE("za64rs"),
    ALWAYS_ENABLED_FEATURE("ziccif"),
    ALWAYS_ENABLED_FEATURE("ziccrse"),
    ALWAYS_ENABLED_FEATURE("ziccamoa"),
    ALWAYS_ENABLED_FEATURE("zicclsm"),
    ALWAYS_ENABLED_FEATURE("ssccptr"),

    /* Other named features that TCG always implements */
    ALWAYS_ENABLED_FEATURE("sstvecd"),
    ALWAYS_ENABLED_FEATURE("sstvala"),
    ALWAYS_ENABLED_FEATURE("sscounterenw"),

    DEFINE_PROP_END_OF_LIST(),
};

/* Deprecated entries marked for future removal */
const RISCVCPUMultiExtConfig riscv_cpu_deprecated_exts[] = {
    MULTI_EXT_CFG_BOOL("Zifencei", ext_zifencei, true),
    MULTI_EXT_CFG_BOOL("Zicsr", ext_zicsr, true),
    MULTI_EXT_CFG_BOOL("Zihintntl", ext_zihintntl, true),
    MULTI_EXT_CFG_BOOL("Zihintpause", ext_zihintpause, true),
    MULTI_EXT_CFG_BOOL("Zawrs", ext_zawrs, true),
    MULTI_EXT_CFG_BOOL("Zfa", ext_zfa, true),
    MULTI_EXT_CFG_BOOL("Zfh", ext_zfh, false),
    MULTI_EXT_CFG_BOOL("Zfhmin", ext_zfhmin, false),
    MULTI_EXT_CFG_BOOL("Zve32f", ext_zve32f, false),
    MULTI_EXT_CFG_BOOL("Zve64f", ext_zve64f, false),
    MULTI_EXT_CFG_BOOL("Zve64d", ext_zve64d, false),

    DEFINE_PROP_END_OF_LIST(),
};

static void cpu_set_prop_err(RISCVCPU *cpu, const char *propname,
                             Error **errp)
{
    g_autofree char *cpuname = riscv_cpu_get_name(cpu);
    error_setg(errp, "CPU '%s' does not allow changing the value of '%s'",
               cpuname, propname);
}

static void prop_pmu_num_set(Object *obj, Visitor *v, const char *name,
                             void *opaque, Error **errp)
{
    RISCVCPU *cpu = RISCV_CPU(obj);
    uint8_t pmu_num, curr_pmu_num;
    uint32_t pmu_mask;

    visit_type_uint8(v, name, &pmu_num, errp);

    curr_pmu_num = ctpop32(cpu->cfg.pmu_mask);

    if (pmu_num != curr_pmu_num && riscv_cpu_is_vendor(obj)) {
        cpu_set_prop_err(cpu, name, errp);
        error_append_hint(errp, "Current '%s' val: %u\n",
                          name, curr_pmu_num);
        return;
    }

    if (pmu_num > (RV_MAX_MHPMCOUNTERS - 3)) {
        error_setg(errp, "Number of counters exceeds maximum available");
        return;
    }

    if (pmu_num == 0) {
        pmu_mask = 0;
    } else {
        pmu_mask = MAKE_64BIT_MASK(3, pmu_num);
    }

    warn_report("\"pmu-num\" property is deprecated; use \"pmu-mask\"");
    cpu->cfg.pmu_mask = pmu_mask;
    cpu_option_add_user_setting("pmu-mask", pmu_mask);
}

static void prop_pmu_num_get(Object *obj, Visitor *v, const char *name,
                             void *opaque, Error **errp)
{
    RISCVCPU *cpu = RISCV_CPU(obj);
    uint8_t pmu_num = ctpop32(cpu->cfg.pmu_mask);

    visit_type_uint8(v, name, &pmu_num, errp);
}

static const PropertyInfo prop_pmu_num = {
    .name = "pmu-num",
    .get = prop_pmu_num_get,
    .set = prop_pmu_num_set,
};
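/*
 * Illustrative note (editor's assumption): the deprecated "pmu-num" property
 * is converted into an equivalent "pmu-mask" starting at mhpmcounter3, e.g.
 * pmu-num=4 becomes pmu-mask=0x78 (counters 3..6).
 */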
static void prop_pmu_mask_set(Object *obj, Visitor *v, const char *name,
                              void *opaque, Error **errp)
{
    RISCVCPU *cpu = RISCV_CPU(obj);
    uint32_t value;
    uint8_t pmu_num;

    visit_type_uint32(v, name, &value, errp);

    if (value != cpu->cfg.pmu_mask && riscv_cpu_is_vendor(obj)) {
        cpu_set_prop_err(cpu, name, errp);
        error_append_hint(errp, "Current '%s' val: %x\n",
                          name, cpu->cfg.pmu_mask);
        return;
    }

    pmu_num = ctpop32(value);

    if (pmu_num > (RV_MAX_MHPMCOUNTERS - 3)) {
        error_setg(errp, "Number of counters exceeds maximum available");
        return;
    }

    cpu_option_add_user_setting(name, value);
    cpu->cfg.pmu_mask = value;
}

static void prop_pmu_mask_get(Object *obj, Visitor *v, const char *name,
                              void *opaque, Error **errp)
{
    uint32_t pmu_mask = RISCV_CPU(obj)->cfg.pmu_mask;

    visit_type_uint32(v, name, &pmu_mask, errp);
}

static const PropertyInfo prop_pmu_mask = {
    .name = "pmu-mask",
    .get = prop_pmu_mask_get,
    .set = prop_pmu_mask_set,
};

static void prop_mmu_set(Object *obj, Visitor *v, const char *name,
                         void *opaque, Error **errp)
{
    RISCVCPU *cpu = RISCV_CPU(obj);
    bool value;

    visit_type_bool(v, name, &value, errp);

    if (cpu->cfg.mmu != value && riscv_cpu_is_vendor(obj)) {
        cpu_set_prop_err(cpu, "mmu", errp);
        return;
    }

    cpu_option_add_user_setting(name, value);
    cpu->cfg.mmu = value;
}

static void prop_mmu_get(Object *obj, Visitor *v, const char *name,
                         void *opaque, Error **errp)
{
    bool value = RISCV_CPU(obj)->cfg.mmu;

    visit_type_bool(v, name, &value, errp);
}

static const PropertyInfo prop_mmu = {
    .name = "mmu",
    .get = prop_mmu_get,
    .set = prop_mmu_set,
};

static void prop_pmp_set(Object *obj, Visitor *v, const char *name,
                         void *opaque, Error **errp)
{
    RISCVCPU *cpu = RISCV_CPU(obj);
    bool value;

    visit_type_bool(v, name, &value, errp);

    if (cpu->cfg.pmp != value && riscv_cpu_is_vendor(obj)) {
        cpu_set_prop_err(cpu, name, errp);
        return;
    }

    cpu_option_add_user_setting(name, value);
    cpu->cfg.pmp = value;
}

static void prop_pmp_get(Object *obj, Visitor *v, const char *name,
                         void *opaque, Error **errp)
{
    bool value = RISCV_CPU(obj)->cfg.pmp;

    visit_type_bool(v, name, &value, errp);
}

static const PropertyInfo prop_pmp = {
    .name = "pmp",
    .get = prop_pmp_get,
    .set = prop_pmp_set,
};

static int priv_spec_from_str(const char *priv_spec_str)
{
    int priv_version = -1;

    if (!g_strcmp0(priv_spec_str, PRIV_VER_1_12_0_STR)) {
        priv_version = PRIV_VERSION_1_12_0;
    } else if (!g_strcmp0(priv_spec_str, PRIV_VER_1_11_0_STR)) {
        priv_version = PRIV_VERSION_1_11_0;
    } else if (!g_strcmp0(priv_spec_str, PRIV_VER_1_10_0_STR)) {
        priv_version = PRIV_VERSION_1_10_0;
    }

    return priv_version;
}

static const char *priv_spec_to_str(int priv_version)
{
    switch (priv_version) {
    case PRIV_VERSION_1_10_0:
        return PRIV_VER_1_10_0_STR;
    case PRIV_VERSION_1_11_0:
        return PRIV_VER_1_11_0_STR;
    case PRIV_VERSION_1_12_0:
        return PRIV_VER_1_12_0_STR;
    default:
        return NULL;
    }
}
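/*
 * Illustrative usage (editor's assumption): the "priv_spec" property accepts
 * and reports the PRIV_VER_*_STR strings handled above, e.g. something like
 * "-cpu rv64,priv_spec=v1.11.0".
 */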
static void prop_priv_spec_set(Object *obj, Visitor *v, const char *name,
                               void *opaque, Error **errp)
{
    RISCVCPU *cpu = RISCV_CPU(obj);
    g_autofree char *value = NULL;
    int priv_version = -1;

    visit_type_str(v, name, &value, errp);

    priv_version = priv_spec_from_str(value);
    if (priv_version < 0) {
        error_setg(errp, "Unsupported privilege spec version '%s'", value);
        return;
    }

    if (priv_version != cpu->env.priv_ver && riscv_cpu_is_vendor(obj)) {
        cpu_set_prop_err(cpu, name, errp);
        error_append_hint(errp, "Current '%s' val: %s\n", name,
                          object_property_get_str(obj, name, NULL));
        return;
    }

    cpu_option_add_user_setting(name, priv_version);
    cpu->env.priv_ver = priv_version;
}

static void prop_priv_spec_get(Object *obj, Visitor *v, const char *name,
                               void *opaque, Error **errp)
{
    RISCVCPU *cpu = RISCV_CPU(obj);
    const char *value = priv_spec_to_str(cpu->env.priv_ver);

    visit_type_str(v, name, (char **)&value, errp);
}

static const PropertyInfo prop_priv_spec = {
    .name = "priv_spec",
    .get = prop_priv_spec_get,
    .set = prop_priv_spec_set,
};

static void prop_vext_spec_set(Object *obj, Visitor *v, const char *name,
                               void *opaque, Error **errp)
{
    RISCVCPU *cpu = RISCV_CPU(obj);
    g_autofree char *value = NULL;

    visit_type_str(v, name, &value, errp);

    if (g_strcmp0(value, VEXT_VER_1_00_0_STR) != 0) {
        error_setg(errp, "Unsupported vector spec version '%s'", value);
        return;
    }

    cpu_option_add_user_setting(name, VEXT_VERSION_1_00_0);
    cpu->env.vext_ver = VEXT_VERSION_1_00_0;
}

static void prop_vext_spec_get(Object *obj, Visitor *v, const char *name,
                               void *opaque, Error **errp)
{
    const char *value = VEXT_VER_1_00_0_STR;

    visit_type_str(v, name, (char **)&value, errp);
}

static const PropertyInfo prop_vext_spec = {
    .name = "vext_spec",
    .get = prop_vext_spec_get,
    .set = prop_vext_spec_set,
};

static void prop_vlen_set(Object *obj, Visitor *v, const char *name,
                          void *opaque, Error **errp)
{
    RISCVCPU *cpu = RISCV_CPU(obj);
    uint16_t value;

    if (!visit_type_uint16(v, name, &value, errp)) {
        return;
    }

    if (!is_power_of_2(value)) {
        error_setg(errp, "Vector extension VLEN must be a power of 2");
        return;
    }

    if (value != cpu->cfg.vlenb << 3 && riscv_cpu_is_vendor(obj)) {
        cpu_set_prop_err(cpu, name, errp);
        error_append_hint(errp, "Current '%s' val: %u\n",
                          name, cpu->cfg.vlenb << 3);
        return;
    }

    cpu_option_add_user_setting(name, value);
    cpu->cfg.vlenb = value >> 3;
}

static void prop_vlen_get(Object *obj, Visitor *v, const char *name,
                          void *opaque, Error **errp)
{
    uint16_t value = RISCV_CPU(obj)->cfg.vlenb << 3;

    visit_type_uint16(v, name, &value, errp);
}

static const PropertyInfo prop_vlen = {
    .name = "vlen",
    .get = prop_vlen_get,
    .set = prop_vlen_set,
};
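/*
 * Illustrative note: "vlen" is given in bits on the command line but is
 * stored internally in bytes (vlenb). Setting vlen=256 results in
 * cpu->cfg.vlenb == 32, and prop_vlen_get() converts back with
 * "vlenb << 3". For example (assuming the generic 'rv64' CPU model):
 *
 *   -cpu rv64,v=true,vlen=256,elen=64
 */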
static void prop_elen_set(Object *obj, Visitor *v, const char *name,
                          void *opaque, Error **errp)
{
    RISCVCPU *cpu = RISCV_CPU(obj);
    uint16_t value;

    if (!visit_type_uint16(v, name, &value, errp)) {
        return;
    }

    if (!is_power_of_2(value)) {
        error_setg(errp, "Vector extension ELEN must be a power of 2");
        return;
    }

    if (value != cpu->cfg.elen && riscv_cpu_is_vendor(obj)) {
        cpu_set_prop_err(cpu, name, errp);
        error_append_hint(errp, "Current '%s' val: %u\n",
                          name, cpu->cfg.elen);
        return;
    }

    cpu_option_add_user_setting(name, value);
    cpu->cfg.elen = value;
}

static void prop_elen_get(Object *obj, Visitor *v, const char *name,
                          void *opaque, Error **errp)
{
    uint16_t value = RISCV_CPU(obj)->cfg.elen;

    visit_type_uint16(v, name, &value, errp);
}

static const PropertyInfo prop_elen = {
    .name = "elen",
    .get = prop_elen_get,
    .set = prop_elen_set,
};

static void prop_cbom_blksize_set(Object *obj, Visitor *v, const char *name,
                                  void *opaque, Error **errp)
{
    RISCVCPU *cpu = RISCV_CPU(obj);
    uint16_t value;

    if (!visit_type_uint16(v, name, &value, errp)) {
        return;
    }

    if (value != cpu->cfg.cbom_blocksize && riscv_cpu_is_vendor(obj)) {
        cpu_set_prop_err(cpu, name, errp);
        error_append_hint(errp, "Current '%s' val: %u\n",
                          name, cpu->cfg.cbom_blocksize);
        return;
    }

    cpu_option_add_user_setting(name, value);
    cpu->cfg.cbom_blocksize = value;
}

static void prop_cbom_blksize_get(Object *obj, Visitor *v, const char *name,
                                  void *opaque, Error **errp)
{
    uint16_t value = RISCV_CPU(obj)->cfg.cbom_blocksize;

    visit_type_uint16(v, name, &value, errp);
}

static const PropertyInfo prop_cbom_blksize = {
    .name = "cbom_blocksize",
    .get = prop_cbom_blksize_get,
    .set = prop_cbom_blksize_set,
};

static void prop_cbop_blksize_set(Object *obj, Visitor *v, const char *name,
                                  void *opaque, Error **errp)
{
    RISCVCPU *cpu = RISCV_CPU(obj);
    uint16_t value;

    if (!visit_type_uint16(v, name, &value, errp)) {
        return;
    }

    if (value != cpu->cfg.cbop_blocksize && riscv_cpu_is_vendor(obj)) {
        cpu_set_prop_err(cpu, name, errp);
        error_append_hint(errp, "Current '%s' val: %u\n",
                          name, cpu->cfg.cbop_blocksize);
        return;
    }

    cpu_option_add_user_setting(name, value);
    cpu->cfg.cbop_blocksize = value;
}

static void prop_cbop_blksize_get(Object *obj, Visitor *v, const char *name,
                                  void *opaque, Error **errp)
{
    uint16_t value = RISCV_CPU(obj)->cfg.cbop_blocksize;

    visit_type_uint16(v, name, &value, errp);
}

static const PropertyInfo prop_cbop_blksize = {
    .name = "cbop_blocksize",
    .get = prop_cbop_blksize_get,
    .set = prop_cbop_blksize_set,
};

static void prop_cboz_blksize_set(Object *obj, Visitor *v, const char *name,
                                  void *opaque, Error **errp)
{
    RISCVCPU *cpu = RISCV_CPU(obj);
    uint16_t value;

    if (!visit_type_uint16(v, name, &value, errp)) {
        return;
    }

    if (value != cpu->cfg.cboz_blocksize && riscv_cpu_is_vendor(obj)) {
        cpu_set_prop_err(cpu, name, errp);
        error_append_hint(errp, "Current '%s' val: %u\n",
                          name, cpu->cfg.cboz_blocksize);
        return;
    }

    cpu_option_add_user_setting(name, value);
    cpu->cfg.cboz_blocksize = value;
}

static void prop_cboz_blksize_get(Object *obj, Visitor *v, const char *name,
                                  void *opaque, Error **errp)
{
    uint16_t value = RISCV_CPU(obj)->cfg.cboz_blocksize;

    visit_type_uint16(v, name, &value, errp);
}

static const PropertyInfo prop_cboz_blksize = {
    .name = "cboz_blocksize",
    .get = prop_cboz_blksize_get,
    .set = prop_cboz_blksize_set,
};
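/*
 * Illustrative note: the three block size properties above back the
 * Zicbom, Zicbop and Zicboz cache-block extensions respectively. As
 * with the other knobs in this file, they can be changed freely on
 * generic CPUs but are locked on vendor CPUs. For example (assuming
 * the generic 'rv64' CPU model and the 'zicboz' extension property):
 *
 *   -cpu rv64,zicboz=true,cboz_blocksize=128
 */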
static void prop_mvendorid_set(Object *obj, Visitor *v, const char *name,
                               void *opaque, Error **errp)
{
    bool dynamic_cpu = riscv_cpu_is_dynamic(obj);
    RISCVCPU *cpu = RISCV_CPU(obj);
    uint32_t prev_val = cpu->cfg.mvendorid;
    uint32_t value;

    if (!visit_type_uint32(v, name, &value, errp)) {
        return;
    }

    if (!dynamic_cpu && prev_val != value) {
        error_setg(errp, "Unable to change %s mvendorid (0x%x)",
                   object_get_typename(obj), prev_val);
        return;
    }

    cpu->cfg.mvendorid = value;
}

static void prop_mvendorid_get(Object *obj, Visitor *v, const char *name,
                               void *opaque, Error **errp)
{
    uint32_t value = RISCV_CPU(obj)->cfg.mvendorid;

    visit_type_uint32(v, name, &value, errp);
}

static const PropertyInfo prop_mvendorid = {
    .name = "mvendorid",
    .get = prop_mvendorid_get,
    .set = prop_mvendorid_set,
};

static void prop_mimpid_set(Object *obj, Visitor *v, const char *name,
                            void *opaque, Error **errp)
{
    bool dynamic_cpu = riscv_cpu_is_dynamic(obj);
    RISCVCPU *cpu = RISCV_CPU(obj);
    uint64_t prev_val = cpu->cfg.mimpid;
    uint64_t value;

    if (!visit_type_uint64(v, name, &value, errp)) {
        return;
    }

    if (!dynamic_cpu && prev_val != value) {
        error_setg(errp, "Unable to change %s mimpid (0x%" PRIx64 ")",
                   object_get_typename(obj), prev_val);
        return;
    }

    cpu->cfg.mimpid = value;
}

static void prop_mimpid_get(Object *obj, Visitor *v, const char *name,
                            void *opaque, Error **errp)
{
    uint64_t value = RISCV_CPU(obj)->cfg.mimpid;

    visit_type_uint64(v, name, &value, errp);
}

static const PropertyInfo prop_mimpid = {
    .name = "mimpid",
    .get = prop_mimpid_get,
    .set = prop_mimpid_set,
};

static void prop_marchid_set(Object *obj, Visitor *v, const char *name,
                             void *opaque, Error **errp)
{
    bool dynamic_cpu = riscv_cpu_is_dynamic(obj);
    RISCVCPU *cpu = RISCV_CPU(obj);
    uint64_t prev_val = cpu->cfg.marchid;
    uint64_t value, invalid_val;
    uint32_t mxlen = 0;

    if (!visit_type_uint64(v, name, &value, errp)) {
        return;
    }

    if (!dynamic_cpu && prev_val != value) {
        error_setg(errp, "Unable to change %s marchid (0x%" PRIx64 ")",
                   object_get_typename(obj), prev_val);
        return;
    }

    switch (riscv_cpu_mxl(&cpu->env)) {
    case MXL_RV32:
        mxlen = 32;
        break;
    case MXL_RV64:
    case MXL_RV128:
        mxlen = 64;
        break;
    default:
        g_assert_not_reached();
    }

    invalid_val = 1ULL << (mxlen - 1);

    if (value == invalid_val) {
        error_setg(errp, "Unable to set marchid with MSB (%u) bit set "
                         "and the remaining bits zero", mxlen);
        return;
    }

    cpu->cfg.marchid = value;
}

static void prop_marchid_get(Object *obj, Visitor *v, const char *name,
                             void *opaque, Error **errp)
{
    uint64_t value = RISCV_CPU(obj)->cfg.marchid;

    visit_type_uint64(v, name, &value, errp);
}

static const PropertyInfo prop_marchid = {
    .name = "marchid",
    .get = prop_marchid_get,
    .set = prop_marchid_set,
};
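/*
 * Illustrative note: mvendorid, mimpid and marchid can only be
 * overridden on dynamic CPUs; other CPU types keep their preset values.
 * The one value prop_marchid_set() always refuses is "MSB set, all
 * other bits zero", e.g. on a 64-bit CPU:
 *
 *   -cpu rv64,marchid=0x8000000000000000     (rejected)
 */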
/*
 * RVA22U64 defines some 'named features' that are cache
 * related: Za64rs, Zic64b, Ziccif, Ziccrse, Ziccamoa
 * and Zicclsm. They are always implemented in TCG and
 * don't need to be manually enabled by the profile.
 */
static RISCVCPUProfile RVA22U64 = {
    .parent = NULL,
    .name = "rva22u64",
    .misa_ext = RVI | RVM | RVA | RVF | RVD | RVC | RVU,
    .priv_spec = RISCV_PROFILE_ATTR_UNUSED,
    .satp_mode = RISCV_PROFILE_ATTR_UNUSED,
    .ext_offsets = {
        CPU_CFG_OFFSET(ext_zicsr), CPU_CFG_OFFSET(ext_zihintpause),
        CPU_CFG_OFFSET(ext_zba), CPU_CFG_OFFSET(ext_zbb),
        CPU_CFG_OFFSET(ext_zbs), CPU_CFG_OFFSET(ext_zfhmin),
        CPU_CFG_OFFSET(ext_zkt), CPU_CFG_OFFSET(ext_zicntr),
        CPU_CFG_OFFSET(ext_zihpm), CPU_CFG_OFFSET(ext_zicbom),
        CPU_CFG_OFFSET(ext_zicbop), CPU_CFG_OFFSET(ext_zicboz),

        /* mandatory named features for this profile */
        CPU_CFG_OFFSET(ext_zic64b),

        RISCV_PROFILE_EXT_LIST_END
    }
};

/*
 * As with RVA22U64, RVA22S64 also defines 'named features'.
 *
 * Cache-related features that we consider enabled since we don't
 * implement cache: Ssccptr
 *
 * Other named features that we already implement: Sstvecd, Sstvala,
 * Sscounterenw
 *
 * The remaining features/extensions come from RVA22U64.
 */
static RISCVCPUProfile RVA22S64 = {
    .parent = &RVA22U64,
    .name = "rva22s64",
    .misa_ext = RVS,
    .priv_spec = PRIV_VERSION_1_12_0,
    .satp_mode = VM_1_10_SV39,
    .ext_offsets = {
        /* rva22s64 exts */
        CPU_CFG_OFFSET(ext_zifencei), CPU_CFG_OFFSET(ext_svpbmt),
        CPU_CFG_OFFSET(ext_svinval), CPU_CFG_OFFSET(ext_svade),

        RISCV_PROFILE_EXT_LIST_END
    }
};

RISCVCPUProfile *riscv_profiles[] = {
    &RVA22U64,
    &RVA22S64,
    NULL,
};
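/*
 * Illustrative note: a profile can be enabled either by picking the
 * matching profile CPU type registered further below (rva22u64 or
 * rva22s64), or via the corresponding profile flag on a bare CPU, e.g.:
 *
 *   -cpu rva22s64
 *   -cpu rv64i,rva22u64=true
 */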
static Property riscv_cpu_properties[] = {
    DEFINE_PROP_BOOL("debug", RISCVCPU, cfg.debug, true),

    {.name = "pmu-mask", .info = &prop_pmu_mask},
    {.name = "pmu-num", .info = &prop_pmu_num}, /* Deprecated */

    {.name = "mmu", .info = &prop_mmu},
    {.name = "pmp", .info = &prop_pmp},

    {.name = "priv_spec", .info = &prop_priv_spec},
    {.name = "vext_spec", .info = &prop_vext_spec},

    {.name = "vlen", .info = &prop_vlen},
    {.name = "elen", .info = &prop_elen},

    {.name = "cbom_blocksize", .info = &prop_cbom_blksize},
    {.name = "cbop_blocksize", .info = &prop_cbop_blksize},
    {.name = "cboz_blocksize", .info = &prop_cboz_blksize},

    {.name = "mvendorid", .info = &prop_mvendorid},
    {.name = "mimpid", .info = &prop_mimpid},
    {.name = "marchid", .info = &prop_marchid},

#ifndef CONFIG_USER_ONLY
    DEFINE_PROP_UINT64("resetvec", RISCVCPU, env.resetvec, DEFAULT_RSTVEC),
#endif

    DEFINE_PROP_BOOL("short-isa-string", RISCVCPU, cfg.short_isa_string, false),

    DEFINE_PROP_BOOL("rvv_ta_all_1s", RISCVCPU, cfg.rvv_ta_all_1s, false),
    DEFINE_PROP_BOOL("rvv_ma_all_1s", RISCVCPU, cfg.rvv_ma_all_1s, false),

    /*
     * write_misa() is marked as experimental for now, so the property
     * gets an 'x-' prefix and defaults to 'false'.
     */
    DEFINE_PROP_BOOL("x-misa-w", RISCVCPU, cfg.misa_w, false),
    DEFINE_PROP_END_OF_LIST(),
};

#if defined(TARGET_RISCV64)
static void rva22u64_profile_cpu_init(Object *obj)
{
    rv64i_bare_cpu_init(obj);

    RVA22U64.enabled = true;
}

static void rva22s64_profile_cpu_init(Object *obj)
{
    rv64i_bare_cpu_init(obj);

    RVA22S64.enabled = true;
}
#endif

static const gchar *riscv_gdb_arch_name(CPUState *cs)
{
    RISCVCPU *cpu = RISCV_CPU(cs);
    CPURISCVState *env = &cpu->env;

    switch (riscv_cpu_mxl(env)) {
    case MXL_RV32:
        return "riscv:rv32";
    case MXL_RV64:
    case MXL_RV128:
        return "riscv:rv64";
    default:
        g_assert_not_reached();
    }
}

#ifndef CONFIG_USER_ONLY
static int64_t riscv_get_arch_id(CPUState *cs)
{
    RISCVCPU *cpu = RISCV_CPU(cs);

    return cpu->env.mhartid;
}

#include "hw/core/sysemu-cpu-ops.h"

static const struct SysemuCPUOps riscv_sysemu_ops = {
    .get_phys_page_debug = riscv_cpu_get_phys_page_debug,
    .write_elf64_note = riscv_cpu_write_elf64_note,
    .write_elf32_note = riscv_cpu_write_elf32_note,
    .legacy_vmsd = &vmstate_riscv_cpu,
};
#endif

static void riscv_cpu_common_class_init(ObjectClass *c, void *data)
{
    RISCVCPUClass *mcc = RISCV_CPU_CLASS(c);
    CPUClass *cc = CPU_CLASS(c);
    DeviceClass *dc = DEVICE_CLASS(c);
    ResettableClass *rc = RESETTABLE_CLASS(c);

    device_class_set_parent_realize(dc, riscv_cpu_realize,
                                    &mcc->parent_realize);

    resettable_class_set_parent_phases(rc, NULL, riscv_cpu_reset_hold, NULL,
                                       &mcc->parent_phases);

    cc->class_by_name = riscv_cpu_class_by_name;
    cc->has_work = riscv_cpu_has_work;
    cc->mmu_index = riscv_cpu_mmu_index;
    cc->dump_state = riscv_cpu_dump_state;
    cc->set_pc = riscv_cpu_set_pc;
    cc->get_pc = riscv_cpu_get_pc;
    cc->gdb_read_register = riscv_cpu_gdb_read_register;
    cc->gdb_write_register = riscv_cpu_gdb_write_register;
    cc->gdb_stop_before_watchpoint = true;
    cc->disas_set_info = riscv_cpu_disas_set_info;
#ifndef CONFIG_USER_ONLY
    cc->sysemu_ops = &riscv_sysemu_ops;
    cc->get_arch_id = riscv_get_arch_id;
#endif
    cc->gdb_arch_name = riscv_gdb_arch_name;

    device_class_set_props(dc, riscv_cpu_properties);
}

static void riscv_cpu_class_init(ObjectClass *c, void *data)
{
    RISCVCPUClass *mcc = RISCV_CPU_CLASS(c);

    mcc->misa_mxl_max = (uint32_t)(uintptr_t)data;
    riscv_cpu_validate_misa_mxl(mcc);
}

static void riscv_isa_string_ext(RISCVCPU *cpu, char **isa_str,
                                 int max_str_len)
{
    const RISCVIsaExtData *edata;
    char *old = *isa_str;
    char *new = *isa_str;

    for (edata = isa_edata_arr; edata && edata->name; edata++) {
        if (isa_ext_is_enabled(cpu, edata->ext_enable_offset)) {
            new = g_strconcat(old, "_", edata->name, NULL);
            g_free(old);
            old = new;
        }
    }

    *isa_str = new;
}
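/*
 * Illustrative note: riscv_isa_string() below first emits the enabled
 * single-letter extensions in riscv_single_letter_exts order and then,
 * unless short-isa-string is set, appends the multi-letter extensions
 * via riscv_isa_string_ext(). A CPU with I/M/A/F/D/C plus Zicsr and
 * Zifencei would therefore produce something like
 * "rv64imafdc_zicsr_zifencei".
 */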
char *riscv_isa_string(RISCVCPU *cpu)
{
    RISCVCPUClass *mcc = RISCV_CPU_GET_CLASS(cpu);
    int i;
    const size_t maxlen = sizeof("rv128") + sizeof(riscv_single_letter_exts);
    char *isa_str = g_new(char, maxlen);
    int xlen = riscv_cpu_max_xlen(mcc);
    char *p = isa_str + snprintf(isa_str, maxlen, "rv%d", xlen);

    for (i = 0; i < sizeof(riscv_single_letter_exts) - 1; i++) {
        if (cpu->env.misa_ext & RV(riscv_single_letter_exts[i])) {
            *p++ = qemu_tolower(riscv_single_letter_exts[i]);
        }
    }
    *p = '\0';
    if (!cpu->cfg.short_isa_string) {
        riscv_isa_string_ext(cpu, &isa_str, maxlen);
    }
    return isa_str;
}

#ifndef CONFIG_USER_ONLY
static char **riscv_isa_extensions_list(RISCVCPU *cpu, int *count)
{
    int maxlen = ARRAY_SIZE(riscv_single_letter_exts) + ARRAY_SIZE(isa_edata_arr);
    char **extensions = g_new(char *, maxlen);

    for (int i = 0; i < sizeof(riscv_single_letter_exts) - 1; i++) {
        if (cpu->env.misa_ext & RV(riscv_single_letter_exts[i])) {
            extensions[*count] = g_new(char, 2);
            snprintf(extensions[*count], 2, "%c",
                     qemu_tolower(riscv_single_letter_exts[i]));
            (*count)++;
        }
    }

    for (const RISCVIsaExtData *edata = isa_edata_arr; edata->name; edata++) {
        if (isa_ext_is_enabled(cpu, edata->ext_enable_offset)) {
            extensions[*count] = g_strdup(edata->name);
            (*count)++;
        }
    }

    return extensions;
}

void riscv_isa_write_fdt(RISCVCPU *cpu, void *fdt, char *nodename)
{
    RISCVCPUClass *mcc = RISCV_CPU_GET_CLASS(cpu);
    const size_t maxlen = sizeof("rv128i");
    g_autofree char *isa_base = g_new(char, maxlen);
    g_autofree char *riscv_isa;
    char **isa_extensions;
    int count = 0;
    int xlen = riscv_cpu_max_xlen(mcc);

    riscv_isa = riscv_isa_string(cpu);
    qemu_fdt_setprop_string(fdt, nodename, "riscv,isa", riscv_isa);

    snprintf(isa_base, maxlen, "rv%di", xlen);
    qemu_fdt_setprop_string(fdt, nodename, "riscv,isa-base", isa_base);

    isa_extensions = riscv_isa_extensions_list(cpu, &count);
    qemu_fdt_setprop_string_array(fdt, nodename, "riscv,isa-extensions",
                                  isa_extensions, count);

    for (int i = 0; i < count; i++) {
        g_free(isa_extensions[i]);
    }

    g_free(isa_extensions);
}
#endif

#define DEFINE_CPU(type_name, misa_mxl_max, initfn) \
    { \
        .name = (type_name), \
        .parent = TYPE_RISCV_CPU, \
        .instance_init = (initfn), \
        .class_init = riscv_cpu_class_init, \
        .class_data = (void *)(misa_mxl_max) \
    }

#define DEFINE_DYNAMIC_CPU(type_name, misa_mxl_max, initfn) \
    { \
        .name = (type_name), \
        .parent = TYPE_RISCV_DYNAMIC_CPU, \
        .instance_init = (initfn), \
        .class_init = riscv_cpu_class_init, \
        .class_data = (void *)(misa_mxl_max) \
    }

#define DEFINE_VENDOR_CPU(type_name, misa_mxl_max, initfn) \
    { \
        .name = (type_name), \
        .parent = TYPE_RISCV_VENDOR_CPU, \
        .instance_init = (initfn), \
        .class_init = riscv_cpu_class_init, \
        .class_data = (void *)(misa_mxl_max) \
    }

#define DEFINE_BARE_CPU(type_name, misa_mxl_max, initfn) \
    { \
        .name = (type_name), \
        .parent = TYPE_RISCV_BARE_CPU, \
        .instance_init = (initfn), \
        .class_init = riscv_cpu_class_init, \
        .class_data = (void *)(misa_mxl_max) \
    }

#define DEFINE_PROFILE_CPU(type_name, misa_mxl_max, initfn) \
    { \
        .name = (type_name), \
        .parent = TYPE_RISCV_BARE_CPU, \
        .instance_init = (initfn), \
        .class_init = riscv_cpu_class_init, \
        .class_data = (void *)(misa_mxl_max) \
    }
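/*
 * Illustrative note: the parent type chosen by each DEFINE_*_CPU()
 * macro determines how the property setters earlier in this file
 * behave. Types derived from TYPE_RISCV_VENDOR_CPU make
 * riscv_cpu_is_vendor() return true, so most options become read-only,
 * while TYPE_RISCV_DYNAMIC_CPU types also accept changes to mvendorid,
 * marchid and mimpid.
 */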
static const TypeInfo riscv_cpu_type_infos[] = {
    {
        .name = TYPE_RISCV_CPU,
        .parent = TYPE_CPU,
        .instance_size = sizeof(RISCVCPU),
        .instance_align = __alignof(RISCVCPU),
        .instance_init = riscv_cpu_init,
        .instance_post_init = riscv_cpu_post_init,
        .abstract = true,
        .class_size = sizeof(RISCVCPUClass),
        .class_init = riscv_cpu_common_class_init,
    },
    {
        .name = TYPE_RISCV_DYNAMIC_CPU,
        .parent = TYPE_RISCV_CPU,
        .abstract = true,
    },
    {
        .name = TYPE_RISCV_VENDOR_CPU,
        .parent = TYPE_RISCV_CPU,
        .abstract = true,
    },
    {
        .name = TYPE_RISCV_BARE_CPU,
        .parent = TYPE_RISCV_CPU,
        .instance_init = riscv_bare_cpu_init,
        .abstract = true,
    },
#if defined(TARGET_RISCV32)
    DEFINE_DYNAMIC_CPU(TYPE_RISCV_CPU_ANY, MXL_RV32, riscv_any_cpu_init),
    DEFINE_DYNAMIC_CPU(TYPE_RISCV_CPU_MAX, MXL_RV32, riscv_max_cpu_init),
    DEFINE_DYNAMIC_CPU(TYPE_RISCV_CPU_BASE32, MXL_RV32, rv32_base_cpu_init),
    DEFINE_VENDOR_CPU(TYPE_RISCV_CPU_IBEX, MXL_RV32, rv32_ibex_cpu_init),
    DEFINE_VENDOR_CPU(TYPE_RISCV_CPU_SIFIVE_E31, MXL_RV32, rv32_sifive_e_cpu_init),
    DEFINE_VENDOR_CPU(TYPE_RISCV_CPU_SIFIVE_E34, MXL_RV32, rv32_imafcu_nommu_cpu_init),
    DEFINE_VENDOR_CPU(TYPE_RISCV_CPU_SIFIVE_U34, MXL_RV32, rv32_sifive_u_cpu_init),
    DEFINE_BARE_CPU(TYPE_RISCV_CPU_RV32I, MXL_RV32, rv32i_bare_cpu_init),
    DEFINE_BARE_CPU(TYPE_RISCV_CPU_RV32E, MXL_RV32, rv32e_bare_cpu_init),
#elif defined(TARGET_RISCV64)
    DEFINE_DYNAMIC_CPU(TYPE_RISCV_CPU_ANY, MXL_RV64, riscv_any_cpu_init),
    DEFINE_DYNAMIC_CPU(TYPE_RISCV_CPU_MAX, MXL_RV64, riscv_max_cpu_init),
    DEFINE_DYNAMIC_CPU(TYPE_RISCV_CPU_BASE64, MXL_RV64, rv64_base_cpu_init),
    DEFINE_VENDOR_CPU(TYPE_RISCV_CPU_SIFIVE_E51, MXL_RV64, rv64_sifive_e_cpu_init),
    DEFINE_VENDOR_CPU(TYPE_RISCV_CPU_SIFIVE_U54, MXL_RV64, rv64_sifive_u_cpu_init),
    DEFINE_VENDOR_CPU(TYPE_RISCV_CPU_SHAKTI_C, MXL_RV64, rv64_sifive_u_cpu_init),
    DEFINE_VENDOR_CPU(TYPE_RISCV_CPU_THEAD_C906, MXL_RV64, rv64_thead_c906_cpu_init),
    DEFINE_VENDOR_CPU(TYPE_RISCV_CPU_VEYRON_V1, MXL_RV64, rv64_veyron_v1_cpu_init),
    DEFINE_DYNAMIC_CPU(TYPE_RISCV_CPU_BASE128, MXL_RV128, rv128_base_cpu_init),
    DEFINE_BARE_CPU(TYPE_RISCV_CPU_RV64I, MXL_RV64, rv64i_bare_cpu_init),
    DEFINE_BARE_CPU(TYPE_RISCV_CPU_RV64E, MXL_RV64, rv64e_bare_cpu_init),
    DEFINE_PROFILE_CPU(TYPE_RISCV_CPU_RVA22U64, MXL_RV64, rva22u64_profile_cpu_init),
    DEFINE_PROFILE_CPU(TYPE_RISCV_CPU_RVA22S64, MXL_RV64, rva22s64_profile_cpu_init),
#endif
};

DEFINE_TYPES(riscv_cpu_type_infos)