/*
 * QEMU RISC-V CPU
 *
 * Copyright (c) 2016-2017 Sagar Karandikar, sagark@eecs.berkeley.edu
 * Copyright (c) 2017-2018 SiFive, Inc.
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms and conditions of the GNU General Public License,
 * version 2 or later, as published by the Free Software Foundation.
 *
 * This program is distributed in the hope it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
 * more details.
 *
 * You should have received a copy of the GNU General Public License along with
 * this program. If not, see <http://www.gnu.org/licenses/>.
 */

#include "qemu/osdep.h"
#include "qemu/qemu-print.h"
#include "qemu/ctype.h"
#include "qemu/log.h"
#include "cpu.h"
#include "cpu_vendorid.h"
#include "internals.h"
#include "exec/exec-all.h"
#include "qapi/error.h"
#include "qapi/visitor.h"
#include "qemu/error-report.h"
#include "hw/qdev-properties.h"
#include "hw/core/qdev-prop-internal.h"
#include "migration/vmstate.h"
#include "fpu/softfloat-helpers.h"
#include "sysemu/device_tree.h"
#include "sysemu/kvm.h"
#include "sysemu/tcg.h"
#include "kvm/kvm_riscv.h"
#include "tcg/tcg-cpu.h"
#include "tcg/tcg.h"

/* RISC-V CPU definitions */
static const char riscv_single_letter_exts[] = "IEMAFDQCBPVH";
const uint32_t misa_bits[] = {RVI, RVE, RVM, RVA, RVF, RVD, RVV,
                              RVC, RVS, RVU, RVH, RVJ, RVG, RVB, 0};

/*
 * From vector_helper.c
 * Note that vector data is stored in host-endian 64-bit chunks,
 * so addressing bytes needs a host-endian fixup.
 */
#if HOST_BIG_ENDIAN
#define BYTE(x)   ((x) ^ 7)
#else
#define BYTE(x)   (x)
#endif

bool riscv_cpu_is_32bit(RISCVCPU *cpu)
{
    return riscv_cpu_mxl(&cpu->env) == MXL_RV32;
}

/* Hash that stores general user set numeric options */
static GHashTable *general_user_opts;

static void cpu_option_add_user_setting(const char *optname, uint32_t value)
{
    g_hash_table_insert(general_user_opts, (gpointer)optname,
                        GUINT_TO_POINTER(value));
}

bool riscv_cpu_option_set(const char *optname)
{
    return g_hash_table_contains(general_user_opts, optname);
}

#define ISA_EXT_DATA_ENTRY(_name, _min_ver, _prop) \
    {#_name, _min_ver, CPU_CFG_OFFSET(_prop)}

/*
 * Here are the ordering rules of extension naming defined by RISC-V
 * specification :
 * 1. All extensions should be separated from other multi-letter extensions
 *    by an underscore.
 * 2. The first letter following the 'Z' conventionally indicates the most
 *    closely related alphabetical extension category, IMAFDQLCBKJTPVH.
 *    If multiple 'Z' extensions are named, they should be ordered first
 *    by category, then alphabetically within a category.
 * 3. Standard supervisor-level extensions (starts with 'S') should be
 *    listed after standard unprivileged extensions. If multiple
 *    supervisor-level extensions are listed, they should be ordered
 *    alphabetically.
 * 4. Non-standard extensions (starts with 'X') must be listed after all
 *    standard extensions. They must be separated from other multi-letter
 *    extensions by an underscore.
 *
 * Single letter extensions are checked in riscv_cpu_validate_misa_priv()
 * instead.
 */
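/*
 * Illustrative example only (not generated from any real board): a
 * riscv,isa string that follows the rules above could look like
 *
 *   rv64imafdc_zicbom_zicsr_zifencei_zba_zbb_sstc_svinval_xtheadba
 *
 * i.e. single-letter extensions first, then 'Z' extensions ordered by
 * category and alphabetically within a category, then 'S' supervisor
 * extensions, then 'X' vendor extensions, all separated by underscores.
 */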
const RISCVIsaExtData isa_edata_arr[] = {
    ISA_EXT_DATA_ENTRY(zic64b, PRIV_VERSION_1_12_0, ext_zic64b),
    ISA_EXT_DATA_ENTRY(zicbom, PRIV_VERSION_1_12_0, ext_zicbom),
    ISA_EXT_DATA_ENTRY(zicbop, PRIV_VERSION_1_12_0, ext_zicbop),
    ISA_EXT_DATA_ENTRY(zicboz, PRIV_VERSION_1_12_0, ext_zicboz),
    ISA_EXT_DATA_ENTRY(ziccamoa, PRIV_VERSION_1_11_0, ext_always_enabled),
    ISA_EXT_DATA_ENTRY(ziccif, PRIV_VERSION_1_11_0, ext_always_enabled),
    ISA_EXT_DATA_ENTRY(zicclsm, PRIV_VERSION_1_11_0, ext_always_enabled),
    ISA_EXT_DATA_ENTRY(ziccrse, PRIV_VERSION_1_11_0, ext_always_enabled),
    ISA_EXT_DATA_ENTRY(zicond, PRIV_VERSION_1_12_0, ext_zicond),
    ISA_EXT_DATA_ENTRY(zicntr, PRIV_VERSION_1_12_0, ext_zicntr),
    ISA_EXT_DATA_ENTRY(zicsr, PRIV_VERSION_1_10_0, ext_zicsr),
    ISA_EXT_DATA_ENTRY(zifencei, PRIV_VERSION_1_10_0, ext_zifencei),
    ISA_EXT_DATA_ENTRY(zihintntl, PRIV_VERSION_1_10_0, ext_zihintntl),
    ISA_EXT_DATA_ENTRY(zihintpause, PRIV_VERSION_1_10_0, ext_zihintpause),
    ISA_EXT_DATA_ENTRY(zihpm, PRIV_VERSION_1_12_0, ext_zihpm),
    ISA_EXT_DATA_ENTRY(zmmul, PRIV_VERSION_1_12_0, ext_zmmul),
    ISA_EXT_DATA_ENTRY(za64rs, PRIV_VERSION_1_12_0, ext_always_enabled),
    ISA_EXT_DATA_ENTRY(zaamo, PRIV_VERSION_1_12_0, ext_zaamo),
    ISA_EXT_DATA_ENTRY(zacas, PRIV_VERSION_1_12_0, ext_zacas),
    ISA_EXT_DATA_ENTRY(zalrsc, PRIV_VERSION_1_12_0, ext_zalrsc),
    ISA_EXT_DATA_ENTRY(zawrs, PRIV_VERSION_1_12_0, ext_zawrs),
    ISA_EXT_DATA_ENTRY(zfa, PRIV_VERSION_1_12_0, ext_zfa),
    ISA_EXT_DATA_ENTRY(zfbfmin, PRIV_VERSION_1_12_0, ext_zfbfmin),
    ISA_EXT_DATA_ENTRY(zfh, PRIV_VERSION_1_11_0, ext_zfh),
    ISA_EXT_DATA_ENTRY(zfhmin, PRIV_VERSION_1_11_0, ext_zfhmin),
    ISA_EXT_DATA_ENTRY(zfinx, PRIV_VERSION_1_12_0, ext_zfinx),
    ISA_EXT_DATA_ENTRY(zdinx, PRIV_VERSION_1_12_0, ext_zdinx),
    ISA_EXT_DATA_ENTRY(zca, PRIV_VERSION_1_12_0, ext_zca),
    ISA_EXT_DATA_ENTRY(zcb, PRIV_VERSION_1_12_0, ext_zcb),
    ISA_EXT_DATA_ENTRY(zcf, PRIV_VERSION_1_12_0, ext_zcf),
    ISA_EXT_DATA_ENTRY(zcd, PRIV_VERSION_1_12_0, ext_zcd),
    ISA_EXT_DATA_ENTRY(zce, PRIV_VERSION_1_12_0, ext_zce),
    ISA_EXT_DATA_ENTRY(zcmp, PRIV_VERSION_1_12_0, ext_zcmp),
    ISA_EXT_DATA_ENTRY(zcmt, PRIV_VERSION_1_12_0, ext_zcmt),
    ISA_EXT_DATA_ENTRY(zba, PRIV_VERSION_1_12_0, ext_zba),
    ISA_EXT_DATA_ENTRY(zbb, PRIV_VERSION_1_12_0, ext_zbb),
    ISA_EXT_DATA_ENTRY(zbc, PRIV_VERSION_1_12_0, ext_zbc),
    ISA_EXT_DATA_ENTRY(zbkb, PRIV_VERSION_1_12_0, ext_zbkb),
    ISA_EXT_DATA_ENTRY(zbkc, PRIV_VERSION_1_12_0, ext_zbkc),
    ISA_EXT_DATA_ENTRY(zbkx, PRIV_VERSION_1_12_0, ext_zbkx),
    ISA_EXT_DATA_ENTRY(zbs, PRIV_VERSION_1_12_0, ext_zbs),
    ISA_EXT_DATA_ENTRY(zk, PRIV_VERSION_1_12_0, ext_zk),
    ISA_EXT_DATA_ENTRY(zkn, PRIV_VERSION_1_12_0, ext_zkn),
    ISA_EXT_DATA_ENTRY(zknd, PRIV_VERSION_1_12_0, ext_zknd),
    ISA_EXT_DATA_ENTRY(zkne, PRIV_VERSION_1_12_0, ext_zkne),
    ISA_EXT_DATA_ENTRY(zknh, PRIV_VERSION_1_12_0, ext_zknh),
    ISA_EXT_DATA_ENTRY(zkr, PRIV_VERSION_1_12_0, ext_zkr),
    ISA_EXT_DATA_ENTRY(zks, PRIV_VERSION_1_12_0, ext_zks),
    ISA_EXT_DATA_ENTRY(zksed, PRIV_VERSION_1_12_0, ext_zksed),
    ISA_EXT_DATA_ENTRY(zksh, PRIV_VERSION_1_12_0, ext_zksh),
    ISA_EXT_DATA_ENTRY(zkt, PRIV_VERSION_1_12_0, ext_zkt),
    ISA_EXT_DATA_ENTRY(zvbb, PRIV_VERSION_1_12_0, ext_zvbb),
    ISA_EXT_DATA_ENTRY(zvbc, PRIV_VERSION_1_12_0, ext_zvbc),
    ISA_EXT_DATA_ENTRY(zve32f, PRIV_VERSION_1_10_0, ext_zve32f),
    ISA_EXT_DATA_ENTRY(zve64f, PRIV_VERSION_1_10_0, ext_zve64f),
    ISA_EXT_DATA_ENTRY(zve64d, PRIV_VERSION_1_10_0, ext_zve64d),
    ISA_EXT_DATA_ENTRY(zvfbfmin, PRIV_VERSION_1_12_0, ext_zvfbfmin),
    ISA_EXT_DATA_ENTRY(zvfbfwma, PRIV_VERSION_1_12_0, ext_zvfbfwma),
    ISA_EXT_DATA_ENTRY(zvfh, PRIV_VERSION_1_12_0, ext_zvfh),
    ISA_EXT_DATA_ENTRY(zvfhmin, PRIV_VERSION_1_12_0, ext_zvfhmin),
    ISA_EXT_DATA_ENTRY(zvkb, PRIV_VERSION_1_12_0, ext_zvkb),
    ISA_EXT_DATA_ENTRY(zvkg, PRIV_VERSION_1_12_0, ext_zvkg),
    ISA_EXT_DATA_ENTRY(zvkn, PRIV_VERSION_1_12_0, ext_zvkn),
    ISA_EXT_DATA_ENTRY(zvknc, PRIV_VERSION_1_12_0, ext_zvknc),
    ISA_EXT_DATA_ENTRY(zvkned, PRIV_VERSION_1_12_0, ext_zvkned),
    ISA_EXT_DATA_ENTRY(zvkng, PRIV_VERSION_1_12_0, ext_zvkng),
    ISA_EXT_DATA_ENTRY(zvknha, PRIV_VERSION_1_12_0, ext_zvknha),
    ISA_EXT_DATA_ENTRY(zvknhb, PRIV_VERSION_1_12_0, ext_zvknhb),
    ISA_EXT_DATA_ENTRY(zvks, PRIV_VERSION_1_12_0, ext_zvks),
    ISA_EXT_DATA_ENTRY(zvksc, PRIV_VERSION_1_12_0, ext_zvksc),
    ISA_EXT_DATA_ENTRY(zvksed, PRIV_VERSION_1_12_0, ext_zvksed),
    ISA_EXT_DATA_ENTRY(zvksg, PRIV_VERSION_1_12_0, ext_zvksg),
    ISA_EXT_DATA_ENTRY(zvksh, PRIV_VERSION_1_12_0, ext_zvksh),
    ISA_EXT_DATA_ENTRY(zvkt, PRIV_VERSION_1_12_0, ext_zvkt),
    ISA_EXT_DATA_ENTRY(zhinx, PRIV_VERSION_1_12_0, ext_zhinx),
    ISA_EXT_DATA_ENTRY(zhinxmin, PRIV_VERSION_1_12_0, ext_zhinxmin),
    ISA_EXT_DATA_ENTRY(smaia, PRIV_VERSION_1_12_0, ext_smaia),
    ISA_EXT_DATA_ENTRY(smepmp, PRIV_VERSION_1_12_0, ext_smepmp),
    ISA_EXT_DATA_ENTRY(smstateen, PRIV_VERSION_1_12_0, ext_smstateen),
    ISA_EXT_DATA_ENTRY(ssaia, PRIV_VERSION_1_12_0, ext_ssaia),
    ISA_EXT_DATA_ENTRY(ssccptr, PRIV_VERSION_1_11_0, ext_always_enabled),
    ISA_EXT_DATA_ENTRY(sscofpmf, PRIV_VERSION_1_12_0, ext_sscofpmf),
    ISA_EXT_DATA_ENTRY(sscounterenw, PRIV_VERSION_1_12_0, ext_always_enabled),
    ISA_EXT_DATA_ENTRY(sstc, PRIV_VERSION_1_12_0, ext_sstc),
    ISA_EXT_DATA_ENTRY(sstvala, PRIV_VERSION_1_12_0, ext_always_enabled),
    ISA_EXT_DATA_ENTRY(sstvecd, PRIV_VERSION_1_12_0, ext_always_enabled),
    ISA_EXT_DATA_ENTRY(svade, PRIV_VERSION_1_11_0, ext_svade),
    ISA_EXT_DATA_ENTRY(svadu, PRIV_VERSION_1_12_0, ext_svadu),
    ISA_EXT_DATA_ENTRY(svinval, PRIV_VERSION_1_12_0, ext_svinval),
    ISA_EXT_DATA_ENTRY(svnapot, PRIV_VERSION_1_12_0, ext_svnapot),
    ISA_EXT_DATA_ENTRY(svpbmt, PRIV_VERSION_1_12_0, ext_svpbmt),
    ISA_EXT_DATA_ENTRY(xtheadba, PRIV_VERSION_1_11_0, ext_xtheadba),
    ISA_EXT_DATA_ENTRY(xtheadbb, PRIV_VERSION_1_11_0, ext_xtheadbb),
    ISA_EXT_DATA_ENTRY(xtheadbs, PRIV_VERSION_1_11_0, ext_xtheadbs),
    ISA_EXT_DATA_ENTRY(xtheadcmo, PRIV_VERSION_1_11_0, ext_xtheadcmo),
    ISA_EXT_DATA_ENTRY(xtheadcondmov, PRIV_VERSION_1_11_0, ext_xtheadcondmov),
    ISA_EXT_DATA_ENTRY(xtheadfmemidx, PRIV_VERSION_1_11_0, ext_xtheadfmemidx),
    ISA_EXT_DATA_ENTRY(xtheadfmv, PRIV_VERSION_1_11_0, ext_xtheadfmv),
    ISA_EXT_DATA_ENTRY(xtheadmac, PRIV_VERSION_1_11_0, ext_xtheadmac),
    ISA_EXT_DATA_ENTRY(xtheadmemidx, PRIV_VERSION_1_11_0, ext_xtheadmemidx),
    ISA_EXT_DATA_ENTRY(xtheadmempair, PRIV_VERSION_1_11_0, ext_xtheadmempair),
    ISA_EXT_DATA_ENTRY(xtheadsync, PRIV_VERSION_1_11_0, ext_xtheadsync),
    ISA_EXT_DATA_ENTRY(xventanacondops, PRIV_VERSION_1_12_0, ext_XVentanaCondOps),

    DEFINE_PROP_END_OF_LIST(),
};

bool isa_ext_is_enabled(RISCVCPU *cpu, uint32_t ext_offset)
{
    bool *ext_enabled = (void *)&cpu->cfg + ext_offset;

    return *ext_enabled;
}
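/*
 * Illustrative sketch of how the offset-based helpers are meant to be used
 * (the extension picked here is only an example):
 *
 *   if (isa_ext_is_enabled(cpu, CPU_CFG_OFFSET(ext_zba))) { ... }
 *
 * reads cpu->cfg.ext_zba by treating the config struct as raw bytes, which
 * is what lets tables such as isa_edata_arr[] describe extensions
 * generically via CPU_CFG_OFFSET().
 */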
void isa_ext_update_enabled(RISCVCPU *cpu, uint32_t ext_offset, bool en)
{
    bool *ext_enabled = (void *)&cpu->cfg + ext_offset;

    *ext_enabled = en;
}

bool riscv_cpu_is_vendor(Object *cpu_obj)
{
    return object_dynamic_cast(cpu_obj, TYPE_RISCV_VENDOR_CPU) != NULL;
}

const char * const riscv_int_regnames[] = {
    "x0/zero", "x1/ra", "x2/sp", "x3/gp", "x4/tp", "x5/t0", "x6/t1",
    "x7/t2", "x8/s0", "x9/s1", "x10/a0", "x11/a1", "x12/a2", "x13/a3",
    "x14/a4", "x15/a5", "x16/a6", "x17/a7", "x18/s2", "x19/s3", "x20/s4",
    "x21/s5", "x22/s6", "x23/s7", "x24/s8", "x25/s9", "x26/s10", "x27/s11",
    "x28/t3", "x29/t4", "x30/t5", "x31/t6"
};

const char * const riscv_int_regnamesh[] = {
    "x0h/zeroh", "x1h/rah", "x2h/sph", "x3h/gph", "x4h/tph", "x5h/t0h",
    "x6h/t1h", "x7h/t2h", "x8h/s0h", "x9h/s1h", "x10h/a0h", "x11h/a1h",
    "x12h/a2h", "x13h/a3h", "x14h/a4h", "x15h/a5h", "x16h/a6h", "x17h/a7h",
    "x18h/s2h", "x19h/s3h", "x20h/s4h", "x21h/s5h", "x22h/s6h", "x23h/s7h",
    "x24h/s8h", "x25h/s9h", "x26h/s10h", "x27h/s11h", "x28h/t3h", "x29h/t4h",
    "x30h/t5h", "x31h/t6h"
};

const char * const riscv_fpr_regnames[] = {
    "f0/ft0", "f1/ft1", "f2/ft2", "f3/ft3", "f4/ft4", "f5/ft5",
    "f6/ft6", "f7/ft7", "f8/fs0", "f9/fs1", "f10/fa0", "f11/fa1",
    "f12/fa2", "f13/fa3", "f14/fa4", "f15/fa5", "f16/fa6", "f17/fa7",
    "f18/fs2", "f19/fs3", "f20/fs4", "f21/fs5", "f22/fs6", "f23/fs7",
    "f24/fs8", "f25/fs9", "f26/fs10", "f27/fs11", "f28/ft8", "f29/ft9",
    "f30/ft10", "f31/ft11"
};

const char * const riscv_rvv_regnames[] = {
    "v0", "v1", "v2", "v3", "v4", "v5", "v6",
    "v7", "v8", "v9", "v10", "v11", "v12", "v13",
    "v14", "v15", "v16", "v17", "v18", "v19", "v20",
    "v21", "v22", "v23", "v24", "v25", "v26", "v27",
    "v28", "v29", "v30", "v31"
};

static const char * const riscv_excp_names[] = {
    "misaligned_fetch",
    "fault_fetch",
    "illegal_instruction",
    "breakpoint",
    "misaligned_load",
    "fault_load",
    "misaligned_store",
    "fault_store",
    "user_ecall",
    "supervisor_ecall",
    "hypervisor_ecall",
    "machine_ecall",
    "exec_page_fault",
    "load_page_fault",
    "reserved",
    "store_page_fault",
    "reserved",
    "reserved",
    "reserved",
    "reserved",
    "guest_exec_page_fault",
    "guest_load_page_fault",
    "reserved",
    "guest_store_page_fault",
};

static const char * const riscv_intr_names[] = {
    "u_software",
    "s_software",
    "vs_software",
    "m_software",
    "u_timer",
    "s_timer",
    "vs_timer",
    "m_timer",
    "u_external",
    "s_external",
    "vs_external",
    "m_external",
    "reserved",
    "reserved",
    "reserved",
    "reserved"
};
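/*
 * Worked example for the lookup below, derived from the two tables above:
 * an asynchronous cause of 5 maps to "s_timer", a synchronous cause of 2
 * maps to "illegal_instruction", and anything past the end of either table
 * is reported as "(unknown)".
 */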
const char *riscv_cpu_get_trap_name(target_ulong cause, bool async)
{
    if (async) {
        return (cause < ARRAY_SIZE(riscv_intr_names)) ?
               riscv_intr_names[cause] : "(unknown)";
    } else {
        return (cause < ARRAY_SIZE(riscv_excp_names)) ?
               riscv_excp_names[cause] : "(unknown)";
    }
}

void riscv_cpu_set_misa_ext(CPURISCVState *env, uint32_t ext)
{
    env->misa_ext_mask = env->misa_ext = ext;
}

int riscv_cpu_max_xlen(RISCVCPUClass *mcc)
{
    return 16 << mcc->misa_mxl_max;
}

#ifndef CONFIG_USER_ONLY
static uint8_t satp_mode_from_str(const char *satp_mode_str)
{
    if (!strncmp(satp_mode_str, "mbare", 5)) {
        return VM_1_10_MBARE;
    }

    if (!strncmp(satp_mode_str, "sv32", 4)) {
        return VM_1_10_SV32;
    }

    if (!strncmp(satp_mode_str, "sv39", 4)) {
        return VM_1_10_SV39;
    }

    if (!strncmp(satp_mode_str, "sv48", 4)) {
        return VM_1_10_SV48;
    }

    if (!strncmp(satp_mode_str, "sv57", 4)) {
        return VM_1_10_SV57;
    }

    if (!strncmp(satp_mode_str, "sv64", 4)) {
        return VM_1_10_SV64;
    }

    g_assert_not_reached();
}

uint8_t satp_mode_max_from_map(uint32_t map)
{
    /*
     * 'map = 0' will make us return (31 - 32), which C will
     * happily overflow to UINT_MAX. There's no good result to
     * return if 'map = 0' (e.g. returning 0 will be ambiguous
     * with the result for 'map = 1').
     *
     * Assert out if map = 0. Callers will have to deal with
     * it outside of this function.
     */
    g_assert(map > 0);

    /* map here has at least one bit set, so no problem with clz */
    return 31 - __builtin_clz(map);
}

const char *satp_mode_str(uint8_t satp_mode, bool is_32_bit)
{
    if (is_32_bit) {
        switch (satp_mode) {
        case VM_1_10_SV32:
            return "sv32";
        case VM_1_10_MBARE:
            return "none";
        }
    } else {
        switch (satp_mode) {
        case VM_1_10_SV64:
            return "sv64";
        case VM_1_10_SV57:
            return "sv57";
        case VM_1_10_SV48:
            return "sv48";
        case VM_1_10_SV39:
            return "sv39";
        case VM_1_10_MBARE:
            return "none";
        }
    }

    g_assert_not_reached();
}

static void set_satp_mode_max_supported(RISCVCPU *cpu,
                                        uint8_t satp_mode)
{
    bool rv32 = riscv_cpu_mxl(&cpu->env) == MXL_RV32;
    const bool *valid_vm = rv32 ? valid_vm_1_10_32 : valid_vm_1_10_64;

    for (int i = 0; i <= satp_mode; ++i) {
        if (valid_vm[i]) {
            cpu->cfg.satp_mode.supported |= (1 << i);
        }
    }
}

/* Set the satp mode to the max supported */
static void set_satp_mode_default_map(RISCVCPU *cpu)
{
    /*
     * Bare CPUs do not default to the max available.
     * Users must set a valid satp_mode in the command
     * line.
     */
    if (object_dynamic_cast(OBJECT(cpu), TYPE_RISCV_BARE_CPU) != NULL) {
        warn_report("No satp mode set. Defaulting to 'bare'");
        cpu->cfg.satp_mode.map = (1 << VM_1_10_MBARE);
        return;
    }

    cpu->cfg.satp_mode.map = cpu->cfg.satp_mode.supported;
}
#endif
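/*
 * Rough summary of the satp_mode bookkeeping used in this file:
 * 'supported' is the set of translation modes a CPU model can offer,
 * 'init' records which bits the user explicitly touched on the command
 * line, and 'map' is the resulting set of enabled modes. As an example,
 * if only the bare and sv39 bits are set in a map,
 * satp_mode_max_from_map() simply returns the index of the highest set
 * bit, i.e. VM_1_10_SV39.
 */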
static void riscv_any_cpu_init(Object *obj)
{
    RISCVCPU *cpu = RISCV_CPU(obj);
    CPURISCVState *env = &cpu->env;
    riscv_cpu_set_misa_ext(env, RVI | RVM | RVA | RVF | RVD | RVC | RVU);

#ifndef CONFIG_USER_ONLY
    set_satp_mode_max_supported(RISCV_CPU(obj),
        riscv_cpu_mxl(&RISCV_CPU(obj)->env) == MXL_RV32 ?
        VM_1_10_SV32 : VM_1_10_SV57);
#endif

    env->priv_ver = PRIV_VERSION_LATEST;

    /* inherited from parent obj via riscv_cpu_init() */
    cpu->cfg.ext_zifencei = true;
    cpu->cfg.ext_zicsr = true;
    cpu->cfg.mmu = true;
    cpu->cfg.pmp = true;
}

static void riscv_max_cpu_init(Object *obj)
{
    RISCVCPU *cpu = RISCV_CPU(obj);
    CPURISCVState *env = &cpu->env;

    cpu->cfg.mmu = true;
    cpu->cfg.pmp = true;

    env->priv_ver = PRIV_VERSION_LATEST;
#ifndef CONFIG_USER_ONLY
#ifdef TARGET_RISCV32
    set_satp_mode_max_supported(cpu, VM_1_10_SV32);
#else
    set_satp_mode_max_supported(cpu, VM_1_10_SV57);
#endif
#endif
}

#if defined(TARGET_RISCV64)
static void rv64_base_cpu_init(Object *obj)
{
    RISCVCPU *cpu = RISCV_CPU(obj);
    CPURISCVState *env = &cpu->env;

    cpu->cfg.mmu = true;
    cpu->cfg.pmp = true;

    /* Set latest version of privileged specification */
    env->priv_ver = PRIV_VERSION_LATEST;
#ifndef CONFIG_USER_ONLY
    set_satp_mode_max_supported(RISCV_CPU(obj), VM_1_10_SV57);
#endif
}

static void rv64_sifive_u_cpu_init(Object *obj)
{
    RISCVCPU *cpu = RISCV_CPU(obj);
    CPURISCVState *env = &cpu->env;
    riscv_cpu_set_misa_ext(env, RVI | RVM | RVA | RVF | RVD | RVC | RVS | RVU);
    env->priv_ver = PRIV_VERSION_1_10_0;
#ifndef CONFIG_USER_ONLY
    set_satp_mode_max_supported(RISCV_CPU(obj), VM_1_10_SV39);
#endif

    /* inherited from parent obj via riscv_cpu_init() */
    cpu->cfg.ext_zifencei = true;
    cpu->cfg.ext_zicsr = true;
    cpu->cfg.mmu = true;
    cpu->cfg.pmp = true;
}

static void rv64_sifive_e_cpu_init(Object *obj)
{
    CPURISCVState *env = &RISCV_CPU(obj)->env;
    RISCVCPU *cpu = RISCV_CPU(obj);

    riscv_cpu_set_misa_ext(env, RVI | RVM | RVA | RVC | RVU);
    env->priv_ver = PRIV_VERSION_1_10_0;
#ifndef CONFIG_USER_ONLY
    set_satp_mode_max_supported(cpu, VM_1_10_MBARE);
#endif

    /* inherited from parent obj via riscv_cpu_init() */
    cpu->cfg.ext_zifencei = true;
    cpu->cfg.ext_zicsr = true;
    cpu->cfg.pmp = true;
}

static void rv64_thead_c906_cpu_init(Object *obj)
{
    CPURISCVState *env = &RISCV_CPU(obj)->env;
    RISCVCPU *cpu = RISCV_CPU(obj);

    riscv_cpu_set_misa_ext(env, RVG | RVC | RVS | RVU);
    env->priv_ver = PRIV_VERSION_1_11_0;

    cpu->cfg.ext_zfa = true;
    cpu->cfg.ext_zfh = true;
    cpu->cfg.mmu = true;
    cpu->cfg.ext_xtheadba = true;
    cpu->cfg.ext_xtheadbb = true;
    cpu->cfg.ext_xtheadbs = true;
    cpu->cfg.ext_xtheadcmo = true;
    cpu->cfg.ext_xtheadcondmov = true;
    cpu->cfg.ext_xtheadfmemidx = true;
    cpu->cfg.ext_xtheadmac = true;
    cpu->cfg.ext_xtheadmemidx = true;
    cpu->cfg.ext_xtheadmempair = true;
    cpu->cfg.ext_xtheadsync = true;

    cpu->cfg.mvendorid = THEAD_VENDOR_ID;
#ifndef CONFIG_USER_ONLY
    set_satp_mode_max_supported(cpu, VM_1_10_SV39);
#endif

    /* inherited from parent obj via riscv_cpu_init() */
    cpu->cfg.pmp = true;
}

static void rv64_veyron_v1_cpu_init(Object *obj)
{
    CPURISCVState *env = &RISCV_CPU(obj)->env;
    RISCVCPU *cpu = RISCV_CPU(obj);

    riscv_cpu_set_misa_ext(env, RVG | RVC | RVS | RVU | RVH);
    env->priv_ver = PRIV_VERSION_1_12_0;

    /* Enable ISA extensions */
    cpu->cfg.mmu = true;
    cpu->cfg.ext_zifencei = true;
    cpu->cfg.ext_zicsr = true;
    cpu->cfg.pmp = true;
    cpu->cfg.ext_zicbom = true;
    cpu->cfg.cbom_blocksize = 64;
    cpu->cfg.cboz_blocksize = 64;
    cpu->cfg.ext_zicboz = true;
    cpu->cfg.ext_smaia = true;
    cpu->cfg.ext_ssaia = true;
    cpu->cfg.ext_sscofpmf = true;
    cpu->cfg.ext_sstc = true;
    cpu->cfg.ext_svinval = true;
    cpu->cfg.ext_svnapot = true;
    cpu->cfg.ext_svpbmt = true;
    cpu->cfg.ext_smstateen = true;
    cpu->cfg.ext_zba = true;
    cpu->cfg.ext_zbb = true;
    cpu->cfg.ext_zbc = true;
    cpu->cfg.ext_zbs = true;
    cpu->cfg.ext_XVentanaCondOps = true;

    cpu->cfg.mvendorid = VEYRON_V1_MVENDORID;
    cpu->cfg.marchid = VEYRON_V1_MARCHID;
    cpu->cfg.mimpid = VEYRON_V1_MIMPID;

#ifndef CONFIG_USER_ONLY
    set_satp_mode_max_supported(cpu, VM_1_10_SV48);
#endif
}

static void rv128_base_cpu_init(Object *obj)
{
    RISCVCPU *cpu = RISCV_CPU(obj);
    CPURISCVState *env = &cpu->env;

    if (qemu_tcg_mttcg_enabled()) {
        /* Missing 128-bit aligned atomics */
        error_report("128-bit RISC-V currently does not work with Multi "
                     "Threaded TCG. Please use: -accel tcg,thread=single");
        exit(EXIT_FAILURE);
    }

    cpu->cfg.mmu = true;
    cpu->cfg.pmp = true;

    /* Set latest version of privileged specification */
    env->priv_ver = PRIV_VERSION_LATEST;
#ifndef CONFIG_USER_ONLY
    set_satp_mode_max_supported(RISCV_CPU(obj), VM_1_10_SV57);
#endif
}

static void rv64i_bare_cpu_init(Object *obj)
{
    CPURISCVState *env = &RISCV_CPU(obj)->env;
    riscv_cpu_set_misa_ext(env, RVI);
}

static void rv64e_bare_cpu_init(Object *obj)
{
    CPURISCVState *env = &RISCV_CPU(obj)->env;
    riscv_cpu_set_misa_ext(env, RVE);
}
#else
static void rv32_base_cpu_init(Object *obj)
{
    RISCVCPU *cpu = RISCV_CPU(obj);
    CPURISCVState *env = &cpu->env;

    cpu->cfg.mmu = true;
    cpu->cfg.pmp = true;

    /* Set latest version of privileged specification */
    env->priv_ver = PRIV_VERSION_LATEST;
#ifndef CONFIG_USER_ONLY
    set_satp_mode_max_supported(RISCV_CPU(obj), VM_1_10_SV32);
#endif
}

static void rv32_sifive_u_cpu_init(Object *obj)
{
    RISCVCPU *cpu = RISCV_CPU(obj);
    CPURISCVState *env = &cpu->env;
    riscv_cpu_set_misa_ext(env, RVI | RVM | RVA | RVF | RVD | RVC | RVS | RVU);
    env->priv_ver = PRIV_VERSION_1_10_0;
#ifndef CONFIG_USER_ONLY
    set_satp_mode_max_supported(RISCV_CPU(obj), VM_1_10_SV32);
#endif

    /* inherited from parent obj via riscv_cpu_init() */
    cpu->cfg.ext_zifencei = true;
    cpu->cfg.ext_zicsr = true;
    cpu->cfg.mmu = true;
    cpu->cfg.pmp = true;
}

static void rv32_sifive_e_cpu_init(Object *obj)
{
    CPURISCVState *env = &RISCV_CPU(obj)->env;
    RISCVCPU *cpu = RISCV_CPU(obj);

    riscv_cpu_set_misa_ext(env, RVI | RVM | RVA | RVC | RVU);
    env->priv_ver = PRIV_VERSION_1_10_0;
#ifndef CONFIG_USER_ONLY
    set_satp_mode_max_supported(cpu, VM_1_10_MBARE);
#endif

    /* inherited from parent obj via riscv_cpu_init() */
    cpu->cfg.ext_zifencei = true;
    cpu->cfg.ext_zicsr = true;
    cpu->cfg.pmp = true;
}

static void rv32_ibex_cpu_init(Object *obj)
{
    CPURISCVState *env = &RISCV_CPU(obj)->env;
    RISCVCPU *cpu = RISCV_CPU(obj);

    riscv_cpu_set_misa_ext(env, RVI | RVM | RVC | RVU);
    env->priv_ver = PRIV_VERSION_1_12_0;
#ifndef CONFIG_USER_ONLY
    set_satp_mode_max_supported(cpu, VM_1_10_MBARE);
#endif
    /* inherited from parent obj via riscv_cpu_init() */
    cpu->cfg.ext_zifencei = true;
    cpu->cfg.ext_zicsr = true;
    cpu->cfg.pmp = true;
    cpu->cfg.ext_smepmp = true;
}

static void rv32_imafcu_nommu_cpu_init(Object *obj)
{
    CPURISCVState *env = &RISCV_CPU(obj)->env;
    RISCVCPU *cpu = RISCV_CPU(obj);

    riscv_cpu_set_misa_ext(env, RVI | RVM | RVA | RVF | RVC | RVU);
    env->priv_ver = PRIV_VERSION_1_10_0;
#ifndef CONFIG_USER_ONLY
    set_satp_mode_max_supported(cpu, VM_1_10_MBARE);
#endif

    /* inherited from parent obj via riscv_cpu_init() */
    cpu->cfg.ext_zifencei = true;
    cpu->cfg.ext_zicsr = true;
    cpu->cfg.pmp = true;
}

static void rv32i_bare_cpu_init(Object *obj)
{
    CPURISCVState *env = &RISCV_CPU(obj)->env;
    riscv_cpu_set_misa_ext(env, RVI);
}

static void rv32e_bare_cpu_init(Object *obj)
{
    CPURISCVState *env = &RISCV_CPU(obj)->env;
    riscv_cpu_set_misa_ext(env, RVE);
}
#endif

static ObjectClass *riscv_cpu_class_by_name(const char *cpu_model)
{
    ObjectClass *oc;
    char *typename;
    char **cpuname;

    cpuname = g_strsplit(cpu_model, ",", 1);
    typename = g_strdup_printf(RISCV_CPU_TYPE_NAME("%s"), cpuname[0]);
    oc = object_class_by_name(typename);
    g_strfreev(cpuname);
    g_free(typename);

    return oc;
}

char *riscv_cpu_get_name(RISCVCPU *cpu)
{
    RISCVCPUClass *rcc = RISCV_CPU_GET_CLASS(cpu);
    const char *typename = object_class_get_name(OBJECT_CLASS(rcc));

    g_assert(g_str_has_suffix(typename, RISCV_CPU_TYPE_SUFFIX));

    return cpu_model_from_type(typename);
}
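/*
 * As an illustration of the two helpers above: a CPU model name such as
 * "rv64" is turned into a QOM type name by appending RISCV_CPU_TYPE_SUFFIX
 * (e.g. "rv64-riscv-cpu"), and riscv_cpu_get_name() strips that suffix off
 * again when reporting the model back to the user.
 */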
static void riscv_cpu_dump_state(CPUState *cs, FILE *f, int flags)
{
    RISCVCPU *cpu = RISCV_CPU(cs);
    CPURISCVState *env = &cpu->env;
    int i, j;
    uint8_t *p;

#if !defined(CONFIG_USER_ONLY)
    if (riscv_has_ext(env, RVH)) {
        qemu_fprintf(f, " %s %d\n", "V = ", env->virt_enabled);
    }
#endif
    qemu_fprintf(f, " %s " TARGET_FMT_lx "\n", "pc ", env->pc);
#ifndef CONFIG_USER_ONLY
    {
        static const int dump_csrs[] = {
            CSR_MHARTID,
            CSR_MSTATUS,
            CSR_MSTATUSH,
            /*
             * CSR_SSTATUS is intentionally omitted here as its value
             * can be figured out by looking at CSR_MSTATUS
             */
            CSR_HSTATUS,
            CSR_VSSTATUS,
            CSR_MIP,
            CSR_MIE,
            CSR_MIDELEG,
            CSR_HIDELEG,
            CSR_MEDELEG,
            CSR_HEDELEG,
            CSR_MTVEC,
            CSR_STVEC,
            CSR_VSTVEC,
            CSR_MEPC,
            CSR_SEPC,
            CSR_VSEPC,
            CSR_MCAUSE,
            CSR_SCAUSE,
            CSR_VSCAUSE,
            CSR_MTVAL,
            CSR_STVAL,
            CSR_HTVAL,
            CSR_MTVAL2,
            CSR_MSCRATCH,
            CSR_SSCRATCH,
            CSR_SATP,
            CSR_MMTE,
            CSR_UPMBASE,
            CSR_UPMMASK,
            CSR_SPMBASE,
            CSR_SPMMASK,
            CSR_MPMBASE,
            CSR_MPMMASK,
        };

        for (i = 0; i < ARRAY_SIZE(dump_csrs); ++i) {
            int csrno = dump_csrs[i];
            target_ulong val = 0;
            RISCVException res = riscv_csrrw_debug(env, csrno, &val, 0, 0);

            /*
             * Rely on the smode, hmode, etc, predicates within csr.c
             * to do the filtering of the registers that are present.
             */
            if (res == RISCV_EXCP_NONE) {
                qemu_fprintf(f, " %-8s " TARGET_FMT_lx "\n",
                             csr_ops[csrno].name, val);
            }
        }
    }
#endif

    for (i = 0; i < 32; i++) {
        qemu_fprintf(f, " %-8s " TARGET_FMT_lx,
                     riscv_int_regnames[i], env->gpr[i]);
        if ((i & 3) == 3) {
            qemu_fprintf(f, "\n");
        }
    }
    if (flags & CPU_DUMP_FPU) {
        for (i = 0; i < 32; i++) {
            qemu_fprintf(f, " %-8s %016" PRIx64,
                         riscv_fpr_regnames[i], env->fpr[i]);
            if ((i & 3) == 3) {
                qemu_fprintf(f, "\n");
            }
        }
    }
    if (riscv_has_ext(env, RVV) && (flags & CPU_DUMP_VPU)) {
        static const int dump_rvv_csrs[] = {
            CSR_VSTART,
            CSR_VXSAT,
            CSR_VXRM,
            CSR_VCSR,
            CSR_VL,
            CSR_VTYPE,
            CSR_VLENB,
        };
        for (i = 0; i < ARRAY_SIZE(dump_rvv_csrs); ++i) {
            int csrno = dump_rvv_csrs[i];
            target_ulong val = 0;
            RISCVException res = riscv_csrrw_debug(env, csrno, &val, 0, 0);

            /*
             * Rely on the smode, hmode, etc, predicates within csr.c
             * to do the filtering of the registers that are present.
             */
            if (res == RISCV_EXCP_NONE) {
                qemu_fprintf(f, " %-8s " TARGET_FMT_lx "\n",
                             csr_ops[csrno].name, val);
            }
        }
        uint16_t vlenb = cpu->cfg.vlenb;

        for (i = 0; i < 32; i++) {
            qemu_fprintf(f, " %-8s ", riscv_rvv_regnames[i]);
            p = (uint8_t *)env->vreg;
            for (j = vlenb - 1 ; j >= 0; j--) {
                qemu_fprintf(f, "%02x", *(p + i * vlenb + BYTE(j)));
            }
            qemu_fprintf(f, "\n");
        }
    }
}

static void riscv_cpu_set_pc(CPUState *cs, vaddr value)
{
    RISCVCPU *cpu = RISCV_CPU(cs);
    CPURISCVState *env = &cpu->env;

    if (env->xl == MXL_RV32) {
        env->pc = (int32_t)value;
    } else {
        env->pc = value;
    }
}

static vaddr riscv_cpu_get_pc(CPUState *cs)
{
    RISCVCPU *cpu = RISCV_CPU(cs);
    CPURISCVState *env = &cpu->env;

    /* Match cpu_get_tb_cpu_state. */
    if (env->xl == MXL_RV32) {
        return env->pc & UINT32_MAX;
    }
    return env->pc;
}

static bool riscv_cpu_has_work(CPUState *cs)
{
#ifndef CONFIG_USER_ONLY
    RISCVCPU *cpu = RISCV_CPU(cs);
    CPURISCVState *env = &cpu->env;
    /*
     * Definition of the WFI instruction requires it to ignore the privilege
     * mode and delegation registers, but respect individual enables
     */
    return riscv_cpu_all_pending(env) != 0 ||
        riscv_cpu_sirq_pending(env) != RISCV_EXCP_NONE ||
        riscv_cpu_vsirq_pending(env) != RISCV_EXCP_NONE;
#else
    return true;
#endif
}

static int riscv_cpu_mmu_index(CPUState *cs, bool ifetch)
{
    return riscv_env_mmu_index(cpu_env(cs), ifetch);
}

static void riscv_cpu_reset_hold(Object *obj)
{
#ifndef CONFIG_USER_ONLY
    uint8_t iprio;
    int i, irq, rdzero;
#endif
    CPUState *cs = CPU(obj);
    RISCVCPU *cpu = RISCV_CPU(cs);
    RISCVCPUClass *mcc = RISCV_CPU_GET_CLASS(cpu);
    CPURISCVState *env = &cpu->env;

    if (mcc->parent_phases.hold) {
        mcc->parent_phases.hold(obj);
    }
#ifndef CONFIG_USER_ONLY
    env->misa_mxl = mcc->misa_mxl_max;
    env->priv = PRV_M;
    env->mstatus &= ~(MSTATUS_MIE | MSTATUS_MPRV);
    if (env->misa_mxl > MXL_RV32) {
        /*
         * The reset status of SXL/UXL is undefined, but mstatus is WARL
         * and we must ensure that the value after init is valid for read.
         */
        env->mstatus = set_field(env->mstatus, MSTATUS64_SXL, env->misa_mxl);
        env->mstatus = set_field(env->mstatus, MSTATUS64_UXL, env->misa_mxl);
        if (riscv_has_ext(env, RVH)) {
            env->vsstatus = set_field(env->vsstatus,
                                      MSTATUS64_SXL, env->misa_mxl);
            env->vsstatus = set_field(env->vsstatus,
                                      MSTATUS64_UXL, env->misa_mxl);
            env->mstatus_hs = set_field(env->mstatus_hs,
                                        MSTATUS64_SXL, env->misa_mxl);
            env->mstatus_hs = set_field(env->mstatus_hs,
                                        MSTATUS64_UXL, env->misa_mxl);
        }
    }
    env->mcause = 0;
    env->miclaim = MIP_SGEIP;
    env->pc = env->resetvec;
    env->bins = 0;
    env->two_stage_lookup = false;

    env->menvcfg = (cpu->cfg.ext_svpbmt ? MENVCFG_PBMTE : 0) |
                   (!cpu->cfg.ext_svade && cpu->cfg.ext_svadu ?
                    MENVCFG_ADUE : 0);
    env->henvcfg = 0;

    /* Initialized default priorities of local interrupts. */
    for (i = 0; i < ARRAY_SIZE(env->miprio); i++) {
        iprio = riscv_cpu_default_priority(i);
        env->miprio[i] = (i == IRQ_M_EXT) ? 0 : iprio;
        env->siprio[i] = (i == IRQ_S_EXT) ? 0 : iprio;
        env->hviprio[i] = 0;
    }
    i = 0;
    while (!riscv_cpu_hviprio_index2irq(i, &irq, &rdzero)) {
        if (!rdzero) {
            env->hviprio[irq] = env->miprio[irq];
        }
        i++;
    }
    /* mmte is supposed to have pm.current hardwired to 1 */
    env->mmte |= (EXT_STATUS_INITIAL | MMTE_M_PM_CURRENT);

    /*
     * Bits 10, 6, 2 and 12 of mideleg are read only 1 when the Hypervisor
     * extension is enabled.
     */
    if (riscv_has_ext(env, RVH)) {
        env->mideleg |= HS_MODE_INTERRUPTS;
    }

    /*
     * Clear mseccfg and unlock all the PMP entries upon reset.
     * This is allowed as per the priv and smepmp specifications
     * and is needed to clear stale entries across reboots.
     */
    if (riscv_cpu_cfg(env)->ext_smepmp) {
        env->mseccfg = 0;
    }

    pmp_unlock_entries(env);
#endif
    env->xl = riscv_cpu_mxl(env);
    riscv_cpu_update_mask(env);
    cs->exception_index = RISCV_EXCP_NONE;
    env->load_res = -1;
    set_default_nan_mode(1, &env->fp_status);

#ifndef CONFIG_USER_ONLY
    if (cpu->cfg.debug) {
        riscv_trigger_reset_hold(env);
    }

    if (kvm_enabled()) {
        kvm_riscv_reset_vcpu(cpu);
    }
#endif
}

static void riscv_cpu_disas_set_info(CPUState *s, disassemble_info *info)
{
    RISCVCPU *cpu = RISCV_CPU(s);
    CPURISCVState *env = &cpu->env;
    info->target_info = &cpu->cfg;

    switch (env->xl) {
    case MXL_RV32:
        info->print_insn = print_insn_riscv32;
        break;
    case MXL_RV64:
        info->print_insn = print_insn_riscv64;
        break;
    case MXL_RV128:
        info->print_insn = print_insn_riscv128;
        break;
    default:
        g_assert_not_reached();
    }
}
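/*
 * The satp finalization below expands the user's choice so that every
 * supported lower mode is also enabled. For example (assuming an rv64 CPU
 * whose maximum supported mode is sv57), "-cpu rv64,sv48=on" ends up with
 * the sv48, sv39 and bare bits all set in cfg.satp_mode.map.
 */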
#ifndef CONFIG_USER_ONLY
static void riscv_cpu_satp_mode_finalize(RISCVCPU *cpu, Error **errp)
{
    bool rv32 = riscv_cpu_is_32bit(cpu);
    uint8_t satp_mode_map_max, satp_mode_supported_max;

    /* The CPU wants the OS to decide which satp mode to use */
    if (cpu->cfg.satp_mode.supported == 0) {
        return;
    }

    satp_mode_supported_max =
        satp_mode_max_from_map(cpu->cfg.satp_mode.supported);

    if (cpu->cfg.satp_mode.map == 0) {
        if (cpu->cfg.satp_mode.init == 0) {
            /* If unset by the user, we fall back to the default satp mode. */
            set_satp_mode_default_map(cpu);
        } else {
            /*
             * Find the lowest level that was disabled and then enable the
             * first valid level below which can be found in
             * valid_vm_1_10_32/64.
             */
            for (int i = 1; i < 16; ++i) {
                if ((cpu->cfg.satp_mode.init & (1 << i)) &&
                    (cpu->cfg.satp_mode.supported & (1 << i))) {
                    for (int j = i - 1; j >= 0; --j) {
                        if (cpu->cfg.satp_mode.supported & (1 << j)) {
                            cpu->cfg.satp_mode.map |= (1 << j);
                            break;
                        }
                    }
                    break;
                }
            }
        }
    }

    satp_mode_map_max = satp_mode_max_from_map(cpu->cfg.satp_mode.map);

    /* Make sure the user asked for a supported configuration (HW and qemu) */
    if (satp_mode_map_max > satp_mode_supported_max) {
        error_setg(errp, "satp_mode %s is higher than hw max capability %s",
                   satp_mode_str(satp_mode_map_max, rv32),
                   satp_mode_str(satp_mode_supported_max, rv32));
        return;
    }

    /*
     * Make sure the user did not ask for an invalid configuration as per
     * the specification.
     */
    if (!rv32) {
        for (int i = satp_mode_map_max - 1; i >= 0; --i) {
            if (!(cpu->cfg.satp_mode.map & (1 << i)) &&
                (cpu->cfg.satp_mode.init & (1 << i)) &&
                (cpu->cfg.satp_mode.supported & (1 << i))) {
                error_setg(errp, "cannot disable %s satp mode if %s "
                           "is enabled", satp_mode_str(i, false),
                           satp_mode_str(satp_mode_map_max, false));
                return;
            }
        }
    }

    /* Finally expand the map so that all valid modes are set */
    for (int i = satp_mode_map_max - 1; i >= 0; --i) {
        if (cpu->cfg.satp_mode.supported & (1 << i)) {
            cpu->cfg.satp_mode.map |= (1 << i);
        }
    }
}
#endif

void riscv_cpu_finalize_features(RISCVCPU *cpu, Error **errp)
{
    Error *local_err = NULL;

#ifndef CONFIG_USER_ONLY
    riscv_cpu_satp_mode_finalize(cpu, &local_err);
    if (local_err != NULL) {
        error_propagate(errp, local_err);
        return;
    }
#endif

    if (tcg_enabled()) {
        riscv_tcg_cpu_finalize_features(cpu, &local_err);
        if (local_err != NULL) {
            error_propagate(errp, local_err);
            return;
        }
    } else if (kvm_enabled()) {
        riscv_kvm_cpu_finalize_features(cpu, &local_err);
        if (local_err != NULL) {
            error_propagate(errp, local_err);
            return;
        }
    }
}

static void riscv_cpu_realize(DeviceState *dev, Error **errp)
{
    CPUState *cs = CPU(dev);
    RISCVCPU *cpu = RISCV_CPU(dev);
    RISCVCPUClass *mcc = RISCV_CPU_GET_CLASS(dev);
    Error *local_err = NULL;

    if (object_dynamic_cast(OBJECT(dev), TYPE_RISCV_CPU_ANY) != NULL) {
        warn_report("The 'any' CPU is deprecated and will be "
                    "removed in the future.");
    }

    cpu_exec_realizefn(cs, &local_err);
    if (local_err != NULL) {
        error_propagate(errp, local_err);
        return;
    }

    riscv_cpu_finalize_features(cpu, &local_err);
    if (local_err != NULL) {
        error_propagate(errp, local_err);
        return;
    }

    riscv_cpu_register_gdb_regs_for_features(cs);

#ifndef CONFIG_USER_ONLY
    if (cpu->cfg.debug) {
        riscv_trigger_realize(&cpu->env);
    }
#endif

    qemu_init_vcpu(cs);
    cpu_reset(cs);

    mcc->parent_realize(dev, errp);
}

bool riscv_cpu_accelerator_compatible(RISCVCPU *cpu)
{
    if (tcg_enabled()) {
        return riscv_cpu_tcg_compatible(cpu);
    }

    return true;
}
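/*
 * The properties registered below ("sv32" for RV32, otherwise "sv39",
 * "sv48", "sv57" and "sv64") let users toggle individual translation modes.
 * A hypothetical command line such as "-cpu rv64,sv39=on,sv57=off" sets the
 * sv39 bit in cfg.satp_mode.map and records both touched bits in
 * cfg.satp_mode.init for later validation.
 */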
#ifndef CONFIG_USER_ONLY
static void cpu_riscv_get_satp(Object *obj, Visitor *v, const char *name,
                               void *opaque, Error **errp)
{
    RISCVSATPMap *satp_map = opaque;
    uint8_t satp = satp_mode_from_str(name);
    bool value;

    value = satp_map->map & (1 << satp);

    visit_type_bool(v, name, &value, errp);
}

static void cpu_riscv_set_satp(Object *obj, Visitor *v, const char *name,
                               void *opaque, Error **errp)
{
    RISCVSATPMap *satp_map = opaque;
    uint8_t satp = satp_mode_from_str(name);
    bool value;

    if (!visit_type_bool(v, name, &value, errp)) {
        return;
    }

    satp_map->map = deposit32(satp_map->map, satp, 1, value);
    satp_map->init |= 1 << satp;
}

void riscv_add_satp_mode_properties(Object *obj)
{
    RISCVCPU *cpu = RISCV_CPU(obj);

    if (cpu->env.misa_mxl == MXL_RV32) {
        object_property_add(obj, "sv32", "bool", cpu_riscv_get_satp,
                            cpu_riscv_set_satp, NULL, &cpu->cfg.satp_mode);
    } else {
        object_property_add(obj, "sv39", "bool", cpu_riscv_get_satp,
                            cpu_riscv_set_satp, NULL, &cpu->cfg.satp_mode);
        object_property_add(obj, "sv48", "bool", cpu_riscv_get_satp,
                            cpu_riscv_set_satp, NULL, &cpu->cfg.satp_mode);
        object_property_add(obj, "sv57", "bool", cpu_riscv_get_satp,
                            cpu_riscv_set_satp, NULL, &cpu->cfg.satp_mode);
        object_property_add(obj, "sv64", "bool", cpu_riscv_get_satp,
                            cpu_riscv_set_satp, NULL, &cpu->cfg.satp_mode);
    }
}

static void riscv_cpu_set_irq(void *opaque, int irq, int level)
{
    RISCVCPU *cpu = RISCV_CPU(opaque);
    CPURISCVState *env = &cpu->env;

    if (irq < IRQ_LOCAL_MAX) {
        switch (irq) {
        case IRQ_U_SOFT:
        case IRQ_S_SOFT:
        case IRQ_VS_SOFT:
        case IRQ_M_SOFT:
        case IRQ_U_TIMER:
        case IRQ_S_TIMER:
        case IRQ_VS_TIMER:
        case IRQ_M_TIMER:
        case IRQ_U_EXT:
        case IRQ_VS_EXT:
        case IRQ_M_EXT:
            if (kvm_enabled()) {
                kvm_riscv_set_irq(cpu, irq, level);
            } else {
                riscv_cpu_update_mip(env, 1 << irq, BOOL_TO_MASK(level));
            }
            break;
        case IRQ_S_EXT:
            if (kvm_enabled()) {
                kvm_riscv_set_irq(cpu, irq, level);
            } else {
                env->external_seip = level;
                riscv_cpu_update_mip(env, 1 << irq,
                                     BOOL_TO_MASK(level | env->software_seip));
            }
            break;
        default:
            g_assert_not_reached();
        }
    } else if (irq < (IRQ_LOCAL_MAX + IRQ_LOCAL_GUEST_MAX)) {
        /* Require H-extension for handling guest local interrupts */
        if (!riscv_has_ext(env, RVH)) {
            g_assert_not_reached();
        }

        /* Compute bit position in HGEIP CSR */
        irq = irq - IRQ_LOCAL_MAX + 1;
        if (env->geilen < irq) {
            g_assert_not_reached();
        }

        /* Update HGEIP CSR */
        env->hgeip &= ~((target_ulong)1 << irq);
        if (level) {
            env->hgeip |= (target_ulong)1 << irq;
        }

        /* Update mip.SGEIP bit */
        riscv_cpu_update_mip(env, MIP_SGEIP,
                             BOOL_TO_MASK(!!(env->hgeie & env->hgeip)));
    } else {
        g_assert_not_reached();
    }
}
#endif /* CONFIG_USER_ONLY */

static bool riscv_cpu_is_dynamic(Object *cpu_obj)
{
    return object_dynamic_cast(cpu_obj, TYPE_RISCV_DYNAMIC_CPU) != NULL;
}

static void riscv_cpu_post_init(Object *obj)
{
    accel_cpu_instance_init(CPU(obj));
}

static void riscv_cpu_init(Object *obj)
{
    RISCVCPUClass *mcc = RISCV_CPU_GET_CLASS(obj);
    RISCVCPU *cpu = RISCV_CPU(obj);
    CPURISCVState *env = &cpu->env;

    env->misa_mxl = mcc->misa_mxl_max;

#ifndef CONFIG_USER_ONLY
    qdev_init_gpio_in(DEVICE(obj), riscv_cpu_set_irq,
                      IRQ_LOCAL_MAX + IRQ_LOCAL_GUEST_MAX);
#endif /* CONFIG_USER_ONLY */

    general_user_opts = g_hash_table_new(g_str_hash, g_str_equal);

    /*
     * The timer and performance counters extensions were supported
     * in QEMU before they were added as discrete extensions in the
     * ISA. To keep compatibility we'll always default them to 'true'
     * for all CPUs. Each accelerator will decide what to do when
     * users disable them.
     */
    RISCV_CPU(obj)->cfg.ext_zicntr = true;
    RISCV_CPU(obj)->cfg.ext_zihpm = true;

    /* Default values for non-bool cpu properties */
    cpu->cfg.pmu_mask = MAKE_64BIT_MASK(3, 16);
    cpu->cfg.vlenb = 128 >> 3;
    cpu->cfg.elen = 64;
    cpu->cfg.cbom_blocksize = 64;
    cpu->cfg.cbop_blocksize = 64;
    cpu->cfg.cboz_blocksize = 64;
    cpu->env.vext_ver = VEXT_VERSION_1_00_0;
}

static void riscv_bare_cpu_init(Object *obj)
{
    RISCVCPU *cpu = RISCV_CPU(obj);

    /*
     * Bare CPUs do not inherit the timer and performance
     * counters from the parent class (see riscv_cpu_init()
     * for info on why the parent enables them).
     *
     * Users have to explicitly enable these counters for
     * bare CPUs.
     */
    cpu->cfg.ext_zicntr = false;
    cpu->cfg.ext_zihpm = false;

    /* Set to QEMU's first supported priv version */
    cpu->env.priv_ver = PRIV_VERSION_1_10_0;

    /*
     * Support all available satp_mode settings. The default
     * value will be set to MBARE if the user doesn't set
     * satp_mode manually (see set_satp_mode_default_map()).
     */
#ifndef CONFIG_USER_ONLY
    set_satp_mode_max_supported(cpu, VM_1_10_SV64);
#endif
}

typedef struct misa_ext_info {
    const char *name;
    const char *description;
} MISAExtInfo;

#define MISA_INFO_IDX(_bit) \
    __builtin_ctz(_bit)

#define MISA_EXT_INFO(_bit, _propname, _descr) \
    [MISA_INFO_IDX(_bit)] = {.name = _propname, .description = _descr}

static const MISAExtInfo misa_ext_info_arr[] = {
    MISA_EXT_INFO(RVA, "a", "Atomic instructions"),
    MISA_EXT_INFO(RVC, "c", "Compressed instructions"),
    MISA_EXT_INFO(RVD, "d", "Double-precision float point"),
    MISA_EXT_INFO(RVF, "f", "Single-precision float point"),
    MISA_EXT_INFO(RVI, "i", "Base integer instruction set"),
    MISA_EXT_INFO(RVE, "e", "Base integer instruction set (embedded)"),
    MISA_EXT_INFO(RVM, "m", "Integer multiplication and division"),
    MISA_EXT_INFO(RVS, "s", "Supervisor-level instructions"),
    MISA_EXT_INFO(RVU, "u", "User-level instructions"),
    MISA_EXT_INFO(RVH, "h", "Hypervisor"),
    MISA_EXT_INFO(RVJ, "x-j", "Dynamic translated languages"),
    MISA_EXT_INFO(RVV, "v", "Vector operations"),
    MISA_EXT_INFO(RVG, "g", "General purpose (IMAFD_Zicsr_Zifencei)"),
    MISA_EXT_INFO(RVB, "x-b", "Bit manipulation (Zba_Zbb_Zbs)")
};

static void riscv_cpu_validate_misa_mxl(RISCVCPUClass *mcc)
{
    CPUClass *cc = CPU_CLASS(mcc);
    /* Validate that MISA_MXL is set properly. */
    switch (mcc->misa_mxl_max) {
#ifdef TARGET_RISCV64
    case MXL_RV64:
    case MXL_RV128:
        cc->gdb_core_xml_file = "riscv-64bit-cpu.xml";
        break;
#endif
    case MXL_RV32:
        cc->gdb_core_xml_file = "riscv-32bit-cpu.xml";
        break;
    default:
        g_assert_not_reached();
    }
}

static int riscv_validate_misa_info_idx(uint32_t bit)
{
    int idx;

    /*
     * Our lowest valid input (RVA) is 1 and
     * __builtin_ctz() is UB with zero.
     */
    g_assert(bit != 0);
    idx = MISA_INFO_IDX(bit);

    g_assert(idx < ARRAY_SIZE(misa_ext_info_arr));
    return idx;
}

const char *riscv_get_misa_ext_name(uint32_t bit)
{
    int idx = riscv_validate_misa_info_idx(bit);
    const char *val = misa_ext_info_arr[idx].name;

    g_assert(val != NULL);
    return val;
}

const char *riscv_get_misa_ext_description(uint32_t bit)
{
    int idx = riscv_validate_misa_info_idx(bit);
    const char *val = misa_ext_info_arr[idx].description;

    g_assert(val != NULL);
    return val;
}

#define MULTI_EXT_CFG_BOOL(_name, _prop, _defval) \
    {.name = _name, .offset = CPU_CFG_OFFSET(_prop), \
     .enabled = _defval}

const RISCVCPUMultiExtConfig riscv_cpu_extensions[] = {
    /* Defaults for standard extensions */
    MULTI_EXT_CFG_BOOL("sscofpmf", ext_sscofpmf, false),
    MULTI_EXT_CFG_BOOL("zifencei", ext_zifencei, true),
    MULTI_EXT_CFG_BOOL("zicsr", ext_zicsr, true),
    MULTI_EXT_CFG_BOOL("zihintntl", ext_zihintntl, true),
    MULTI_EXT_CFG_BOOL("zihintpause", ext_zihintpause, true),
    MULTI_EXT_CFG_BOOL("zacas", ext_zacas, false),
    MULTI_EXT_CFG_BOOL("zawrs", ext_zawrs, true),
    MULTI_EXT_CFG_BOOL("zfa", ext_zfa, true),
    MULTI_EXT_CFG_BOOL("zfh", ext_zfh, false),
    MULTI_EXT_CFG_BOOL("zfhmin", ext_zfhmin, false),
    MULTI_EXT_CFG_BOOL("zve32f", ext_zve32f, false),
    MULTI_EXT_CFG_BOOL("zve64f", ext_zve64f, false),
    MULTI_EXT_CFG_BOOL("zve64d", ext_zve64d, false),
    MULTI_EXT_CFG_BOOL("sstc", ext_sstc, true),

    MULTI_EXT_CFG_BOOL("smepmp", ext_smepmp, false),
    MULTI_EXT_CFG_BOOL("smstateen", ext_smstateen, false),
    MULTI_EXT_CFG_BOOL("svade", ext_svade, false),
    MULTI_EXT_CFG_BOOL("svadu", ext_svadu, true),
    MULTI_EXT_CFG_BOOL("svinval", ext_svinval, false),
    MULTI_EXT_CFG_BOOL("svnapot", ext_svnapot, false),
    MULTI_EXT_CFG_BOOL("svpbmt", ext_svpbmt, false),

    MULTI_EXT_CFG_BOOL("zicntr", ext_zicntr, true),
    MULTI_EXT_CFG_BOOL("zihpm", ext_zihpm, true),

    MULTI_EXT_CFG_BOOL("zba", ext_zba, true),
    MULTI_EXT_CFG_BOOL("zbb", ext_zbb, true),
    MULTI_EXT_CFG_BOOL("zbc", ext_zbc, true),
    MULTI_EXT_CFG_BOOL("zbkb", ext_zbkb, false),
    MULTI_EXT_CFG_BOOL("zbkc", ext_zbkc, false),
    MULTI_EXT_CFG_BOOL("zbkx", ext_zbkx, false),
    MULTI_EXT_CFG_BOOL("zbs", ext_zbs, true),
    MULTI_EXT_CFG_BOOL("zk", ext_zk, false),
    MULTI_EXT_CFG_BOOL("zkn", ext_zkn, false),
    MULTI_EXT_CFG_BOOL("zknd", ext_zknd, false),
    MULTI_EXT_CFG_BOOL("zkne", ext_zkne, false),
    MULTI_EXT_CFG_BOOL("zknh", ext_zknh, false),
    MULTI_EXT_CFG_BOOL("zkr", ext_zkr, false),
    MULTI_EXT_CFG_BOOL("zks", ext_zks, false),
    MULTI_EXT_CFG_BOOL("zksed", ext_zksed, false),
    MULTI_EXT_CFG_BOOL("zksh", ext_zksh, false),
    MULTI_EXT_CFG_BOOL("zkt", ext_zkt, false),

    MULTI_EXT_CFG_BOOL("zdinx", ext_zdinx, false),
    MULTI_EXT_CFG_BOOL("zfinx", ext_zfinx, false),
    MULTI_EXT_CFG_BOOL("zhinx", ext_zhinx, false),
    MULTI_EXT_CFG_BOOL("zhinxmin", ext_zhinxmin, false),

    MULTI_EXT_CFG_BOOL("zicbom", ext_zicbom, true),
    MULTI_EXT_CFG_BOOL("zicbop", ext_zicbop, true),
    MULTI_EXT_CFG_BOOL("zicboz", ext_zicboz, true),

    MULTI_EXT_CFG_BOOL("zmmul", ext_zmmul, false),

    MULTI_EXT_CFG_BOOL("zca", ext_zca, false),
    MULTI_EXT_CFG_BOOL("zcb", ext_zcb, false),
    MULTI_EXT_CFG_BOOL("zcd", ext_zcd, false),
    MULTI_EXT_CFG_BOOL("zce", ext_zce, false),
    MULTI_EXT_CFG_BOOL("zcf", ext_zcf, false),
    MULTI_EXT_CFG_BOOL("zcmp", ext_zcmp, false),
    MULTI_EXT_CFG_BOOL("zcmt", ext_zcmt, false),
    MULTI_EXT_CFG_BOOL("zicond", ext_zicond, false),

    /* Vector cryptography extensions */
    MULTI_EXT_CFG_BOOL("zvbb", ext_zvbb, false),
    MULTI_EXT_CFG_BOOL("zvbc", ext_zvbc, false),
    MULTI_EXT_CFG_BOOL("zvkb", ext_zvkb, false),
    MULTI_EXT_CFG_BOOL("zvkg", ext_zvkg, false),
    MULTI_EXT_CFG_BOOL("zvkned", ext_zvkned, false),
    MULTI_EXT_CFG_BOOL("zvknha", ext_zvknha, false),
    MULTI_EXT_CFG_BOOL("zvknhb", ext_zvknhb, false),
    MULTI_EXT_CFG_BOOL("zvksed", ext_zvksed, false),
    MULTI_EXT_CFG_BOOL("zvksh", ext_zvksh, false),
    MULTI_EXT_CFG_BOOL("zvkt", ext_zvkt, false),
    MULTI_EXT_CFG_BOOL("zvkn", ext_zvkn, false),
    MULTI_EXT_CFG_BOOL("zvknc", ext_zvknc, false),
    MULTI_EXT_CFG_BOOL("zvkng", ext_zvkng, false),
    MULTI_EXT_CFG_BOOL("zvks", ext_zvks, false),
    MULTI_EXT_CFG_BOOL("zvksc", ext_zvksc, false),
    MULTI_EXT_CFG_BOOL("zvksg", ext_zvksg, false),

    DEFINE_PROP_END_OF_LIST(),
};

const RISCVCPUMultiExtConfig riscv_cpu_vendor_exts[] = {
    MULTI_EXT_CFG_BOOL("xtheadba", ext_xtheadba, false),
    MULTI_EXT_CFG_BOOL("xtheadbb", ext_xtheadbb, false),
    MULTI_EXT_CFG_BOOL("xtheadbs", ext_xtheadbs, false),
    MULTI_EXT_CFG_BOOL("xtheadcmo", ext_xtheadcmo, false),
    MULTI_EXT_CFG_BOOL("xtheadcondmov", ext_xtheadcondmov, false),
    MULTI_EXT_CFG_BOOL("xtheadfmemidx", ext_xtheadfmemidx, false),
    MULTI_EXT_CFG_BOOL("xtheadfmv", ext_xtheadfmv, false),
    MULTI_EXT_CFG_BOOL("xtheadmac", ext_xtheadmac, false),
    MULTI_EXT_CFG_BOOL("xtheadmemidx", ext_xtheadmemidx, false),
    MULTI_EXT_CFG_BOOL("xtheadmempair", ext_xtheadmempair, false),
    MULTI_EXT_CFG_BOOL("xtheadsync", ext_xtheadsync, false),
    MULTI_EXT_CFG_BOOL("xventanacondops", ext_XVentanaCondOps, false),

    DEFINE_PROP_END_OF_LIST(),
};

/* These are experimental so mark with 'x-' */
const RISCVCPUMultiExtConfig riscv_cpu_experimental_exts[] = {
    MULTI_EXT_CFG_BOOL("x-smaia", ext_smaia, false),
    MULTI_EXT_CFG_BOOL("x-ssaia", ext_ssaia, false),

    MULTI_EXT_CFG_BOOL("x-zaamo", ext_zaamo, false),
    MULTI_EXT_CFG_BOOL("x-zalrsc", ext_zalrsc, false),

    MULTI_EXT_CFG_BOOL("x-zvfh", ext_zvfh, false),
    MULTI_EXT_CFG_BOOL("x-zvfhmin", ext_zvfhmin, false),

    MULTI_EXT_CFG_BOOL("x-zfbfmin", ext_zfbfmin, false),
    MULTI_EXT_CFG_BOOL("x-zvfbfmin", ext_zvfbfmin, false),
    MULTI_EXT_CFG_BOOL("x-zvfbfwma", ext_zvfbfwma, false),

    DEFINE_PROP_END_OF_LIST(),
};
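/*
 * Experimental extensions keep the "x-" prefix in their property name, so
 * enabling one of them from the command line looks like, for instance,
 * "-cpu rv64,x-zvfh=on" (shown here purely as an illustration).
 */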
#define ALWAYS_ENABLED_FEATURE(_name) \
    {.name = _name, \
     .offset = CPU_CFG_OFFSET(ext_always_enabled), \
     .enabled = true}

/*
 * 'Named features' is the name we give to extensions that we
 * don't want to expose to users. They are either immutable
 * (always enabled/disabled) or they'll vary depending on
 * the resulting CPU state. They have riscv,isa strings
 * and priv_ver like regular extensions.
 */
const RISCVCPUMultiExtConfig riscv_cpu_named_features[] = {
    MULTI_EXT_CFG_BOOL("zic64b", ext_zic64b, true),

    /*
     * cache-related extensions that are always enabled
     * in TCG since QEMU RISC-V does not have a cache
     * model.
     */
    ALWAYS_ENABLED_FEATURE("za64rs"),
    ALWAYS_ENABLED_FEATURE("ziccif"),
    ALWAYS_ENABLED_FEATURE("ziccrse"),
    ALWAYS_ENABLED_FEATURE("ziccamoa"),
    ALWAYS_ENABLED_FEATURE("zicclsm"),
    ALWAYS_ENABLED_FEATURE("ssccptr"),

    /* Other named features that TCG always implements */
    ALWAYS_ENABLED_FEATURE("sstvecd"),
    ALWAYS_ENABLED_FEATURE("sstvala"),
    ALWAYS_ENABLED_FEATURE("sscounterenw"),

    DEFINE_PROP_END_OF_LIST(),
};

/* Deprecated entries marked for future removal */
const RISCVCPUMultiExtConfig riscv_cpu_deprecated_exts[] = {
    MULTI_EXT_CFG_BOOL("Zifencei", ext_zifencei, true),
    MULTI_EXT_CFG_BOOL("Zicsr", ext_zicsr, true),
    MULTI_EXT_CFG_BOOL("Zihintntl", ext_zihintntl, true),
    MULTI_EXT_CFG_BOOL("Zihintpause", ext_zihintpause, true),
    MULTI_EXT_CFG_BOOL("Zawrs", ext_zawrs, true),
    MULTI_EXT_CFG_BOOL("Zfa", ext_zfa, true),
    MULTI_EXT_CFG_BOOL("Zfh", ext_zfh, false),
    MULTI_EXT_CFG_BOOL("Zfhmin", ext_zfhmin, false),
    MULTI_EXT_CFG_BOOL("Zve32f", ext_zve32f, false),
    MULTI_EXT_CFG_BOOL("Zve64f", ext_zve64f, false),
    MULTI_EXT_CFG_BOOL("Zve64d", ext_zve64d, false),

    DEFINE_PROP_END_OF_LIST(),
};

static void cpu_set_prop_err(RISCVCPU *cpu, const char *propname,
                             Error **errp)
{
    g_autofree char *cpuname = riscv_cpu_get_name(cpu);
    error_setg(errp, "CPU '%s' does not allow changing the value of '%s'",
               cpuname, propname);
}

static void prop_pmu_num_set(Object *obj, Visitor *v, const char *name,
                             void *opaque, Error **errp)
{
    RISCVCPU *cpu = RISCV_CPU(obj);
    uint8_t pmu_num, curr_pmu_num;
    uint32_t pmu_mask;

    visit_type_uint8(v, name, &pmu_num, errp);

    curr_pmu_num = ctpop32(cpu->cfg.pmu_mask);

    if (pmu_num != curr_pmu_num && riscv_cpu_is_vendor(obj)) {
        cpu_set_prop_err(cpu, name, errp);
        error_append_hint(errp, "Current '%s' val: %u\n",
                          name, curr_pmu_num);
        return;
    }

    if (pmu_num > (RV_MAX_MHPMCOUNTERS - 3)) {
        error_setg(errp, "Number of counters exceeds maximum available");
        return;
    }

    if (pmu_num == 0) {
        pmu_mask = 0;
    } else {
        pmu_mask = MAKE_64BIT_MASK(3, pmu_num);
    }

    warn_report("\"pmu-num\" property is deprecated; use \"pmu-mask\"");
    cpu->cfg.pmu_mask = pmu_mask;
    cpu_option_add_user_setting("pmu-mask", pmu_mask);
}

static void prop_pmu_num_get(Object *obj, Visitor *v, const char *name,
                             void *opaque, Error **errp)
{
    RISCVCPU *cpu = RISCV_CPU(obj);
    uint8_t pmu_num = ctpop32(cpu->cfg.pmu_mask);

    visit_type_uint8(v, name, &pmu_num, errp);
}

static const PropertyInfo prop_pmu_num = {
    .name = "pmu-num",
    .get = prop_pmu_num_get,
    .set = prop_pmu_num_set,
};
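/*
 * Worked example of how the two PMU properties relate: "pmu-num=8" is
 * translated by the deprecated setter above into the same state as
 * "pmu-mask=0x7f8", i.e. MAKE_64BIT_MASK(3, 8), selecting hpmcounters 3
 * through 10.
 */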
static void prop_pmu_mask_set(Object *obj, Visitor *v, const char *name,
                              void *opaque, Error **errp)
{
    RISCVCPU *cpu = RISCV_CPU(obj);
    uint32_t value;
    uint8_t pmu_num;

    visit_type_uint32(v, name, &value, errp);

    if (value != cpu->cfg.pmu_mask && riscv_cpu_is_vendor(obj)) {
        cpu_set_prop_err(cpu, name, errp);
        error_append_hint(errp, "Current '%s' val: %x\n",
                          name, cpu->cfg.pmu_mask);
        return;
    }

    pmu_num = ctpop32(value);

    if (pmu_num > (RV_MAX_MHPMCOUNTERS - 3)) {
        error_setg(errp, "Number of counters exceeds maximum available");
        return;
    }

    cpu_option_add_user_setting(name, value);
    cpu->cfg.pmu_mask = value;
}

static void prop_pmu_mask_get(Object *obj, Visitor *v, const char *name,
                              void *opaque, Error **errp)
{
    uint8_t pmu_mask = RISCV_CPU(obj)->cfg.pmu_mask;

    visit_type_uint8(v, name, &pmu_mask, errp);
}

static const PropertyInfo prop_pmu_mask = {
    .name = "pmu-mask",
    .get = prop_pmu_mask_get,
    .set = prop_pmu_mask_set,
};

static void prop_mmu_set(Object *obj, Visitor *v, const char *name,
                         void *opaque, Error **errp)
{
    RISCVCPU *cpu = RISCV_CPU(obj);
    bool value;

    visit_type_bool(v, name, &value, errp);

    if (cpu->cfg.mmu != value && riscv_cpu_is_vendor(obj)) {
        cpu_set_prop_err(cpu, "mmu", errp);
        return;
    }

    cpu_option_add_user_setting(name, value);
    cpu->cfg.mmu = value;
}

static void prop_mmu_get(Object *obj, Visitor *v, const char *name,
                         void *opaque, Error **errp)
{
    bool value = RISCV_CPU(obj)->cfg.mmu;

    visit_type_bool(v, name, &value, errp);
}

static const PropertyInfo prop_mmu = {
    .name = "mmu",
    .get = prop_mmu_get,
    .set = prop_mmu_set,
};

static void prop_pmp_set(Object *obj, Visitor *v, const char *name,
                         void *opaque, Error **errp)
{
    RISCVCPU *cpu = RISCV_CPU(obj);
    bool value;

    visit_type_bool(v, name, &value, errp);

    if (cpu->cfg.pmp != value && riscv_cpu_is_vendor(obj)) {
        cpu_set_prop_err(cpu, name, errp);
        return;
    }

    cpu_option_add_user_setting(name, value);
    cpu->cfg.pmp = value;
}

static void prop_pmp_get(Object *obj, Visitor *v, const char *name,
                         void *opaque, Error **errp)
{
    bool value = RISCV_CPU(obj)->cfg.pmp;

    visit_type_bool(v, name, &value, errp);
}

static const PropertyInfo prop_pmp = {
    .name = "pmp",
    .get = prop_pmp_get,
    .set = prop_pmp_set,
};

static int priv_spec_from_str(const char *priv_spec_str)
{
    int priv_version = -1;

    if (!g_strcmp0(priv_spec_str, PRIV_VER_1_12_0_STR)) {
        priv_version = PRIV_VERSION_1_12_0;
    } else if (!g_strcmp0(priv_spec_str, PRIV_VER_1_11_0_STR)) {
        priv_version = PRIV_VERSION_1_11_0;
    } else if (!g_strcmp0(priv_spec_str, PRIV_VER_1_10_0_STR)) {
        priv_version = PRIV_VERSION_1_10_0;
    }

    return priv_version;
}

static const char *priv_spec_to_str(int priv_version)
{
    switch (priv_version) {
    case PRIV_VERSION_1_10_0:
        return PRIV_VER_1_10_0_STR;
    case PRIV_VERSION_1_11_0:
        return PRIV_VER_1_11_0_STR;
    case PRIV_VERSION_1_12_0:
        return PRIV_VER_1_12_0_STR;
    default:
        return NULL;
    }
}
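/*
 * Illustrative use of the conversion helpers above: the "priv_spec"
 * property handled below accepts the version strings recognized by
 * priv_spec_from_str(), so a command line along the lines of
 * "-cpu rv64,priv_spec=v1.12.0" (assuming PRIV_VER_1_12_0_STR is spelled
 * that way) selects PRIV_VERSION_1_12_0.
 */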
static void prop_priv_spec_set(Object *obj, Visitor *v, const char *name,
                               void *opaque, Error **errp)
{
    RISCVCPU *cpu = RISCV_CPU(obj);
    g_autofree char *value = NULL;
    int priv_version = -1;

    if (!visit_type_str(v, name, &value, errp)) {
        return;
    }

    priv_version = priv_spec_from_str(value);
    if (priv_version < 0) {
        error_setg(errp, "Unsupported privilege spec version '%s'", value);
        return;
    }

    if (priv_version != cpu->env.priv_ver && riscv_cpu_is_vendor(obj)) {
        cpu_set_prop_err(cpu, name, errp);
        error_append_hint(errp, "Current '%s' val: %s\n", name,
                          object_property_get_str(obj, name, NULL));
        return;
    }

    cpu_option_add_user_setting(name, priv_version);
    cpu->env.priv_ver = priv_version;
}

static void prop_priv_spec_get(Object *obj, Visitor *v, const char *name,
                               void *opaque, Error **errp)
{
    RISCVCPU *cpu = RISCV_CPU(obj);
    const char *value = priv_spec_to_str(cpu->env.priv_ver);

    visit_type_str(v, name, (char **)&value, errp);
}

static const PropertyInfo prop_priv_spec = {
    .name = "priv_spec",
    .get = prop_priv_spec_get,
    .set = prop_priv_spec_set,
};

static void prop_vext_spec_set(Object *obj, Visitor *v, const char *name,
                               void *opaque, Error **errp)
{
    RISCVCPU *cpu = RISCV_CPU(obj);
    g_autofree char *value = NULL;

    if (!visit_type_str(v, name, &value, errp)) {
        return;
    }

    if (g_strcmp0(value, VEXT_VER_1_00_0_STR) != 0) {
        error_setg(errp, "Unsupported vector spec version '%s'", value);
        return;
    }

    cpu_option_add_user_setting(name, VEXT_VERSION_1_00_0);
    cpu->env.vext_ver = VEXT_VERSION_1_00_0;
}

static void prop_vext_spec_get(Object *obj, Visitor *v, const char *name,
                               void *opaque, Error **errp)
{
    const char *value = VEXT_VER_1_00_0_STR;

    visit_type_str(v, name, (char **)&value, errp);
}

static const PropertyInfo prop_vext_spec = {
    .name = "vext_spec",
    .get = prop_vext_spec_get,
    .set = prop_vext_spec_set,
};

static void prop_vlen_set(Object *obj, Visitor *v, const char *name,
                          void *opaque, Error **errp)
{
    RISCVCPU *cpu = RISCV_CPU(obj);
    uint16_t value;

    if (!visit_type_uint16(v, name, &value, errp)) {
        return;
    }

    if (!is_power_of_2(value)) {
        error_setg(errp, "Vector extension VLEN must be power of 2");
        return;
    }

    if (value != (cpu->cfg.vlenb << 3) && riscv_cpu_is_vendor(obj)) {
        cpu_set_prop_err(cpu, name, errp);
        error_append_hint(errp, "Current '%s' val: %u\n",
                          name, cpu->cfg.vlenb << 3);
        return;
    }

    cpu_option_add_user_setting(name, value);
    cpu->cfg.vlenb = value >> 3;
}

static void prop_vlen_get(Object *obj, Visitor *v, const char *name,
                          void *opaque, Error **errp)
{
    uint16_t value = RISCV_CPU(obj)->cfg.vlenb << 3;

    visit_type_uint16(v, name, &value, errp);
}

static const PropertyInfo prop_vlen = {
    .name = "vlen",
    .get = prop_vlen_get,
    .set = prop_vlen_set,
};
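
/*
 * Example (derived from prop_vlen_set()/prop_vlen_get() above): "vlen" is
 * given in bits and must be a power of 2; it is stored internally as
 * cfg.vlenb in bytes, so e.g. "-cpu rv64,v=true,vlen=256" would result in
 * cfg.vlenb == 32 (the "rv64" CPU model and "v" property are used here for
 * illustration only).
 */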

static void prop_elen_set(Object *obj, Visitor *v, const char *name,
                          void *opaque, Error **errp)
{
    RISCVCPU *cpu = RISCV_CPU(obj);
    uint16_t value;

    if (!visit_type_uint16(v, name, &value, errp)) {
        return;
    }

    if (!is_power_of_2(value)) {
        error_setg(errp, "Vector extension ELEN must be power of 2");
        return;
    }

    if (value != cpu->cfg.elen && riscv_cpu_is_vendor(obj)) {
        cpu_set_prop_err(cpu, name, errp);
        error_append_hint(errp, "Current '%s' val: %u\n",
                          name, cpu->cfg.elen);
        return;
    }

    cpu_option_add_user_setting(name, value);
    cpu->cfg.elen = value;
}

static void prop_elen_get(Object *obj, Visitor *v, const char *name,
                          void *opaque, Error **errp)
{
    uint16_t value = RISCV_CPU(obj)->cfg.elen;

    visit_type_uint16(v, name, &value, errp);
}

static const PropertyInfo prop_elen = {
    .name = "elen",
    .get = prop_elen_get,
    .set = prop_elen_set,
};

static void prop_cbom_blksize_set(Object *obj, Visitor *v, const char *name,
                                  void *opaque, Error **errp)
{
    RISCVCPU *cpu = RISCV_CPU(obj);
    uint16_t value;

    if (!visit_type_uint16(v, name, &value, errp)) {
        return;
    }

    if (value != cpu->cfg.cbom_blocksize && riscv_cpu_is_vendor(obj)) {
        cpu_set_prop_err(cpu, name, errp);
        error_append_hint(errp, "Current '%s' val: %u\n",
                          name, cpu->cfg.cbom_blocksize);
        return;
    }

    cpu_option_add_user_setting(name, value);
    cpu->cfg.cbom_blocksize = value;
}

static void prop_cbom_blksize_get(Object *obj, Visitor *v, const char *name,
                                  void *opaque, Error **errp)
{
    uint16_t value = RISCV_CPU(obj)->cfg.cbom_blocksize;

    visit_type_uint16(v, name, &value, errp);
}

static const PropertyInfo prop_cbom_blksize = {
    .name = "cbom_blocksize",
    .get = prop_cbom_blksize_get,
    .set = prop_cbom_blksize_set,
};

static void prop_cbop_blksize_set(Object *obj, Visitor *v, const char *name,
                                  void *opaque, Error **errp)
{
    RISCVCPU *cpu = RISCV_CPU(obj);
    uint16_t value;

    if (!visit_type_uint16(v, name, &value, errp)) {
        return;
    }

    if (value != cpu->cfg.cbop_blocksize && riscv_cpu_is_vendor(obj)) {
        cpu_set_prop_err(cpu, name, errp);
        error_append_hint(errp, "Current '%s' val: %u\n",
                          name, cpu->cfg.cbop_blocksize);
        return;
    }

    cpu_option_add_user_setting(name, value);
    cpu->cfg.cbop_blocksize = value;
}

static void prop_cbop_blksize_get(Object *obj, Visitor *v, const char *name,
                                  void *opaque, Error **errp)
{
    uint16_t value = RISCV_CPU(obj)->cfg.cbop_blocksize;

    visit_type_uint16(v, name, &value, errp);
}

static const PropertyInfo prop_cbop_blksize = {
    .name = "cbop_blocksize",
    .get = prop_cbop_blksize_get,
    .set = prop_cbop_blksize_set,
};

static void prop_cboz_blksize_set(Object *obj, Visitor *v, const char *name,
                                  void *opaque, Error **errp)
{
    RISCVCPU *cpu = RISCV_CPU(obj);
    uint16_t value;

    if (!visit_type_uint16(v, name, &value, errp)) {
        return;
    }

    if (value != cpu->cfg.cboz_blocksize && riscv_cpu_is_vendor(obj)) {
        cpu_set_prop_err(cpu, name, errp);
        error_append_hint(errp, "Current '%s' val: %u\n",
                          name, cpu->cfg.cboz_blocksize);
        return;
    }

    cpu_option_add_user_setting(name, value);
    cpu->cfg.cboz_blocksize = value;
}

static void prop_cboz_blksize_get(Object *obj, Visitor *v, const char *name,
                                  void *opaque, Error **errp)
{
    uint16_t value = RISCV_CPU(obj)->cfg.cboz_blocksize;

    visit_type_uint16(v, name, &value, errp);
}

static const PropertyInfo prop_cboz_blksize = {
    .name = "cboz_blocksize",
    .get = prop_cboz_blksize_get,
    .set = prop_cboz_blksize_set,
};
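
/*
 * The three block size properties above are given in bytes and record the
 * cache block sizes used by the Zicbom/Zicbop/Zicboz cache-block
 * operations; a typical configuration would look roughly like
 * "-cpu rv64,zicboz=true,cboz_blocksize=64" (illustrative only, assuming
 * the "rv64" CPU model and the "zicboz" extension flag).
 */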

static void prop_mvendorid_set(Object *obj, Visitor *v, const char *name,
                               void *opaque, Error **errp)
{
    bool dynamic_cpu = riscv_cpu_is_dynamic(obj);
    RISCVCPU *cpu = RISCV_CPU(obj);
    uint32_t prev_val = cpu->cfg.mvendorid;
    uint32_t value;

    if (!visit_type_uint32(v, name, &value, errp)) {
        return;
    }

    if (!dynamic_cpu && prev_val != value) {
        error_setg(errp, "Unable to change %s mvendorid (0x%x)",
                   object_get_typename(obj), prev_val);
        return;
    }

    cpu->cfg.mvendorid = value;
}

static void prop_mvendorid_get(Object *obj, Visitor *v, const char *name,
                               void *opaque, Error **errp)
{
    uint32_t value = RISCV_CPU(obj)->cfg.mvendorid;

    visit_type_uint32(v, name, &value, errp);
}

static const PropertyInfo prop_mvendorid = {
    .name = "mvendorid",
    .get = prop_mvendorid_get,
    .set = prop_mvendorid_set,
};

static void prop_mimpid_set(Object *obj, Visitor *v, const char *name,
                            void *opaque, Error **errp)
{
    bool dynamic_cpu = riscv_cpu_is_dynamic(obj);
    RISCVCPU *cpu = RISCV_CPU(obj);
    uint64_t prev_val = cpu->cfg.mimpid;
    uint64_t value;

    if (!visit_type_uint64(v, name, &value, errp)) {
        return;
    }

    if (!dynamic_cpu && prev_val != value) {
        error_setg(errp, "Unable to change %s mimpid (0x%" PRIx64 ")",
                   object_get_typename(obj), prev_val);
        return;
    }

    cpu->cfg.mimpid = value;
}

static void prop_mimpid_get(Object *obj, Visitor *v, const char *name,
                            void *opaque, Error **errp)
{
    uint64_t value = RISCV_CPU(obj)->cfg.mimpid;

    visit_type_uint64(v, name, &value, errp);
}

static const PropertyInfo prop_mimpid = {
    .name = "mimpid",
    .get = prop_mimpid_get,
    .set = prop_mimpid_set,
};

static void prop_marchid_set(Object *obj, Visitor *v, const char *name,
                             void *opaque, Error **errp)
{
    bool dynamic_cpu = riscv_cpu_is_dynamic(obj);
    RISCVCPU *cpu = RISCV_CPU(obj);
    uint64_t prev_val = cpu->cfg.marchid;
    uint64_t value, invalid_val;
    uint32_t mxlen = 0;

    if (!visit_type_uint64(v, name, &value, errp)) {
        return;
    }

    if (!dynamic_cpu && prev_val != value) {
        error_setg(errp, "Unable to change %s marchid (0x%" PRIx64 ")",
                   object_get_typename(obj), prev_val);
        return;
    }

    switch (riscv_cpu_mxl(&cpu->env)) {
    case MXL_RV32:
        mxlen = 32;
        break;
    case MXL_RV64:
    case MXL_RV128:
        mxlen = 64;
        break;
    default:
        g_assert_not_reached();
    }

    invalid_val = 1ULL << (mxlen - 1);

    if (value == invalid_val) {
        error_setg(errp, "Unable to set marchid with MSB (%u) bit set "
                         "and the remaining bits zero", mxlen);
        return;
    }

    cpu->cfg.marchid = value;
}

static void prop_marchid_get(Object *obj, Visitor *v, const char *name,
                             void *opaque, Error **errp)
{
    uint64_t value = RISCV_CPU(obj)->cfg.marchid;

    visit_type_uint64(v, name, &value, errp);
}

static const PropertyInfo prop_marchid = {
    .name = "marchid",
    .get = prop_marchid_get,
    .set = prop_marchid_set,
};
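
/*
 * The mvendorid/mimpid/marchid properties above seed the corresponding
 * machine-mode identification CSRs. They can be freely overridden on
 * dynamic CPUs (e.g. something like "-cpu rv64,marchid=0x5", shown here
 * purely as an illustration), while non-dynamic CPUs, such as vendor
 * CPUs, only accept the value they already carry.
 */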

/*
 * RVA22U64 defines some 'named features' that are cache
 * related: Za64rs, Zic64b, Ziccif, Ziccrse, Ziccamoa
 * and Zicclsm. They are always implemented in TCG and
 * don't need to be manually enabled by the profile.
 */
static RISCVCPUProfile RVA22U64 = {
    .parent = NULL,
    .name = "rva22u64",
    .misa_ext = RVI | RVM | RVA | RVF | RVD | RVC | RVU,
    .priv_spec = RISCV_PROFILE_ATTR_UNUSED,
    .satp_mode = RISCV_PROFILE_ATTR_UNUSED,
    .ext_offsets = {
        CPU_CFG_OFFSET(ext_zicsr), CPU_CFG_OFFSET(ext_zihintpause),
        CPU_CFG_OFFSET(ext_zba), CPU_CFG_OFFSET(ext_zbb),
        CPU_CFG_OFFSET(ext_zbs), CPU_CFG_OFFSET(ext_zfhmin),
        CPU_CFG_OFFSET(ext_zkt), CPU_CFG_OFFSET(ext_zicntr),
        CPU_CFG_OFFSET(ext_zihpm), CPU_CFG_OFFSET(ext_zicbom),
        CPU_CFG_OFFSET(ext_zicbop), CPU_CFG_OFFSET(ext_zicboz),

        /* mandatory named features for this profile */
        CPU_CFG_OFFSET(ext_zic64b),

        RISCV_PROFILE_EXT_LIST_END
    }
};

/*
 * As with RVA22U64, RVA22S64 also defines 'named features'.
 *
 * Cache related features that we consider enabled since we don't
 * implement cache: Ssccptr
 *
 * Other named features that we already implement: Sstvecd, Sstvala,
 * Sscounterenw
 *
 * The remaining features/extensions come from RVA22U64.
 */
static RISCVCPUProfile RVA22S64 = {
    .parent = &RVA22U64,
    .name = "rva22s64",
    .misa_ext = RVS,
    .priv_spec = PRIV_VERSION_1_12_0,
    .satp_mode = VM_1_10_SV39,
    .ext_offsets = {
        /* rva22s64 exts */
        CPU_CFG_OFFSET(ext_zifencei), CPU_CFG_OFFSET(ext_svpbmt),
        CPU_CFG_OFFSET(ext_svinval), CPU_CFG_OFFSET(ext_svade),

        RISCV_PROFILE_EXT_LIST_END
    }
};

RISCVCPUProfile *riscv_profiles[] = {
    &RVA22U64,
    &RVA22S64,
    NULL,
};
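
/*
 * The profiles above are also exposed as CPU models on TARGET_RISCV64
 * builds (see the rva22u64/rva22s64 types registered at the end of this
 * file), so they can be selected with roughly "-cpu rva22s64" on the
 * qemu-system-riscv64 command line.
 */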

static Property riscv_cpu_properties[] = {
    DEFINE_PROP_BOOL("debug", RISCVCPU, cfg.debug, true),

    {.name = "pmu-mask", .info = &prop_pmu_mask},
    {.name = "pmu-num", .info = &prop_pmu_num}, /* Deprecated */

    {.name = "mmu", .info = &prop_mmu},
    {.name = "pmp", .info = &prop_pmp},

    {.name = "priv_spec", .info = &prop_priv_spec},
    {.name = "vext_spec", .info = &prop_vext_spec},

    {.name = "vlen", .info = &prop_vlen},
    {.name = "elen", .info = &prop_elen},

    {.name = "cbom_blocksize", .info = &prop_cbom_blksize},
    {.name = "cbop_blocksize", .info = &prop_cbop_blksize},
    {.name = "cboz_blocksize", .info = &prop_cboz_blksize},

    {.name = "mvendorid", .info = &prop_mvendorid},
    {.name = "mimpid", .info = &prop_mimpid},
    {.name = "marchid", .info = &prop_marchid},

#ifndef CONFIG_USER_ONLY
    DEFINE_PROP_UINT64("resetvec", RISCVCPU, env.resetvec, DEFAULT_RSTVEC),
#endif

    DEFINE_PROP_BOOL("short-isa-string", RISCVCPU, cfg.short_isa_string, false),

    DEFINE_PROP_BOOL("rvv_ta_all_1s", RISCVCPU, cfg.rvv_ta_all_1s, false),
    DEFINE_PROP_BOOL("rvv_ma_all_1s", RISCVCPU, cfg.rvv_ma_all_1s, false),

    /*
     * write_misa() is marked as experimental for now so mark
     * it with -x and default to 'false'.
     */
    DEFINE_PROP_BOOL("x-misa-w", RISCVCPU, cfg.misa_w, false),
    DEFINE_PROP_END_OF_LIST(),
};

#if defined(TARGET_RISCV64)
static void rva22u64_profile_cpu_init(Object *obj)
{
    rv64i_bare_cpu_init(obj);

    RVA22U64.enabled = true;
}

static void rva22s64_profile_cpu_init(Object *obj)
{
    rv64i_bare_cpu_init(obj);

    RVA22S64.enabled = true;
}
#endif

static const gchar *riscv_gdb_arch_name(CPUState *cs)
{
    RISCVCPU *cpu = RISCV_CPU(cs);
    CPURISCVState *env = &cpu->env;

    switch (riscv_cpu_mxl(env)) {
    case MXL_RV32:
        return "riscv:rv32";
    case MXL_RV64:
    case MXL_RV128:
        return "riscv:rv64";
    default:
        g_assert_not_reached();
    }
}

#ifndef CONFIG_USER_ONLY
static int64_t riscv_get_arch_id(CPUState *cs)
{
    RISCVCPU *cpu = RISCV_CPU(cs);

    return cpu->env.mhartid;
}

#include "hw/core/sysemu-cpu-ops.h"

static const struct SysemuCPUOps riscv_sysemu_ops = {
    .get_phys_page_debug = riscv_cpu_get_phys_page_debug,
    .write_elf64_note = riscv_cpu_write_elf64_note,
    .write_elf32_note = riscv_cpu_write_elf32_note,
    .legacy_vmsd = &vmstate_riscv_cpu,
};
#endif

static void riscv_cpu_common_class_init(ObjectClass *c, void *data)
{
    RISCVCPUClass *mcc = RISCV_CPU_CLASS(c);
    CPUClass *cc = CPU_CLASS(c);
    DeviceClass *dc = DEVICE_CLASS(c);
    ResettableClass *rc = RESETTABLE_CLASS(c);

    device_class_set_parent_realize(dc, riscv_cpu_realize,
                                    &mcc->parent_realize);

    resettable_class_set_parent_phases(rc, NULL, riscv_cpu_reset_hold, NULL,
                                       &mcc->parent_phases);

    cc->class_by_name = riscv_cpu_class_by_name;
    cc->has_work = riscv_cpu_has_work;
    cc->mmu_index = riscv_cpu_mmu_index;
    cc->dump_state = riscv_cpu_dump_state;
    cc->set_pc = riscv_cpu_set_pc;
    cc->get_pc = riscv_cpu_get_pc;
    cc->gdb_read_register = riscv_cpu_gdb_read_register;
    cc->gdb_write_register = riscv_cpu_gdb_write_register;
    cc->gdb_stop_before_watchpoint = true;
    cc->disas_set_info = riscv_cpu_disas_set_info;
#ifndef CONFIG_USER_ONLY
    cc->sysemu_ops = &riscv_sysemu_ops;
    cc->get_arch_id = riscv_get_arch_id;
#endif
    cc->gdb_arch_name = riscv_gdb_arch_name;

    device_class_set_props(dc, riscv_cpu_properties);
}

static void riscv_cpu_class_init(ObjectClass *c, void *data)
{
    RISCVCPUClass *mcc = RISCV_CPU_CLASS(c);

    mcc->misa_mxl_max = (uint32_t)(uintptr_t)data;
    riscv_cpu_validate_misa_mxl(mcc);
}

static void riscv_isa_string_ext(RISCVCPU *cpu, char **isa_str,
                                 int max_str_len)
{
    const RISCVIsaExtData *edata;
    char *old = *isa_str;
    char *new = *isa_str;

    for (edata = isa_edata_arr; edata && edata->name; edata++) {
        if (isa_ext_is_enabled(cpu, edata->ext_enable_offset)) {
            new = g_strconcat(old, "_", edata->name, NULL);
            g_free(old);
            old = new;
        }
    }

    *isa_str = new;
}
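
/*
 * Builds the riscv,isa style string, e.g. something along the lines of
 * "rv64imafdc_zicsr_zifencei" for an RV64IMAFDC CPU with Zicsr and
 * Zifencei enabled (the exact contents depend on the configuration). With
 * "short-isa-string" set, only the single-letter part is produced.
 */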
char *riscv_isa_string(RISCVCPU *cpu)
{
    RISCVCPUClass *mcc = RISCV_CPU_GET_CLASS(cpu);
    int i;
    const size_t maxlen = sizeof("rv128") + sizeof(riscv_single_letter_exts);
    char *isa_str = g_new(char, maxlen);
    int xlen = riscv_cpu_max_xlen(mcc);
    char *p = isa_str + snprintf(isa_str, maxlen, "rv%d", xlen);

    for (i = 0; i < sizeof(riscv_single_letter_exts) - 1; i++) {
        if (cpu->env.misa_ext & RV(riscv_single_letter_exts[i])) {
            *p++ = qemu_tolower(riscv_single_letter_exts[i]);
        }
    }
    *p = '\0';
    if (!cpu->cfg.short_isa_string) {
        riscv_isa_string_ext(cpu, &isa_str, maxlen);
    }
    return isa_str;
}

#ifndef CONFIG_USER_ONLY
static char **riscv_isa_extensions_list(RISCVCPU *cpu, int *count)
{
    int maxlen = ARRAY_SIZE(riscv_single_letter_exts) + ARRAY_SIZE(isa_edata_arr);
    char **extensions = g_new(char *, maxlen);

    for (int i = 0; i < sizeof(riscv_single_letter_exts) - 1; i++) {
        if (cpu->env.misa_ext & RV(riscv_single_letter_exts[i])) {
            extensions[*count] = g_new(char, 2);
            snprintf(extensions[*count], 2, "%c",
                     qemu_tolower(riscv_single_letter_exts[i]));
            (*count)++;
        }
    }

    for (const RISCVIsaExtData *edata = isa_edata_arr; edata->name; edata++) {
        if (isa_ext_is_enabled(cpu, edata->ext_enable_offset)) {
            extensions[*count] = g_strdup(edata->name);
            (*count)++;
        }
    }

    return extensions;
}
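
/*
 * Writes the riscv,isa* properties of a cpu node. For a stock RV64GC-style
 * CPU this ends up producing something roughly like:
 *   riscv,isa            = "rv64imafdc_zicsr_zifencei...";
 *   riscv,isa-base       = "rv64i";
 *   riscv,isa-extensions = "i", "m", "a", "f", "d", "c", "zicsr", ...;
 * (illustrative values; the exact lists depend on the enabled extensions).
 */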
void riscv_isa_write_fdt(RISCVCPU *cpu, void *fdt, char *nodename)
{
    RISCVCPUClass *mcc = RISCV_CPU_GET_CLASS(cpu);
    const size_t maxlen = sizeof("rv128i");
    g_autofree char *isa_base = g_new(char, maxlen);
    g_autofree char *riscv_isa;
    char **isa_extensions;
    int count = 0;
    int xlen = riscv_cpu_max_xlen(mcc);

    riscv_isa = riscv_isa_string(cpu);
    qemu_fdt_setprop_string(fdt, nodename, "riscv,isa", riscv_isa);

    snprintf(isa_base, maxlen, "rv%di", xlen);
    qemu_fdt_setprop_string(fdt, nodename, "riscv,isa-base", isa_base);

    isa_extensions = riscv_isa_extensions_list(cpu, &count);
    qemu_fdt_setprop_string_array(fdt, nodename, "riscv,isa-extensions",
                                  isa_extensions, count);

    for (int i = 0; i < count; i++) {
        g_free(isa_extensions[i]);
    }

    g_free(isa_extensions);
}
#endif

#define DEFINE_CPU(type_name, misa_mxl_max, initfn) \
    { \
        .name = (type_name), \
        .parent = TYPE_RISCV_CPU, \
        .instance_init = (initfn), \
        .class_init = riscv_cpu_class_init, \
        .class_data = (void *)(misa_mxl_max) \
    }

#define DEFINE_DYNAMIC_CPU(type_name, misa_mxl_max, initfn) \
    { \
        .name = (type_name), \
        .parent = TYPE_RISCV_DYNAMIC_CPU, \
        .instance_init = (initfn), \
        .class_init = riscv_cpu_class_init, \
        .class_data = (void *)(misa_mxl_max) \
    }

#define DEFINE_VENDOR_CPU(type_name, misa_mxl_max, initfn) \
    { \
        .name = (type_name), \
        .parent = TYPE_RISCV_VENDOR_CPU, \
        .instance_init = (initfn), \
        .class_init = riscv_cpu_class_init, \
        .class_data = (void *)(misa_mxl_max) \
    }

#define DEFINE_BARE_CPU(type_name, misa_mxl_max, initfn) \
    { \
        .name = (type_name), \
        .parent = TYPE_RISCV_BARE_CPU, \
        .instance_init = (initfn), \
        .class_init = riscv_cpu_class_init, \
        .class_data = (void *)(misa_mxl_max) \
    }

#define DEFINE_PROFILE_CPU(type_name, misa_mxl_max, initfn) \
    { \
        .name = (type_name), \
        .parent = TYPE_RISCV_BARE_CPU, \
        .instance_init = (initfn), \
        .class_init = riscv_cpu_class_init, \
        .class_data = (void *)(misa_mxl_max) \
    }

static const TypeInfo riscv_cpu_type_infos[] = {
    {
        .name = TYPE_RISCV_CPU,
        .parent = TYPE_CPU,
        .instance_size = sizeof(RISCVCPU),
        .instance_align = __alignof(RISCVCPU),
        .instance_init = riscv_cpu_init,
        .instance_post_init = riscv_cpu_post_init,
        .abstract = true,
        .class_size = sizeof(RISCVCPUClass),
        .class_init = riscv_cpu_common_class_init,
    },
    {
        .name = TYPE_RISCV_DYNAMIC_CPU,
        .parent = TYPE_RISCV_CPU,
        .abstract = true,
    },
    {
        .name = TYPE_RISCV_VENDOR_CPU,
        .parent = TYPE_RISCV_CPU,
        .abstract = true,
    },
    {
        .name = TYPE_RISCV_BARE_CPU,
        .parent = TYPE_RISCV_CPU,
        .instance_init = riscv_bare_cpu_init,
        .abstract = true,
    },
#if defined(TARGET_RISCV32)
    DEFINE_DYNAMIC_CPU(TYPE_RISCV_CPU_ANY, MXL_RV32, riscv_any_cpu_init),
    DEFINE_DYNAMIC_CPU(TYPE_RISCV_CPU_MAX, MXL_RV32, riscv_max_cpu_init),
    DEFINE_DYNAMIC_CPU(TYPE_RISCV_CPU_BASE32, MXL_RV32, rv32_base_cpu_init),
    DEFINE_VENDOR_CPU(TYPE_RISCV_CPU_IBEX, MXL_RV32, rv32_ibex_cpu_init),
    DEFINE_VENDOR_CPU(TYPE_RISCV_CPU_SIFIVE_E31, MXL_RV32, rv32_sifive_e_cpu_init),
    DEFINE_VENDOR_CPU(TYPE_RISCV_CPU_SIFIVE_E34, MXL_RV32, rv32_imafcu_nommu_cpu_init),
    DEFINE_VENDOR_CPU(TYPE_RISCV_CPU_SIFIVE_U34, MXL_RV32, rv32_sifive_u_cpu_init),
    DEFINE_BARE_CPU(TYPE_RISCV_CPU_RV32I, MXL_RV32, rv32i_bare_cpu_init),
    DEFINE_BARE_CPU(TYPE_RISCV_CPU_RV32E, MXL_RV32, rv32e_bare_cpu_init),
#elif defined(TARGET_RISCV64)
    DEFINE_DYNAMIC_CPU(TYPE_RISCV_CPU_ANY, MXL_RV64, riscv_any_cpu_init),
    DEFINE_DYNAMIC_CPU(TYPE_RISCV_CPU_MAX, MXL_RV64, riscv_max_cpu_init),
    DEFINE_DYNAMIC_CPU(TYPE_RISCV_CPU_BASE64, MXL_RV64, rv64_base_cpu_init),
    DEFINE_VENDOR_CPU(TYPE_RISCV_CPU_SIFIVE_E51, MXL_RV64, rv64_sifive_e_cpu_init),
    DEFINE_VENDOR_CPU(TYPE_RISCV_CPU_SIFIVE_U54, MXL_RV64, rv64_sifive_u_cpu_init),
    DEFINE_VENDOR_CPU(TYPE_RISCV_CPU_SHAKTI_C, MXL_RV64, rv64_sifive_u_cpu_init),
    DEFINE_VENDOR_CPU(TYPE_RISCV_CPU_THEAD_C906, MXL_RV64, rv64_thead_c906_cpu_init),
    DEFINE_VENDOR_CPU(TYPE_RISCV_CPU_VEYRON_V1, MXL_RV64, rv64_veyron_v1_cpu_init),
    DEFINE_DYNAMIC_CPU(TYPE_RISCV_CPU_BASE128, MXL_RV128, rv128_base_cpu_init),
    DEFINE_BARE_CPU(TYPE_RISCV_CPU_RV64I, MXL_RV64, rv64i_bare_cpu_init),
    DEFINE_BARE_CPU(TYPE_RISCV_CPU_RV64E, MXL_RV64, rv64e_bare_cpu_init),
    DEFINE_PROFILE_CPU(TYPE_RISCV_CPU_RVA22U64, MXL_RV64, rva22u64_profile_cpu_init),
    DEFINE_PROFILE_CPU(TYPE_RISCV_CPU_RVA22S64, MXL_RV64, rva22s64_profile_cpu_init),
#endif
};

DEFINE_TYPES(riscv_cpu_type_infos)