/*
 * QEMU RISC-V CPU
 *
 * Copyright (c) 2016-2017 Sagar Karandikar, sagark@eecs.berkeley.edu
 * Copyright (c) 2017-2018 SiFive, Inc.
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms and conditions of the GNU General Public License,
 * version 2 or later, as published by the Free Software Foundation.
 *
 * This program is distributed in the hope it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
 * more details.
 *
 * You should have received a copy of the GNU General Public License along with
 * this program. If not, see <http://www.gnu.org/licenses/>.
 */

#include "qemu/osdep.h"
#include "qemu/qemu-print.h"
#include "qemu/ctype.h"
#include "qemu/log.h"
#include "cpu.h"
#include "cpu_vendorid.h"
#include "internals.h"
#include "exec/exec-all.h"
#include "qapi/error.h"
#include "qapi/visitor.h"
#include "qemu/error-report.h"
#include "hw/qdev-properties.h"
#include "hw/core/qdev-prop-internal.h"
#include "migration/vmstate.h"
#include "fpu/softfloat-helpers.h"
#include "sysemu/device_tree.h"
#include "sysemu/kvm.h"
#include "sysemu/tcg.h"
#include "kvm/kvm_riscv.h"
#include "tcg/tcg-cpu.h"
#include "tcg/tcg.h"

/* RISC-V CPU definitions */
static const char riscv_single_letter_exts[] = "IEMAFDQCBPVH";
const uint32_t misa_bits[] = {RVI, RVE, RVM, RVA, RVF, RVD, RVV,
                              RVC, RVS, RVU, RVH, RVJ, RVG, RVB, 0};

/*
 * From vector_helper.c
 * Note that vector data is stored in host-endian 64-bit chunks,
 * so addressing bytes needs a host-endian fixup.
 */
#if HOST_BIG_ENDIAN
#define BYTE(x)  ((x) ^ 7)
#else
#define BYTE(x)  (x)
#endif

bool riscv_cpu_is_32bit(RISCVCPU *cpu)
{
    return riscv_cpu_mxl(&cpu->env) == MXL_RV32;
}

/* Hash that stores general user set numeric options */
static GHashTable *general_user_opts;

static void cpu_option_add_user_setting(const char *optname, uint32_t value)
{
    g_hash_table_insert(general_user_opts, (gpointer)optname,
                        GUINT_TO_POINTER(value));
}

bool riscv_cpu_option_set(const char *optname)
{
    return g_hash_table_contains(general_user_opts, optname);
}

#define ISA_EXT_DATA_ENTRY(_name, _min_ver, _prop) \
    {#_name, _min_ver, CPU_CFG_OFFSET(_prop)}

/*
 * Here are the ordering rules of extension naming defined by RISC-V
 * specification:
 * 1. All extensions should be separated from other multi-letter extensions
 *    by an underscore.
 * 2. The first letter following the 'Z' conventionally indicates the most
 *    closely related alphabetical extension category, IMAFDQLCBKJTPVH.
 *    If multiple 'Z' extensions are named, they should be ordered first
 *    by category, then alphabetically within a category.
 * 3. Standard supervisor-level extensions (starting with 'S') should be
 *    listed after standard unprivileged extensions. If multiple
 *    supervisor-level extensions are listed, they should be ordered
 *    alphabetically.
 * 4. Non-standard extensions (starting with 'X') must be listed after all
 *    standard extensions. They must be separated from other multi-letter
 *    extensions by an underscore.
 *
 * Single letter extensions are checked in riscv_cpu_validate_misa_priv()
 * instead.
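 *
 * As an illustrative example (not taken from any real CPU definition), an
 * ISA string that follows these rules could look like:
 *   rv64imafdc_zicbom_zicsr_zifencei_zba_zbb_svinval_xtheadcmo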
 */
const RISCVIsaExtData isa_edata_arr[] = {
    ISA_EXT_DATA_ENTRY(zic64b, PRIV_VERSION_1_12_0, ext_zic64b),
    ISA_EXT_DATA_ENTRY(zicbom, PRIV_VERSION_1_12_0, ext_zicbom),
    ISA_EXT_DATA_ENTRY(zicbop, PRIV_VERSION_1_12_0, ext_zicbop),
    ISA_EXT_DATA_ENTRY(zicboz, PRIV_VERSION_1_12_0, ext_zicboz),
    ISA_EXT_DATA_ENTRY(ziccamoa, PRIV_VERSION_1_11_0, ext_always_enabled),
    ISA_EXT_DATA_ENTRY(ziccif, PRIV_VERSION_1_11_0, ext_always_enabled),
    ISA_EXT_DATA_ENTRY(zicclsm, PRIV_VERSION_1_11_0, ext_always_enabled),
    ISA_EXT_DATA_ENTRY(ziccrse, PRIV_VERSION_1_11_0, ext_always_enabled),
    ISA_EXT_DATA_ENTRY(zicond, PRIV_VERSION_1_12_0, ext_zicond),
    ISA_EXT_DATA_ENTRY(zicntr, PRIV_VERSION_1_12_0, ext_zicntr),
    ISA_EXT_DATA_ENTRY(zicsr, PRIV_VERSION_1_10_0, ext_zicsr),
    ISA_EXT_DATA_ENTRY(zifencei, PRIV_VERSION_1_10_0, ext_zifencei),
    ISA_EXT_DATA_ENTRY(zihintntl, PRIV_VERSION_1_10_0, ext_zihintntl),
    ISA_EXT_DATA_ENTRY(zihintpause, PRIV_VERSION_1_10_0, ext_zihintpause),
    ISA_EXT_DATA_ENTRY(zihpm, PRIV_VERSION_1_12_0, ext_zihpm),
    ISA_EXT_DATA_ENTRY(zmmul, PRIV_VERSION_1_12_0, ext_zmmul),
    ISA_EXT_DATA_ENTRY(za64rs, PRIV_VERSION_1_12_0, ext_always_enabled),
    ISA_EXT_DATA_ENTRY(zaamo, PRIV_VERSION_1_12_0, ext_zaamo),
    ISA_EXT_DATA_ENTRY(zacas, PRIV_VERSION_1_12_0, ext_zacas),
    ISA_EXT_DATA_ENTRY(zalrsc, PRIV_VERSION_1_12_0, ext_zalrsc),
    ISA_EXT_DATA_ENTRY(zawrs, PRIV_VERSION_1_12_0, ext_zawrs),
    ISA_EXT_DATA_ENTRY(zfa, PRIV_VERSION_1_12_0, ext_zfa),
    ISA_EXT_DATA_ENTRY(zfbfmin, PRIV_VERSION_1_12_0, ext_zfbfmin),
    ISA_EXT_DATA_ENTRY(zfh, PRIV_VERSION_1_11_0, ext_zfh),
    ISA_EXT_DATA_ENTRY(zfhmin, PRIV_VERSION_1_11_0, ext_zfhmin),
    ISA_EXT_DATA_ENTRY(zfinx, PRIV_VERSION_1_12_0, ext_zfinx),
    ISA_EXT_DATA_ENTRY(zdinx, PRIV_VERSION_1_12_0, ext_zdinx),
    ISA_EXT_DATA_ENTRY(zca, PRIV_VERSION_1_12_0, ext_zca),
    ISA_EXT_DATA_ENTRY(zcb, PRIV_VERSION_1_12_0, ext_zcb),
    ISA_EXT_DATA_ENTRY(zcf, PRIV_VERSION_1_12_0, ext_zcf),
    ISA_EXT_DATA_ENTRY(zcd, PRIV_VERSION_1_12_0, ext_zcd),
    ISA_EXT_DATA_ENTRY(zce, PRIV_VERSION_1_12_0, ext_zce),
    ISA_EXT_DATA_ENTRY(zcmp, PRIV_VERSION_1_12_0, ext_zcmp),
    ISA_EXT_DATA_ENTRY(zcmt, PRIV_VERSION_1_12_0, ext_zcmt),
    ISA_EXT_DATA_ENTRY(zba, PRIV_VERSION_1_12_0, ext_zba),
    ISA_EXT_DATA_ENTRY(zbb, PRIV_VERSION_1_12_0, ext_zbb),
    ISA_EXT_DATA_ENTRY(zbc, PRIV_VERSION_1_12_0, ext_zbc),
    ISA_EXT_DATA_ENTRY(zbkb, PRIV_VERSION_1_12_0, ext_zbkb),
    ISA_EXT_DATA_ENTRY(zbkc, PRIV_VERSION_1_12_0, ext_zbkc),
    ISA_EXT_DATA_ENTRY(zbkx, PRIV_VERSION_1_12_0, ext_zbkx),
    ISA_EXT_DATA_ENTRY(zbs, PRIV_VERSION_1_12_0, ext_zbs),
    ISA_EXT_DATA_ENTRY(zk, PRIV_VERSION_1_12_0, ext_zk),
    ISA_EXT_DATA_ENTRY(zkn, PRIV_VERSION_1_12_0, ext_zkn),
    ISA_EXT_DATA_ENTRY(zknd, PRIV_VERSION_1_12_0, ext_zknd),
    ISA_EXT_DATA_ENTRY(zkne, PRIV_VERSION_1_12_0, ext_zkne),
    ISA_EXT_DATA_ENTRY(zknh, PRIV_VERSION_1_12_0, ext_zknh),
    ISA_EXT_DATA_ENTRY(zkr, PRIV_VERSION_1_12_0, ext_zkr),
    ISA_EXT_DATA_ENTRY(zks, PRIV_VERSION_1_12_0, ext_zks),
    ISA_EXT_DATA_ENTRY(zksed, PRIV_VERSION_1_12_0, ext_zksed),
    ISA_EXT_DATA_ENTRY(zksh, PRIV_VERSION_1_12_0, ext_zksh),
    ISA_EXT_DATA_ENTRY(zkt, PRIV_VERSION_1_12_0, ext_zkt),
    ISA_EXT_DATA_ENTRY(zvbb, PRIV_VERSION_1_12_0, ext_zvbb),
    ISA_EXT_DATA_ENTRY(zvbc, PRIV_VERSION_1_12_0, ext_zvbc),
    ISA_EXT_DATA_ENTRY(zve32f, PRIV_VERSION_1_10_0, ext_zve32f),
    ISA_EXT_DATA_ENTRY(zve64f, PRIV_VERSION_1_10_0, ext_zve64f),
    ISA_EXT_DATA_ENTRY(zve64d, PRIV_VERSION_1_10_0, ext_zve64d),
    ISA_EXT_DATA_ENTRY(zvfbfmin, PRIV_VERSION_1_12_0, ext_zvfbfmin),
    ISA_EXT_DATA_ENTRY(zvfbfwma, PRIV_VERSION_1_12_0, ext_zvfbfwma),
    ISA_EXT_DATA_ENTRY(zvfh, PRIV_VERSION_1_12_0, ext_zvfh),
    ISA_EXT_DATA_ENTRY(zvfhmin, PRIV_VERSION_1_12_0, ext_zvfhmin),
    ISA_EXT_DATA_ENTRY(zvkb, PRIV_VERSION_1_12_0, ext_zvkb),
    ISA_EXT_DATA_ENTRY(zvkg, PRIV_VERSION_1_12_0, ext_zvkg),
    ISA_EXT_DATA_ENTRY(zvkn, PRIV_VERSION_1_12_0, ext_zvkn),
    ISA_EXT_DATA_ENTRY(zvknc, PRIV_VERSION_1_12_0, ext_zvknc),
    ISA_EXT_DATA_ENTRY(zvkned, PRIV_VERSION_1_12_0, ext_zvkned),
    ISA_EXT_DATA_ENTRY(zvkng, PRIV_VERSION_1_12_0, ext_zvkng),
    ISA_EXT_DATA_ENTRY(zvknha, PRIV_VERSION_1_12_0, ext_zvknha),
    ISA_EXT_DATA_ENTRY(zvknhb, PRIV_VERSION_1_12_0, ext_zvknhb),
    ISA_EXT_DATA_ENTRY(zvks, PRIV_VERSION_1_12_0, ext_zvks),
    ISA_EXT_DATA_ENTRY(zvksc, PRIV_VERSION_1_12_0, ext_zvksc),
    ISA_EXT_DATA_ENTRY(zvksed, PRIV_VERSION_1_12_0, ext_zvksed),
    ISA_EXT_DATA_ENTRY(zvksg, PRIV_VERSION_1_12_0, ext_zvksg),
    ISA_EXT_DATA_ENTRY(zvksh, PRIV_VERSION_1_12_0, ext_zvksh),
    ISA_EXT_DATA_ENTRY(zvkt, PRIV_VERSION_1_12_0, ext_zvkt),
    ISA_EXT_DATA_ENTRY(zhinx, PRIV_VERSION_1_12_0, ext_zhinx),
    ISA_EXT_DATA_ENTRY(zhinxmin, PRIV_VERSION_1_12_0, ext_zhinxmin),
    ISA_EXT_DATA_ENTRY(smaia, PRIV_VERSION_1_12_0, ext_smaia),
    ISA_EXT_DATA_ENTRY(smepmp, PRIV_VERSION_1_12_0, ext_smepmp),
    ISA_EXT_DATA_ENTRY(smstateen, PRIV_VERSION_1_12_0, ext_smstateen),
    ISA_EXT_DATA_ENTRY(ssaia, PRIV_VERSION_1_12_0, ext_ssaia),
    ISA_EXT_DATA_ENTRY(ssccptr, PRIV_VERSION_1_11_0, ext_always_enabled),
    ISA_EXT_DATA_ENTRY(sscofpmf, PRIV_VERSION_1_12_0, ext_sscofpmf),
    ISA_EXT_DATA_ENTRY(sscounterenw, PRIV_VERSION_1_12_0, ext_always_enabled),
    ISA_EXT_DATA_ENTRY(sstc, PRIV_VERSION_1_12_0, ext_sstc),
    ISA_EXT_DATA_ENTRY(sstvala, PRIV_VERSION_1_12_0, ext_always_enabled),
    ISA_EXT_DATA_ENTRY(sstvecd, PRIV_VERSION_1_12_0, ext_always_enabled),
    ISA_EXT_DATA_ENTRY(svade, PRIV_VERSION_1_11_0, ext_svade),
    ISA_EXT_DATA_ENTRY(svadu, PRIV_VERSION_1_12_0, ext_svadu),
    ISA_EXT_DATA_ENTRY(svinval, PRIV_VERSION_1_12_0, ext_svinval),
    ISA_EXT_DATA_ENTRY(svnapot, PRIV_VERSION_1_12_0, ext_svnapot),
    ISA_EXT_DATA_ENTRY(svpbmt, PRIV_VERSION_1_12_0, ext_svpbmt),
    ISA_EXT_DATA_ENTRY(xtheadba, PRIV_VERSION_1_11_0, ext_xtheadba),
    ISA_EXT_DATA_ENTRY(xtheadbb, PRIV_VERSION_1_11_0, ext_xtheadbb),
    ISA_EXT_DATA_ENTRY(xtheadbs, PRIV_VERSION_1_11_0, ext_xtheadbs),
    ISA_EXT_DATA_ENTRY(xtheadcmo, PRIV_VERSION_1_11_0, ext_xtheadcmo),
    ISA_EXT_DATA_ENTRY(xtheadcondmov, PRIV_VERSION_1_11_0, ext_xtheadcondmov),
    ISA_EXT_DATA_ENTRY(xtheadfmemidx, PRIV_VERSION_1_11_0, ext_xtheadfmemidx),
    ISA_EXT_DATA_ENTRY(xtheadfmv, PRIV_VERSION_1_11_0, ext_xtheadfmv),
    ISA_EXT_DATA_ENTRY(xtheadmac, PRIV_VERSION_1_11_0, ext_xtheadmac),
    ISA_EXT_DATA_ENTRY(xtheadmemidx, PRIV_VERSION_1_11_0, ext_xtheadmemidx),
    ISA_EXT_DATA_ENTRY(xtheadmempair, PRIV_VERSION_1_11_0, ext_xtheadmempair),
    ISA_EXT_DATA_ENTRY(xtheadsync, PRIV_VERSION_1_11_0, ext_xtheadsync),
    ISA_EXT_DATA_ENTRY(xventanacondops, PRIV_VERSION_1_12_0, ext_XVentanaCondOps),

    DEFINE_PROP_END_OF_LIST(),
};

bool isa_ext_is_enabled(RISCVCPU *cpu, uint32_t ext_offset)
{
    bool *ext_enabled = (void *)&cpu->cfg + ext_offset;

    return *ext_enabled;
}

void isa_ext_update_enabled(RISCVCPU *cpu, uint32_t ext_offset, bool en)
{
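    /*
     * ext_offset is a byte offset of the extension's 'bool' flag inside
     * RISCVCPUConfig, as produced by CPU_CFG_OFFSET(), so plain pointer
     * arithmetic on &cpu->cfg is enough to reach it.
     */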
    bool *ext_enabled = (void *)&cpu->cfg + ext_offset;

    *ext_enabled = en;
}

bool riscv_cpu_is_vendor(Object *cpu_obj)
{
    return object_dynamic_cast(cpu_obj, TYPE_RISCV_VENDOR_CPU) != NULL;
}

const char * const riscv_int_regnames[] = {
    "x0/zero", "x1/ra", "x2/sp", "x3/gp", "x4/tp", "x5/t0", "x6/t1",
    "x7/t2", "x8/s0", "x9/s1", "x10/a0", "x11/a1", "x12/a2", "x13/a3",
    "x14/a4", "x15/a5", "x16/a6", "x17/a7", "x18/s2", "x19/s3", "x20/s4",
    "x21/s5", "x22/s6", "x23/s7", "x24/s8", "x25/s9", "x26/s10", "x27/s11",
    "x28/t3", "x29/t4", "x30/t5", "x31/t6"
};

const char * const riscv_int_regnamesh[] = {
    "x0h/zeroh", "x1h/rah", "x2h/sph", "x3h/gph", "x4h/tph", "x5h/t0h",
    "x6h/t1h", "x7h/t2h", "x8h/s0h", "x9h/s1h", "x10h/a0h", "x11h/a1h",
    "x12h/a2h", "x13h/a3h", "x14h/a4h", "x15h/a5h", "x16h/a6h", "x17h/a7h",
    "x18h/s2h", "x19h/s3h", "x20h/s4h", "x21h/s5h", "x22h/s6h", "x23h/s7h",
    "x24h/s8h", "x25h/s9h", "x26h/s10h", "x27h/s11h", "x28h/t3h", "x29h/t4h",
    "x30h/t5h", "x31h/t6h"
};

const char * const riscv_fpr_regnames[] = {
    "f0/ft0", "f1/ft1", "f2/ft2", "f3/ft3", "f4/ft4", "f5/ft5",
    "f6/ft6", "f7/ft7", "f8/fs0", "f9/fs1", "f10/fa0", "f11/fa1",
    "f12/fa2", "f13/fa3", "f14/fa4", "f15/fa5", "f16/fa6", "f17/fa7",
    "f18/fs2", "f19/fs3", "f20/fs4", "f21/fs5", "f22/fs6", "f23/fs7",
    "f24/fs8", "f25/fs9", "f26/fs10", "f27/fs11", "f28/ft8", "f29/ft9",
    "f30/ft10", "f31/ft11"
};

const char * const riscv_rvv_regnames[] = {
    "v0", "v1", "v2", "v3", "v4", "v5", "v6",
    "v7", "v8", "v9", "v10", "v11", "v12", "v13",
    "v14", "v15", "v16", "v17", "v18", "v19", "v20",
    "v21", "v22", "v23", "v24", "v25", "v26", "v27",
    "v28", "v29", "v30", "v31"
};

static const char * const riscv_excp_names[] = {
    "misaligned_fetch",
    "fault_fetch",
    "illegal_instruction",
    "breakpoint",
    "misaligned_load",
    "fault_load",
    "misaligned_store",
    "fault_store",
    "user_ecall",
    "supervisor_ecall",
    "hypervisor_ecall",
    "machine_ecall",
    "exec_page_fault",
    "load_page_fault",
    "reserved",
    "store_page_fault",
    "reserved",
    "reserved",
    "reserved",
    "reserved",
    "guest_exec_page_fault",
    "guest_load_page_fault",
    "reserved",
    "guest_store_page_fault",
};

static const char * const riscv_intr_names[] = {
    "u_software",
    "s_software",
    "vs_software",
    "m_software",
    "u_timer",
    "s_timer",
    "vs_timer",
    "m_timer",
    "u_external",
    "s_external",
    "vs_external",
    "m_external",
    "reserved",
    "reserved",
    "reserved",
    "reserved"
};

const char *riscv_cpu_get_trap_name(target_ulong cause, bool async)
{
    if (async) {
        return (cause < ARRAY_SIZE(riscv_intr_names)) ?
               riscv_intr_names[cause] : "(unknown)";
    } else {
        return (cause < ARRAY_SIZE(riscv_excp_names)) ?
               riscv_excp_names[cause] : "(unknown)";
    }
}

void riscv_cpu_set_misa_ext(CPURISCVState *env, uint32_t ext)
{
    env->misa_ext_mask = env->misa_ext = ext;
}

int riscv_cpu_max_xlen(RISCVCPUClass *mcc)
{
    return 16 << mcc->misa_mxl_max;
}

#ifndef CONFIG_USER_ONLY
static uint8_t satp_mode_from_str(const char *satp_mode_str)
{
    if (!strncmp(satp_mode_str, "mbare", 5)) {
        return VM_1_10_MBARE;
    }

    if (!strncmp(satp_mode_str, "sv32", 4)) {
        return VM_1_10_SV32;
    }

    if (!strncmp(satp_mode_str, "sv39", 4)) {
        return VM_1_10_SV39;
    }

    if (!strncmp(satp_mode_str, "sv48", 4)) {
        return VM_1_10_SV48;
    }

    if (!strncmp(satp_mode_str, "sv57", 4)) {
        return VM_1_10_SV57;
    }

    if (!strncmp(satp_mode_str, "sv64", 4)) {
        return VM_1_10_SV64;
    }

    g_assert_not_reached();
}

uint8_t satp_mode_max_from_map(uint32_t map)
{
    /*
     * 'map = 0' will make us return (31 - 32), which C will
     * happily overflow to UINT_MAX. There's no good result to
     * return if 'map = 0' (e.g. returning 0 will be ambiguous
     * with the result for 'map = 1').
     *
     * Assert out if map = 0. Callers will have to deal with
     * it outside of this function.
     */
    g_assert(map > 0);

    /* map here has at least one bit set, so no problem with clz */
    return 31 - __builtin_clz(map);
}

const char *satp_mode_str(uint8_t satp_mode, bool is_32_bit)
{
    if (is_32_bit) {
        switch (satp_mode) {
        case VM_1_10_SV32:
            return "sv32";
        case VM_1_10_MBARE:
            return "none";
        }
    } else {
        switch (satp_mode) {
        case VM_1_10_SV64:
            return "sv64";
        case VM_1_10_SV57:
            return "sv57";
        case VM_1_10_SV48:
            return "sv48";
        case VM_1_10_SV39:
            return "sv39";
        case VM_1_10_MBARE:
            return "none";
        }
    }

    g_assert_not_reached();
}

static void set_satp_mode_max_supported(RISCVCPU *cpu,
                                        uint8_t satp_mode)
{
    bool rv32 = riscv_cpu_mxl(&cpu->env) == MXL_RV32;
    const bool *valid_vm = rv32 ? valid_vm_1_10_32 : valid_vm_1_10_64;

    for (int i = 0; i <= satp_mode; ++i) {
        if (valid_vm[i]) {
            cpu->cfg.satp_mode.supported |= (1 << i);
        }
    }
}

/* Set the satp mode to the max supported */
static void set_satp_mode_default_map(RISCVCPU *cpu)
{
    /*
     * Bare CPUs do not default to the max available.
     * Users must set a valid satp_mode in the command
     * line.
     */
    if (object_dynamic_cast(OBJECT(cpu), TYPE_RISCV_BARE_CPU) != NULL) {
        warn_report("No satp mode set. Defaulting to 'bare'");
        cpu->cfg.satp_mode.map = (1 << VM_1_10_MBARE);
        return;
    }

    cpu->cfg.satp_mode.map = cpu->cfg.satp_mode.supported;
}
#endif

static void riscv_any_cpu_init(Object *obj)
{
    RISCVCPU *cpu = RISCV_CPU(obj);
    CPURISCVState *env = &cpu->env;
    riscv_cpu_set_misa_ext(env, RVI | RVM | RVA | RVF | RVD | RVC | RVU);

#ifndef CONFIG_USER_ONLY
    set_satp_mode_max_supported(RISCV_CPU(obj),
        riscv_cpu_mxl(&RISCV_CPU(obj)->env) == MXL_RV32 ?
        VM_1_10_SV32 : VM_1_10_SV57);
#endif

    env->priv_ver = PRIV_VERSION_LATEST;

    /* inherited from parent obj via riscv_cpu_init() */
    cpu->cfg.ext_zifencei = true;
    cpu->cfg.ext_zicsr = true;
    cpu->cfg.mmu = true;
    cpu->cfg.pmp = true;
}

static void riscv_max_cpu_init(Object *obj)
{
    RISCVCPU *cpu = RISCV_CPU(obj);
    CPURISCVState *env = &cpu->env;

    cpu->cfg.mmu = true;
    cpu->cfg.pmp = true;

    env->priv_ver = PRIV_VERSION_LATEST;
#ifndef CONFIG_USER_ONLY
#ifdef TARGET_RISCV32
    set_satp_mode_max_supported(cpu, VM_1_10_SV32);
#else
    set_satp_mode_max_supported(cpu, VM_1_10_SV57);
#endif
#endif
}

#if defined(TARGET_RISCV64)
static void rv64_base_cpu_init(Object *obj)
{
    RISCVCPU *cpu = RISCV_CPU(obj);
    CPURISCVState *env = &cpu->env;

    cpu->cfg.mmu = true;
    cpu->cfg.pmp = true;

    /* Set latest version of privileged specification */
    env->priv_ver = PRIV_VERSION_LATEST;
#ifndef CONFIG_USER_ONLY
    set_satp_mode_max_supported(RISCV_CPU(obj), VM_1_10_SV57);
#endif
}

static void rv64_sifive_u_cpu_init(Object *obj)
{
    RISCVCPU *cpu = RISCV_CPU(obj);
    CPURISCVState *env = &cpu->env;
    riscv_cpu_set_misa_ext(env, RVI | RVM | RVA | RVF | RVD | RVC | RVS | RVU);
    env->priv_ver = PRIV_VERSION_1_10_0;
#ifndef CONFIG_USER_ONLY
    set_satp_mode_max_supported(RISCV_CPU(obj), VM_1_10_SV39);
#endif

    /* inherited from parent obj via riscv_cpu_init() */
    cpu->cfg.ext_zifencei = true;
    cpu->cfg.ext_zicsr = true;
    cpu->cfg.mmu = true;
    cpu->cfg.pmp = true;
}

static void rv64_sifive_e_cpu_init(Object *obj)
{
    CPURISCVState *env = &RISCV_CPU(obj)->env;
    RISCVCPU *cpu = RISCV_CPU(obj);

    riscv_cpu_set_misa_ext(env, RVI | RVM | RVA | RVC | RVU);
    env->priv_ver = PRIV_VERSION_1_10_0;
#ifndef CONFIG_USER_ONLY
    set_satp_mode_max_supported(cpu, VM_1_10_MBARE);
#endif

    /* inherited from parent obj via riscv_cpu_init() */
    cpu->cfg.ext_zifencei = true;
    cpu->cfg.ext_zicsr = true;
    cpu->cfg.pmp = true;
}

static void rv64_thead_c906_cpu_init(Object *obj)
{
    CPURISCVState *env = &RISCV_CPU(obj)->env;
    RISCVCPU *cpu = RISCV_CPU(obj);

    riscv_cpu_set_misa_ext(env, RVG | RVC | RVS | RVU);
    env->priv_ver = PRIV_VERSION_1_11_0;

    cpu->cfg.ext_zfa = true;
    cpu->cfg.ext_zfh = true;
    cpu->cfg.mmu = true;
    cpu->cfg.ext_xtheadba = true;
    cpu->cfg.ext_xtheadbb = true;
    cpu->cfg.ext_xtheadbs = true;
    cpu->cfg.ext_xtheadcmo = true;
    cpu->cfg.ext_xtheadcondmov = true;
    cpu->cfg.ext_xtheadfmemidx = true;
    cpu->cfg.ext_xtheadmac = true;
    cpu->cfg.ext_xtheadmemidx = true;
    cpu->cfg.ext_xtheadmempair = true;
    cpu->cfg.ext_xtheadsync = true;

    cpu->cfg.mvendorid = THEAD_VENDOR_ID;
#ifndef CONFIG_USER_ONLY
    set_satp_mode_max_supported(cpu, VM_1_10_SV39);
#endif

    /* inherited from parent obj via riscv_cpu_init() */
    cpu->cfg.pmp = true;
}

static void rv64_veyron_v1_cpu_init(Object *obj)
{
    CPURISCVState *env = &RISCV_CPU(obj)->env;
    RISCVCPU *cpu = RISCV_CPU(obj);

    riscv_cpu_set_misa_ext(env, RVG | RVC | RVS | RVU | RVH);
    env->priv_ver = PRIV_VERSION_1_12_0;

    /* Enable ISA extensions */
    cpu->cfg.mmu = true;
    cpu->cfg.ext_zifencei = true;
    cpu->cfg.ext_zicsr = true;
    cpu->cfg.pmp = true;
    cpu->cfg.ext_zicbom = true;
    cpu->cfg.cbom_blocksize = 64;
    cpu->cfg.cboz_blocksize = 64;
    cpu->cfg.ext_zicboz = true;
    cpu->cfg.ext_smaia = true;
    cpu->cfg.ext_ssaia = true;
    cpu->cfg.ext_sscofpmf = true;
    cpu->cfg.ext_sstc = true;
    cpu->cfg.ext_svinval = true;
    cpu->cfg.ext_svnapot = true;
    cpu->cfg.ext_svpbmt = true;
    cpu->cfg.ext_smstateen = true;
    cpu->cfg.ext_zba = true;
    cpu->cfg.ext_zbb = true;
    cpu->cfg.ext_zbc = true;
    cpu->cfg.ext_zbs = true;
    cpu->cfg.ext_XVentanaCondOps = true;

    cpu->cfg.mvendorid = VEYRON_V1_MVENDORID;
    cpu->cfg.marchid = VEYRON_V1_MARCHID;
    cpu->cfg.mimpid = VEYRON_V1_MIMPID;

#ifndef CONFIG_USER_ONLY
    set_satp_mode_max_supported(cpu, VM_1_10_SV48);
#endif
}

static void rv128_base_cpu_init(Object *obj)
{
    RISCVCPU *cpu = RISCV_CPU(obj);
    CPURISCVState *env = &cpu->env;

    if (qemu_tcg_mttcg_enabled()) {
        /* Missing 128-bit aligned atomics */
        error_report("128-bit RISC-V currently does not work with Multi "
                     "Threaded TCG. Please use: -accel tcg,thread=single");
        exit(EXIT_FAILURE);
    }

    cpu->cfg.mmu = true;
    cpu->cfg.pmp = true;

    /* Set latest version of privileged specification */
    env->priv_ver = PRIV_VERSION_LATEST;
#ifndef CONFIG_USER_ONLY
    set_satp_mode_max_supported(RISCV_CPU(obj), VM_1_10_SV57);
#endif
}

static void rv64i_bare_cpu_init(Object *obj)
{
    CPURISCVState *env = &RISCV_CPU(obj)->env;
    riscv_cpu_set_misa_ext(env, RVI);
}

static void rv64e_bare_cpu_init(Object *obj)
{
    CPURISCVState *env = &RISCV_CPU(obj)->env;
    riscv_cpu_set_misa_ext(env, RVE);
}
#else
static void rv32_base_cpu_init(Object *obj)
{
    RISCVCPU *cpu = RISCV_CPU(obj);
    CPURISCVState *env = &cpu->env;

    cpu->cfg.mmu = true;
    cpu->cfg.pmp = true;

    /* Set latest version of privileged specification */
    env->priv_ver = PRIV_VERSION_LATEST;
#ifndef CONFIG_USER_ONLY
    set_satp_mode_max_supported(RISCV_CPU(obj), VM_1_10_SV32);
#endif
}

static void rv32_sifive_u_cpu_init(Object *obj)
{
    RISCVCPU *cpu = RISCV_CPU(obj);
    CPURISCVState *env = &cpu->env;
    riscv_cpu_set_misa_ext(env, RVI | RVM | RVA | RVF | RVD | RVC | RVS | RVU);
    env->priv_ver = PRIV_VERSION_1_10_0;
#ifndef CONFIG_USER_ONLY
    set_satp_mode_max_supported(RISCV_CPU(obj), VM_1_10_SV32);
#endif

    /* inherited from parent obj via riscv_cpu_init() */
    cpu->cfg.ext_zifencei = true;
    cpu->cfg.ext_zicsr = true;
    cpu->cfg.mmu = true;
    cpu->cfg.pmp = true;
}

static void rv32_sifive_e_cpu_init(Object *obj)
{
    CPURISCVState *env = &RISCV_CPU(obj)->env;
    RISCVCPU *cpu = RISCV_CPU(obj);

    riscv_cpu_set_misa_ext(env, RVI | RVM | RVA | RVC | RVU);
    env->priv_ver = PRIV_VERSION_1_10_0;
#ifndef CONFIG_USER_ONLY
    set_satp_mode_max_supported(cpu, VM_1_10_MBARE);
#endif

    /* inherited from parent obj via riscv_cpu_init() */
    cpu->cfg.ext_zifencei = true;
    cpu->cfg.ext_zicsr = true;
    cpu->cfg.pmp = true;
}

static void rv32_ibex_cpu_init(Object *obj)
{
    CPURISCVState *env = &RISCV_CPU(obj)->env;
    RISCVCPU *cpu = RISCV_CPU(obj);

    riscv_cpu_set_misa_ext(env, RVI | RVM | RVC | RVU);
    env->priv_ver = PRIV_VERSION_1_12_0;
#ifndef CONFIG_USER_ONLY
    set_satp_mode_max_supported(cpu, VM_1_10_MBARE);
#endif
    /* inherited from parent obj via riscv_cpu_init() */
    cpu->cfg.ext_zifencei = true;
    cpu->cfg.ext_zicsr = true;
    cpu->cfg.pmp = true;
    cpu->cfg.ext_smepmp = true;
}

static void rv32_imafcu_nommu_cpu_init(Object *obj)
{
    CPURISCVState *env = &RISCV_CPU(obj)->env;
    RISCVCPU *cpu = RISCV_CPU(obj);

    riscv_cpu_set_misa_ext(env, RVI | RVM | RVA | RVF | RVC | RVU);
    env->priv_ver = PRIV_VERSION_1_10_0;
#ifndef CONFIG_USER_ONLY
    set_satp_mode_max_supported(cpu, VM_1_10_MBARE);
#endif

    /* inherited from parent obj via riscv_cpu_init() */
    cpu->cfg.ext_zifencei = true;
    cpu->cfg.ext_zicsr = true;
    cpu->cfg.pmp = true;
}

static void rv32i_bare_cpu_init(Object *obj)
{
    CPURISCVState *env = &RISCV_CPU(obj)->env;
    riscv_cpu_set_misa_ext(env, RVI);
}

static void rv32e_bare_cpu_init(Object *obj)
{
    CPURISCVState *env = &RISCV_CPU(obj)->env;
    riscv_cpu_set_misa_ext(env, RVE);
}
#endif

static ObjectClass *riscv_cpu_class_by_name(const char *cpu_model)
{
    ObjectClass *oc;
    char *typename;
    char **cpuname;

    cpuname = g_strsplit(cpu_model, ",", 1);
    typename = g_strdup_printf(RISCV_CPU_TYPE_NAME("%s"), cpuname[0]);
    oc = object_class_by_name(typename);
    g_strfreev(cpuname);
    g_free(typename);

    return oc;
}

char *riscv_cpu_get_name(RISCVCPU *cpu)
{
    RISCVCPUClass *rcc = RISCV_CPU_GET_CLASS(cpu);
    const char *typename = object_class_get_name(OBJECT_CLASS(rcc));

    g_assert(g_str_has_suffix(typename, RISCV_CPU_TYPE_SUFFIX));

    return cpu_model_from_type(typename);
}

static void riscv_cpu_dump_state(CPUState *cs, FILE *f, int flags)
{
    RISCVCPU *cpu = RISCV_CPU(cs);
    CPURISCVState *env = &cpu->env;
    int i, j;
    uint8_t *p;

#if !defined(CONFIG_USER_ONLY)
    if (riscv_has_ext(env, RVH)) {
        qemu_fprintf(f, " %s %d\n", "V = ", env->virt_enabled);
    }
#endif
    qemu_fprintf(f, " %s " TARGET_FMT_lx "\n", "pc ", env->pc);
#ifndef CONFIG_USER_ONLY
    {
        static const int dump_csrs[] = {
            CSR_MHARTID,
            CSR_MSTATUS,
            CSR_MSTATUSH,
            /*
             * CSR_SSTATUS is intentionally omitted here as its value
             * can be figured out by looking at CSR_MSTATUS
             */
            CSR_HSTATUS,
            CSR_VSSTATUS,
            CSR_MIP,
            CSR_MIE,
            CSR_MIDELEG,
            CSR_HIDELEG,
            CSR_MEDELEG,
            CSR_HEDELEG,
            CSR_MTVEC,
            CSR_STVEC,
            CSR_VSTVEC,
            CSR_MEPC,
            CSR_SEPC,
            CSR_VSEPC,
            CSR_MCAUSE,
            CSR_SCAUSE,
            CSR_VSCAUSE,
            CSR_MTVAL,
            CSR_STVAL,
            CSR_HTVAL,
            CSR_MTVAL2,
            CSR_MSCRATCH,
            CSR_SSCRATCH,
            CSR_SATP,
            CSR_MMTE,
            CSR_UPMBASE,
            CSR_UPMMASK,
            CSR_SPMBASE,
            CSR_SPMMASK,
            CSR_MPMBASE,
            CSR_MPMMASK,
        };

        for (i = 0; i < ARRAY_SIZE(dump_csrs); ++i) {
            int csrno = dump_csrs[i];
            target_ulong val = 0;
            RISCVException res = riscv_csrrw_debug(env, csrno, &val, 0, 0);

            /*
             * Rely on the smode, hmode, etc, predicates within csr.c
             * to do the filtering of the registers that are present.
             */
            if (res == RISCV_EXCP_NONE) {
                qemu_fprintf(f, " %-8s " TARGET_FMT_lx "\n",
                             csr_ops[csrno].name, val);
            }
        }
    }
#endif

    for (i = 0; i < 32; i++) {
        qemu_fprintf(f, " %-8s " TARGET_FMT_lx,
                     riscv_int_regnames[i], env->gpr[i]);
        if ((i & 3) == 3) {
            qemu_fprintf(f, "\n");
        }
    }
    if (flags & CPU_DUMP_FPU) {
        for (i = 0; i < 32; i++) {
            qemu_fprintf(f, " %-8s %016" PRIx64,
                         riscv_fpr_regnames[i], env->fpr[i]);
            if ((i & 3) == 3) {
                qemu_fprintf(f, "\n");
            }
        }
    }
    if (riscv_has_ext(env, RVV) && (flags & CPU_DUMP_VPU)) {
        static const int dump_rvv_csrs[] = {
            CSR_VSTART,
            CSR_VXSAT,
            CSR_VXRM,
            CSR_VCSR,
            CSR_VL,
            CSR_VTYPE,
            CSR_VLENB,
        };
        for (i = 0; i < ARRAY_SIZE(dump_rvv_csrs); ++i) {
            int csrno = dump_rvv_csrs[i];
            target_ulong val = 0;
            RISCVException res = riscv_csrrw_debug(env, csrno, &val, 0, 0);

            /*
             * Rely on the smode, hmode, etc, predicates within csr.c
             * to do the filtering of the registers that are present.
             */
            if (res == RISCV_EXCP_NONE) {
                qemu_fprintf(f, " %-8s " TARGET_FMT_lx "\n",
                             csr_ops[csrno].name, val);
            }
        }
        uint16_t vlenb = cpu->cfg.vlenb;

        for (i = 0; i < 32; i++) {
            qemu_fprintf(f, " %-8s ", riscv_rvv_regnames[i]);
            p = (uint8_t *)env->vreg;
            for (j = vlenb - 1 ; j >= 0; j--) {
                qemu_fprintf(f, "%02x", *(p + i * vlenb + BYTE(j)));
            }
            qemu_fprintf(f, "\n");
        }
    }
}

static void riscv_cpu_set_pc(CPUState *cs, vaddr value)
{
    RISCVCPU *cpu = RISCV_CPU(cs);
    CPURISCVState *env = &cpu->env;

    if (env->xl == MXL_RV32) {
        env->pc = (int32_t)value;
    } else {
        env->pc = value;
    }
}

static vaddr riscv_cpu_get_pc(CPUState *cs)
{
    RISCVCPU *cpu = RISCV_CPU(cs);
    CPURISCVState *env = &cpu->env;

    /* Match cpu_get_tb_cpu_state. */
    if (env->xl == MXL_RV32) {
        return env->pc & UINT32_MAX;
    }
    return env->pc;
}

static bool riscv_cpu_has_work(CPUState *cs)
{
#ifndef CONFIG_USER_ONLY
    RISCVCPU *cpu = RISCV_CPU(cs);
    CPURISCVState *env = &cpu->env;
    /*
     * Definition of the WFI instruction requires it to ignore the privilege
     * mode and delegation registers, but respect individual enables
     */
    return riscv_cpu_all_pending(env) != 0 ||
           riscv_cpu_sirq_pending(env) != RISCV_EXCP_NONE ||
           riscv_cpu_vsirq_pending(env) != RISCV_EXCP_NONE;
#else
    return true;
#endif
}

static int riscv_cpu_mmu_index(CPUState *cs, bool ifetch)
{
    return riscv_env_mmu_index(cpu_env(cs), ifetch);
}

static void riscv_cpu_reset_hold(Object *obj)
{
#ifndef CONFIG_USER_ONLY
    uint8_t iprio;
    int i, irq, rdzero;
#endif
    CPUState *cs = CPU(obj);
    RISCVCPU *cpu = RISCV_CPU(cs);
    RISCVCPUClass *mcc = RISCV_CPU_GET_CLASS(cpu);
    CPURISCVState *env = &cpu->env;

    if (mcc->parent_phases.hold) {
        mcc->parent_phases.hold(obj);
    }
#ifndef CONFIG_USER_ONLY
    env->misa_mxl = mcc->misa_mxl_max;
    env->priv = PRV_M;
    env->mstatus &= ~(MSTATUS_MIE | MSTATUS_MPRV);
    if (env->misa_mxl > MXL_RV32) {
        /*
         * The reset status of SXL/UXL is undefined, but mstatus is WARL
         * and we must ensure that the value after init is valid for read.
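         * Here we seed SXL/UXL (and the HS-level copies when the H
         * extension is present) with the CPU's maximum MXL.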
         */
        env->mstatus = set_field(env->mstatus, MSTATUS64_SXL, env->misa_mxl);
        env->mstatus = set_field(env->mstatus, MSTATUS64_UXL, env->misa_mxl);
        if (riscv_has_ext(env, RVH)) {
            env->vsstatus = set_field(env->vsstatus,
                                      MSTATUS64_SXL, env->misa_mxl);
            env->vsstatus = set_field(env->vsstatus,
                                      MSTATUS64_UXL, env->misa_mxl);
            env->mstatus_hs = set_field(env->mstatus_hs,
                                        MSTATUS64_SXL, env->misa_mxl);
            env->mstatus_hs = set_field(env->mstatus_hs,
                                        MSTATUS64_UXL, env->misa_mxl);
        }
    }
    env->mcause = 0;
    env->miclaim = MIP_SGEIP;
    env->pc = env->resetvec;
    env->bins = 0;
    env->two_stage_lookup = false;

    env->menvcfg = (cpu->cfg.ext_svpbmt ? MENVCFG_PBMTE : 0) |
                   (cpu->cfg.ext_svadu ? MENVCFG_ADUE : 0);
    env->henvcfg = 0;

    /* Initialized default priorities of local interrupts. */
    for (i = 0; i < ARRAY_SIZE(env->miprio); i++) {
        iprio = riscv_cpu_default_priority(i);
        env->miprio[i] = (i == IRQ_M_EXT) ? 0 : iprio;
        env->siprio[i] = (i == IRQ_S_EXT) ? 0 : iprio;
        env->hviprio[i] = 0;
    }
    i = 0;
    while (!riscv_cpu_hviprio_index2irq(i, &irq, &rdzero)) {
        if (!rdzero) {
            env->hviprio[irq] = env->miprio[irq];
        }
        i++;
    }
    /* mmte is supposed to have pm.current hardwired to 1 */
    env->mmte |= (EXT_STATUS_INITIAL | MMTE_M_PM_CURRENT);

    /*
     * Bits 10, 6, 2 and 12 of mideleg are read only 1 when the Hypervisor
     * extension is enabled.
     */
    if (riscv_has_ext(env, RVH)) {
        env->mideleg |= HS_MODE_INTERRUPTS;
    }

    /*
     * Clear mseccfg and unlock all the PMP entries upon reset.
     * This is allowed as per the priv and smepmp specifications
     * and is needed to clear stale entries across reboots.
     */
    if (riscv_cpu_cfg(env)->ext_smepmp) {
        env->mseccfg = 0;
    }

    pmp_unlock_entries(env);
#endif
    env->xl = riscv_cpu_mxl(env);
    riscv_cpu_update_mask(env);
    cs->exception_index = RISCV_EXCP_NONE;
    env->load_res = -1;
    set_default_nan_mode(1, &env->fp_status);

#ifndef CONFIG_USER_ONLY
    if (cpu->cfg.debug) {
        riscv_trigger_reset_hold(env);
    }

    if (kvm_enabled()) {
        kvm_riscv_reset_vcpu(cpu);
    }
#endif
}

static void riscv_cpu_disas_set_info(CPUState *s, disassemble_info *info)
{
    RISCVCPU *cpu = RISCV_CPU(s);
    CPURISCVState *env = &cpu->env;
    info->target_info = &cpu->cfg;

    switch (env->xl) {
    case MXL_RV32:
        info->print_insn = print_insn_riscv32;
        break;
    case MXL_RV64:
        info->print_insn = print_insn_riscv64;
        break;
    case MXL_RV128:
        info->print_insn = print_insn_riscv128;
        break;
    default:
        g_assert_not_reached();
    }
}

#ifndef CONFIG_USER_ONLY
static void riscv_cpu_satp_mode_finalize(RISCVCPU *cpu, Error **errp)
{
    bool rv32 = riscv_cpu_is_32bit(cpu);
    uint8_t satp_mode_map_max, satp_mode_supported_max;

    /* The CPU wants the OS to decide which satp mode to use */
    if (cpu->cfg.satp_mode.supported == 0) {
        return;
    }

    satp_mode_supported_max =
        satp_mode_max_from_map(cpu->cfg.satp_mode.supported);

    if (cpu->cfg.satp_mode.map == 0) {
        if (cpu->cfg.satp_mode.init == 0) {
            /* If unset by the user, we fall back to the default satp mode.
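             * (MBARE for bare CPUs, otherwise every supported mode).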
             */
            set_satp_mode_default_map(cpu);
        } else {
            /*
             * Find the lowest level that was disabled and then enable the
             * first valid level below which can be found in
             * valid_vm_1_10_32/64.
             */
            for (int i = 1; i < 16; ++i) {
                if ((cpu->cfg.satp_mode.init & (1 << i)) &&
                    (cpu->cfg.satp_mode.supported & (1 << i))) {
                    for (int j = i - 1; j >= 0; --j) {
                        if (cpu->cfg.satp_mode.supported & (1 << j)) {
                            cpu->cfg.satp_mode.map |= (1 << j);
                            break;
                        }
                    }
                    break;
                }
            }
        }
    }

    satp_mode_map_max = satp_mode_max_from_map(cpu->cfg.satp_mode.map);

    /* Make sure the user asked for a supported configuration (HW and qemu) */
    if (satp_mode_map_max > satp_mode_supported_max) {
        error_setg(errp, "satp_mode %s is higher than hw max capability %s",
                   satp_mode_str(satp_mode_map_max, rv32),
                   satp_mode_str(satp_mode_supported_max, rv32));
        return;
    }

    /*
     * Make sure the user did not ask for an invalid configuration as per
     * the specification.
     */
    if (!rv32) {
        for (int i = satp_mode_map_max - 1; i >= 0; --i) {
            if (!(cpu->cfg.satp_mode.map & (1 << i)) &&
                (cpu->cfg.satp_mode.init & (1 << i)) &&
                (cpu->cfg.satp_mode.supported & (1 << i))) {
                error_setg(errp, "cannot disable %s satp mode if %s "
                           "is enabled", satp_mode_str(i, false),
                           satp_mode_str(satp_mode_map_max, false));
                return;
            }
        }
    }

    /* Finally expand the map so that all valid modes are set */
    for (int i = satp_mode_map_max - 1; i >= 0; --i) {
        if (cpu->cfg.satp_mode.supported & (1 << i)) {
            cpu->cfg.satp_mode.map |= (1 << i);
        }
    }
}
#endif

void riscv_cpu_finalize_features(RISCVCPU *cpu, Error **errp)
{
    Error *local_err = NULL;

#ifndef CONFIG_USER_ONLY
    riscv_cpu_satp_mode_finalize(cpu, &local_err);
    if (local_err != NULL) {
        error_propagate(errp, local_err);
        return;
    }
#endif

    if (tcg_enabled()) {
        riscv_tcg_cpu_finalize_features(cpu, &local_err);
        if (local_err != NULL) {
            error_propagate(errp, local_err);
            return;
        }
    } else if (kvm_enabled()) {
        riscv_kvm_cpu_finalize_features(cpu, &local_err);
        if (local_err != NULL) {
            error_propagate(errp, local_err);
            return;
        }
    }
}

static void riscv_cpu_realize(DeviceState *dev, Error **errp)
{
    CPUState *cs = CPU(dev);
    RISCVCPU *cpu = RISCV_CPU(dev);
    RISCVCPUClass *mcc = RISCV_CPU_GET_CLASS(dev);
    Error *local_err = NULL;

    if (object_dynamic_cast(OBJECT(dev), TYPE_RISCV_CPU_ANY) != NULL) {
        warn_report("The 'any' CPU is deprecated and will be "
                    "removed in the future.");
    }

    cpu_exec_realizefn(cs, &local_err);
    if (local_err != NULL) {
        error_propagate(errp, local_err);
        return;
    }

    riscv_cpu_finalize_features(cpu, &local_err);
    if (local_err != NULL) {
        error_propagate(errp, local_err);
        return;
    }

    riscv_cpu_register_gdb_regs_for_features(cs);

#ifndef CONFIG_USER_ONLY
    if (cpu->cfg.debug) {
        riscv_trigger_realize(&cpu->env);
    }
#endif

    qemu_init_vcpu(cs);
    cpu_reset(cs);

    mcc->parent_realize(dev, errp);
}

bool riscv_cpu_accelerator_compatible(RISCVCPU *cpu)
{
    if (tcg_enabled()) {
        return riscv_cpu_tcg_compatible(cpu);
    }

    return true;
}

#ifndef CONFIG_USER_ONLY
static void cpu_riscv_get_satp(Object *obj, Visitor *v, const char *name,
                               void *opaque, Error **errp)
{
    RISCVSATPMap *satp_map = opaque;
    uint8_t satp = satp_mode_from_str(name);
    bool value;

    value = satp_map->map & (1 << satp);

    visit_type_bool(v, name, &value, errp);
}

static void cpu_riscv_set_satp(Object *obj, Visitor *v, const char *name,
                               void *opaque, Error **errp)
{
    RISCVSATPMap *satp_map = opaque;
    uint8_t satp = satp_mode_from_str(name);
    bool value;

    if (!visit_type_bool(v, name, &value, errp)) {
        return;
    }

    satp_map->map = deposit32(satp_map->map, satp, 1, value);
    satp_map->init |= 1 << satp;
}

void riscv_add_satp_mode_properties(Object *obj)
{
    RISCVCPU *cpu = RISCV_CPU(obj);

    if (cpu->env.misa_mxl == MXL_RV32) {
        object_property_add(obj, "sv32", "bool", cpu_riscv_get_satp,
                            cpu_riscv_set_satp, NULL, &cpu->cfg.satp_mode);
    } else {
        object_property_add(obj, "sv39", "bool", cpu_riscv_get_satp,
                            cpu_riscv_set_satp, NULL, &cpu->cfg.satp_mode);
        object_property_add(obj, "sv48", "bool", cpu_riscv_get_satp,
                            cpu_riscv_set_satp, NULL, &cpu->cfg.satp_mode);
        object_property_add(obj, "sv57", "bool", cpu_riscv_get_satp,
                            cpu_riscv_set_satp, NULL, &cpu->cfg.satp_mode);
        object_property_add(obj, "sv64", "bool", cpu_riscv_get_satp,
                            cpu_riscv_set_satp, NULL, &cpu->cfg.satp_mode);
    }
}

static void riscv_cpu_set_irq(void *opaque, int irq, int level)
{
    RISCVCPU *cpu = RISCV_CPU(opaque);
    CPURISCVState *env = &cpu->env;

    if (irq < IRQ_LOCAL_MAX) {
        switch (irq) {
        case IRQ_U_SOFT:
        case IRQ_S_SOFT:
        case IRQ_VS_SOFT:
        case IRQ_M_SOFT:
        case IRQ_U_TIMER:
        case IRQ_S_TIMER:
        case IRQ_VS_TIMER:
        case IRQ_M_TIMER:
        case IRQ_U_EXT:
        case IRQ_VS_EXT:
        case IRQ_M_EXT:
            if (kvm_enabled()) {
                kvm_riscv_set_irq(cpu, irq, level);
            } else {
                riscv_cpu_update_mip(env, 1 << irq, BOOL_TO_MASK(level));
            }
            break;
        case IRQ_S_EXT:
            if (kvm_enabled()) {
                kvm_riscv_set_irq(cpu, irq, level);
            } else {
                env->external_seip = level;
                riscv_cpu_update_mip(env, 1 << irq,
                                     BOOL_TO_MASK(level | env->software_seip));
            }
            break;
        default:
            g_assert_not_reached();
        }
    } else if (irq < (IRQ_LOCAL_MAX + IRQ_LOCAL_GUEST_MAX)) {
        /* Require H-extension for handling guest local interrupts */
        if (!riscv_has_ext(env, RVH)) {
            g_assert_not_reached();
        }

        /* Compute bit position in HGEIP CSR */
        irq = irq - IRQ_LOCAL_MAX + 1;
        if (env->geilen < irq) {
            g_assert_not_reached();
        }

        /* Update HGEIP CSR */
        env->hgeip &= ~((target_ulong)1 << irq);
        if (level) {
            env->hgeip |= (target_ulong)1 << irq;
        }

        /* Update mip.SGEIP bit */
        riscv_cpu_update_mip(env, MIP_SGEIP,
                             BOOL_TO_MASK(!!(env->hgeie & env->hgeip)));
    } else {
        g_assert_not_reached();
    }
}
#endif /* CONFIG_USER_ONLY */

static bool riscv_cpu_is_dynamic(Object *cpu_obj)
{
    return object_dynamic_cast(cpu_obj, TYPE_RISCV_DYNAMIC_CPU) != NULL;
}

static void riscv_cpu_post_init(Object *obj)
{
    accel_cpu_instance_init(CPU(obj));
}

static void riscv_cpu_init(Object *obj)
{
    RISCVCPUClass *mcc = RISCV_CPU_GET_CLASS(obj);
    RISCVCPU *cpu = RISCV_CPU(obj);
    CPURISCVState *env = &cpu->env;

    env->misa_mxl = mcc->misa_mxl_max;

#ifndef CONFIG_USER_ONLY
    qdev_init_gpio_in(DEVICE(obj), riscv_cpu_set_irq,
                      IRQ_LOCAL_MAX + IRQ_LOCAL_GUEST_MAX);
#endif /* CONFIG_USER_ONLY */

    general_user_opts = g_hash_table_new(g_str_hash, g_str_equal);

    /*
     * The timer and performance counters extensions were supported
     * in QEMU before they were added as discrete extensions in the
     * ISA. To keep compatibility we'll always default them to 'true'
     * for all CPUs. Each accelerator will decide what to do when
     * users disable them.
     */
    RISCV_CPU(obj)->cfg.ext_zicntr = true;
    RISCV_CPU(obj)->cfg.ext_zihpm = true;

    /* Default values for non-bool cpu properties */
    cpu->cfg.pmu_mask = MAKE_64BIT_MASK(3, 16);
    cpu->cfg.vlenb = 128 >> 3;
    cpu->cfg.elen = 64;
    cpu->cfg.cbom_blocksize = 64;
    cpu->cfg.cbop_blocksize = 64;
    cpu->cfg.cboz_blocksize = 64;
    cpu->env.vext_ver = VEXT_VERSION_1_00_0;
}

static void riscv_bare_cpu_init(Object *obj)
{
    RISCVCPU *cpu = RISCV_CPU(obj);

    /*
     * Bare CPUs do not inherit the timer and performance
     * counters from the parent class (see riscv_cpu_init()
     * for info on why the parent enables them).
     *
     * Users have to explicitly enable these counters for
     * bare CPUs.
     */
    cpu->cfg.ext_zicntr = false;
    cpu->cfg.ext_zihpm = false;

    /* Set to QEMU's first supported priv version */
    cpu->env.priv_ver = PRIV_VERSION_1_10_0;

    /*
     * Support all available satp_mode settings. The default
     * value will be set to MBARE if the user doesn't set
     * satp_mode manually (see set_satp_mode_default_map()).
     */
#ifndef CONFIG_USER_ONLY
    set_satp_mode_max_supported(cpu, VM_1_10_SV64);
#endif
}

typedef struct misa_ext_info {
    const char *name;
    const char *description;
} MISAExtInfo;

#define MISA_INFO_IDX(_bit) \
    __builtin_ctz(_bit)

#define MISA_EXT_INFO(_bit, _propname, _descr) \
    [MISA_INFO_IDX(_bit)] = {.name = _propname, .description = _descr}

static const MISAExtInfo misa_ext_info_arr[] = {
    MISA_EXT_INFO(RVA, "a", "Atomic instructions"),
    MISA_EXT_INFO(RVC, "c", "Compressed instructions"),
    MISA_EXT_INFO(RVD, "d", "Double-precision floating point"),
    MISA_EXT_INFO(RVF, "f", "Single-precision floating point"),
    MISA_EXT_INFO(RVI, "i", "Base integer instruction set"),
    MISA_EXT_INFO(RVE, "e", "Base integer instruction set (embedded)"),
    MISA_EXT_INFO(RVM, "m", "Integer multiplication and division"),
    MISA_EXT_INFO(RVS, "s", "Supervisor-level instructions"),
    MISA_EXT_INFO(RVU, "u", "User-level instructions"),
    MISA_EXT_INFO(RVH, "h", "Hypervisor"),
    MISA_EXT_INFO(RVJ, "x-j", "Dynamic translated languages"),
    MISA_EXT_INFO(RVV, "v", "Vector operations"),
    MISA_EXT_INFO(RVG, "g", "General purpose (IMAFD_Zicsr_Zifencei)"),
    MISA_EXT_INFO(RVB, "x-b", "Bit manipulation (Zba_Zbb_Zbs)")
};

static void riscv_cpu_validate_misa_mxl(RISCVCPUClass *mcc)
{
    CPUClass *cc = CPU_CLASS(mcc);

    /* Validate that MISA_MXL is set properly
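     * and pick the matching gdb core XML description for the class.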
     */
    switch (mcc->misa_mxl_max) {
#ifdef TARGET_RISCV64
    case MXL_RV64:
    case MXL_RV128:
        cc->gdb_core_xml_file = "riscv-64bit-cpu.xml";
        break;
#endif
    case MXL_RV32:
        cc->gdb_core_xml_file = "riscv-32bit-cpu.xml";
        break;
    default:
        g_assert_not_reached();
    }
}

static int riscv_validate_misa_info_idx(uint32_t bit)
{
    int idx;

    /*
     * Our lowest valid input (RVA) is 1 and
     * __builtin_ctz() is UB with zero.
     */
    g_assert(bit != 0);
    idx = MISA_INFO_IDX(bit);

    g_assert(idx < ARRAY_SIZE(misa_ext_info_arr));
    return idx;
}

const char *riscv_get_misa_ext_name(uint32_t bit)
{
    int idx = riscv_validate_misa_info_idx(bit);
    const char *val = misa_ext_info_arr[idx].name;

    g_assert(val != NULL);
    return val;
}

const char *riscv_get_misa_ext_description(uint32_t bit)
{
    int idx = riscv_validate_misa_info_idx(bit);
    const char *val = misa_ext_info_arr[idx].description;

    g_assert(val != NULL);
    return val;
}

#define MULTI_EXT_CFG_BOOL(_name, _prop, _defval) \
    {.name = _name, .offset = CPU_CFG_OFFSET(_prop), \
     .enabled = _defval}

const RISCVCPUMultiExtConfig riscv_cpu_extensions[] = {
    /* Defaults for standard extensions */
    MULTI_EXT_CFG_BOOL("sscofpmf", ext_sscofpmf, false),
    MULTI_EXT_CFG_BOOL("zifencei", ext_zifencei, true),
    MULTI_EXT_CFG_BOOL("zicsr", ext_zicsr, true),
    MULTI_EXT_CFG_BOOL("zihintntl", ext_zihintntl, true),
    MULTI_EXT_CFG_BOOL("zihintpause", ext_zihintpause, true),
    MULTI_EXT_CFG_BOOL("zacas", ext_zacas, false),
    MULTI_EXT_CFG_BOOL("zawrs", ext_zawrs, true),
    MULTI_EXT_CFG_BOOL("zfa", ext_zfa, true),
    MULTI_EXT_CFG_BOOL("zfh", ext_zfh, false),
    MULTI_EXT_CFG_BOOL("zfhmin", ext_zfhmin, false),
    MULTI_EXT_CFG_BOOL("zve32f", ext_zve32f, false),
    MULTI_EXT_CFG_BOOL("zve64f", ext_zve64f, false),
    MULTI_EXT_CFG_BOOL("zve64d", ext_zve64d, false),
    MULTI_EXT_CFG_BOOL("sstc", ext_sstc, true),

    MULTI_EXT_CFG_BOOL("smepmp", ext_smepmp, false),
    MULTI_EXT_CFG_BOOL("smstateen", ext_smstateen, false),
    MULTI_EXT_CFG_BOOL("svadu", ext_svadu, true),
    MULTI_EXT_CFG_BOOL("svinval", ext_svinval, false),
    MULTI_EXT_CFG_BOOL("svnapot", ext_svnapot, false),
    MULTI_EXT_CFG_BOOL("svpbmt", ext_svpbmt, false),

    MULTI_EXT_CFG_BOOL("zicntr", ext_zicntr, true),
    MULTI_EXT_CFG_BOOL("zihpm", ext_zihpm, true),

    MULTI_EXT_CFG_BOOL("zba", ext_zba, true),
    MULTI_EXT_CFG_BOOL("zbb", ext_zbb, true),
    MULTI_EXT_CFG_BOOL("zbc", ext_zbc, true),
    MULTI_EXT_CFG_BOOL("zbkb", ext_zbkb, false),
    MULTI_EXT_CFG_BOOL("zbkc", ext_zbkc, false),
    MULTI_EXT_CFG_BOOL("zbkx", ext_zbkx, false),
    MULTI_EXT_CFG_BOOL("zbs", ext_zbs, true),
    MULTI_EXT_CFG_BOOL("zk", ext_zk, false),
    MULTI_EXT_CFG_BOOL("zkn", ext_zkn, false),
    MULTI_EXT_CFG_BOOL("zknd", ext_zknd, false),
    MULTI_EXT_CFG_BOOL("zkne", ext_zkne, false),
    MULTI_EXT_CFG_BOOL("zknh", ext_zknh, false),
    MULTI_EXT_CFG_BOOL("zkr", ext_zkr, false),
    MULTI_EXT_CFG_BOOL("zks", ext_zks, false),
    MULTI_EXT_CFG_BOOL("zksed", ext_zksed, false),
    MULTI_EXT_CFG_BOOL("zksh", ext_zksh, false),
    MULTI_EXT_CFG_BOOL("zkt", ext_zkt, false),

    MULTI_EXT_CFG_BOOL("zdinx", ext_zdinx, false),
    MULTI_EXT_CFG_BOOL("zfinx", ext_zfinx, false),
    MULTI_EXT_CFG_BOOL("zhinx", ext_zhinx, false),
    MULTI_EXT_CFG_BOOL("zhinxmin", ext_zhinxmin, false),

    MULTI_EXT_CFG_BOOL("zicbom", ext_zicbom, true),
    MULTI_EXT_CFG_BOOL("zicbop", ext_zicbop, true),
    MULTI_EXT_CFG_BOOL("zicboz", ext_zicboz, true),

    MULTI_EXT_CFG_BOOL("zmmul", ext_zmmul, false),

    MULTI_EXT_CFG_BOOL("zca", ext_zca, false),
    MULTI_EXT_CFG_BOOL("zcb", ext_zcb, false),
    MULTI_EXT_CFG_BOOL("zcd", ext_zcd, false),
    MULTI_EXT_CFG_BOOL("zce", ext_zce, false),
    MULTI_EXT_CFG_BOOL("zcf", ext_zcf, false),
    MULTI_EXT_CFG_BOOL("zcmp", ext_zcmp, false),
    MULTI_EXT_CFG_BOOL("zcmt", ext_zcmt, false),
    MULTI_EXT_CFG_BOOL("zicond", ext_zicond, false),

    /* Vector cryptography extensions */
    MULTI_EXT_CFG_BOOL("zvbb", ext_zvbb, false),
    MULTI_EXT_CFG_BOOL("zvbc", ext_zvbc, false),
    MULTI_EXT_CFG_BOOL("zvkb", ext_zvkb, false),
    MULTI_EXT_CFG_BOOL("zvkg", ext_zvkg, false),
    MULTI_EXT_CFG_BOOL("zvkned", ext_zvkned, false),
    MULTI_EXT_CFG_BOOL("zvknha", ext_zvknha, false),
    MULTI_EXT_CFG_BOOL("zvknhb", ext_zvknhb, false),
    MULTI_EXT_CFG_BOOL("zvksed", ext_zvksed, false),
    MULTI_EXT_CFG_BOOL("zvksh", ext_zvksh, false),
    MULTI_EXT_CFG_BOOL("zvkt", ext_zvkt, false),
    MULTI_EXT_CFG_BOOL("zvkn", ext_zvkn, false),
    MULTI_EXT_CFG_BOOL("zvknc", ext_zvknc, false),
    MULTI_EXT_CFG_BOOL("zvkng", ext_zvkng, false),
    MULTI_EXT_CFG_BOOL("zvks", ext_zvks, false),
    MULTI_EXT_CFG_BOOL("zvksc", ext_zvksc, false),
    MULTI_EXT_CFG_BOOL("zvksg", ext_zvksg, false),

    DEFINE_PROP_END_OF_LIST(),
};

const RISCVCPUMultiExtConfig riscv_cpu_vendor_exts[] = {
    MULTI_EXT_CFG_BOOL("xtheadba", ext_xtheadba, false),
    MULTI_EXT_CFG_BOOL("xtheadbb", ext_xtheadbb, false),
    MULTI_EXT_CFG_BOOL("xtheadbs", ext_xtheadbs, false),
    MULTI_EXT_CFG_BOOL("xtheadcmo", ext_xtheadcmo, false),
    MULTI_EXT_CFG_BOOL("xtheadcondmov", ext_xtheadcondmov, false),
    MULTI_EXT_CFG_BOOL("xtheadfmemidx", ext_xtheadfmemidx, false),
    MULTI_EXT_CFG_BOOL("xtheadfmv", ext_xtheadfmv, false),
    MULTI_EXT_CFG_BOOL("xtheadmac", ext_xtheadmac, false),
    MULTI_EXT_CFG_BOOL("xtheadmemidx", ext_xtheadmemidx, false),
    MULTI_EXT_CFG_BOOL("xtheadmempair", ext_xtheadmempair, false),
    MULTI_EXT_CFG_BOOL("xtheadsync", ext_xtheadsync, false),
    MULTI_EXT_CFG_BOOL("xventanacondops", ext_XVentanaCondOps, false),

    DEFINE_PROP_END_OF_LIST(),
};

/* These are experimental so mark with 'x-' */
const RISCVCPUMultiExtConfig riscv_cpu_experimental_exts[] = {
    MULTI_EXT_CFG_BOOL("x-smaia", ext_smaia, false),
    MULTI_EXT_CFG_BOOL("x-ssaia", ext_ssaia, false),

    MULTI_EXT_CFG_BOOL("x-zaamo", ext_zaamo, false),
    MULTI_EXT_CFG_BOOL("x-zalrsc", ext_zalrsc, false),

    MULTI_EXT_CFG_BOOL("x-zvfh", ext_zvfh, false),
    MULTI_EXT_CFG_BOOL("x-zvfhmin", ext_zvfhmin, false),

    MULTI_EXT_CFG_BOOL("x-zfbfmin", ext_zfbfmin, false),
    MULTI_EXT_CFG_BOOL("x-zvfbfmin", ext_zvfbfmin, false),
    MULTI_EXT_CFG_BOOL("x-zvfbfwma", ext_zvfbfwma, false),

    DEFINE_PROP_END_OF_LIST(),
};

#define ALWAYS_ENABLED_FEATURE(_name) \
    {.name = _name, \
     .offset = CPU_CFG_OFFSET(ext_always_enabled), \
     .enabled = true}

/*
 * 'Named features' is the name we give to extensions that we
 * don't want to expose to users. They are either immutable
 * (always enabled/disabled) or they'll vary depending on
 * the resulting CPU state.
 * They have riscv,isa strings
 * and priv_ver like regular extensions.
 */
const RISCVCPUMultiExtConfig riscv_cpu_named_features[] = {
    MULTI_EXT_CFG_BOOL("svade", ext_svade, true),
    MULTI_EXT_CFG_BOOL("zic64b", ext_zic64b, true),

    /*
     * cache-related extensions that are always enabled
     * in TCG since QEMU RISC-V does not have a cache
     * model.
     */
    ALWAYS_ENABLED_FEATURE("za64rs"),
    ALWAYS_ENABLED_FEATURE("ziccif"),
    ALWAYS_ENABLED_FEATURE("ziccrse"),
    ALWAYS_ENABLED_FEATURE("ziccamoa"),
    ALWAYS_ENABLED_FEATURE("zicclsm"),
    ALWAYS_ENABLED_FEATURE("ssccptr"),

    /* Other named features that TCG always implements */
    ALWAYS_ENABLED_FEATURE("sstvecd"),
    ALWAYS_ENABLED_FEATURE("sstvala"),
    ALWAYS_ENABLED_FEATURE("sscounterenw"),

    DEFINE_PROP_END_OF_LIST(),
};

/* Deprecated entries marked for future removal */
const RISCVCPUMultiExtConfig riscv_cpu_deprecated_exts[] = {
    MULTI_EXT_CFG_BOOL("Zifencei", ext_zifencei, true),
    MULTI_EXT_CFG_BOOL("Zicsr", ext_zicsr, true),
    MULTI_EXT_CFG_BOOL("Zihintntl", ext_zihintntl, true),
    MULTI_EXT_CFG_BOOL("Zihintpause", ext_zihintpause, true),
    MULTI_EXT_CFG_BOOL("Zawrs", ext_zawrs, true),
    MULTI_EXT_CFG_BOOL("Zfa", ext_zfa, true),
    MULTI_EXT_CFG_BOOL("Zfh", ext_zfh, false),
    MULTI_EXT_CFG_BOOL("Zfhmin", ext_zfhmin, false),
    MULTI_EXT_CFG_BOOL("Zve32f", ext_zve32f, false),
    MULTI_EXT_CFG_BOOL("Zve64f", ext_zve64f, false),
    MULTI_EXT_CFG_BOOL("Zve64d", ext_zve64d, false),

    DEFINE_PROP_END_OF_LIST(),
};

static void cpu_set_prop_err(RISCVCPU *cpu, const char *propname,
                             Error **errp)
{
    g_autofree char *cpuname = riscv_cpu_get_name(cpu);
    error_setg(errp, "CPU '%s' does not allow changing the value of '%s'",
               cpuname, propname);
}

static void prop_pmu_num_set(Object *obj, Visitor *v, const char *name,
                             void *opaque, Error **errp)
{
    RISCVCPU *cpu = RISCV_CPU(obj);
    uint8_t pmu_num, curr_pmu_num;
    uint32_t pmu_mask;

    visit_type_uint8(v, name, &pmu_num, errp);

    curr_pmu_num = ctpop32(cpu->cfg.pmu_mask);

    if (pmu_num != curr_pmu_num && riscv_cpu_is_vendor(obj)) {
        cpu_set_prop_err(cpu, name, errp);
        error_append_hint(errp, "Current '%s' val: %u\n",
                          name, curr_pmu_num);
        return;
    }

    if (pmu_num > (RV_MAX_MHPMCOUNTERS - 3)) {
        error_setg(errp, "Number of counters exceeds maximum available");
        return;
    }

    if (pmu_num == 0) {
        pmu_mask = 0;
    } else {
        pmu_mask = MAKE_64BIT_MASK(3, pmu_num);
    }

    warn_report("\"pmu-num\" property is deprecated; use \"pmu-mask\"");
    cpu->cfg.pmu_mask = pmu_mask;
    cpu_option_add_user_setting("pmu-mask", pmu_mask);
}

static void prop_pmu_num_get(Object *obj, Visitor *v, const char *name,
                             void *opaque, Error **errp)
{
    RISCVCPU *cpu = RISCV_CPU(obj);
    uint8_t pmu_num = ctpop32(cpu->cfg.pmu_mask);

    visit_type_uint8(v, name, &pmu_num, errp);
}

static const PropertyInfo prop_pmu_num = {
    .name = "pmu-num",
    .get = prop_pmu_num_get,
    .set = prop_pmu_num_set,
};

static void prop_pmu_mask_set(Object *obj, Visitor *v, const char *name,
                              void *opaque, Error **errp)
{
    RISCVCPU *cpu = RISCV_CPU(obj);
    uint32_t value;
    uint8_t pmu_num;

    visit_type_uint32(v, name, &value, errp);

    if (value != cpu->cfg.pmu_mask && riscv_cpu_is_vendor(obj)) {
        cpu_set_prop_err(cpu, name, errp);
        error_append_hint(errp, "Current '%s' val: %x\n",
                          name, cpu->cfg.pmu_mask);
        return;
    }

    pmu_num = ctpop32(value);

    if (pmu_num > (RV_MAX_MHPMCOUNTERS - 3)) {
        error_setg(errp, "Number of counters exceeds maximum available");
        return;
    }

    cpu_option_add_user_setting(name, value);
    cpu->cfg.pmu_mask = value;
}

static void prop_pmu_mask_get(Object *obj, Visitor *v, const char *name,
                              void *opaque, Error **errp)
{
    uint8_t pmu_mask = RISCV_CPU(obj)->cfg.pmu_mask;

    visit_type_uint8(v, name, &pmu_mask, errp);
}

static const PropertyInfo prop_pmu_mask = {
    .name = "pmu-mask",
    .get = prop_pmu_mask_get,
    .set = prop_pmu_mask_set,
};

static void prop_mmu_set(Object *obj, Visitor *v, const char *name,
                         void *opaque, Error **errp)
{
    RISCVCPU *cpu = RISCV_CPU(obj);
    bool value;

    visit_type_bool(v, name, &value, errp);

    if (cpu->cfg.mmu != value && riscv_cpu_is_vendor(obj)) {
        cpu_set_prop_err(cpu, "mmu", errp);
        return;
    }

    cpu_option_add_user_setting(name, value);
    cpu->cfg.mmu = value;
}

static void prop_mmu_get(Object *obj, Visitor *v, const char *name,
                         void *opaque, Error **errp)
{
    bool value = RISCV_CPU(obj)->cfg.mmu;

    visit_type_bool(v, name, &value, errp);
}

static const PropertyInfo prop_mmu = {
    .name = "mmu",
    .get = prop_mmu_get,
    .set = prop_mmu_set,
};

static void prop_pmp_set(Object *obj, Visitor *v, const char *name,
                         void *opaque, Error **errp)
{
    RISCVCPU *cpu = RISCV_CPU(obj);
    bool value;

    visit_type_bool(v, name, &value, errp);

    if (cpu->cfg.pmp != value && riscv_cpu_is_vendor(obj)) {
        cpu_set_prop_err(cpu, name, errp);
        return;
    }

    cpu_option_add_user_setting(name, value);
    cpu->cfg.pmp = value;
}

static void prop_pmp_get(Object *obj, Visitor *v, const char *name,
                         void *opaque, Error **errp)
{
    bool value = RISCV_CPU(obj)->cfg.pmp;

    visit_type_bool(v, name, &value, errp);
}

static const PropertyInfo prop_pmp = {
    .name = "pmp",
    .get = prop_pmp_get,
    .set = prop_pmp_set,
};

static int priv_spec_from_str(const char *priv_spec_str)
{
    int priv_version = -1;

    if (!g_strcmp0(priv_spec_str, PRIV_VER_1_12_0_STR)) {
        priv_version = PRIV_VERSION_1_12_0;
    } else if (!g_strcmp0(priv_spec_str, PRIV_VER_1_11_0_STR)) {
        priv_version = PRIV_VERSION_1_11_0;
    } else if (!g_strcmp0(priv_spec_str, PRIV_VER_1_10_0_STR)) {
        priv_version = PRIV_VERSION_1_10_0;
    }

    return priv_version;
}

static const char *priv_spec_to_str(int priv_version)
{
    switch (priv_version) {
    case PRIV_VERSION_1_10_0:
        return PRIV_VER_1_10_0_STR;
    case PRIV_VERSION_1_11_0:
        return PRIV_VER_1_11_0_STR;
    case PRIV_VERSION_1_12_0:
        return PRIV_VER_1_12_0_STR;
    default:
        return NULL;
    }
}

static void prop_priv_spec_set(Object *obj, Visitor *v, const char *name,
                               void *opaque, Error **errp)
{
    RISCVCPU *cpu = RISCV_CPU(obj);
    g_autofree char *value = NULL;
    int priv_version = -1;

    visit_type_str(v, name, &value, errp);

    priv_version = priv_spec_from_str(value);
static void prop_priv_spec_set(Object *obj, Visitor *v, const char *name,
                               void *opaque, Error **errp)
{
    RISCVCPU *cpu = RISCV_CPU(obj);
    g_autofree char *value = NULL;
    int priv_version = -1;

    visit_type_str(v, name, &value, errp);

    priv_version = priv_spec_from_str(value);
    if (priv_version < 0) {
        error_setg(errp, "Unsupported privilege spec version '%s'", value);
        return;
    }

    if (priv_version != cpu->env.priv_ver && riscv_cpu_is_vendor(obj)) {
        cpu_set_prop_err(cpu, name, errp);
        error_append_hint(errp, "Current '%s' val: %s\n", name,
                          object_property_get_str(obj, name, NULL));
        return;
    }

    cpu_option_add_user_setting(name, priv_version);
    cpu->env.priv_ver = priv_version;
}

static void prop_priv_spec_get(Object *obj, Visitor *v, const char *name,
                               void *opaque, Error **errp)
{
    RISCVCPU *cpu = RISCV_CPU(obj);
    const char *value = priv_spec_to_str(cpu->env.priv_ver);

    visit_type_str(v, name, (char **)&value, errp);
}

static const PropertyInfo prop_priv_spec = {
    .name = "priv_spec",
    .get = prop_priv_spec_get,
    .set = prop_priv_spec_set,
};

static void prop_vext_spec_set(Object *obj, Visitor *v, const char *name,
                               void *opaque, Error **errp)
{
    RISCVCPU *cpu = RISCV_CPU(obj);
    g_autofree char *value = NULL;

    visit_type_str(v, name, &value, errp);

    if (g_strcmp0(value, VEXT_VER_1_00_0_STR) != 0) {
        error_setg(errp, "Unsupported vector spec version '%s'", value);
        return;
    }

    cpu_option_add_user_setting(name, VEXT_VERSION_1_00_0);
    cpu->env.vext_ver = VEXT_VERSION_1_00_0;
}

static void prop_vext_spec_get(Object *obj, Visitor *v, const char *name,
                               void *opaque, Error **errp)
{
    const char *value = VEXT_VER_1_00_0_STR;

    visit_type_str(v, name, (char **)&value, errp);
}

static const PropertyInfo prop_vext_spec = {
    .name = "vext_spec",
    .get = prop_vext_spec_get,
    .set = prop_vext_spec_set,
};

static void prop_vlen_set(Object *obj, Visitor *v, const char *name,
                          void *opaque, Error **errp)
{
    RISCVCPU *cpu = RISCV_CPU(obj);
    uint16_t value;

    if (!visit_type_uint16(v, name, &value, errp)) {
        return;
    }

    if (!is_power_of_2(value)) {
        error_setg(errp, "Vector extension VLEN must be a power of 2");
        return;
    }

    /* "vlen" is expressed in bits while cfg.vlenb is stored in bytes */
    if (value != cpu->cfg.vlenb << 3 && riscv_cpu_is_vendor(obj)) {
        cpu_set_prop_err(cpu, name, errp);
        error_append_hint(errp, "Current '%s' val: %u\n",
                          name, cpu->cfg.vlenb << 3);
        return;
    }

    cpu_option_add_user_setting(name, value);
    cpu->cfg.vlenb = value >> 3;
}

static void prop_vlen_get(Object *obj, Visitor *v, const char *name,
                          void *opaque, Error **errp)
{
    uint16_t value = RISCV_CPU(obj)->cfg.vlenb << 3;

    visit_type_uint16(v, name, &value, errp);
}

static const PropertyInfo prop_vlen = {
    .name = "vlen",
    .get = prop_vlen_get,
    .set = prop_vlen_set,
};
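/*
 * Worked example (illustrative): "vlen" is exposed in bits but stored as
 * cfg.vlenb in bytes, so e.g.
 *
 *   -cpu rv64,v=true,vlen=256
 *
 * ends up with cfg.vlenb == 32. The "v=true" spelling for enabling the
 * vector extension is an assumption about the MISA properties registered
 * elsewhere; the bits-to-bytes conversion itself follows directly from
 * prop_vlen_set() above.
 */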
static void prop_elen_set(Object *obj, Visitor *v, const char *name,
                          void *opaque, Error **errp)
{
    RISCVCPU *cpu = RISCV_CPU(obj);
    uint16_t value;

    if (!visit_type_uint16(v, name, &value, errp)) {
        return;
    }

    if (!is_power_of_2(value)) {
        error_setg(errp, "Vector extension ELEN must be a power of 2");
        return;
    }

    if (value != cpu->cfg.elen && riscv_cpu_is_vendor(obj)) {
        cpu_set_prop_err(cpu, name, errp);
        error_append_hint(errp, "Current '%s' val: %u\n",
                          name, cpu->cfg.elen);
        return;
    }

    cpu_option_add_user_setting(name, value);
    cpu->cfg.elen = value;
}

static void prop_elen_get(Object *obj, Visitor *v, const char *name,
                          void *opaque, Error **errp)
{
    uint16_t value = RISCV_CPU(obj)->cfg.elen;

    visit_type_uint16(v, name, &value, errp);
}

static const PropertyInfo prop_elen = {
    .name = "elen",
    .get = prop_elen_get,
    .set = prop_elen_set,
};

static void prop_cbom_blksize_set(Object *obj, Visitor *v, const char *name,
                                  void *opaque, Error **errp)
{
    RISCVCPU *cpu = RISCV_CPU(obj);
    uint16_t value;

    if (!visit_type_uint16(v, name, &value, errp)) {
        return;
    }

    if (value != cpu->cfg.cbom_blocksize && riscv_cpu_is_vendor(obj)) {
        cpu_set_prop_err(cpu, name, errp);
        error_append_hint(errp, "Current '%s' val: %u\n",
                          name, cpu->cfg.cbom_blocksize);
        return;
    }

    cpu_option_add_user_setting(name, value);
    cpu->cfg.cbom_blocksize = value;
}

static void prop_cbom_blksize_get(Object *obj, Visitor *v, const char *name,
                                  void *opaque, Error **errp)
{
    uint16_t value = RISCV_CPU(obj)->cfg.cbom_blocksize;

    visit_type_uint16(v, name, &value, errp);
}

static const PropertyInfo prop_cbom_blksize = {
    .name = "cbom_blocksize",
    .get = prop_cbom_blksize_get,
    .set = prop_cbom_blksize_set,
};

static void prop_cbop_blksize_set(Object *obj, Visitor *v, const char *name,
                                  void *opaque, Error **errp)
{
    RISCVCPU *cpu = RISCV_CPU(obj);
    uint16_t value;

    if (!visit_type_uint16(v, name, &value, errp)) {
        return;
    }

    if (value != cpu->cfg.cbop_blocksize && riscv_cpu_is_vendor(obj)) {
        cpu_set_prop_err(cpu, name, errp);
        error_append_hint(errp, "Current '%s' val: %u\n",
                          name, cpu->cfg.cbop_blocksize);
        return;
    }

    cpu_option_add_user_setting(name, value);
    cpu->cfg.cbop_blocksize = value;
}

static void prop_cbop_blksize_get(Object *obj, Visitor *v, const char *name,
                                  void *opaque, Error **errp)
{
    uint16_t value = RISCV_CPU(obj)->cfg.cbop_blocksize;

    visit_type_uint16(v, name, &value, errp);
}

static const PropertyInfo prop_cbop_blksize = {
    .name = "cbop_blocksize",
    .get = prop_cbop_blksize_get,
    .set = prop_cbop_blksize_set,
};

static void prop_cboz_blksize_set(Object *obj, Visitor *v, const char *name,
                                  void *opaque, Error **errp)
{
    RISCVCPU *cpu = RISCV_CPU(obj);
    uint16_t value;

    if (!visit_type_uint16(v, name, &value, errp)) {
        return;
    }

    if (value != cpu->cfg.cboz_blocksize && riscv_cpu_is_vendor(obj)) {
        cpu_set_prop_err(cpu, name, errp);
        error_append_hint(errp, "Current '%s' val: %u\n",
                          name, cpu->cfg.cboz_blocksize);
        return;
    }

    cpu_option_add_user_setting(name, value);
    cpu->cfg.cboz_blocksize = value;
}

static void prop_cboz_blksize_get(Object *obj, Visitor *v, const char *name,
                                  void *opaque, Error **errp)
{
    uint16_t value = RISCV_CPU(obj)->cfg.cboz_blocksize;

    visit_type_uint16(v, name, &value, errp);
}

static const PropertyInfo prop_cboz_blksize = {
    .name = "cboz_blocksize",
    .get = prop_cboz_blksize_get,
    .set = prop_cboz_blksize_set,
};
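/*
 * Usage sketch (illustrative; the property spellings mirror the
 * riscv_cpu_properties[] entries further down, and 64 is just a commonly
 * used block size, not a value enforced here):
 *
 *   -cpu rv64,zicbom=true,cbom_blocksize=64
 *
 * Vendor CPUs reject any blocksize different from the one they already
 * define, via the riscv_cpu_is_vendor() checks above.
 */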
static void prop_mvendorid_set(Object *obj, Visitor *v, const char *name,
                               void *opaque, Error **errp)
{
    bool dynamic_cpu = riscv_cpu_is_dynamic(obj);
    RISCVCPU *cpu = RISCV_CPU(obj);
    uint32_t prev_val = cpu->cfg.mvendorid;
    uint32_t value;

    if (!visit_type_uint32(v, name, &value, errp)) {
        return;
    }

    if (!dynamic_cpu && prev_val != value) {
        error_setg(errp, "Unable to change %s mvendorid (0x%x)",
                   object_get_typename(obj), prev_val);
        return;
    }

    cpu->cfg.mvendorid = value;
}

static void prop_mvendorid_get(Object *obj, Visitor *v, const char *name,
                               void *opaque, Error **errp)
{
    uint32_t value = RISCV_CPU(obj)->cfg.mvendorid;

    visit_type_uint32(v, name, &value, errp);
}

static const PropertyInfo prop_mvendorid = {
    .name = "mvendorid",
    .get = prop_mvendorid_get,
    .set = prop_mvendorid_set,
};

static void prop_mimpid_set(Object *obj, Visitor *v, const char *name,
                            void *opaque, Error **errp)
{
    bool dynamic_cpu = riscv_cpu_is_dynamic(obj);
    RISCVCPU *cpu = RISCV_CPU(obj);
    uint64_t prev_val = cpu->cfg.mimpid;
    uint64_t value;

    if (!visit_type_uint64(v, name, &value, errp)) {
        return;
    }

    if (!dynamic_cpu && prev_val != value) {
        /* Print the value in hex to match the "0x" prefix */
        error_setg(errp, "Unable to change %s mimpid (0x%" PRIx64 ")",
                   object_get_typename(obj), prev_val);
        return;
    }

    cpu->cfg.mimpid = value;
}

static void prop_mimpid_get(Object *obj, Visitor *v, const char *name,
                            void *opaque, Error **errp)
{
    uint64_t value = RISCV_CPU(obj)->cfg.mimpid;

    visit_type_uint64(v, name, &value, errp);
}

static const PropertyInfo prop_mimpid = {
    .name = "mimpid",
    .get = prop_mimpid_get,
    .set = prop_mimpid_set,
};

static void prop_marchid_set(Object *obj, Visitor *v, const char *name,
                             void *opaque, Error **errp)
{
    bool dynamic_cpu = riscv_cpu_is_dynamic(obj);
    RISCVCPU *cpu = RISCV_CPU(obj);
    uint64_t prev_val = cpu->cfg.marchid;
    uint64_t value, invalid_val;
    uint32_t mxlen = 0;

    if (!visit_type_uint64(v, name, &value, errp)) {
        return;
    }

    if (!dynamic_cpu && prev_val != value) {
        error_setg(errp, "Unable to change %s marchid (0x%" PRIx64 ")",
                   object_get_typename(obj), prev_val);
        return;
    }

    switch (riscv_cpu_mxl(&cpu->env)) {
    case MXL_RV32:
        mxlen = 32;
        break;
    case MXL_RV64:
    case MXL_RV128:
        mxlen = 64;
        break;
    default:
        g_assert_not_reached();
    }

    /* Unsigned shift: 1LL << 63 would overflow a signed 64-bit integer */
    invalid_val = 1ULL << (mxlen - 1);

    if (value == invalid_val) {
        error_setg(errp, "Unable to set marchid with MSB (%u) bit set "
                         "and the remaining bits zero", mxlen);
        return;
    }

    cpu->cfg.marchid = value;
}

static void prop_marchid_get(Object *obj, Visitor *v, const char *name,
                             void *opaque, Error **errp)
{
    uint64_t value = RISCV_CPU(obj)->cfg.marchid;

    visit_type_uint64(v, name, &value, errp);
}

static const PropertyInfo prop_marchid = {
    .name = "marchid",
    .get = prop_marchid_get,
    .set = prop_marchid_set,
};
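/*
 * Worked example for the marchid check above (illustrative): on an RV64
 * CPU mxlen is 64, so invalid_val == 1ULL << 63 == 0x8000000000000000.
 * Setting marchid to exactly that value is rejected; any other value is
 * accepted on dynamic CPUs, e.g.
 *
 *   -cpu rv64,marchid=0x42
 */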
/*
 * RVA22U64 defines some 'named features' that are cache
 * related: Za64rs, Zic64b, Ziccif, Ziccrse, Ziccamoa
 * and Zicclsm. They are always implemented in TCG and
 * don't need to be manually enabled by the profile.
 */
static RISCVCPUProfile RVA22U64 = {
    .parent = NULL,
    .name = "rva22u64",
    .misa_ext = RVI | RVM | RVA | RVF | RVD | RVC | RVU,
    .priv_spec = RISCV_PROFILE_ATTR_UNUSED,
    .satp_mode = RISCV_PROFILE_ATTR_UNUSED,
    .ext_offsets = {
        CPU_CFG_OFFSET(ext_zicsr), CPU_CFG_OFFSET(ext_zihintpause),
        CPU_CFG_OFFSET(ext_zba), CPU_CFG_OFFSET(ext_zbb),
        CPU_CFG_OFFSET(ext_zbs), CPU_CFG_OFFSET(ext_zfhmin),
        CPU_CFG_OFFSET(ext_zkt), CPU_CFG_OFFSET(ext_zicntr),
        CPU_CFG_OFFSET(ext_zihpm), CPU_CFG_OFFSET(ext_zicbom),
        CPU_CFG_OFFSET(ext_zicbop), CPU_CFG_OFFSET(ext_zicboz),

        /* mandatory named features for this profile */
        CPU_CFG_OFFSET(ext_zic64b),

        RISCV_PROFILE_EXT_LIST_END
    }
};

/*
 * As with RVA22U64, RVA22S64 also defines 'named features'.
 *
 * Cache-related features that we consider enabled since we don't
 * implement a cache model: Ssccptr
 *
 * Other named features that we already implement: Sstvecd, Sstvala,
 * Sscounterenw
 *
 * Named features that we need to enable: svade
 *
 * The remaining features/extensions come from RVA22U64.
 */
static RISCVCPUProfile RVA22S64 = {
    .parent = &RVA22U64,
    .name = "rva22s64",
    .misa_ext = RVS,
    .priv_spec = PRIV_VERSION_1_12_0,
    .satp_mode = VM_1_10_SV39,
    .ext_offsets = {
        /* rva22s64 exts */
        CPU_CFG_OFFSET(ext_zifencei), CPU_CFG_OFFSET(ext_svpbmt),
        CPU_CFG_OFFSET(ext_svinval),

        /* rva22s64 named features */
        CPU_CFG_OFFSET(ext_svade),

        RISCV_PROFILE_EXT_LIST_END
    }
};

RISCVCPUProfile *riscv_profiles[] = {
    &RVA22U64,
    &RVA22S64,
    NULL,
};
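/*
 * Usage sketch (illustrative): on TARGET_RISCV64 builds the profiles are
 * also exposed as CPU models (see the DEFINE_PROFILE_CPU() entries at the
 * end of this file), so a profile-conforming guest can be started with
 *
 *   -cpu rva22s64
 *
 * which starts from the bare rv64i CPU and flags the profile as enabled;
 * the actual enabling of its mandatory extensions is handled during
 * realize, outside this file.
 */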
static Property riscv_cpu_properties[] = {
    DEFINE_PROP_BOOL("debug", RISCVCPU, cfg.debug, true),

    {.name = "pmu-mask", .info = &prop_pmu_mask},
    {.name = "pmu-num", .info = &prop_pmu_num}, /* Deprecated */

    {.name = "mmu", .info = &prop_mmu},
    {.name = "pmp", .info = &prop_pmp},

    {.name = "priv_spec", .info = &prop_priv_spec},
    {.name = "vext_spec", .info = &prop_vext_spec},

    {.name = "vlen", .info = &prop_vlen},
    {.name = "elen", .info = &prop_elen},

    {.name = "cbom_blocksize", .info = &prop_cbom_blksize},
    {.name = "cbop_blocksize", .info = &prop_cbop_blksize},
    {.name = "cboz_blocksize", .info = &prop_cboz_blksize},

    {.name = "mvendorid", .info = &prop_mvendorid},
    {.name = "mimpid", .info = &prop_mimpid},
    {.name = "marchid", .info = &prop_marchid},

#ifndef CONFIG_USER_ONLY
    DEFINE_PROP_UINT64("resetvec", RISCVCPU, env.resetvec, DEFAULT_RSTVEC),
#endif

    DEFINE_PROP_BOOL("short-isa-string", RISCVCPU, cfg.short_isa_string, false),

    DEFINE_PROP_BOOL("rvv_ta_all_1s", RISCVCPU, cfg.rvv_ta_all_1s, false),
    DEFINE_PROP_BOOL("rvv_ma_all_1s", RISCVCPU, cfg.rvv_ma_all_1s, false),

    /*
     * write_misa() is marked as experimental for now, so expose the
     * property with the 'x-' prefix and default it to 'false'.
     */
    DEFINE_PROP_BOOL("x-misa-w", RISCVCPU, cfg.misa_w, false),
    DEFINE_PROP_END_OF_LIST(),
};

#if defined(TARGET_RISCV64)
static void rva22u64_profile_cpu_init(Object *obj)
{
    rv64i_bare_cpu_init(obj);

    RVA22U64.enabled = true;
}

static void rva22s64_profile_cpu_init(Object *obj)
{
    rv64i_bare_cpu_init(obj);

    RVA22S64.enabled = true;
}
#endif

static const gchar *riscv_gdb_arch_name(CPUState *cs)
{
    RISCVCPU *cpu = RISCV_CPU(cs);
    CPURISCVState *env = &cpu->env;

    switch (riscv_cpu_mxl(env)) {
    case MXL_RV32:
        return "riscv:rv32";
    case MXL_RV64:
    case MXL_RV128:
        return "riscv:rv64";
    default:
        g_assert_not_reached();
    }
}

#ifndef CONFIG_USER_ONLY
static int64_t riscv_get_arch_id(CPUState *cs)
{
    RISCVCPU *cpu = RISCV_CPU(cs);

    return cpu->env.mhartid;
}

#include "hw/core/sysemu-cpu-ops.h"

static const struct SysemuCPUOps riscv_sysemu_ops = {
    .get_phys_page_debug = riscv_cpu_get_phys_page_debug,
    .write_elf64_note = riscv_cpu_write_elf64_note,
    .write_elf32_note = riscv_cpu_write_elf32_note,
    .legacy_vmsd = &vmstate_riscv_cpu,
};
#endif

static void riscv_cpu_common_class_init(ObjectClass *c, void *data)
{
    RISCVCPUClass *mcc = RISCV_CPU_CLASS(c);
    CPUClass *cc = CPU_CLASS(c);
    DeviceClass *dc = DEVICE_CLASS(c);
    ResettableClass *rc = RESETTABLE_CLASS(c);

    device_class_set_parent_realize(dc, riscv_cpu_realize,
                                    &mcc->parent_realize);

    resettable_class_set_parent_phases(rc, NULL, riscv_cpu_reset_hold, NULL,
                                       &mcc->parent_phases);

    cc->class_by_name = riscv_cpu_class_by_name;
    cc->has_work = riscv_cpu_has_work;
    cc->mmu_index = riscv_cpu_mmu_index;
    cc->dump_state = riscv_cpu_dump_state;
    cc->set_pc = riscv_cpu_set_pc;
    cc->get_pc = riscv_cpu_get_pc;
    cc->gdb_read_register = riscv_cpu_gdb_read_register;
    cc->gdb_write_register = riscv_cpu_gdb_write_register;
    cc->gdb_stop_before_watchpoint = true;
    cc->disas_set_info = riscv_cpu_disas_set_info;
#ifndef CONFIG_USER_ONLY
    cc->sysemu_ops = &riscv_sysemu_ops;
    cc->get_arch_id = riscv_get_arch_id;
#endif
    cc->gdb_arch_name = riscv_gdb_arch_name;

    device_class_set_props(dc, riscv_cpu_properties);
}

static void riscv_cpu_class_init(ObjectClass *c, void *data)
{
    RISCVCPUClass *mcc = RISCV_CPU_CLASS(c);

    mcc->misa_mxl_max = (uint32_t)(uintptr_t)data;
    riscv_cpu_validate_misa_mxl(mcc);
}

static void riscv_isa_string_ext(RISCVCPU *cpu, char **isa_str,
                                 int max_str_len)
{
    const RISCVIsaExtData *edata;
    char *old = *isa_str;
    char *new = *isa_str;

    for (edata = isa_edata_arr; edata && edata->name; edata++) {
        if (isa_ext_is_enabled(cpu, edata->ext_enable_offset)) {
            new = g_strconcat(old, "_", edata->name, NULL);
            g_free(old);
            old = new;
        }
    }

    *isa_str = new;
}
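/*
 * Example of the resulting ISA string (illustrative; the exact output
 * depends on the enabled extensions): a CPU with misa IMAFDC plus zicsr
 * and zifencei enabled yields something like
 *
 *   "rv64imafdc_zicsr_zifencei"
 *
 * i.e. single-letter extensions first (riscv_isa_string() below), then
 * each enabled multi-letter extension appended with a '_' separator in
 * isa_edata_arr order (riscv_isa_string_ext() above).
 */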
char *riscv_isa_string(RISCVCPU *cpu)
{
    RISCVCPUClass *mcc = RISCV_CPU_GET_CLASS(cpu);
    int i;
    const size_t maxlen = sizeof("rv128") + sizeof(riscv_single_letter_exts);
    char *isa_str = g_new(char, maxlen);
    int xlen = riscv_cpu_max_xlen(mcc);
    char *p = isa_str + snprintf(isa_str, maxlen, "rv%d", xlen);

    for (i = 0; i < sizeof(riscv_single_letter_exts) - 1; i++) {
        if (cpu->env.misa_ext & RV(riscv_single_letter_exts[i])) {
            *p++ = qemu_tolower(riscv_single_letter_exts[i]);
        }
    }
    *p = '\0';
    if (!cpu->cfg.short_isa_string) {
        riscv_isa_string_ext(cpu, &isa_str, maxlen);
    }
    return isa_str;
}

#ifndef CONFIG_USER_ONLY
static char **riscv_isa_extensions_list(RISCVCPU *cpu, int *count)
{
    int maxlen = ARRAY_SIZE(riscv_single_letter_exts) + ARRAY_SIZE(isa_edata_arr);
    char **extensions = g_new(char *, maxlen);

    for (int i = 0; i < sizeof(riscv_single_letter_exts) - 1; i++) {
        if (cpu->env.misa_ext & RV(riscv_single_letter_exts[i])) {
            extensions[*count] = g_new(char, 2);
            snprintf(extensions[*count], 2, "%c",
                     qemu_tolower(riscv_single_letter_exts[i]));
            (*count)++;
        }
    }

    for (const RISCVIsaExtData *edata = isa_edata_arr; edata->name; edata++) {
        if (isa_ext_is_enabled(cpu, edata->ext_enable_offset)) {
            extensions[*count] = g_strdup(edata->name);
            (*count)++;
        }
    }

    return extensions;
}

void riscv_isa_write_fdt(RISCVCPU *cpu, void *fdt, char *nodename)
{
    RISCVCPUClass *mcc = RISCV_CPU_GET_CLASS(cpu);
    const size_t maxlen = sizeof("rv128i");
    g_autofree char *isa_base = g_new(char, maxlen);
    g_autofree char *riscv_isa;
    char **isa_extensions;
    int count = 0;
    int xlen = riscv_cpu_max_xlen(mcc);

    riscv_isa = riscv_isa_string(cpu);
    qemu_fdt_setprop_string(fdt, nodename, "riscv,isa", riscv_isa);

    snprintf(isa_base, maxlen, "rv%di", xlen);
    qemu_fdt_setprop_string(fdt, nodename, "riscv,isa-base", isa_base);

    isa_extensions = riscv_isa_extensions_list(cpu, &count);
    qemu_fdt_setprop_string_array(fdt, nodename, "riscv,isa-extensions",
                                  isa_extensions, count);

    for (int i = 0; i < count; i++) {
        g_free(isa_extensions[i]);
    }

    g_free(isa_extensions);
}
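/*
 * Illustrative example of what riscv_isa_write_fdt() produces for a CPU
 * node (the actual values depend entirely on the configured CPU):
 *
 *   riscv,isa = "rv64imafdc_zicsr_zifencei";
 *   riscv,isa-base = "rv64i";
 *   riscv,isa-extensions = "i", "m", "a", "f", "d", "c", "zicsr", "zifencei";
 */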
#endif

#define DEFINE_CPU(type_name, misa_mxl_max, initfn) \
    {                                               \
        .name = (type_name),                        \
        .parent = TYPE_RISCV_CPU,                   \
        .instance_init = (initfn),                  \
        .class_init = riscv_cpu_class_init,         \
        .class_data = (void *)(misa_mxl_max)        \
    }

#define DEFINE_DYNAMIC_CPU(type_name, misa_mxl_max, initfn) \
    {                                                       \
        .name = (type_name),                                \
        .parent = TYPE_RISCV_DYNAMIC_CPU,                   \
        .instance_init = (initfn),                          \
        .class_init = riscv_cpu_class_init,                 \
        .class_data = (void *)(misa_mxl_max)                \
    }

#define DEFINE_VENDOR_CPU(type_name, misa_mxl_max, initfn) \
    {                                                      \
        .name = (type_name),                               \
        .parent = TYPE_RISCV_VENDOR_CPU,                   \
        .instance_init = (initfn),                         \
        .class_init = riscv_cpu_class_init,                \
        .class_data = (void *)(misa_mxl_max)               \
    }

#define DEFINE_BARE_CPU(type_name, misa_mxl_max, initfn) \
    {                                                    \
        .name = (type_name),                             \
        .parent = TYPE_RISCV_BARE_CPU,                   \
        .instance_init = (initfn),                       \
        .class_init = riscv_cpu_class_init,              \
        .class_data = (void *)(misa_mxl_max)             \
    }

#define DEFINE_PROFILE_CPU(type_name, misa_mxl_max, initfn) \
    {                                                       \
        .name = (type_name),                                \
        .parent = TYPE_RISCV_BARE_CPU,                      \
        .instance_init = (initfn),                          \
        .class_init = riscv_cpu_class_init,                 \
        .class_data = (void *)(misa_mxl_max)                \
    }

static const TypeInfo riscv_cpu_type_infos[] = {
    {
        .name = TYPE_RISCV_CPU,
        .parent = TYPE_CPU,
        .instance_size = sizeof(RISCVCPU),
        .instance_align = __alignof(RISCVCPU),
        .instance_init = riscv_cpu_init,
        .instance_post_init = riscv_cpu_post_init,
        .abstract = true,
        .class_size = sizeof(RISCVCPUClass),
        .class_init = riscv_cpu_common_class_init,
    },
    {
        .name = TYPE_RISCV_DYNAMIC_CPU,
        .parent = TYPE_RISCV_CPU,
        .abstract = true,
    },
    {
        .name = TYPE_RISCV_VENDOR_CPU,
        .parent = TYPE_RISCV_CPU,
        .abstract = true,
    },
    {
        .name = TYPE_RISCV_BARE_CPU,
        .parent = TYPE_RISCV_CPU,
        .instance_init = riscv_bare_cpu_init,
        .abstract = true,
    },
#if defined(TARGET_RISCV32)
    DEFINE_DYNAMIC_CPU(TYPE_RISCV_CPU_ANY, MXL_RV32, riscv_any_cpu_init),
    DEFINE_DYNAMIC_CPU(TYPE_RISCV_CPU_MAX, MXL_RV32, riscv_max_cpu_init),
    DEFINE_DYNAMIC_CPU(TYPE_RISCV_CPU_BASE32, MXL_RV32, rv32_base_cpu_init),
    DEFINE_VENDOR_CPU(TYPE_RISCV_CPU_IBEX, MXL_RV32, rv32_ibex_cpu_init),
    DEFINE_VENDOR_CPU(TYPE_RISCV_CPU_SIFIVE_E31, MXL_RV32, rv32_sifive_e_cpu_init),
    DEFINE_VENDOR_CPU(TYPE_RISCV_CPU_SIFIVE_E34, MXL_RV32, rv32_imafcu_nommu_cpu_init),
    DEFINE_VENDOR_CPU(TYPE_RISCV_CPU_SIFIVE_U34, MXL_RV32, rv32_sifive_u_cpu_init),
    DEFINE_BARE_CPU(TYPE_RISCV_CPU_RV32I, MXL_RV32, rv32i_bare_cpu_init),
    DEFINE_BARE_CPU(TYPE_RISCV_CPU_RV32E, MXL_RV32, rv32e_bare_cpu_init),
#elif defined(TARGET_RISCV64)
    DEFINE_DYNAMIC_CPU(TYPE_RISCV_CPU_ANY, MXL_RV64, riscv_any_cpu_init),
    DEFINE_DYNAMIC_CPU(TYPE_RISCV_CPU_MAX, MXL_RV64, riscv_max_cpu_init),
    DEFINE_DYNAMIC_CPU(TYPE_RISCV_CPU_BASE64, MXL_RV64, rv64_base_cpu_init),
    DEFINE_VENDOR_CPU(TYPE_RISCV_CPU_SIFIVE_E51, MXL_RV64, rv64_sifive_e_cpu_init),
    DEFINE_VENDOR_CPU(TYPE_RISCV_CPU_SIFIVE_U54, MXL_RV64, rv64_sifive_u_cpu_init),
    DEFINE_VENDOR_CPU(TYPE_RISCV_CPU_SHAKTI_C, MXL_RV64, rv64_sifive_u_cpu_init),
    DEFINE_VENDOR_CPU(TYPE_RISCV_CPU_THEAD_C906, MXL_RV64, rv64_thead_c906_cpu_init),
    DEFINE_VENDOR_CPU(TYPE_RISCV_CPU_VEYRON_V1, MXL_RV64, rv64_veyron_v1_cpu_init),
    DEFINE_DYNAMIC_CPU(TYPE_RISCV_CPU_BASE128, MXL_RV128, rv128_base_cpu_init),
    DEFINE_BARE_CPU(TYPE_RISCV_CPU_RV64I, MXL_RV64, rv64i_bare_cpu_init),
    DEFINE_BARE_CPU(TYPE_RISCV_CPU_RV64E, MXL_RV64, rv64e_bare_cpu_init),
    DEFINE_PROFILE_CPU(TYPE_RISCV_CPU_RVA22U64, MXL_RV64, rva22u64_profile_cpu_init),
    DEFINE_PROFILE_CPU(TYPE_RISCV_CPU_RVA22S64, MXL_RV64, rva22s64_profile_cpu_init),
#endif
};

DEFINE_TYPES(riscv_cpu_type_infos)