1 /* 2 * QEMU RISC-V CPU 3 * 4 * Copyright (c) 2016-2017 Sagar Karandikar, sagark@eecs.berkeley.edu 5 * Copyright (c) 2017-2018 SiFive, Inc. 6 * 7 * This program is free software; you can redistribute it and/or modify it 8 * under the terms and conditions of the GNU General Public License, 9 * version 2 or later, as published by the Free Software Foundation. 10 * 11 * This program is distributed in the hope it will be useful, but WITHOUT 12 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or 13 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for 14 * more details. 15 * 16 * You should have received a copy of the GNU General Public License along with 17 * this program. If not, see <http://www.gnu.org/licenses/>. 18 */ 19 20 #include "qemu/osdep.h" 21 #include "qemu/qemu-print.h" 22 #include "qemu/ctype.h" 23 #include "qemu/log.h" 24 #include "cpu.h" 25 #include "cpu_vendorid.h" 26 #include "internals.h" 27 #include "exec/exec-all.h" 28 #include "qapi/error.h" 29 #include "qapi/visitor.h" 30 #include "qemu/error-report.h" 31 #include "hw/qdev-properties.h" 32 #include "hw/core/qdev-prop-internal.h" 33 #include "migration/vmstate.h" 34 #include "fpu/softfloat-helpers.h" 35 #include "sysemu/device_tree.h" 36 #include "sysemu/kvm.h" 37 #include "sysemu/tcg.h" 38 #include "kvm/kvm_riscv.h" 39 #include "tcg/tcg-cpu.h" 40 #include "tcg/tcg.h" 41 42 /* RISC-V CPU definitions */ 43 static const char riscv_single_letter_exts[] = "IEMAFDQCBPVH"; 44 const uint32_t misa_bits[] = {RVI, RVE, RVM, RVA, RVF, RVD, RVV, 45 RVC, RVS, RVU, RVH, RVJ, RVG, RVB, 0}; 46 47 /* 48 * From vector_helper.c 49 * Note that vector data is stored in host-endian 64-bit chunks, 50 * so addressing bytes needs a host-endian fixup. 51 */ 52 #if HOST_BIG_ENDIAN 53 #define BYTE(x) ((x) ^ 7) 54 #else 55 #define BYTE(x) (x) 56 #endif 57 58 bool riscv_cpu_is_32bit(RISCVCPU *cpu) 59 { 60 return riscv_cpu_mxl(&cpu->env) == MXL_RV32; 61 } 62 63 /* Hash that stores general user set numeric options */ 64 static GHashTable *general_user_opts; 65 66 static void cpu_option_add_user_setting(const char *optname, uint32_t value) 67 { 68 g_hash_table_insert(general_user_opts, (gpointer)optname, 69 GUINT_TO_POINTER(value)); 70 } 71 72 bool riscv_cpu_option_set(const char *optname) 73 { 74 return g_hash_table_contains(general_user_opts, optname); 75 } 76 77 #define ISA_EXT_DATA_ENTRY(_name, _min_ver, _prop) \ 78 {#_name, _min_ver, CPU_CFG_OFFSET(_prop)} 79 80 /* 81 * Here are the ordering rules of extension naming defined by RISC-V 82 * specification : 83 * 1. All extensions should be separated from other multi-letter extensions 84 * by an underscore. 85 * 2. The first letter following the 'Z' conventionally indicates the most 86 * closely related alphabetical extension category, IMAFDQLCBKJTPVH. 87 * If multiple 'Z' extensions are named, they should be ordered first 88 * by category, then alphabetically within a category. 89 * 3. Standard supervisor-level extensions (starts with 'S') should be 90 * listed after standard unprivileged extensions. If multiple 91 * supervisor-level extensions are listed, they should be ordered 92 * alphabetically. 93 * 4. Non-standard extensions (starts with 'X') must be listed after all 94 * standard extensions. They must be separated from other multi-letter 95 * extensions by an underscore. 96 * 97 * Single letter extensions are checked in riscv_cpu_validate_misa_priv() 98 * instead. 99 */ 100 const RISCVIsaExtData isa_edata_arr[] = { 101 ISA_EXT_DATA_ENTRY(zic64b, PRIV_VERSION_1_12_0, ext_zic64b), 102 ISA_EXT_DATA_ENTRY(zicbom, PRIV_VERSION_1_12_0, ext_zicbom), 103 ISA_EXT_DATA_ENTRY(zicbop, PRIV_VERSION_1_12_0, ext_zicbop), 104 ISA_EXT_DATA_ENTRY(zicboz, PRIV_VERSION_1_12_0, ext_zicboz), 105 ISA_EXT_DATA_ENTRY(zicond, PRIV_VERSION_1_12_0, ext_zicond), 106 ISA_EXT_DATA_ENTRY(zicntr, PRIV_VERSION_1_12_0, ext_zicntr), 107 ISA_EXT_DATA_ENTRY(zicsr, PRIV_VERSION_1_10_0, ext_zicsr), 108 ISA_EXT_DATA_ENTRY(zifencei, PRIV_VERSION_1_10_0, ext_zifencei), 109 ISA_EXT_DATA_ENTRY(zihintntl, PRIV_VERSION_1_10_0, ext_zihintntl), 110 ISA_EXT_DATA_ENTRY(zihintpause, PRIV_VERSION_1_10_0, ext_zihintpause), 111 ISA_EXT_DATA_ENTRY(zihpm, PRIV_VERSION_1_12_0, ext_zihpm), 112 ISA_EXT_DATA_ENTRY(zmmul, PRIV_VERSION_1_12_0, ext_zmmul), 113 ISA_EXT_DATA_ENTRY(zaamo, PRIV_VERSION_1_12_0, ext_zaamo), 114 ISA_EXT_DATA_ENTRY(zacas, PRIV_VERSION_1_12_0, ext_zacas), 115 ISA_EXT_DATA_ENTRY(zalrsc, PRIV_VERSION_1_12_0, ext_zalrsc), 116 ISA_EXT_DATA_ENTRY(zawrs, PRIV_VERSION_1_12_0, ext_zawrs), 117 ISA_EXT_DATA_ENTRY(zfa, PRIV_VERSION_1_12_0, ext_zfa), 118 ISA_EXT_DATA_ENTRY(zfbfmin, PRIV_VERSION_1_12_0, ext_zfbfmin), 119 ISA_EXT_DATA_ENTRY(zfh, PRIV_VERSION_1_11_0, ext_zfh), 120 ISA_EXT_DATA_ENTRY(zfhmin, PRIV_VERSION_1_11_0, ext_zfhmin), 121 ISA_EXT_DATA_ENTRY(zfinx, PRIV_VERSION_1_12_0, ext_zfinx), 122 ISA_EXT_DATA_ENTRY(zdinx, PRIV_VERSION_1_12_0, ext_zdinx), 123 ISA_EXT_DATA_ENTRY(zca, PRIV_VERSION_1_12_0, ext_zca), 124 ISA_EXT_DATA_ENTRY(zcb, PRIV_VERSION_1_12_0, ext_zcb), 125 ISA_EXT_DATA_ENTRY(zcf, PRIV_VERSION_1_12_0, ext_zcf), 126 ISA_EXT_DATA_ENTRY(zcd, PRIV_VERSION_1_12_0, ext_zcd), 127 ISA_EXT_DATA_ENTRY(zce, PRIV_VERSION_1_12_0, ext_zce), 128 ISA_EXT_DATA_ENTRY(zcmp, PRIV_VERSION_1_12_0, ext_zcmp), 129 ISA_EXT_DATA_ENTRY(zcmt, PRIV_VERSION_1_12_0, ext_zcmt), 130 ISA_EXT_DATA_ENTRY(zba, PRIV_VERSION_1_12_0, ext_zba), 131 ISA_EXT_DATA_ENTRY(zbb, PRIV_VERSION_1_12_0, ext_zbb), 132 ISA_EXT_DATA_ENTRY(zbc, PRIV_VERSION_1_12_0, ext_zbc), 133 ISA_EXT_DATA_ENTRY(zbkb, PRIV_VERSION_1_12_0, ext_zbkb), 134 ISA_EXT_DATA_ENTRY(zbkc, PRIV_VERSION_1_12_0, ext_zbkc), 135 ISA_EXT_DATA_ENTRY(zbkx, PRIV_VERSION_1_12_0, ext_zbkx), 136 ISA_EXT_DATA_ENTRY(zbs, PRIV_VERSION_1_12_0, ext_zbs), 137 ISA_EXT_DATA_ENTRY(zk, PRIV_VERSION_1_12_0, ext_zk), 138 ISA_EXT_DATA_ENTRY(zkn, PRIV_VERSION_1_12_0, ext_zkn), 139 ISA_EXT_DATA_ENTRY(zknd, PRIV_VERSION_1_12_0, ext_zknd), 140 ISA_EXT_DATA_ENTRY(zkne, PRIV_VERSION_1_12_0, ext_zkne), 141 ISA_EXT_DATA_ENTRY(zknh, PRIV_VERSION_1_12_0, ext_zknh), 142 ISA_EXT_DATA_ENTRY(zkr, PRIV_VERSION_1_12_0, ext_zkr), 143 ISA_EXT_DATA_ENTRY(zks, PRIV_VERSION_1_12_0, ext_zks), 144 ISA_EXT_DATA_ENTRY(zksed, PRIV_VERSION_1_12_0, ext_zksed), 145 ISA_EXT_DATA_ENTRY(zksh, PRIV_VERSION_1_12_0, ext_zksh), 146 ISA_EXT_DATA_ENTRY(zkt, PRIV_VERSION_1_12_0, ext_zkt), 147 ISA_EXT_DATA_ENTRY(zvbb, PRIV_VERSION_1_12_0, ext_zvbb), 148 ISA_EXT_DATA_ENTRY(zvbc, PRIV_VERSION_1_12_0, ext_zvbc), 149 ISA_EXT_DATA_ENTRY(zve32f, PRIV_VERSION_1_10_0, ext_zve32f), 150 ISA_EXT_DATA_ENTRY(zve64f, PRIV_VERSION_1_10_0, ext_zve64f), 151 ISA_EXT_DATA_ENTRY(zve64d, PRIV_VERSION_1_10_0, ext_zve64d), 152 ISA_EXT_DATA_ENTRY(zvfbfmin, PRIV_VERSION_1_12_0, ext_zvfbfmin), 153 ISA_EXT_DATA_ENTRY(zvfbfwma, PRIV_VERSION_1_12_0, ext_zvfbfwma), 154 ISA_EXT_DATA_ENTRY(zvfh, PRIV_VERSION_1_12_0, ext_zvfh), 155 ISA_EXT_DATA_ENTRY(zvfhmin, PRIV_VERSION_1_12_0, ext_zvfhmin), 156 ISA_EXT_DATA_ENTRY(zvkb, PRIV_VERSION_1_12_0, ext_zvkb), 157 ISA_EXT_DATA_ENTRY(zvkg, PRIV_VERSION_1_12_0, ext_zvkg), 158 ISA_EXT_DATA_ENTRY(zvkn, PRIV_VERSION_1_12_0, ext_zvkn), 159 ISA_EXT_DATA_ENTRY(zvknc, PRIV_VERSION_1_12_0, ext_zvknc), 160 ISA_EXT_DATA_ENTRY(zvkned, PRIV_VERSION_1_12_0, ext_zvkned), 161 ISA_EXT_DATA_ENTRY(zvkng, PRIV_VERSION_1_12_0, ext_zvkng), 162 ISA_EXT_DATA_ENTRY(zvknha, PRIV_VERSION_1_12_0, ext_zvknha), 163 ISA_EXT_DATA_ENTRY(zvknhb, PRIV_VERSION_1_12_0, ext_zvknhb), 164 ISA_EXT_DATA_ENTRY(zvks, PRIV_VERSION_1_12_0, ext_zvks), 165 ISA_EXT_DATA_ENTRY(zvksc, PRIV_VERSION_1_12_0, ext_zvksc), 166 ISA_EXT_DATA_ENTRY(zvksed, PRIV_VERSION_1_12_0, ext_zvksed), 167 ISA_EXT_DATA_ENTRY(zvksg, PRIV_VERSION_1_12_0, ext_zvksg), 168 ISA_EXT_DATA_ENTRY(zvksh, PRIV_VERSION_1_12_0, ext_zvksh), 169 ISA_EXT_DATA_ENTRY(zvkt, PRIV_VERSION_1_12_0, ext_zvkt), 170 ISA_EXT_DATA_ENTRY(zhinx, PRIV_VERSION_1_12_0, ext_zhinx), 171 ISA_EXT_DATA_ENTRY(zhinxmin, PRIV_VERSION_1_12_0, ext_zhinxmin), 172 ISA_EXT_DATA_ENTRY(smaia, PRIV_VERSION_1_12_0, ext_smaia), 173 ISA_EXT_DATA_ENTRY(smepmp, PRIV_VERSION_1_12_0, ext_smepmp), 174 ISA_EXT_DATA_ENTRY(smstateen, PRIV_VERSION_1_12_0, ext_smstateen), 175 ISA_EXT_DATA_ENTRY(ssaia, PRIV_VERSION_1_12_0, ext_ssaia), 176 ISA_EXT_DATA_ENTRY(sscofpmf, PRIV_VERSION_1_12_0, ext_sscofpmf), 177 ISA_EXT_DATA_ENTRY(sstc, PRIV_VERSION_1_12_0, ext_sstc), 178 ISA_EXT_DATA_ENTRY(svade, PRIV_VERSION_1_11_0, ext_svade), 179 ISA_EXT_DATA_ENTRY(svadu, PRIV_VERSION_1_12_0, ext_svadu), 180 ISA_EXT_DATA_ENTRY(svinval, PRIV_VERSION_1_12_0, ext_svinval), 181 ISA_EXT_DATA_ENTRY(svnapot, PRIV_VERSION_1_12_0, ext_svnapot), 182 ISA_EXT_DATA_ENTRY(svpbmt, PRIV_VERSION_1_12_0, ext_svpbmt), 183 ISA_EXT_DATA_ENTRY(xtheadba, PRIV_VERSION_1_11_0, ext_xtheadba), 184 ISA_EXT_DATA_ENTRY(xtheadbb, PRIV_VERSION_1_11_0, ext_xtheadbb), 185 ISA_EXT_DATA_ENTRY(xtheadbs, PRIV_VERSION_1_11_0, ext_xtheadbs), 186 ISA_EXT_DATA_ENTRY(xtheadcmo, PRIV_VERSION_1_11_0, ext_xtheadcmo), 187 ISA_EXT_DATA_ENTRY(xtheadcondmov, PRIV_VERSION_1_11_0, ext_xtheadcondmov), 188 ISA_EXT_DATA_ENTRY(xtheadfmemidx, PRIV_VERSION_1_11_0, ext_xtheadfmemidx), 189 ISA_EXT_DATA_ENTRY(xtheadfmv, PRIV_VERSION_1_11_0, ext_xtheadfmv), 190 ISA_EXT_DATA_ENTRY(xtheadmac, PRIV_VERSION_1_11_0, ext_xtheadmac), 191 ISA_EXT_DATA_ENTRY(xtheadmemidx, PRIV_VERSION_1_11_0, ext_xtheadmemidx), 192 ISA_EXT_DATA_ENTRY(xtheadmempair, PRIV_VERSION_1_11_0, ext_xtheadmempair), 193 ISA_EXT_DATA_ENTRY(xtheadsync, PRIV_VERSION_1_11_0, ext_xtheadsync), 194 ISA_EXT_DATA_ENTRY(xventanacondops, PRIV_VERSION_1_12_0, ext_XVentanaCondOps), 195 196 DEFINE_PROP_END_OF_LIST(), 197 }; 198 199 bool isa_ext_is_enabled(RISCVCPU *cpu, uint32_t ext_offset) 200 { 201 bool *ext_enabled = (void *)&cpu->cfg + ext_offset; 202 203 return *ext_enabled; 204 } 205 206 void isa_ext_update_enabled(RISCVCPU *cpu, uint32_t ext_offset, bool en) 207 { 208 bool *ext_enabled = (void *)&cpu->cfg + ext_offset; 209 210 *ext_enabled = en; 211 } 212 213 bool riscv_cpu_is_vendor(Object *cpu_obj) 214 { 215 return object_dynamic_cast(cpu_obj, TYPE_RISCV_VENDOR_CPU) != NULL; 216 } 217 218 const char * const riscv_int_regnames[] = { 219 "x0/zero", "x1/ra", "x2/sp", "x3/gp", "x4/tp", "x5/t0", "x6/t1", 220 "x7/t2", "x8/s0", "x9/s1", "x10/a0", "x11/a1", "x12/a2", "x13/a3", 221 "x14/a4", "x15/a5", "x16/a6", "x17/a7", "x18/s2", "x19/s3", "x20/s4", 222 "x21/s5", "x22/s6", "x23/s7", "x24/s8", "x25/s9", "x26/s10", "x27/s11", 223 "x28/t3", "x29/t4", "x30/t5", "x31/t6" 224 }; 225 226 const char * const riscv_int_regnamesh[] = { 227 "x0h/zeroh", "x1h/rah", "x2h/sph", "x3h/gph", "x4h/tph", "x5h/t0h", 228 "x6h/t1h", "x7h/t2h", "x8h/s0h", "x9h/s1h", "x10h/a0h", "x11h/a1h", 229 "x12h/a2h", "x13h/a3h", "x14h/a4h", "x15h/a5h", "x16h/a6h", "x17h/a7h", 230 "x18h/s2h", "x19h/s3h", "x20h/s4h", "x21h/s5h", "x22h/s6h", "x23h/s7h", 231 "x24h/s8h", "x25h/s9h", "x26h/s10h", "x27h/s11h", "x28h/t3h", "x29h/t4h", 232 "x30h/t5h", "x31h/t6h" 233 }; 234 235 const char * const riscv_fpr_regnames[] = { 236 "f0/ft0", "f1/ft1", "f2/ft2", "f3/ft3", "f4/ft4", "f5/ft5", 237 "f6/ft6", "f7/ft7", "f8/fs0", "f9/fs1", "f10/fa0", "f11/fa1", 238 "f12/fa2", "f13/fa3", "f14/fa4", "f15/fa5", "f16/fa6", "f17/fa7", 239 "f18/fs2", "f19/fs3", "f20/fs4", "f21/fs5", "f22/fs6", "f23/fs7", 240 "f24/fs8", "f25/fs9", "f26/fs10", "f27/fs11", "f28/ft8", "f29/ft9", 241 "f30/ft10", "f31/ft11" 242 }; 243 244 const char * const riscv_rvv_regnames[] = { 245 "v0", "v1", "v2", "v3", "v4", "v5", "v6", 246 "v7", "v8", "v9", "v10", "v11", "v12", "v13", 247 "v14", "v15", "v16", "v17", "v18", "v19", "v20", 248 "v21", "v22", "v23", "v24", "v25", "v26", "v27", 249 "v28", "v29", "v30", "v31" 250 }; 251 252 static const char * const riscv_excp_names[] = { 253 "misaligned_fetch", 254 "fault_fetch", 255 "illegal_instruction", 256 "breakpoint", 257 "misaligned_load", 258 "fault_load", 259 "misaligned_store", 260 "fault_store", 261 "user_ecall", 262 "supervisor_ecall", 263 "hypervisor_ecall", 264 "machine_ecall", 265 "exec_page_fault", 266 "load_page_fault", 267 "reserved", 268 "store_page_fault", 269 "reserved", 270 "reserved", 271 "reserved", 272 "reserved", 273 "guest_exec_page_fault", 274 "guest_load_page_fault", 275 "reserved", 276 "guest_store_page_fault", 277 }; 278 279 static const char * const riscv_intr_names[] = { 280 "u_software", 281 "s_software", 282 "vs_software", 283 "m_software", 284 "u_timer", 285 "s_timer", 286 "vs_timer", 287 "m_timer", 288 "u_external", 289 "s_external", 290 "vs_external", 291 "m_external", 292 "reserved", 293 "reserved", 294 "reserved", 295 "reserved" 296 }; 297 298 const char *riscv_cpu_get_trap_name(target_ulong cause, bool async) 299 { 300 if (async) { 301 return (cause < ARRAY_SIZE(riscv_intr_names)) ? 302 riscv_intr_names[cause] : "(unknown)"; 303 } else { 304 return (cause < ARRAY_SIZE(riscv_excp_names)) ? 305 riscv_excp_names[cause] : "(unknown)"; 306 } 307 } 308 309 void riscv_cpu_set_misa_ext(CPURISCVState *env, uint32_t ext) 310 { 311 env->misa_ext_mask = env->misa_ext = ext; 312 } 313 314 int riscv_cpu_max_xlen(RISCVCPUClass *mcc) 315 { 316 return 16 << mcc->misa_mxl_max; 317 } 318 319 #ifndef CONFIG_USER_ONLY 320 static uint8_t satp_mode_from_str(const char *satp_mode_str) 321 { 322 if (!strncmp(satp_mode_str, "mbare", 5)) { 323 return VM_1_10_MBARE; 324 } 325 326 if (!strncmp(satp_mode_str, "sv32", 4)) { 327 return VM_1_10_SV32; 328 } 329 330 if (!strncmp(satp_mode_str, "sv39", 4)) { 331 return VM_1_10_SV39; 332 } 333 334 if (!strncmp(satp_mode_str, "sv48", 4)) { 335 return VM_1_10_SV48; 336 } 337 338 if (!strncmp(satp_mode_str, "sv57", 4)) { 339 return VM_1_10_SV57; 340 } 341 342 if (!strncmp(satp_mode_str, "sv64", 4)) { 343 return VM_1_10_SV64; 344 } 345 346 g_assert_not_reached(); 347 } 348 349 uint8_t satp_mode_max_from_map(uint32_t map) 350 { 351 /* 352 * 'map = 0' will make us return (31 - 32), which C will 353 * happily overflow to UINT_MAX. There's no good result to 354 * return if 'map = 0' (e.g. returning 0 will be ambiguous 355 * with the result for 'map = 1'). 356 * 357 * Assert out if map = 0. Callers will have to deal with 358 * it outside of this function. 359 */ 360 g_assert(map > 0); 361 362 /* map here has at least one bit set, so no problem with clz */ 363 return 31 - __builtin_clz(map); 364 } 365 366 const char *satp_mode_str(uint8_t satp_mode, bool is_32_bit) 367 { 368 if (is_32_bit) { 369 switch (satp_mode) { 370 case VM_1_10_SV32: 371 return "sv32"; 372 case VM_1_10_MBARE: 373 return "none"; 374 } 375 } else { 376 switch (satp_mode) { 377 case VM_1_10_SV64: 378 return "sv64"; 379 case VM_1_10_SV57: 380 return "sv57"; 381 case VM_1_10_SV48: 382 return "sv48"; 383 case VM_1_10_SV39: 384 return "sv39"; 385 case VM_1_10_MBARE: 386 return "none"; 387 } 388 } 389 390 g_assert_not_reached(); 391 } 392 393 static void set_satp_mode_max_supported(RISCVCPU *cpu, 394 uint8_t satp_mode) 395 { 396 bool rv32 = riscv_cpu_mxl(&cpu->env) == MXL_RV32; 397 const bool *valid_vm = rv32 ? valid_vm_1_10_32 : valid_vm_1_10_64; 398 399 for (int i = 0; i <= satp_mode; ++i) { 400 if (valid_vm[i]) { 401 cpu->cfg.satp_mode.supported |= (1 << i); 402 } 403 } 404 } 405 406 /* Set the satp mode to the max supported */ 407 static void set_satp_mode_default_map(RISCVCPU *cpu) 408 { 409 /* 410 * Bare CPUs do not default to the max available. 411 * Users must set a valid satp_mode in the command 412 * line. 413 */ 414 if (object_dynamic_cast(OBJECT(cpu), TYPE_RISCV_BARE_CPU) != NULL) { 415 warn_report("No satp mode set. Defaulting to 'bare'"); 416 cpu->cfg.satp_mode.map = (1 << VM_1_10_MBARE); 417 return; 418 } 419 420 cpu->cfg.satp_mode.map = cpu->cfg.satp_mode.supported; 421 } 422 #endif 423 424 static void riscv_any_cpu_init(Object *obj) 425 { 426 RISCVCPU *cpu = RISCV_CPU(obj); 427 CPURISCVState *env = &cpu->env; 428 riscv_cpu_set_misa_ext(env, RVI | RVM | RVA | RVF | RVD | RVC | RVU); 429 430 #ifndef CONFIG_USER_ONLY 431 set_satp_mode_max_supported(RISCV_CPU(obj), 432 riscv_cpu_mxl(&RISCV_CPU(obj)->env) == MXL_RV32 ? 433 VM_1_10_SV32 : VM_1_10_SV57); 434 #endif 435 436 env->priv_ver = PRIV_VERSION_LATEST; 437 438 /* inherited from parent obj via riscv_cpu_init() */ 439 cpu->cfg.ext_zifencei = true; 440 cpu->cfg.ext_zicsr = true; 441 cpu->cfg.mmu = true; 442 cpu->cfg.pmp = true; 443 } 444 445 static void riscv_max_cpu_init(Object *obj) 446 { 447 RISCVCPU *cpu = RISCV_CPU(obj); 448 CPURISCVState *env = &cpu->env; 449 450 cpu->cfg.mmu = true; 451 cpu->cfg.pmp = true; 452 453 env->priv_ver = PRIV_VERSION_LATEST; 454 #ifndef CONFIG_USER_ONLY 455 #ifdef TARGET_RISCV32 456 set_satp_mode_max_supported(cpu, VM_1_10_SV32); 457 #else 458 set_satp_mode_max_supported(cpu, VM_1_10_SV57); 459 #endif 460 #endif 461 } 462 463 #if defined(TARGET_RISCV64) 464 static void rv64_base_cpu_init(Object *obj) 465 { 466 RISCVCPU *cpu = RISCV_CPU(obj); 467 CPURISCVState *env = &cpu->env; 468 469 cpu->cfg.mmu = true; 470 cpu->cfg.pmp = true; 471 472 /* Set latest version of privileged specification */ 473 env->priv_ver = PRIV_VERSION_LATEST; 474 #ifndef CONFIG_USER_ONLY 475 set_satp_mode_max_supported(RISCV_CPU(obj), VM_1_10_SV57); 476 #endif 477 } 478 479 static void rv64_sifive_u_cpu_init(Object *obj) 480 { 481 RISCVCPU *cpu = RISCV_CPU(obj); 482 CPURISCVState *env = &cpu->env; 483 riscv_cpu_set_misa_ext(env, RVI | RVM | RVA | RVF | RVD | RVC | RVS | RVU); 484 env->priv_ver = PRIV_VERSION_1_10_0; 485 #ifndef CONFIG_USER_ONLY 486 set_satp_mode_max_supported(RISCV_CPU(obj), VM_1_10_SV39); 487 #endif 488 489 /* inherited from parent obj via riscv_cpu_init() */ 490 cpu->cfg.ext_zifencei = true; 491 cpu->cfg.ext_zicsr = true; 492 cpu->cfg.mmu = true; 493 cpu->cfg.pmp = true; 494 } 495 496 static void rv64_sifive_e_cpu_init(Object *obj) 497 { 498 CPURISCVState *env = &RISCV_CPU(obj)->env; 499 RISCVCPU *cpu = RISCV_CPU(obj); 500 501 riscv_cpu_set_misa_ext(env, RVI | RVM | RVA | RVC | RVU); 502 env->priv_ver = PRIV_VERSION_1_10_0; 503 #ifndef CONFIG_USER_ONLY 504 set_satp_mode_max_supported(cpu, VM_1_10_MBARE); 505 #endif 506 507 /* inherited from parent obj via riscv_cpu_init() */ 508 cpu->cfg.ext_zifencei = true; 509 cpu->cfg.ext_zicsr = true; 510 cpu->cfg.pmp = true; 511 } 512 513 static void rv64_thead_c906_cpu_init(Object *obj) 514 { 515 CPURISCVState *env = &RISCV_CPU(obj)->env; 516 RISCVCPU *cpu = RISCV_CPU(obj); 517 518 riscv_cpu_set_misa_ext(env, RVG | RVC | RVS | RVU); 519 env->priv_ver = PRIV_VERSION_1_11_0; 520 521 cpu->cfg.ext_zfa = true; 522 cpu->cfg.ext_zfh = true; 523 cpu->cfg.mmu = true; 524 cpu->cfg.ext_xtheadba = true; 525 cpu->cfg.ext_xtheadbb = true; 526 cpu->cfg.ext_xtheadbs = true; 527 cpu->cfg.ext_xtheadcmo = true; 528 cpu->cfg.ext_xtheadcondmov = true; 529 cpu->cfg.ext_xtheadfmemidx = true; 530 cpu->cfg.ext_xtheadmac = true; 531 cpu->cfg.ext_xtheadmemidx = true; 532 cpu->cfg.ext_xtheadmempair = true; 533 cpu->cfg.ext_xtheadsync = true; 534 535 cpu->cfg.mvendorid = THEAD_VENDOR_ID; 536 #ifndef CONFIG_USER_ONLY 537 set_satp_mode_max_supported(cpu, VM_1_10_SV39); 538 #endif 539 540 /* inherited from parent obj via riscv_cpu_init() */ 541 cpu->cfg.pmp = true; 542 } 543 544 static void rv64_veyron_v1_cpu_init(Object *obj) 545 { 546 CPURISCVState *env = &RISCV_CPU(obj)->env; 547 RISCVCPU *cpu = RISCV_CPU(obj); 548 549 riscv_cpu_set_misa_ext(env, RVG | RVC | RVS | RVU | RVH); 550 env->priv_ver = PRIV_VERSION_1_12_0; 551 552 /* Enable ISA extensions */ 553 cpu->cfg.mmu = true; 554 cpu->cfg.ext_zifencei = true; 555 cpu->cfg.ext_zicsr = true; 556 cpu->cfg.pmp = true; 557 cpu->cfg.ext_zicbom = true; 558 cpu->cfg.cbom_blocksize = 64; 559 cpu->cfg.cboz_blocksize = 64; 560 cpu->cfg.ext_zicboz = true; 561 cpu->cfg.ext_smaia = true; 562 cpu->cfg.ext_ssaia = true; 563 cpu->cfg.ext_sscofpmf = true; 564 cpu->cfg.ext_sstc = true; 565 cpu->cfg.ext_svinval = true; 566 cpu->cfg.ext_svnapot = true; 567 cpu->cfg.ext_svpbmt = true; 568 cpu->cfg.ext_smstateen = true; 569 cpu->cfg.ext_zba = true; 570 cpu->cfg.ext_zbb = true; 571 cpu->cfg.ext_zbc = true; 572 cpu->cfg.ext_zbs = true; 573 cpu->cfg.ext_XVentanaCondOps = true; 574 575 cpu->cfg.mvendorid = VEYRON_V1_MVENDORID; 576 cpu->cfg.marchid = VEYRON_V1_MARCHID; 577 cpu->cfg.mimpid = VEYRON_V1_MIMPID; 578 579 #ifndef CONFIG_USER_ONLY 580 set_satp_mode_max_supported(cpu, VM_1_10_SV48); 581 #endif 582 } 583 584 static void rv128_base_cpu_init(Object *obj) 585 { 586 RISCVCPU *cpu = RISCV_CPU(obj); 587 CPURISCVState *env = &cpu->env; 588 589 if (qemu_tcg_mttcg_enabled()) { 590 /* Missing 128-bit aligned atomics */ 591 error_report("128-bit RISC-V currently does not work with Multi " 592 "Threaded TCG. Please use: -accel tcg,thread=single"); 593 exit(EXIT_FAILURE); 594 } 595 596 cpu->cfg.mmu = true; 597 cpu->cfg.pmp = true; 598 599 /* Set latest version of privileged specification */ 600 env->priv_ver = PRIV_VERSION_LATEST; 601 #ifndef CONFIG_USER_ONLY 602 set_satp_mode_max_supported(RISCV_CPU(obj), VM_1_10_SV57); 603 #endif 604 } 605 606 static void rv64i_bare_cpu_init(Object *obj) 607 { 608 CPURISCVState *env = &RISCV_CPU(obj)->env; 609 riscv_cpu_set_misa_ext(env, RVI); 610 } 611 612 static void rv64e_bare_cpu_init(Object *obj) 613 { 614 CPURISCVState *env = &RISCV_CPU(obj)->env; 615 riscv_cpu_set_misa_ext(env, RVE); 616 } 617 #else 618 static void rv32_base_cpu_init(Object *obj) 619 { 620 RISCVCPU *cpu = RISCV_CPU(obj); 621 CPURISCVState *env = &cpu->env; 622 623 cpu->cfg.mmu = true; 624 cpu->cfg.pmp = true; 625 626 /* Set latest version of privileged specification */ 627 env->priv_ver = PRIV_VERSION_LATEST; 628 #ifndef CONFIG_USER_ONLY 629 set_satp_mode_max_supported(RISCV_CPU(obj), VM_1_10_SV32); 630 #endif 631 } 632 633 static void rv32_sifive_u_cpu_init(Object *obj) 634 { 635 RISCVCPU *cpu = RISCV_CPU(obj); 636 CPURISCVState *env = &cpu->env; 637 riscv_cpu_set_misa_ext(env, RVI | RVM | RVA | RVF | RVD | RVC | RVS | RVU); 638 env->priv_ver = PRIV_VERSION_1_10_0; 639 #ifndef CONFIG_USER_ONLY 640 set_satp_mode_max_supported(RISCV_CPU(obj), VM_1_10_SV32); 641 #endif 642 643 /* inherited from parent obj via riscv_cpu_init() */ 644 cpu->cfg.ext_zifencei = true; 645 cpu->cfg.ext_zicsr = true; 646 cpu->cfg.mmu = true; 647 cpu->cfg.pmp = true; 648 } 649 650 static void rv32_sifive_e_cpu_init(Object *obj) 651 { 652 CPURISCVState *env = &RISCV_CPU(obj)->env; 653 RISCVCPU *cpu = RISCV_CPU(obj); 654 655 riscv_cpu_set_misa_ext(env, RVI | RVM | RVA | RVC | RVU); 656 env->priv_ver = PRIV_VERSION_1_10_0; 657 #ifndef CONFIG_USER_ONLY 658 set_satp_mode_max_supported(cpu, VM_1_10_MBARE); 659 #endif 660 661 /* inherited from parent obj via riscv_cpu_init() */ 662 cpu->cfg.ext_zifencei = true; 663 cpu->cfg.ext_zicsr = true; 664 cpu->cfg.pmp = true; 665 } 666 667 static void rv32_ibex_cpu_init(Object *obj) 668 { 669 CPURISCVState *env = &RISCV_CPU(obj)->env; 670 RISCVCPU *cpu = RISCV_CPU(obj); 671 672 riscv_cpu_set_misa_ext(env, RVI | RVM | RVC | RVU); 673 env->priv_ver = PRIV_VERSION_1_12_0; 674 #ifndef CONFIG_USER_ONLY 675 set_satp_mode_max_supported(cpu, VM_1_10_MBARE); 676 #endif 677 /* inherited from parent obj via riscv_cpu_init() */ 678 cpu->cfg.ext_zifencei = true; 679 cpu->cfg.ext_zicsr = true; 680 cpu->cfg.pmp = true; 681 cpu->cfg.ext_smepmp = true; 682 } 683 684 static void rv32_imafcu_nommu_cpu_init(Object *obj) 685 { 686 CPURISCVState *env = &RISCV_CPU(obj)->env; 687 RISCVCPU *cpu = RISCV_CPU(obj); 688 689 riscv_cpu_set_misa_ext(env, RVI | RVM | RVA | RVF | RVC | RVU); 690 env->priv_ver = PRIV_VERSION_1_10_0; 691 #ifndef CONFIG_USER_ONLY 692 set_satp_mode_max_supported(cpu, VM_1_10_MBARE); 693 #endif 694 695 /* inherited from parent obj via riscv_cpu_init() */ 696 cpu->cfg.ext_zifencei = true; 697 cpu->cfg.ext_zicsr = true; 698 cpu->cfg.pmp = true; 699 } 700 701 static void rv32i_bare_cpu_init(Object *obj) 702 { 703 CPURISCVState *env = &RISCV_CPU(obj)->env; 704 riscv_cpu_set_misa_ext(env, RVI); 705 } 706 707 static void rv32e_bare_cpu_init(Object *obj) 708 { 709 CPURISCVState *env = &RISCV_CPU(obj)->env; 710 riscv_cpu_set_misa_ext(env, RVE); 711 } 712 #endif 713 714 static ObjectClass *riscv_cpu_class_by_name(const char *cpu_model) 715 { 716 ObjectClass *oc; 717 char *typename; 718 char **cpuname; 719 720 cpuname = g_strsplit(cpu_model, ",", 1); 721 typename = g_strdup_printf(RISCV_CPU_TYPE_NAME("%s"), cpuname[0]); 722 oc = object_class_by_name(typename); 723 g_strfreev(cpuname); 724 g_free(typename); 725 726 return oc; 727 } 728 729 char *riscv_cpu_get_name(RISCVCPU *cpu) 730 { 731 RISCVCPUClass *rcc = RISCV_CPU_GET_CLASS(cpu); 732 const char *typename = object_class_get_name(OBJECT_CLASS(rcc)); 733 734 g_assert(g_str_has_suffix(typename, RISCV_CPU_TYPE_SUFFIX)); 735 736 return cpu_model_from_type(typename); 737 } 738 739 static void riscv_cpu_dump_state(CPUState *cs, FILE *f, int flags) 740 { 741 RISCVCPU *cpu = RISCV_CPU(cs); 742 CPURISCVState *env = &cpu->env; 743 int i, j; 744 uint8_t *p; 745 746 #if !defined(CONFIG_USER_ONLY) 747 if (riscv_has_ext(env, RVH)) { 748 qemu_fprintf(f, " %s %d\n", "V = ", env->virt_enabled); 749 } 750 #endif 751 qemu_fprintf(f, " %s " TARGET_FMT_lx "\n", "pc ", env->pc); 752 #ifndef CONFIG_USER_ONLY 753 { 754 static const int dump_csrs[] = { 755 CSR_MHARTID, 756 CSR_MSTATUS, 757 CSR_MSTATUSH, 758 /* 759 * CSR_SSTATUS is intentionally omitted here as its value 760 * can be figured out by looking at CSR_MSTATUS 761 */ 762 CSR_HSTATUS, 763 CSR_VSSTATUS, 764 CSR_MIP, 765 CSR_MIE, 766 CSR_MIDELEG, 767 CSR_HIDELEG, 768 CSR_MEDELEG, 769 CSR_HEDELEG, 770 CSR_MTVEC, 771 CSR_STVEC, 772 CSR_VSTVEC, 773 CSR_MEPC, 774 CSR_SEPC, 775 CSR_VSEPC, 776 CSR_MCAUSE, 777 CSR_SCAUSE, 778 CSR_VSCAUSE, 779 CSR_MTVAL, 780 CSR_STVAL, 781 CSR_HTVAL, 782 CSR_MTVAL2, 783 CSR_MSCRATCH, 784 CSR_SSCRATCH, 785 CSR_SATP, 786 CSR_MMTE, 787 CSR_UPMBASE, 788 CSR_UPMMASK, 789 CSR_SPMBASE, 790 CSR_SPMMASK, 791 CSR_MPMBASE, 792 CSR_MPMMASK, 793 }; 794 795 for (i = 0; i < ARRAY_SIZE(dump_csrs); ++i) { 796 int csrno = dump_csrs[i]; 797 target_ulong val = 0; 798 RISCVException res = riscv_csrrw_debug(env, csrno, &val, 0, 0); 799 800 /* 801 * Rely on the smode, hmode, etc, predicates within csr.c 802 * to do the filtering of the registers that are present. 803 */ 804 if (res == RISCV_EXCP_NONE) { 805 qemu_fprintf(f, " %-8s " TARGET_FMT_lx "\n", 806 csr_ops[csrno].name, val); 807 } 808 } 809 } 810 #endif 811 812 for (i = 0; i < 32; i++) { 813 qemu_fprintf(f, " %-8s " TARGET_FMT_lx, 814 riscv_int_regnames[i], env->gpr[i]); 815 if ((i & 3) == 3) { 816 qemu_fprintf(f, "\n"); 817 } 818 } 819 if (flags & CPU_DUMP_FPU) { 820 for (i = 0; i < 32; i++) { 821 qemu_fprintf(f, " %-8s %016" PRIx64, 822 riscv_fpr_regnames[i], env->fpr[i]); 823 if ((i & 3) == 3) { 824 qemu_fprintf(f, "\n"); 825 } 826 } 827 } 828 if (riscv_has_ext(env, RVV) && (flags & CPU_DUMP_VPU)) { 829 static const int dump_rvv_csrs[] = { 830 CSR_VSTART, 831 CSR_VXSAT, 832 CSR_VXRM, 833 CSR_VCSR, 834 CSR_VL, 835 CSR_VTYPE, 836 CSR_VLENB, 837 }; 838 for (i = 0; i < ARRAY_SIZE(dump_rvv_csrs); ++i) { 839 int csrno = dump_rvv_csrs[i]; 840 target_ulong val = 0; 841 RISCVException res = riscv_csrrw_debug(env, csrno, &val, 0, 0); 842 843 /* 844 * Rely on the smode, hmode, etc, predicates within csr.c 845 * to do the filtering of the registers that are present. 846 */ 847 if (res == RISCV_EXCP_NONE) { 848 qemu_fprintf(f, " %-8s " TARGET_FMT_lx "\n", 849 csr_ops[csrno].name, val); 850 } 851 } 852 uint16_t vlenb = cpu->cfg.vlenb; 853 854 for (i = 0; i < 32; i++) { 855 qemu_fprintf(f, " %-8s ", riscv_rvv_regnames[i]); 856 p = (uint8_t *)env->vreg; 857 for (j = vlenb - 1 ; j >= 0; j--) { 858 qemu_fprintf(f, "%02x", *(p + i * vlenb + BYTE(j))); 859 } 860 qemu_fprintf(f, "\n"); 861 } 862 } 863 } 864 865 static void riscv_cpu_set_pc(CPUState *cs, vaddr value) 866 { 867 RISCVCPU *cpu = RISCV_CPU(cs); 868 CPURISCVState *env = &cpu->env; 869 870 if (env->xl == MXL_RV32) { 871 env->pc = (int32_t)value; 872 } else { 873 env->pc = value; 874 } 875 } 876 877 static vaddr riscv_cpu_get_pc(CPUState *cs) 878 { 879 RISCVCPU *cpu = RISCV_CPU(cs); 880 CPURISCVState *env = &cpu->env; 881 882 /* Match cpu_get_tb_cpu_state. */ 883 if (env->xl == MXL_RV32) { 884 return env->pc & UINT32_MAX; 885 } 886 return env->pc; 887 } 888 889 static bool riscv_cpu_has_work(CPUState *cs) 890 { 891 #ifndef CONFIG_USER_ONLY 892 RISCVCPU *cpu = RISCV_CPU(cs); 893 CPURISCVState *env = &cpu->env; 894 /* 895 * Definition of the WFI instruction requires it to ignore the privilege 896 * mode and delegation registers, but respect individual enables 897 */ 898 return riscv_cpu_all_pending(env) != 0 || 899 riscv_cpu_sirq_pending(env) != RISCV_EXCP_NONE || 900 riscv_cpu_vsirq_pending(env) != RISCV_EXCP_NONE; 901 #else 902 return true; 903 #endif 904 } 905 906 static int riscv_cpu_mmu_index(CPUState *cs, bool ifetch) 907 { 908 return riscv_env_mmu_index(cpu_env(cs), ifetch); 909 } 910 911 static void riscv_cpu_reset_hold(Object *obj) 912 { 913 #ifndef CONFIG_USER_ONLY 914 uint8_t iprio; 915 int i, irq, rdzero; 916 #endif 917 CPUState *cs = CPU(obj); 918 RISCVCPU *cpu = RISCV_CPU(cs); 919 RISCVCPUClass *mcc = RISCV_CPU_GET_CLASS(cpu); 920 CPURISCVState *env = &cpu->env; 921 922 if (mcc->parent_phases.hold) { 923 mcc->parent_phases.hold(obj); 924 } 925 #ifndef CONFIG_USER_ONLY 926 env->misa_mxl = mcc->misa_mxl_max; 927 env->priv = PRV_M; 928 env->mstatus &= ~(MSTATUS_MIE | MSTATUS_MPRV); 929 if (env->misa_mxl > MXL_RV32) { 930 /* 931 * The reset status of SXL/UXL is undefined, but mstatus is WARL 932 * and we must ensure that the value after init is valid for read. 933 */ 934 env->mstatus = set_field(env->mstatus, MSTATUS64_SXL, env->misa_mxl); 935 env->mstatus = set_field(env->mstatus, MSTATUS64_UXL, env->misa_mxl); 936 if (riscv_has_ext(env, RVH)) { 937 env->vsstatus = set_field(env->vsstatus, 938 MSTATUS64_SXL, env->misa_mxl); 939 env->vsstatus = set_field(env->vsstatus, 940 MSTATUS64_UXL, env->misa_mxl); 941 env->mstatus_hs = set_field(env->mstatus_hs, 942 MSTATUS64_SXL, env->misa_mxl); 943 env->mstatus_hs = set_field(env->mstatus_hs, 944 MSTATUS64_UXL, env->misa_mxl); 945 } 946 } 947 env->mcause = 0; 948 env->miclaim = MIP_SGEIP; 949 env->pc = env->resetvec; 950 env->bins = 0; 951 env->two_stage_lookup = false; 952 953 env->menvcfg = (cpu->cfg.ext_svpbmt ? MENVCFG_PBMTE : 0) | 954 (cpu->cfg.ext_svadu ? MENVCFG_ADUE : 0); 955 env->henvcfg = (cpu->cfg.ext_svpbmt ? HENVCFG_PBMTE : 0) | 956 (cpu->cfg.ext_svadu ? HENVCFG_ADUE : 0); 957 958 /* Initialized default priorities of local interrupts. */ 959 for (i = 0; i < ARRAY_SIZE(env->miprio); i++) { 960 iprio = riscv_cpu_default_priority(i); 961 env->miprio[i] = (i == IRQ_M_EXT) ? 0 : iprio; 962 env->siprio[i] = (i == IRQ_S_EXT) ? 0 : iprio; 963 env->hviprio[i] = 0; 964 } 965 i = 0; 966 while (!riscv_cpu_hviprio_index2irq(i, &irq, &rdzero)) { 967 if (!rdzero) { 968 env->hviprio[irq] = env->miprio[irq]; 969 } 970 i++; 971 } 972 /* mmte is supposed to have pm.current hardwired to 1 */ 973 env->mmte |= (EXT_STATUS_INITIAL | MMTE_M_PM_CURRENT); 974 975 /* 976 * Bits 10, 6, 2 and 12 of mideleg are read only 1 when the Hypervisor 977 * extension is enabled. 978 */ 979 if (riscv_has_ext(env, RVH)) { 980 env->mideleg |= HS_MODE_INTERRUPTS; 981 } 982 983 /* 984 * Clear mseccfg and unlock all the PMP entries upon reset. 985 * This is allowed as per the priv and smepmp specifications 986 * and is needed to clear stale entries across reboots. 987 */ 988 if (riscv_cpu_cfg(env)->ext_smepmp) { 989 env->mseccfg = 0; 990 } 991 992 pmp_unlock_entries(env); 993 #endif 994 env->xl = riscv_cpu_mxl(env); 995 riscv_cpu_update_mask(env); 996 cs->exception_index = RISCV_EXCP_NONE; 997 env->load_res = -1; 998 set_default_nan_mode(1, &env->fp_status); 999 1000 #ifndef CONFIG_USER_ONLY 1001 if (cpu->cfg.debug) { 1002 riscv_trigger_reset_hold(env); 1003 } 1004 1005 if (kvm_enabled()) { 1006 kvm_riscv_reset_vcpu(cpu); 1007 } 1008 #endif 1009 } 1010 1011 static void riscv_cpu_disas_set_info(CPUState *s, disassemble_info *info) 1012 { 1013 RISCVCPU *cpu = RISCV_CPU(s); 1014 CPURISCVState *env = &cpu->env; 1015 info->target_info = &cpu->cfg; 1016 1017 switch (env->xl) { 1018 case MXL_RV32: 1019 info->print_insn = print_insn_riscv32; 1020 break; 1021 case MXL_RV64: 1022 info->print_insn = print_insn_riscv64; 1023 break; 1024 case MXL_RV128: 1025 info->print_insn = print_insn_riscv128; 1026 break; 1027 default: 1028 g_assert_not_reached(); 1029 } 1030 } 1031 1032 #ifndef CONFIG_USER_ONLY 1033 static void riscv_cpu_satp_mode_finalize(RISCVCPU *cpu, Error **errp) 1034 { 1035 bool rv32 = riscv_cpu_is_32bit(cpu); 1036 uint8_t satp_mode_map_max, satp_mode_supported_max; 1037 1038 /* The CPU wants the OS to decide which satp mode to use */ 1039 if (cpu->cfg.satp_mode.supported == 0) { 1040 return; 1041 } 1042 1043 satp_mode_supported_max = 1044 satp_mode_max_from_map(cpu->cfg.satp_mode.supported); 1045 1046 if (cpu->cfg.satp_mode.map == 0) { 1047 if (cpu->cfg.satp_mode.init == 0) { 1048 /* If unset by the user, we fallback to the default satp mode. */ 1049 set_satp_mode_default_map(cpu); 1050 } else { 1051 /* 1052 * Find the lowest level that was disabled and then enable the 1053 * first valid level below which can be found in 1054 * valid_vm_1_10_32/64. 1055 */ 1056 for (int i = 1; i < 16; ++i) { 1057 if ((cpu->cfg.satp_mode.init & (1 << i)) && 1058 (cpu->cfg.satp_mode.supported & (1 << i))) { 1059 for (int j = i - 1; j >= 0; --j) { 1060 if (cpu->cfg.satp_mode.supported & (1 << j)) { 1061 cpu->cfg.satp_mode.map |= (1 << j); 1062 break; 1063 } 1064 } 1065 break; 1066 } 1067 } 1068 } 1069 } 1070 1071 satp_mode_map_max = satp_mode_max_from_map(cpu->cfg.satp_mode.map); 1072 1073 /* Make sure the user asked for a supported configuration (HW and qemu) */ 1074 if (satp_mode_map_max > satp_mode_supported_max) { 1075 error_setg(errp, "satp_mode %s is higher than hw max capability %s", 1076 satp_mode_str(satp_mode_map_max, rv32), 1077 satp_mode_str(satp_mode_supported_max, rv32)); 1078 return; 1079 } 1080 1081 /* 1082 * Make sure the user did not ask for an invalid configuration as per 1083 * the specification. 1084 */ 1085 if (!rv32) { 1086 for (int i = satp_mode_map_max - 1; i >= 0; --i) { 1087 if (!(cpu->cfg.satp_mode.map & (1 << i)) && 1088 (cpu->cfg.satp_mode.init & (1 << i)) && 1089 (cpu->cfg.satp_mode.supported & (1 << i))) { 1090 error_setg(errp, "cannot disable %s satp mode if %s " 1091 "is enabled", satp_mode_str(i, false), 1092 satp_mode_str(satp_mode_map_max, false)); 1093 return; 1094 } 1095 } 1096 } 1097 1098 /* Finally expand the map so that all valid modes are set */ 1099 for (int i = satp_mode_map_max - 1; i >= 0; --i) { 1100 if (cpu->cfg.satp_mode.supported & (1 << i)) { 1101 cpu->cfg.satp_mode.map |= (1 << i); 1102 } 1103 } 1104 } 1105 #endif 1106 1107 void riscv_cpu_finalize_features(RISCVCPU *cpu, Error **errp) 1108 { 1109 Error *local_err = NULL; 1110 1111 #ifndef CONFIG_USER_ONLY 1112 riscv_cpu_satp_mode_finalize(cpu, &local_err); 1113 if (local_err != NULL) { 1114 error_propagate(errp, local_err); 1115 return; 1116 } 1117 #endif 1118 1119 if (tcg_enabled()) { 1120 riscv_tcg_cpu_finalize_features(cpu, &local_err); 1121 if (local_err != NULL) { 1122 error_propagate(errp, local_err); 1123 return; 1124 } 1125 } else if (kvm_enabled()) { 1126 riscv_kvm_cpu_finalize_features(cpu, &local_err); 1127 if (local_err != NULL) { 1128 error_propagate(errp, local_err); 1129 return; 1130 } 1131 } 1132 } 1133 1134 static void riscv_cpu_realize(DeviceState *dev, Error **errp) 1135 { 1136 CPUState *cs = CPU(dev); 1137 RISCVCPU *cpu = RISCV_CPU(dev); 1138 RISCVCPUClass *mcc = RISCV_CPU_GET_CLASS(dev); 1139 Error *local_err = NULL; 1140 1141 if (object_dynamic_cast(OBJECT(dev), TYPE_RISCV_CPU_ANY) != NULL) { 1142 warn_report("The 'any' CPU is deprecated and will be " 1143 "removed in the future."); 1144 } 1145 1146 cpu_exec_realizefn(cs, &local_err); 1147 if (local_err != NULL) { 1148 error_propagate(errp, local_err); 1149 return; 1150 } 1151 1152 riscv_cpu_finalize_features(cpu, &local_err); 1153 if (local_err != NULL) { 1154 error_propagate(errp, local_err); 1155 return; 1156 } 1157 1158 riscv_cpu_register_gdb_regs_for_features(cs); 1159 1160 #ifndef CONFIG_USER_ONLY 1161 if (cpu->cfg.debug) { 1162 riscv_trigger_realize(&cpu->env); 1163 } 1164 #endif 1165 1166 qemu_init_vcpu(cs); 1167 cpu_reset(cs); 1168 1169 mcc->parent_realize(dev, errp); 1170 } 1171 1172 bool riscv_cpu_accelerator_compatible(RISCVCPU *cpu) 1173 { 1174 if (tcg_enabled()) { 1175 return riscv_cpu_tcg_compatible(cpu); 1176 } 1177 1178 return true; 1179 } 1180 1181 #ifndef CONFIG_USER_ONLY 1182 static void cpu_riscv_get_satp(Object *obj, Visitor *v, const char *name, 1183 void *opaque, Error **errp) 1184 { 1185 RISCVSATPMap *satp_map = opaque; 1186 uint8_t satp = satp_mode_from_str(name); 1187 bool value; 1188 1189 value = satp_map->map & (1 << satp); 1190 1191 visit_type_bool(v, name, &value, errp); 1192 } 1193 1194 static void cpu_riscv_set_satp(Object *obj, Visitor *v, const char *name, 1195 void *opaque, Error **errp) 1196 { 1197 RISCVSATPMap *satp_map = opaque; 1198 uint8_t satp = satp_mode_from_str(name); 1199 bool value; 1200 1201 if (!visit_type_bool(v, name, &value, errp)) { 1202 return; 1203 } 1204 1205 satp_map->map = deposit32(satp_map->map, satp, 1, value); 1206 satp_map->init |= 1 << satp; 1207 } 1208 1209 void riscv_add_satp_mode_properties(Object *obj) 1210 { 1211 RISCVCPU *cpu = RISCV_CPU(obj); 1212 1213 if (cpu->env.misa_mxl == MXL_RV32) { 1214 object_property_add(obj, "sv32", "bool", cpu_riscv_get_satp, 1215 cpu_riscv_set_satp, NULL, &cpu->cfg.satp_mode); 1216 } else { 1217 object_property_add(obj, "sv39", "bool", cpu_riscv_get_satp, 1218 cpu_riscv_set_satp, NULL, &cpu->cfg.satp_mode); 1219 object_property_add(obj, "sv48", "bool", cpu_riscv_get_satp, 1220 cpu_riscv_set_satp, NULL, &cpu->cfg.satp_mode); 1221 object_property_add(obj, "sv57", "bool", cpu_riscv_get_satp, 1222 cpu_riscv_set_satp, NULL, &cpu->cfg.satp_mode); 1223 object_property_add(obj, "sv64", "bool", cpu_riscv_get_satp, 1224 cpu_riscv_set_satp, NULL, &cpu->cfg.satp_mode); 1225 } 1226 } 1227 1228 static void riscv_cpu_set_irq(void *opaque, int irq, int level) 1229 { 1230 RISCVCPU *cpu = RISCV_CPU(opaque); 1231 CPURISCVState *env = &cpu->env; 1232 1233 if (irq < IRQ_LOCAL_MAX) { 1234 switch (irq) { 1235 case IRQ_U_SOFT: 1236 case IRQ_S_SOFT: 1237 case IRQ_VS_SOFT: 1238 case IRQ_M_SOFT: 1239 case IRQ_U_TIMER: 1240 case IRQ_S_TIMER: 1241 case IRQ_VS_TIMER: 1242 case IRQ_M_TIMER: 1243 case IRQ_U_EXT: 1244 case IRQ_VS_EXT: 1245 case IRQ_M_EXT: 1246 if (kvm_enabled()) { 1247 kvm_riscv_set_irq(cpu, irq, level); 1248 } else { 1249 riscv_cpu_update_mip(env, 1 << irq, BOOL_TO_MASK(level)); 1250 } 1251 break; 1252 case IRQ_S_EXT: 1253 if (kvm_enabled()) { 1254 kvm_riscv_set_irq(cpu, irq, level); 1255 } else { 1256 env->external_seip = level; 1257 riscv_cpu_update_mip(env, 1 << irq, 1258 BOOL_TO_MASK(level | env->software_seip)); 1259 } 1260 break; 1261 default: 1262 g_assert_not_reached(); 1263 } 1264 } else if (irq < (IRQ_LOCAL_MAX + IRQ_LOCAL_GUEST_MAX)) { 1265 /* Require H-extension for handling guest local interrupts */ 1266 if (!riscv_has_ext(env, RVH)) { 1267 g_assert_not_reached(); 1268 } 1269 1270 /* Compute bit position in HGEIP CSR */ 1271 irq = irq - IRQ_LOCAL_MAX + 1; 1272 if (env->geilen < irq) { 1273 g_assert_not_reached(); 1274 } 1275 1276 /* Update HGEIP CSR */ 1277 env->hgeip &= ~((target_ulong)1 << irq); 1278 if (level) { 1279 env->hgeip |= (target_ulong)1 << irq; 1280 } 1281 1282 /* Update mip.SGEIP bit */ 1283 riscv_cpu_update_mip(env, MIP_SGEIP, 1284 BOOL_TO_MASK(!!(env->hgeie & env->hgeip))); 1285 } else { 1286 g_assert_not_reached(); 1287 } 1288 } 1289 #endif /* CONFIG_USER_ONLY */ 1290 1291 static bool riscv_cpu_is_dynamic(Object *cpu_obj) 1292 { 1293 return object_dynamic_cast(cpu_obj, TYPE_RISCV_DYNAMIC_CPU) != NULL; 1294 } 1295 1296 static void riscv_cpu_post_init(Object *obj) 1297 { 1298 accel_cpu_instance_init(CPU(obj)); 1299 } 1300 1301 static void riscv_cpu_init(Object *obj) 1302 { 1303 RISCVCPUClass *mcc = RISCV_CPU_GET_CLASS(obj); 1304 RISCVCPU *cpu = RISCV_CPU(obj); 1305 CPURISCVState *env = &cpu->env; 1306 1307 env->misa_mxl = mcc->misa_mxl_max; 1308 1309 #ifndef CONFIG_USER_ONLY 1310 qdev_init_gpio_in(DEVICE(obj), riscv_cpu_set_irq, 1311 IRQ_LOCAL_MAX + IRQ_LOCAL_GUEST_MAX); 1312 #endif /* CONFIG_USER_ONLY */ 1313 1314 general_user_opts = g_hash_table_new(g_str_hash, g_str_equal); 1315 1316 /* 1317 * The timer and performance counters extensions were supported 1318 * in QEMU before they were added as discrete extensions in the 1319 * ISA. To keep compatibility we'll always default them to 'true' 1320 * for all CPUs. Each accelerator will decide what to do when 1321 * users disable them. 1322 */ 1323 RISCV_CPU(obj)->cfg.ext_zicntr = true; 1324 RISCV_CPU(obj)->cfg.ext_zihpm = true; 1325 1326 /* Default values for non-bool cpu properties */ 1327 cpu->cfg.pmu_mask = MAKE_64BIT_MASK(3, 16); 1328 cpu->cfg.vlenb = 128 >> 3; 1329 cpu->cfg.elen = 64; 1330 cpu->cfg.cbom_blocksize = 64; 1331 cpu->cfg.cbop_blocksize = 64; 1332 cpu->cfg.cboz_blocksize = 64; 1333 cpu->env.vext_ver = VEXT_VERSION_1_00_0; 1334 } 1335 1336 static void riscv_bare_cpu_init(Object *obj) 1337 { 1338 RISCVCPU *cpu = RISCV_CPU(obj); 1339 1340 /* 1341 * Bare CPUs do not inherit the timer and performance 1342 * counters from the parent class (see riscv_cpu_init() 1343 * for info on why the parent enables them). 1344 * 1345 * Users have to explicitly enable these counters for 1346 * bare CPUs. 1347 */ 1348 cpu->cfg.ext_zicntr = false; 1349 cpu->cfg.ext_zihpm = false; 1350 1351 /* Set to QEMU's first supported priv version */ 1352 cpu->env.priv_ver = PRIV_VERSION_1_10_0; 1353 1354 /* 1355 * Support all available satp_mode settings. The default 1356 * value will be set to MBARE if the user doesn't set 1357 * satp_mode manually (see set_satp_mode_default()). 1358 */ 1359 #ifndef CONFIG_USER_ONLY 1360 set_satp_mode_max_supported(cpu, VM_1_10_SV64); 1361 #endif 1362 } 1363 1364 typedef struct misa_ext_info { 1365 const char *name; 1366 const char *description; 1367 } MISAExtInfo; 1368 1369 #define MISA_INFO_IDX(_bit) \ 1370 __builtin_ctz(_bit) 1371 1372 #define MISA_EXT_INFO(_bit, _propname, _descr) \ 1373 [MISA_INFO_IDX(_bit)] = {.name = _propname, .description = _descr} 1374 1375 static const MISAExtInfo misa_ext_info_arr[] = { 1376 MISA_EXT_INFO(RVA, "a", "Atomic instructions"), 1377 MISA_EXT_INFO(RVC, "c", "Compressed instructions"), 1378 MISA_EXT_INFO(RVD, "d", "Double-precision float point"), 1379 MISA_EXT_INFO(RVF, "f", "Single-precision float point"), 1380 MISA_EXT_INFO(RVI, "i", "Base integer instruction set"), 1381 MISA_EXT_INFO(RVE, "e", "Base integer instruction set (embedded)"), 1382 MISA_EXT_INFO(RVM, "m", "Integer multiplication and division"), 1383 MISA_EXT_INFO(RVS, "s", "Supervisor-level instructions"), 1384 MISA_EXT_INFO(RVU, "u", "User-level instructions"), 1385 MISA_EXT_INFO(RVH, "h", "Hypervisor"), 1386 MISA_EXT_INFO(RVJ, "x-j", "Dynamic translated languages"), 1387 MISA_EXT_INFO(RVV, "v", "Vector operations"), 1388 MISA_EXT_INFO(RVG, "g", "General purpose (IMAFD_Zicsr_Zifencei)"), 1389 MISA_EXT_INFO(RVB, "x-b", "Bit manipulation (Zba_Zbb_Zbs)") 1390 }; 1391 1392 static void riscv_cpu_validate_misa_mxl(RISCVCPUClass *mcc) 1393 { 1394 CPUClass *cc = CPU_CLASS(mcc); 1395 1396 /* Validate that MISA_MXL is set properly. */ 1397 switch (mcc->misa_mxl_max) { 1398 #ifdef TARGET_RISCV64 1399 case MXL_RV64: 1400 case MXL_RV128: 1401 cc->gdb_core_xml_file = "riscv-64bit-cpu.xml"; 1402 break; 1403 #endif 1404 case MXL_RV32: 1405 cc->gdb_core_xml_file = "riscv-32bit-cpu.xml"; 1406 break; 1407 default: 1408 g_assert_not_reached(); 1409 } 1410 } 1411 1412 static int riscv_validate_misa_info_idx(uint32_t bit) 1413 { 1414 int idx; 1415 1416 /* 1417 * Our lowest valid input (RVA) is 1 and 1418 * __builtin_ctz() is UB with zero. 1419 */ 1420 g_assert(bit != 0); 1421 idx = MISA_INFO_IDX(bit); 1422 1423 g_assert(idx < ARRAY_SIZE(misa_ext_info_arr)); 1424 return idx; 1425 } 1426 1427 const char *riscv_get_misa_ext_name(uint32_t bit) 1428 { 1429 int idx = riscv_validate_misa_info_idx(bit); 1430 const char *val = misa_ext_info_arr[idx].name; 1431 1432 g_assert(val != NULL); 1433 return val; 1434 } 1435 1436 const char *riscv_get_misa_ext_description(uint32_t bit) 1437 { 1438 int idx = riscv_validate_misa_info_idx(bit); 1439 const char *val = misa_ext_info_arr[idx].description; 1440 1441 g_assert(val != NULL); 1442 return val; 1443 } 1444 1445 #define MULTI_EXT_CFG_BOOL(_name, _prop, _defval) \ 1446 {.name = _name, .offset = CPU_CFG_OFFSET(_prop), \ 1447 .enabled = _defval} 1448 1449 const RISCVCPUMultiExtConfig riscv_cpu_extensions[] = { 1450 /* Defaults for standard extensions */ 1451 MULTI_EXT_CFG_BOOL("sscofpmf", ext_sscofpmf, false), 1452 MULTI_EXT_CFG_BOOL("zifencei", ext_zifencei, true), 1453 MULTI_EXT_CFG_BOOL("zicsr", ext_zicsr, true), 1454 MULTI_EXT_CFG_BOOL("zihintntl", ext_zihintntl, true), 1455 MULTI_EXT_CFG_BOOL("zihintpause", ext_zihintpause, true), 1456 MULTI_EXT_CFG_BOOL("zacas", ext_zacas, false), 1457 MULTI_EXT_CFG_BOOL("zawrs", ext_zawrs, true), 1458 MULTI_EXT_CFG_BOOL("zfa", ext_zfa, true), 1459 MULTI_EXT_CFG_BOOL("zfh", ext_zfh, false), 1460 MULTI_EXT_CFG_BOOL("zfhmin", ext_zfhmin, false), 1461 MULTI_EXT_CFG_BOOL("zve32f", ext_zve32f, false), 1462 MULTI_EXT_CFG_BOOL("zve64f", ext_zve64f, false), 1463 MULTI_EXT_CFG_BOOL("zve64d", ext_zve64d, false), 1464 MULTI_EXT_CFG_BOOL("sstc", ext_sstc, true), 1465 1466 MULTI_EXT_CFG_BOOL("smepmp", ext_smepmp, false), 1467 MULTI_EXT_CFG_BOOL("smstateen", ext_smstateen, false), 1468 MULTI_EXT_CFG_BOOL("svadu", ext_svadu, true), 1469 MULTI_EXT_CFG_BOOL("svinval", ext_svinval, false), 1470 MULTI_EXT_CFG_BOOL("svnapot", ext_svnapot, false), 1471 MULTI_EXT_CFG_BOOL("svpbmt", ext_svpbmt, false), 1472 1473 MULTI_EXT_CFG_BOOL("zicntr", ext_zicntr, true), 1474 MULTI_EXT_CFG_BOOL("zihpm", ext_zihpm, true), 1475 1476 MULTI_EXT_CFG_BOOL("zba", ext_zba, true), 1477 MULTI_EXT_CFG_BOOL("zbb", ext_zbb, true), 1478 MULTI_EXT_CFG_BOOL("zbc", ext_zbc, true), 1479 MULTI_EXT_CFG_BOOL("zbkb", ext_zbkb, false), 1480 MULTI_EXT_CFG_BOOL("zbkc", ext_zbkc, false), 1481 MULTI_EXT_CFG_BOOL("zbkx", ext_zbkx, false), 1482 MULTI_EXT_CFG_BOOL("zbs", ext_zbs, true), 1483 MULTI_EXT_CFG_BOOL("zk", ext_zk, false), 1484 MULTI_EXT_CFG_BOOL("zkn", ext_zkn, false), 1485 MULTI_EXT_CFG_BOOL("zknd", ext_zknd, false), 1486 MULTI_EXT_CFG_BOOL("zkne", ext_zkne, false), 1487 MULTI_EXT_CFG_BOOL("zknh", ext_zknh, false), 1488 MULTI_EXT_CFG_BOOL("zkr", ext_zkr, false), 1489 MULTI_EXT_CFG_BOOL("zks", ext_zks, false), 1490 MULTI_EXT_CFG_BOOL("zksed", ext_zksed, false), 1491 MULTI_EXT_CFG_BOOL("zksh", ext_zksh, false), 1492 MULTI_EXT_CFG_BOOL("zkt", ext_zkt, false), 1493 1494 MULTI_EXT_CFG_BOOL("zdinx", ext_zdinx, false), 1495 MULTI_EXT_CFG_BOOL("zfinx", ext_zfinx, false), 1496 MULTI_EXT_CFG_BOOL("zhinx", ext_zhinx, false), 1497 MULTI_EXT_CFG_BOOL("zhinxmin", ext_zhinxmin, false), 1498 1499 MULTI_EXT_CFG_BOOL("zicbom", ext_zicbom, true), 1500 MULTI_EXT_CFG_BOOL("zicbop", ext_zicbop, true), 1501 MULTI_EXT_CFG_BOOL("zicboz", ext_zicboz, true), 1502 1503 MULTI_EXT_CFG_BOOL("zmmul", ext_zmmul, false), 1504 1505 MULTI_EXT_CFG_BOOL("zca", ext_zca, false), 1506 MULTI_EXT_CFG_BOOL("zcb", ext_zcb, false), 1507 MULTI_EXT_CFG_BOOL("zcd", ext_zcd, false), 1508 MULTI_EXT_CFG_BOOL("zce", ext_zce, false), 1509 MULTI_EXT_CFG_BOOL("zcf", ext_zcf, false), 1510 MULTI_EXT_CFG_BOOL("zcmp", ext_zcmp, false), 1511 MULTI_EXT_CFG_BOOL("zcmt", ext_zcmt, false), 1512 MULTI_EXT_CFG_BOOL("zicond", ext_zicond, false), 1513 1514 /* Vector cryptography extensions */ 1515 MULTI_EXT_CFG_BOOL("zvbb", ext_zvbb, false), 1516 MULTI_EXT_CFG_BOOL("zvbc", ext_zvbc, false), 1517 MULTI_EXT_CFG_BOOL("zvkb", ext_zvkg, false), 1518 MULTI_EXT_CFG_BOOL("zvkg", ext_zvkg, false), 1519 MULTI_EXT_CFG_BOOL("zvkned", ext_zvkned, false), 1520 MULTI_EXT_CFG_BOOL("zvknha", ext_zvknha, false), 1521 MULTI_EXT_CFG_BOOL("zvknhb", ext_zvknhb, false), 1522 MULTI_EXT_CFG_BOOL("zvksed", ext_zvksed, false), 1523 MULTI_EXT_CFG_BOOL("zvksh", ext_zvksh, false), 1524 MULTI_EXT_CFG_BOOL("zvkt", ext_zvkt, false), 1525 MULTI_EXT_CFG_BOOL("zvkn", ext_zvkn, false), 1526 MULTI_EXT_CFG_BOOL("zvknc", ext_zvknc, false), 1527 MULTI_EXT_CFG_BOOL("zvkng", ext_zvkng, false), 1528 MULTI_EXT_CFG_BOOL("zvks", ext_zvks, false), 1529 MULTI_EXT_CFG_BOOL("zvksc", ext_zvksc, false), 1530 MULTI_EXT_CFG_BOOL("zvksg", ext_zvksg, false), 1531 1532 DEFINE_PROP_END_OF_LIST(), 1533 }; 1534 1535 const RISCVCPUMultiExtConfig riscv_cpu_vendor_exts[] = { 1536 MULTI_EXT_CFG_BOOL("xtheadba", ext_xtheadba, false), 1537 MULTI_EXT_CFG_BOOL("xtheadbb", ext_xtheadbb, false), 1538 MULTI_EXT_CFG_BOOL("xtheadbs", ext_xtheadbs, false), 1539 MULTI_EXT_CFG_BOOL("xtheadcmo", ext_xtheadcmo, false), 1540 MULTI_EXT_CFG_BOOL("xtheadcondmov", ext_xtheadcondmov, false), 1541 MULTI_EXT_CFG_BOOL("xtheadfmemidx", ext_xtheadfmemidx, false), 1542 MULTI_EXT_CFG_BOOL("xtheadfmv", ext_xtheadfmv, false), 1543 MULTI_EXT_CFG_BOOL("xtheadmac", ext_xtheadmac, false), 1544 MULTI_EXT_CFG_BOOL("xtheadmemidx", ext_xtheadmemidx, false), 1545 MULTI_EXT_CFG_BOOL("xtheadmempair", ext_xtheadmempair, false), 1546 MULTI_EXT_CFG_BOOL("xtheadsync", ext_xtheadsync, false), 1547 MULTI_EXT_CFG_BOOL("xventanacondops", ext_XVentanaCondOps, false), 1548 1549 DEFINE_PROP_END_OF_LIST(), 1550 }; 1551 1552 /* These are experimental so mark with 'x-' */ 1553 const RISCVCPUMultiExtConfig riscv_cpu_experimental_exts[] = { 1554 MULTI_EXT_CFG_BOOL("x-smaia", ext_smaia, false), 1555 MULTI_EXT_CFG_BOOL("x-ssaia", ext_ssaia, false), 1556 1557 MULTI_EXT_CFG_BOOL("x-zaamo", ext_zaamo, false), 1558 MULTI_EXT_CFG_BOOL("x-zalrsc", ext_zalrsc, false), 1559 1560 MULTI_EXT_CFG_BOOL("x-zvfh", ext_zvfh, false), 1561 MULTI_EXT_CFG_BOOL("x-zvfhmin", ext_zvfhmin, false), 1562 1563 MULTI_EXT_CFG_BOOL("x-zfbfmin", ext_zfbfmin, false), 1564 MULTI_EXT_CFG_BOOL("x-zvfbfmin", ext_zvfbfmin, false), 1565 MULTI_EXT_CFG_BOOL("x-zvfbfwma", ext_zvfbfwma, false), 1566 1567 DEFINE_PROP_END_OF_LIST(), 1568 }; 1569 1570 /* 1571 * 'Named features' is the name we give to extensions that we 1572 * don't want to expose to users. They are either immutable 1573 * (always enabled/disable) or they'll vary depending on 1574 * the resulting CPU state. They have riscv,isa strings 1575 * and priv_ver like regular extensions. 1576 */ 1577 const RISCVCPUMultiExtConfig riscv_cpu_named_features[] = { 1578 MULTI_EXT_CFG_BOOL("svade", ext_svade, true), 1579 MULTI_EXT_CFG_BOOL("zic64b", ext_zic64b, true), 1580 1581 DEFINE_PROP_END_OF_LIST(), 1582 }; 1583 1584 /* Deprecated entries marked for future removal */ 1585 const RISCVCPUMultiExtConfig riscv_cpu_deprecated_exts[] = { 1586 MULTI_EXT_CFG_BOOL("Zifencei", ext_zifencei, true), 1587 MULTI_EXT_CFG_BOOL("Zicsr", ext_zicsr, true), 1588 MULTI_EXT_CFG_BOOL("Zihintntl", ext_zihintntl, true), 1589 MULTI_EXT_CFG_BOOL("Zihintpause", ext_zihintpause, true), 1590 MULTI_EXT_CFG_BOOL("Zawrs", ext_zawrs, true), 1591 MULTI_EXT_CFG_BOOL("Zfa", ext_zfa, true), 1592 MULTI_EXT_CFG_BOOL("Zfh", ext_zfh, false), 1593 MULTI_EXT_CFG_BOOL("Zfhmin", ext_zfhmin, false), 1594 MULTI_EXT_CFG_BOOL("Zve32f", ext_zve32f, false), 1595 MULTI_EXT_CFG_BOOL("Zve64f", ext_zve64f, false), 1596 MULTI_EXT_CFG_BOOL("Zve64d", ext_zve64d, false), 1597 1598 DEFINE_PROP_END_OF_LIST(), 1599 }; 1600 1601 static void cpu_set_prop_err(RISCVCPU *cpu, const char *propname, 1602 Error **errp) 1603 { 1604 g_autofree char *cpuname = riscv_cpu_get_name(cpu); 1605 error_setg(errp, "CPU '%s' does not allow changing the value of '%s'", 1606 cpuname, propname); 1607 } 1608 1609 static void prop_pmu_num_set(Object *obj, Visitor *v, const char *name, 1610 void *opaque, Error **errp) 1611 { 1612 RISCVCPU *cpu = RISCV_CPU(obj); 1613 uint8_t pmu_num, curr_pmu_num; 1614 uint32_t pmu_mask; 1615 1616 visit_type_uint8(v, name, &pmu_num, errp); 1617 1618 curr_pmu_num = ctpop32(cpu->cfg.pmu_mask); 1619 1620 if (pmu_num != curr_pmu_num && riscv_cpu_is_vendor(obj)) { 1621 cpu_set_prop_err(cpu, name, errp); 1622 error_append_hint(errp, "Current '%s' val: %u\n", 1623 name, curr_pmu_num); 1624 return; 1625 } 1626 1627 if (pmu_num > (RV_MAX_MHPMCOUNTERS - 3)) { 1628 error_setg(errp, "Number of counters exceeds maximum available"); 1629 return; 1630 } 1631 1632 if (pmu_num == 0) { 1633 pmu_mask = 0; 1634 } else { 1635 pmu_mask = MAKE_64BIT_MASK(3, pmu_num); 1636 } 1637 1638 warn_report("\"pmu-num\" property is deprecated; use \"pmu-mask\""); 1639 cpu->cfg.pmu_mask = pmu_mask; 1640 cpu_option_add_user_setting("pmu-mask", pmu_mask); 1641 } 1642 1643 static void prop_pmu_num_get(Object *obj, Visitor *v, const char *name, 1644 void *opaque, Error **errp) 1645 { 1646 RISCVCPU *cpu = RISCV_CPU(obj); 1647 uint8_t pmu_num = ctpop32(cpu->cfg.pmu_mask); 1648 1649 visit_type_uint8(v, name, &pmu_num, errp); 1650 } 1651 1652 static const PropertyInfo prop_pmu_num = { 1653 .name = "pmu-num", 1654 .get = prop_pmu_num_get, 1655 .set = prop_pmu_num_set, 1656 }; 1657 1658 static void prop_pmu_mask_set(Object *obj, Visitor *v, const char *name, 1659 void *opaque, Error **errp) 1660 { 1661 RISCVCPU *cpu = RISCV_CPU(obj); 1662 uint32_t value; 1663 uint8_t pmu_num; 1664 1665 visit_type_uint32(v, name, &value, errp); 1666 1667 if (value != cpu->cfg.pmu_mask && riscv_cpu_is_vendor(obj)) { 1668 cpu_set_prop_err(cpu, name, errp); 1669 error_append_hint(errp, "Current '%s' val: %x\n", 1670 name, cpu->cfg.pmu_mask); 1671 return; 1672 } 1673 1674 pmu_num = ctpop32(value); 1675 1676 if (pmu_num > (RV_MAX_MHPMCOUNTERS - 3)) { 1677 error_setg(errp, "Number of counters exceeds maximum available"); 1678 return; 1679 } 1680 1681 cpu_option_add_user_setting(name, value); 1682 cpu->cfg.pmu_mask = value; 1683 } 1684 1685 static void prop_pmu_mask_get(Object *obj, Visitor *v, const char *name, 1686 void *opaque, Error **errp) 1687 { 1688 uint8_t pmu_mask = RISCV_CPU(obj)->cfg.pmu_mask; 1689 1690 visit_type_uint8(v, name, &pmu_mask, errp); 1691 } 1692 1693 static const PropertyInfo prop_pmu_mask = { 1694 .name = "pmu-mask", 1695 .get = prop_pmu_mask_get, 1696 .set = prop_pmu_mask_set, 1697 }; 1698 1699 static void prop_mmu_set(Object *obj, Visitor *v, const char *name, 1700 void *opaque, Error **errp) 1701 { 1702 RISCVCPU *cpu = RISCV_CPU(obj); 1703 bool value; 1704 1705 visit_type_bool(v, name, &value, errp); 1706 1707 if (cpu->cfg.mmu != value && riscv_cpu_is_vendor(obj)) { 1708 cpu_set_prop_err(cpu, "mmu", errp); 1709 return; 1710 } 1711 1712 cpu_option_add_user_setting(name, value); 1713 cpu->cfg.mmu = value; 1714 } 1715 1716 static void prop_mmu_get(Object *obj, Visitor *v, const char *name, 1717 void *opaque, Error **errp) 1718 { 1719 bool value = RISCV_CPU(obj)->cfg.mmu; 1720 1721 visit_type_bool(v, name, &value, errp); 1722 } 1723 1724 static const PropertyInfo prop_mmu = { 1725 .name = "mmu", 1726 .get = prop_mmu_get, 1727 .set = prop_mmu_set, 1728 }; 1729 1730 static void prop_pmp_set(Object *obj, Visitor *v, const char *name, 1731 void *opaque, Error **errp) 1732 { 1733 RISCVCPU *cpu = RISCV_CPU(obj); 1734 bool value; 1735 1736 visit_type_bool(v, name, &value, errp); 1737 1738 if (cpu->cfg.pmp != value && riscv_cpu_is_vendor(obj)) { 1739 cpu_set_prop_err(cpu, name, errp); 1740 return; 1741 } 1742 1743 cpu_option_add_user_setting(name, value); 1744 cpu->cfg.pmp = value; 1745 } 1746 1747 static void prop_pmp_get(Object *obj, Visitor *v, const char *name, 1748 void *opaque, Error **errp) 1749 { 1750 bool value = RISCV_CPU(obj)->cfg.pmp; 1751 1752 visit_type_bool(v, name, &value, errp); 1753 } 1754 1755 static const PropertyInfo prop_pmp = { 1756 .name = "pmp", 1757 .get = prop_pmp_get, 1758 .set = prop_pmp_set, 1759 }; 1760 1761 static int priv_spec_from_str(const char *priv_spec_str) 1762 { 1763 int priv_version = -1; 1764 1765 if (!g_strcmp0(priv_spec_str, PRIV_VER_1_12_0_STR)) { 1766 priv_version = PRIV_VERSION_1_12_0; 1767 } else if (!g_strcmp0(priv_spec_str, PRIV_VER_1_11_0_STR)) { 1768 priv_version = PRIV_VERSION_1_11_0; 1769 } else if (!g_strcmp0(priv_spec_str, PRIV_VER_1_10_0_STR)) { 1770 priv_version = PRIV_VERSION_1_10_0; 1771 } 1772 1773 return priv_version; 1774 } 1775 1776 static const char *priv_spec_to_str(int priv_version) 1777 { 1778 switch (priv_version) { 1779 case PRIV_VERSION_1_10_0: 1780 return PRIV_VER_1_10_0_STR; 1781 case PRIV_VERSION_1_11_0: 1782 return PRIV_VER_1_11_0_STR; 1783 case PRIV_VERSION_1_12_0: 1784 return PRIV_VER_1_12_0_STR; 1785 default: 1786 return NULL; 1787 } 1788 } 1789 1790 static void prop_priv_spec_set(Object *obj, Visitor *v, const char *name, 1791 void *opaque, Error **errp) 1792 { 1793 RISCVCPU *cpu = RISCV_CPU(obj); 1794 g_autofree char *value = NULL; 1795 int priv_version = -1; 1796 1797 visit_type_str(v, name, &value, errp); 1798 1799 priv_version = priv_spec_from_str(value); 1800 if (priv_version < 0) { 1801 error_setg(errp, "Unsupported privilege spec version '%s'", value); 1802 return; 1803 } 1804 1805 if (priv_version != cpu->env.priv_ver && riscv_cpu_is_vendor(obj)) { 1806 cpu_set_prop_err(cpu, name, errp); 1807 error_append_hint(errp, "Current '%s' val: %s\n", name, 1808 object_property_get_str(obj, name, NULL)); 1809 return; 1810 } 1811 1812 cpu_option_add_user_setting(name, priv_version); 1813 cpu->env.priv_ver = priv_version; 1814 } 1815 1816 static void prop_priv_spec_get(Object *obj, Visitor *v, const char *name, 1817 void *opaque, Error **errp) 1818 { 1819 RISCVCPU *cpu = RISCV_CPU(obj); 1820 const char *value = priv_spec_to_str(cpu->env.priv_ver); 1821 1822 visit_type_str(v, name, (char **)&value, errp); 1823 } 1824 1825 static const PropertyInfo prop_priv_spec = { 1826 .name = "priv_spec", 1827 .get = prop_priv_spec_get, 1828 .set = prop_priv_spec_set, 1829 }; 1830 1831 static void prop_vext_spec_set(Object *obj, Visitor *v, const char *name, 1832 void *opaque, Error **errp) 1833 { 1834 RISCVCPU *cpu = RISCV_CPU(obj); 1835 g_autofree char *value = NULL; 1836 1837 visit_type_str(v, name, &value, errp); 1838 1839 if (g_strcmp0(value, VEXT_VER_1_00_0_STR) != 0) { 1840 error_setg(errp, "Unsupported vector spec version '%s'", value); 1841 return; 1842 } 1843 1844 cpu_option_add_user_setting(name, VEXT_VERSION_1_00_0); 1845 cpu->env.vext_ver = VEXT_VERSION_1_00_0; 1846 } 1847 1848 static void prop_vext_spec_get(Object *obj, Visitor *v, const char *name, 1849 void *opaque, Error **errp) 1850 { 1851 const char *value = VEXT_VER_1_00_0_STR; 1852 1853 visit_type_str(v, name, (char **)&value, errp); 1854 } 1855 1856 static const PropertyInfo prop_vext_spec = { 1857 .name = "vext_spec", 1858 .get = prop_vext_spec_get, 1859 .set = prop_vext_spec_set, 1860 }; 1861 1862 static void prop_vlen_set(Object *obj, Visitor *v, const char *name, 1863 void *opaque, Error **errp) 1864 { 1865 RISCVCPU *cpu = RISCV_CPU(obj); 1866 uint16_t value; 1867 1868 if (!visit_type_uint16(v, name, &value, errp)) { 1869 return; 1870 } 1871 1872 if (!is_power_of_2(value)) { 1873 error_setg(errp, "Vector extension VLEN must be power of 2"); 1874 return; 1875 } 1876 1877 if (value != cpu->cfg.vlenb && riscv_cpu_is_vendor(obj)) { 1878 cpu_set_prop_err(cpu, name, errp); 1879 error_append_hint(errp, "Current '%s' val: %u\n", 1880 name, cpu->cfg.vlenb << 3); 1881 return; 1882 } 1883 1884 cpu_option_add_user_setting(name, value); 1885 cpu->cfg.vlenb = value >> 3; 1886 } 1887 1888 static void prop_vlen_get(Object *obj, Visitor *v, const char *name, 1889 void *opaque, Error **errp) 1890 { 1891 uint16_t value = RISCV_CPU(obj)->cfg.vlenb << 3; 1892 1893 visit_type_uint16(v, name, &value, errp); 1894 } 1895 1896 static const PropertyInfo prop_vlen = { 1897 .name = "vlen", 1898 .get = prop_vlen_get, 1899 .set = prop_vlen_set, 1900 }; 1901 1902 static void prop_elen_set(Object *obj, Visitor *v, const char *name, 1903 void *opaque, Error **errp) 1904 { 1905 RISCVCPU *cpu = RISCV_CPU(obj); 1906 uint16_t value; 1907 1908 if (!visit_type_uint16(v, name, &value, errp)) { 1909 return; 1910 } 1911 1912 if (!is_power_of_2(value)) { 1913 error_setg(errp, "Vector extension ELEN must be power of 2"); 1914 return; 1915 } 1916 1917 if (value != cpu->cfg.elen && riscv_cpu_is_vendor(obj)) { 1918 cpu_set_prop_err(cpu, name, errp); 1919 error_append_hint(errp, "Current '%s' val: %u\n", 1920 name, cpu->cfg.elen); 1921 return; 1922 } 1923 1924 cpu_option_add_user_setting(name, value); 1925 cpu->cfg.elen = value; 1926 } 1927 1928 static void prop_elen_get(Object *obj, Visitor *v, const char *name, 1929 void *opaque, Error **errp) 1930 { 1931 uint16_t value = RISCV_CPU(obj)->cfg.elen; 1932 1933 visit_type_uint16(v, name, &value, errp); 1934 } 1935 1936 static const PropertyInfo prop_elen = { 1937 .name = "elen", 1938 .get = prop_elen_get, 1939 .set = prop_elen_set, 1940 }; 1941 1942 static void prop_cbom_blksize_set(Object *obj, Visitor *v, const char *name, 1943 void *opaque, Error **errp) 1944 { 1945 RISCVCPU *cpu = RISCV_CPU(obj); 1946 uint16_t value; 1947 1948 if (!visit_type_uint16(v, name, &value, errp)) { 1949 return; 1950 } 1951 1952 if (value != cpu->cfg.cbom_blocksize && riscv_cpu_is_vendor(obj)) { 1953 cpu_set_prop_err(cpu, name, errp); 1954 error_append_hint(errp, "Current '%s' val: %u\n", 1955 name, cpu->cfg.cbom_blocksize); 1956 return; 1957 } 1958 1959 cpu_option_add_user_setting(name, value); 1960 cpu->cfg.cbom_blocksize = value; 1961 } 1962 1963 static void prop_cbom_blksize_get(Object *obj, Visitor *v, const char *name, 1964 void *opaque, Error **errp) 1965 { 1966 uint16_t value = RISCV_CPU(obj)->cfg.cbom_blocksize; 1967 1968 visit_type_uint16(v, name, &value, errp); 1969 } 1970 1971 static const PropertyInfo prop_cbom_blksize = { 1972 .name = "cbom_blocksize", 1973 .get = prop_cbom_blksize_get, 1974 .set = prop_cbom_blksize_set, 1975 }; 1976 1977 static void prop_cbop_blksize_set(Object *obj, Visitor *v, const char *name, 1978 void *opaque, Error **errp) 1979 { 1980 RISCVCPU *cpu = RISCV_CPU(obj); 1981 uint16_t value; 1982 1983 if (!visit_type_uint16(v, name, &value, errp)) { 1984 return; 1985 } 1986 1987 if (value != cpu->cfg.cbop_blocksize && riscv_cpu_is_vendor(obj)) { 1988 cpu_set_prop_err(cpu, name, errp); 1989 error_append_hint(errp, "Current '%s' val: %u\n", 1990 name, cpu->cfg.cbop_blocksize); 1991 return; 1992 } 1993 1994 cpu_option_add_user_setting(name, value); 1995 cpu->cfg.cbop_blocksize = value; 1996 } 1997 1998 static void prop_cbop_blksize_get(Object *obj, Visitor *v, const char *name, 1999 void *opaque, Error **errp) 2000 { 2001 uint16_t value = RISCV_CPU(obj)->cfg.cbop_blocksize; 2002 2003 visit_type_uint16(v, name, &value, errp); 2004 } 2005 2006 static const PropertyInfo prop_cbop_blksize = { 2007 .name = "cbop_blocksize", 2008 .get = prop_cbop_blksize_get, 2009 .set = prop_cbop_blksize_set, 2010 }; 2011 2012 static void prop_cboz_blksize_set(Object *obj, Visitor *v, const char *name, 2013 void *opaque, Error **errp) 2014 { 2015 RISCVCPU *cpu = RISCV_CPU(obj); 2016 uint16_t value; 2017 2018 if (!visit_type_uint16(v, name, &value, errp)) { 2019 return; 2020 } 2021 2022 if (value != cpu->cfg.cboz_blocksize && riscv_cpu_is_vendor(obj)) { 2023 cpu_set_prop_err(cpu, name, errp); 2024 error_append_hint(errp, "Current '%s' val: %u\n", 2025 name, cpu->cfg.cboz_blocksize); 2026 return; 2027 } 2028 2029 cpu_option_add_user_setting(name, value); 2030 cpu->cfg.cboz_blocksize = value; 2031 } 2032 2033 static void prop_cboz_blksize_get(Object *obj, Visitor *v, const char *name, 2034 void *opaque, Error **errp) 2035 { 2036 uint16_t value = RISCV_CPU(obj)->cfg.cboz_blocksize; 2037 2038 visit_type_uint16(v, name, &value, errp); 2039 } 2040 2041 static const PropertyInfo prop_cboz_blksize = { 2042 .name = "cboz_blocksize", 2043 .get = prop_cboz_blksize_get, 2044 .set = prop_cboz_blksize_set, 2045 }; 2046 2047 static void prop_mvendorid_set(Object *obj, Visitor *v, const char *name, 2048 void *opaque, Error **errp) 2049 { 2050 bool dynamic_cpu = riscv_cpu_is_dynamic(obj); 2051 RISCVCPU *cpu = RISCV_CPU(obj); 2052 uint32_t prev_val = cpu->cfg.mvendorid; 2053 uint32_t value; 2054 2055 if (!visit_type_uint32(v, name, &value, errp)) { 2056 return; 2057 } 2058 2059 if (!dynamic_cpu && prev_val != value) { 2060 error_setg(errp, "Unable to change %s mvendorid (0x%x)", 2061 object_get_typename(obj), prev_val); 2062 return; 2063 } 2064 2065 cpu->cfg.mvendorid = value; 2066 } 2067 2068 static void prop_mvendorid_get(Object *obj, Visitor *v, const char *name, 2069 void *opaque, Error **errp) 2070 { 2071 uint32_t value = RISCV_CPU(obj)->cfg.mvendorid; 2072 2073 visit_type_uint32(v, name, &value, errp); 2074 } 2075 2076 static const PropertyInfo prop_mvendorid = { 2077 .name = "mvendorid", 2078 .get = prop_mvendorid_get, 2079 .set = prop_mvendorid_set, 2080 }; 2081 2082 static void prop_mimpid_set(Object *obj, Visitor *v, const char *name, 2083 void *opaque, Error **errp) 2084 { 2085 bool dynamic_cpu = riscv_cpu_is_dynamic(obj); 2086 RISCVCPU *cpu = RISCV_CPU(obj); 2087 uint64_t prev_val = cpu->cfg.mimpid; 2088 uint64_t value; 2089 2090 if (!visit_type_uint64(v, name, &value, errp)) { 2091 return; 2092 } 2093 2094 if (!dynamic_cpu && prev_val != value) { 2095 error_setg(errp, "Unable to change %s mimpid (0x%" PRIu64 ")", 2096 object_get_typename(obj), prev_val); 2097 return; 2098 } 2099 2100 cpu->cfg.mimpid = value; 2101 } 2102 2103 static void prop_mimpid_get(Object *obj, Visitor *v, const char *name, 2104 void *opaque, Error **errp) 2105 { 2106 uint64_t value = RISCV_CPU(obj)->cfg.mimpid; 2107 2108 visit_type_uint64(v, name, &value, errp); 2109 } 2110 2111 static const PropertyInfo prop_mimpid = { 2112 .name = "mimpid", 2113 .get = prop_mimpid_get, 2114 .set = prop_mimpid_set, 2115 }; 2116 2117 static void prop_marchid_set(Object *obj, Visitor *v, const char *name, 2118 void *opaque, Error **errp) 2119 { 2120 bool dynamic_cpu = riscv_cpu_is_dynamic(obj); 2121 RISCVCPU *cpu = RISCV_CPU(obj); 2122 uint64_t prev_val = cpu->cfg.marchid; 2123 uint64_t value, invalid_val; 2124 uint32_t mxlen = 0; 2125 2126 if (!visit_type_uint64(v, name, &value, errp)) { 2127 return; 2128 } 2129 2130 if (!dynamic_cpu && prev_val != value) { 2131 error_setg(errp, "Unable to change %s marchid (0x%" PRIu64 ")", 2132 object_get_typename(obj), prev_val); 2133 return; 2134 } 2135 2136 switch (riscv_cpu_mxl(&cpu->env)) { 2137 case MXL_RV32: 2138 mxlen = 32; 2139 break; 2140 case MXL_RV64: 2141 case MXL_RV128: 2142 mxlen = 64; 2143 break; 2144 default: 2145 g_assert_not_reached(); 2146 } 2147 2148 invalid_val = 1LL << (mxlen - 1); 2149 2150 if (value == invalid_val) { 2151 error_setg(errp, "Unable to set marchid with MSB (%u) bit set " 2152 "and the remaining bits zero", mxlen); 2153 return; 2154 } 2155 2156 cpu->cfg.marchid = value; 2157 } 2158 2159 static void prop_marchid_get(Object *obj, Visitor *v, const char *name, 2160 void *opaque, Error **errp) 2161 { 2162 uint64_t value = RISCV_CPU(obj)->cfg.marchid; 2163 2164 visit_type_uint64(v, name, &value, errp); 2165 } 2166 2167 static const PropertyInfo prop_marchid = { 2168 .name = "marchid", 2169 .get = prop_marchid_get, 2170 .set = prop_marchid_set, 2171 }; 2172 2173 /* 2174 * RVA22U64 defines some 'named features' or 'synthetic extensions' 2175 * that are cache related: Za64rs, Zic64b, Ziccif, Ziccrse, Ziccamoa 2176 * and Zicclsm. We do not implement caching in QEMU so we'll consider 2177 * all these named features as always enabled. 2178 * 2179 * There's no riscv,isa update for them (nor for zic64b, despite it 2180 * having a cfg offset) at this moment. 2181 */ 2182 static RISCVCPUProfile RVA22U64 = { 2183 .parent = NULL, 2184 .name = "rva22u64", 2185 .misa_ext = RVI | RVM | RVA | RVF | RVD | RVC | RVU, 2186 .priv_spec = RISCV_PROFILE_ATTR_UNUSED, 2187 .satp_mode = RISCV_PROFILE_ATTR_UNUSED, 2188 .ext_offsets = { 2189 CPU_CFG_OFFSET(ext_zicsr), CPU_CFG_OFFSET(ext_zihintpause), 2190 CPU_CFG_OFFSET(ext_zba), CPU_CFG_OFFSET(ext_zbb), 2191 CPU_CFG_OFFSET(ext_zbs), CPU_CFG_OFFSET(ext_zfhmin), 2192 CPU_CFG_OFFSET(ext_zkt), CPU_CFG_OFFSET(ext_zicntr), 2193 CPU_CFG_OFFSET(ext_zihpm), CPU_CFG_OFFSET(ext_zicbom), 2194 CPU_CFG_OFFSET(ext_zicbop), CPU_CFG_OFFSET(ext_zicboz), 2195 2196 /* mandatory named features for this profile */ 2197 CPU_CFG_OFFSET(ext_zic64b), 2198 2199 RISCV_PROFILE_EXT_LIST_END 2200 } 2201 }; 2202 2203 /* 2204 * As with RVA22U64, RVA22S64 also defines 'named features'. 2205 * 2206 * Cache related features that we consider enabled since we don't 2207 * implement cache: Ssccptr 2208 * 2209 * Other named features that we already implement: Sstvecd, Sstvala, 2210 * Sscounterenw 2211 * 2212 * Named features that we need to enable: svade 2213 * 2214 * The remaining features/extensions comes from RVA22U64. 2215 */ 2216 static RISCVCPUProfile RVA22S64 = { 2217 .parent = &RVA22U64, 2218 .name = "rva22s64", 2219 .misa_ext = RVS, 2220 .priv_spec = PRIV_VERSION_1_12_0, 2221 .satp_mode = VM_1_10_SV39, 2222 .ext_offsets = { 2223 /* rva22s64 exts */ 2224 CPU_CFG_OFFSET(ext_zifencei), CPU_CFG_OFFSET(ext_svpbmt), 2225 CPU_CFG_OFFSET(ext_svinval), 2226 2227 /* rva22s64 named features */ 2228 CPU_CFG_OFFSET(ext_svade), 2229 2230 RISCV_PROFILE_EXT_LIST_END 2231 } 2232 }; 2233 2234 RISCVCPUProfile *riscv_profiles[] = { 2235 &RVA22U64, 2236 &RVA22S64, 2237 NULL, 2238 }; 2239 2240 static Property riscv_cpu_properties[] = { 2241 DEFINE_PROP_BOOL("debug", RISCVCPU, cfg.debug, true), 2242 2243 {.name = "pmu-mask", .info = &prop_pmu_mask}, 2244 {.name = "pmu-num", .info = &prop_pmu_num}, /* Deprecated */ 2245 2246 {.name = "mmu", .info = &prop_mmu}, 2247 {.name = "pmp", .info = &prop_pmp}, 2248 2249 {.name = "priv_spec", .info = &prop_priv_spec}, 2250 {.name = "vext_spec", .info = &prop_vext_spec}, 2251 2252 {.name = "vlen", .info = &prop_vlen}, 2253 {.name = "elen", .info = &prop_elen}, 2254 2255 {.name = "cbom_blocksize", .info = &prop_cbom_blksize}, 2256 {.name = "cbop_blocksize", .info = &prop_cbop_blksize}, 2257 {.name = "cboz_blocksize", .info = &prop_cboz_blksize}, 2258 2259 {.name = "mvendorid", .info = &prop_mvendorid}, 2260 {.name = "mimpid", .info = &prop_mimpid}, 2261 {.name = "marchid", .info = &prop_marchid}, 2262 2263 #ifndef CONFIG_USER_ONLY 2264 DEFINE_PROP_UINT64("resetvec", RISCVCPU, env.resetvec, DEFAULT_RSTVEC), 2265 #endif 2266 2267 DEFINE_PROP_BOOL("short-isa-string", RISCVCPU, cfg.short_isa_string, false), 2268 2269 DEFINE_PROP_BOOL("rvv_ta_all_1s", RISCVCPU, cfg.rvv_ta_all_1s, false), 2270 DEFINE_PROP_BOOL("rvv_ma_all_1s", RISCVCPU, cfg.rvv_ma_all_1s, false), 2271 2272 /* 2273 * write_misa() is marked as experimental for now so mark 2274 * it with -x and default to 'false'. 2275 */ 2276 DEFINE_PROP_BOOL("x-misa-w", RISCVCPU, cfg.misa_w, false), 2277 DEFINE_PROP_END_OF_LIST(), 2278 }; 2279 2280 #if defined(TARGET_RISCV64) 2281 static void rva22u64_profile_cpu_init(Object *obj) 2282 { 2283 rv64i_bare_cpu_init(obj); 2284 2285 RVA22U64.enabled = true; 2286 } 2287 2288 static void rva22s64_profile_cpu_init(Object *obj) 2289 { 2290 rv64i_bare_cpu_init(obj); 2291 2292 RVA22S64.enabled = true; 2293 } 2294 #endif 2295 2296 static const gchar *riscv_gdb_arch_name(CPUState *cs) 2297 { 2298 RISCVCPU *cpu = RISCV_CPU(cs); 2299 CPURISCVState *env = &cpu->env; 2300 2301 switch (riscv_cpu_mxl(env)) { 2302 case MXL_RV32: 2303 return "riscv:rv32"; 2304 case MXL_RV64: 2305 case MXL_RV128: 2306 return "riscv:rv64"; 2307 default: 2308 g_assert_not_reached(); 2309 } 2310 } 2311 2312 #ifndef CONFIG_USER_ONLY 2313 static int64_t riscv_get_arch_id(CPUState *cs) 2314 { 2315 RISCVCPU *cpu = RISCV_CPU(cs); 2316 2317 return cpu->env.mhartid; 2318 } 2319 2320 #include "hw/core/sysemu-cpu-ops.h" 2321 2322 static const struct SysemuCPUOps riscv_sysemu_ops = { 2323 .get_phys_page_debug = riscv_cpu_get_phys_page_debug, 2324 .write_elf64_note = riscv_cpu_write_elf64_note, 2325 .write_elf32_note = riscv_cpu_write_elf32_note, 2326 .legacy_vmsd = &vmstate_riscv_cpu, 2327 }; 2328 #endif 2329 2330 static void riscv_cpu_common_class_init(ObjectClass *c, void *data) 2331 { 2332 RISCVCPUClass *mcc = RISCV_CPU_CLASS(c); 2333 CPUClass *cc = CPU_CLASS(c); 2334 DeviceClass *dc = DEVICE_CLASS(c); 2335 ResettableClass *rc = RESETTABLE_CLASS(c); 2336 2337 device_class_set_parent_realize(dc, riscv_cpu_realize, 2338 &mcc->parent_realize); 2339 2340 resettable_class_set_parent_phases(rc, NULL, riscv_cpu_reset_hold, NULL, 2341 &mcc->parent_phases); 2342 2343 cc->class_by_name = riscv_cpu_class_by_name; 2344 cc->has_work = riscv_cpu_has_work; 2345 cc->mmu_index = riscv_cpu_mmu_index; 2346 cc->dump_state = riscv_cpu_dump_state; 2347 cc->set_pc = riscv_cpu_set_pc; 2348 cc->get_pc = riscv_cpu_get_pc; 2349 cc->gdb_read_register = riscv_cpu_gdb_read_register; 2350 cc->gdb_write_register = riscv_cpu_gdb_write_register; 2351 cc->gdb_stop_before_watchpoint = true; 2352 cc->disas_set_info = riscv_cpu_disas_set_info; 2353 #ifndef CONFIG_USER_ONLY 2354 cc->sysemu_ops = &riscv_sysemu_ops; 2355 cc->get_arch_id = riscv_get_arch_id; 2356 #endif 2357 cc->gdb_arch_name = riscv_gdb_arch_name; 2358 2359 device_class_set_props(dc, riscv_cpu_properties); 2360 } 2361 2362 static void riscv_cpu_class_init(ObjectClass *c, void *data) 2363 { 2364 RISCVCPUClass *mcc = RISCV_CPU_CLASS(c); 2365 2366 mcc->misa_mxl_max = (uint32_t)(uintptr_t)data; 2367 riscv_cpu_validate_misa_mxl(mcc); 2368 } 2369 2370 static void riscv_isa_string_ext(RISCVCPU *cpu, char **isa_str, 2371 int max_str_len) 2372 { 2373 const RISCVIsaExtData *edata; 2374 char *old = *isa_str; 2375 char *new = *isa_str; 2376 2377 for (edata = isa_edata_arr; edata && edata->name; edata++) { 2378 if (isa_ext_is_enabled(cpu, edata->ext_enable_offset)) { 2379 new = g_strconcat(old, "_", edata->name, NULL); 2380 g_free(old); 2381 old = new; 2382 } 2383 } 2384 2385 *isa_str = new; 2386 } 2387 2388 char *riscv_isa_string(RISCVCPU *cpu) 2389 { 2390 RISCVCPUClass *mcc = RISCV_CPU_GET_CLASS(cpu); 2391 int i; 2392 const size_t maxlen = sizeof("rv128") + sizeof(riscv_single_letter_exts); 2393 char *isa_str = g_new(char, maxlen); 2394 int xlen = riscv_cpu_max_xlen(mcc); 2395 char *p = isa_str + snprintf(isa_str, maxlen, "rv%d", xlen); 2396 2397 for (i = 0; i < sizeof(riscv_single_letter_exts) - 1; i++) { 2398 if (cpu->env.misa_ext & RV(riscv_single_letter_exts[i])) { 2399 *p++ = qemu_tolower(riscv_single_letter_exts[i]); 2400 } 2401 } 2402 *p = '\0'; 2403 if (!cpu->cfg.short_isa_string) { 2404 riscv_isa_string_ext(cpu, &isa_str, maxlen); 2405 } 2406 return isa_str; 2407 } 2408 2409 #ifndef CONFIG_USER_ONLY 2410 static char **riscv_isa_extensions_list(RISCVCPU *cpu, int *count) 2411 { 2412 int maxlen = ARRAY_SIZE(riscv_single_letter_exts) + ARRAY_SIZE(isa_edata_arr); 2413 char **extensions = g_new(char *, maxlen); 2414 2415 for (int i = 0; i < sizeof(riscv_single_letter_exts) - 1; i++) { 2416 if (cpu->env.misa_ext & RV(riscv_single_letter_exts[i])) { 2417 extensions[*count] = g_new(char, 2); 2418 snprintf(extensions[*count], 2, "%c", 2419 qemu_tolower(riscv_single_letter_exts[i])); 2420 (*count)++; 2421 } 2422 } 2423 2424 for (const RISCVIsaExtData *edata = isa_edata_arr; edata->name; edata++) { 2425 if (isa_ext_is_enabled(cpu, edata->ext_enable_offset)) { 2426 extensions[*count] = g_strdup(edata->name); 2427 (*count)++; 2428 } 2429 } 2430 2431 return extensions; 2432 } 2433 2434 void riscv_isa_write_fdt(RISCVCPU *cpu, void *fdt, char *nodename) 2435 { 2436 RISCVCPUClass *mcc = RISCV_CPU_GET_CLASS(cpu); 2437 const size_t maxlen = sizeof("rv128i"); 2438 g_autofree char *isa_base = g_new(char, maxlen); 2439 g_autofree char *riscv_isa; 2440 char **isa_extensions; 2441 int count = 0; 2442 int xlen = riscv_cpu_max_xlen(mcc); 2443 2444 riscv_isa = riscv_isa_string(cpu); 2445 qemu_fdt_setprop_string(fdt, nodename, "riscv,isa", riscv_isa); 2446 2447 snprintf(isa_base, maxlen, "rv%di", xlen); 2448 qemu_fdt_setprop_string(fdt, nodename, "riscv,isa-base", isa_base); 2449 2450 isa_extensions = riscv_isa_extensions_list(cpu, &count); 2451 qemu_fdt_setprop_string_array(fdt, nodename, "riscv,isa-extensions", 2452 isa_extensions, count); 2453 2454 for (int i = 0; i < count; i++) { 2455 g_free(isa_extensions[i]); 2456 } 2457 2458 g_free(isa_extensions); 2459 } 2460 #endif 2461 2462 #define DEFINE_CPU(type_name, misa_mxl_max, initfn) \ 2463 { \ 2464 .name = (type_name), \ 2465 .parent = TYPE_RISCV_CPU, \ 2466 .instance_init = (initfn), \ 2467 .class_init = riscv_cpu_class_init, \ 2468 .class_data = (void *)(misa_mxl_max) \ 2469 } 2470 2471 #define DEFINE_DYNAMIC_CPU(type_name, misa_mxl_max, initfn) \ 2472 { \ 2473 .name = (type_name), \ 2474 .parent = TYPE_RISCV_DYNAMIC_CPU, \ 2475 .instance_init = (initfn), \ 2476 .class_init = riscv_cpu_class_init, \ 2477 .class_data = (void *)(misa_mxl_max) \ 2478 } 2479 2480 #define DEFINE_VENDOR_CPU(type_name, misa_mxl_max, initfn) \ 2481 { \ 2482 .name = (type_name), \ 2483 .parent = TYPE_RISCV_VENDOR_CPU, \ 2484 .instance_init = (initfn), \ 2485 .class_init = riscv_cpu_class_init, \ 2486 .class_data = (void *)(misa_mxl_max) \ 2487 } 2488 2489 #define DEFINE_BARE_CPU(type_name, misa_mxl_max, initfn) \ 2490 { \ 2491 .name = (type_name), \ 2492 .parent = TYPE_RISCV_BARE_CPU, \ 2493 .instance_init = (initfn), \ 2494 .class_init = riscv_cpu_class_init, \ 2495 .class_data = (void *)(misa_mxl_max) \ 2496 } 2497 2498 #define DEFINE_PROFILE_CPU(type_name, misa_mxl_max, initfn) \ 2499 { \ 2500 .name = (type_name), \ 2501 .parent = TYPE_RISCV_BARE_CPU, \ 2502 .instance_init = (initfn), \ 2503 .class_init = riscv_cpu_class_init, \ 2504 .class_data = (void *)(misa_mxl_max) \ 2505 } 2506 2507 static const TypeInfo riscv_cpu_type_infos[] = { 2508 { 2509 .name = TYPE_RISCV_CPU, 2510 .parent = TYPE_CPU, 2511 .instance_size = sizeof(RISCVCPU), 2512 .instance_align = __alignof(RISCVCPU), 2513 .instance_init = riscv_cpu_init, 2514 .instance_post_init = riscv_cpu_post_init, 2515 .abstract = true, 2516 .class_size = sizeof(RISCVCPUClass), 2517 .class_init = riscv_cpu_common_class_init, 2518 }, 2519 { 2520 .name = TYPE_RISCV_DYNAMIC_CPU, 2521 .parent = TYPE_RISCV_CPU, 2522 .abstract = true, 2523 }, 2524 { 2525 .name = TYPE_RISCV_VENDOR_CPU, 2526 .parent = TYPE_RISCV_CPU, 2527 .abstract = true, 2528 }, 2529 { 2530 .name = TYPE_RISCV_BARE_CPU, 2531 .parent = TYPE_RISCV_CPU, 2532 .instance_init = riscv_bare_cpu_init, 2533 .abstract = true, 2534 }, 2535 #if defined(TARGET_RISCV32) 2536 DEFINE_DYNAMIC_CPU(TYPE_RISCV_CPU_ANY, MXL_RV32, riscv_any_cpu_init), 2537 DEFINE_DYNAMIC_CPU(TYPE_RISCV_CPU_MAX, MXL_RV32, riscv_max_cpu_init), 2538 DEFINE_DYNAMIC_CPU(TYPE_RISCV_CPU_BASE32, MXL_RV32, rv32_base_cpu_init), 2539 DEFINE_VENDOR_CPU(TYPE_RISCV_CPU_IBEX, MXL_RV32, rv32_ibex_cpu_init), 2540 DEFINE_VENDOR_CPU(TYPE_RISCV_CPU_SIFIVE_E31, MXL_RV32, rv32_sifive_e_cpu_init), 2541 DEFINE_VENDOR_CPU(TYPE_RISCV_CPU_SIFIVE_E34, MXL_RV32, rv32_imafcu_nommu_cpu_init), 2542 DEFINE_VENDOR_CPU(TYPE_RISCV_CPU_SIFIVE_U34, MXL_RV32, rv32_sifive_u_cpu_init), 2543 DEFINE_BARE_CPU(TYPE_RISCV_CPU_RV32I, MXL_RV32, rv32i_bare_cpu_init), 2544 DEFINE_BARE_CPU(TYPE_RISCV_CPU_RV32E, MXL_RV32, rv32e_bare_cpu_init), 2545 #elif defined(TARGET_RISCV64) 2546 DEFINE_DYNAMIC_CPU(TYPE_RISCV_CPU_ANY, MXL_RV64, riscv_any_cpu_init), 2547 DEFINE_DYNAMIC_CPU(TYPE_RISCV_CPU_MAX, MXL_RV64, riscv_max_cpu_init), 2548 DEFINE_DYNAMIC_CPU(TYPE_RISCV_CPU_BASE64, MXL_RV64, rv64_base_cpu_init), 2549 DEFINE_VENDOR_CPU(TYPE_RISCV_CPU_SIFIVE_E51, MXL_RV64, rv64_sifive_e_cpu_init), 2550 DEFINE_VENDOR_CPU(TYPE_RISCV_CPU_SIFIVE_U54, MXL_RV64, rv64_sifive_u_cpu_init), 2551 DEFINE_VENDOR_CPU(TYPE_RISCV_CPU_SHAKTI_C, MXL_RV64, rv64_sifive_u_cpu_init), 2552 DEFINE_VENDOR_CPU(TYPE_RISCV_CPU_THEAD_C906, MXL_RV64, rv64_thead_c906_cpu_init), 2553 DEFINE_VENDOR_CPU(TYPE_RISCV_CPU_VEYRON_V1, MXL_RV64, rv64_veyron_v1_cpu_init), 2554 DEFINE_DYNAMIC_CPU(TYPE_RISCV_CPU_BASE128, MXL_RV128, rv128_base_cpu_init), 2555 DEFINE_BARE_CPU(TYPE_RISCV_CPU_RV64I, MXL_RV64, rv64i_bare_cpu_init), 2556 DEFINE_BARE_CPU(TYPE_RISCV_CPU_RV64E, MXL_RV64, rv64e_bare_cpu_init), 2557 DEFINE_PROFILE_CPU(TYPE_RISCV_CPU_RVA22U64, MXL_RV64, rva22u64_profile_cpu_init), 2558 DEFINE_PROFILE_CPU(TYPE_RISCV_CPU_RVA22S64, MXL_RV64, rva22s64_profile_cpu_init), 2559 #endif 2560 }; 2561 2562 DEFINE_TYPES(riscv_cpu_type_infos) 2563