/*
 * QEMU RISC-V CPU
 *
 * Copyright (c) 2016-2017 Sagar Karandikar, sagark@eecs.berkeley.edu
 * Copyright (c) 2017-2018 SiFive, Inc.
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms and conditions of the GNU General Public License,
 * version 2 or later, as published by the Free Software Foundation.
 *
 * This program is distributed in the hope it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
 * more details.
 *
 * You should have received a copy of the GNU General Public License along with
 * this program. If not, see <http://www.gnu.org/licenses/>.
 */

#include "qemu/osdep.h"
#include "qemu/qemu-print.h"
#include "qemu/ctype.h"
#include "qemu/log.h"
#include "cpu.h"
#include "cpu_vendorid.h"
#include "internals.h"
#include "exec/exec-all.h"
#include "qapi/error.h"
#include "qapi/visitor.h"
#include "qemu/error-report.h"
#include "hw/qdev-properties.h"
#include "hw/core/qdev-prop-internal.h"
#include "migration/vmstate.h"
#include "fpu/softfloat-helpers.h"
#include "system/device_tree.h"
#include "system/kvm.h"
#include "system/tcg.h"
#include "kvm/kvm_riscv.h"
#include "tcg/tcg-cpu.h"
#include "tcg/tcg.h"

/* RISC-V CPU definitions */
static const char riscv_single_letter_exts[] = "IEMAFDQCBPVH";
const uint32_t misa_bits[] = {RVI, RVE, RVM, RVA, RVF, RVD, RVV,
                              RVC, RVS, RVU, RVH, RVG, RVB, 0};

/*
 * From vector_helper.c
 * Note that vector data is stored in host-endian 64-bit chunks,
 * so addressing bytes needs a host-endian fixup.
 */
#if HOST_BIG_ENDIAN
#define BYTE(x) ((x) ^ 7)
#else
#define BYTE(x) (x)
#endif

bool riscv_cpu_is_32bit(RISCVCPU *cpu)
{
    return riscv_cpu_mxl(&cpu->env) == MXL_RV32;
}

/* Hash that stores general user set numeric options */
static GHashTable *general_user_opts;

static void cpu_option_add_user_setting(const char *optname, uint32_t value)
{
    g_hash_table_insert(general_user_opts, (gpointer)optname,
                        GUINT_TO_POINTER(value));
}

bool riscv_cpu_option_set(const char *optname)
{
    return g_hash_table_contains(general_user_opts, optname);
}

#define ISA_EXT_DATA_ENTRY(_name, _min_ver, _prop) \
    {#_name, _min_ver, CPU_CFG_OFFSET(_prop)}

/*
 * Here are the ordering rules of extension naming defined by the RISC-V
 * specification:
 * 1. All extensions should be separated from other multi-letter extensions
 *    by an underscore.
 * 2. The first letter following the 'Z' conventionally indicates the most
 *    closely related alphabetical extension category, IMAFDQLCBKJTPVH.
 *    If multiple 'Z' extensions are named, they should be ordered first
 *    by category, then alphabetically within a category.
 * 3. Standard supervisor-level extensions (starting with 'S') should be
 *    listed after standard unprivileged extensions. If multiple
 *    supervisor-level extensions are listed, they should be ordered
 *    alphabetically.
 * 4. Non-standard extensions (starting with 'X') must be listed after all
 *    standard extensions. They must be separated from other multi-letter
 *    extensions by an underscore.
 *
 * Single letter extensions are checked in riscv_cpu_validate_misa_priv()
 * instead.
 */
const RISCVIsaExtData isa_edata_arr[] = {
    ISA_EXT_DATA_ENTRY(zic64b, PRIV_VERSION_1_12_0, ext_zic64b),
    ISA_EXT_DATA_ENTRY(zicbom, PRIV_VERSION_1_12_0, ext_zicbom),
    ISA_EXT_DATA_ENTRY(zicbop, PRIV_VERSION_1_12_0, ext_zicbop),
    ISA_EXT_DATA_ENTRY(zicboz, PRIV_VERSION_1_12_0, ext_zicboz),
    ISA_EXT_DATA_ENTRY(ziccamoa, PRIV_VERSION_1_11_0, has_priv_1_11),
    ISA_EXT_DATA_ENTRY(ziccif, PRIV_VERSION_1_11_0, has_priv_1_11),
    ISA_EXT_DATA_ENTRY(zicclsm, PRIV_VERSION_1_11_0, has_priv_1_11),
    ISA_EXT_DATA_ENTRY(ziccrse, PRIV_VERSION_1_11_0, has_priv_1_11),
    ISA_EXT_DATA_ENTRY(zicfilp, PRIV_VERSION_1_12_0, ext_zicfilp),
    ISA_EXT_DATA_ENTRY(zicfiss, PRIV_VERSION_1_13_0, ext_zicfiss),
    ISA_EXT_DATA_ENTRY(zicond, PRIV_VERSION_1_12_0, ext_zicond),
    ISA_EXT_DATA_ENTRY(zicntr, PRIV_VERSION_1_12_0, ext_zicntr),
    ISA_EXT_DATA_ENTRY(zicsr, PRIV_VERSION_1_10_0, ext_zicsr),
    ISA_EXT_DATA_ENTRY(zifencei, PRIV_VERSION_1_10_0, ext_zifencei),
    ISA_EXT_DATA_ENTRY(zihintntl, PRIV_VERSION_1_10_0, ext_zihintntl),
    ISA_EXT_DATA_ENTRY(zihintpause, PRIV_VERSION_1_10_0, ext_zihintpause),
    ISA_EXT_DATA_ENTRY(zihpm, PRIV_VERSION_1_12_0, ext_zihpm),
    ISA_EXT_DATA_ENTRY(zimop, PRIV_VERSION_1_13_0, ext_zimop),
    ISA_EXT_DATA_ENTRY(zmmul, PRIV_VERSION_1_12_0, ext_zmmul),
    ISA_EXT_DATA_ENTRY(za64rs, PRIV_VERSION_1_12_0, has_priv_1_12),
    ISA_EXT_DATA_ENTRY(zaamo, PRIV_VERSION_1_12_0, ext_zaamo),
    ISA_EXT_DATA_ENTRY(zabha, PRIV_VERSION_1_13_0, ext_zabha),
    ISA_EXT_DATA_ENTRY(zacas, PRIV_VERSION_1_12_0, ext_zacas),
    ISA_EXT_DATA_ENTRY(zama16b, PRIV_VERSION_1_13_0, ext_zama16b),
    ISA_EXT_DATA_ENTRY(zalrsc, PRIV_VERSION_1_12_0, ext_zalrsc),
    ISA_EXT_DATA_ENTRY(zawrs, PRIV_VERSION_1_12_0, ext_zawrs),
    ISA_EXT_DATA_ENTRY(zfa, PRIV_VERSION_1_12_0, ext_zfa),
    ISA_EXT_DATA_ENTRY(zfbfmin, PRIV_VERSION_1_12_0, ext_zfbfmin),
    ISA_EXT_DATA_ENTRY(zfh, PRIV_VERSION_1_11_0, ext_zfh),
    ISA_EXT_DATA_ENTRY(zfhmin, PRIV_VERSION_1_11_0, ext_zfhmin),
    ISA_EXT_DATA_ENTRY(zfinx, PRIV_VERSION_1_12_0, ext_zfinx),
    ISA_EXT_DATA_ENTRY(zdinx, PRIV_VERSION_1_12_0, ext_zdinx),
    ISA_EXT_DATA_ENTRY(zca, PRIV_VERSION_1_12_0, ext_zca),
    ISA_EXT_DATA_ENTRY(zcb, PRIV_VERSION_1_12_0, ext_zcb),
    ISA_EXT_DATA_ENTRY(zcf, PRIV_VERSION_1_12_0, ext_zcf),
    ISA_EXT_DATA_ENTRY(zcd, PRIV_VERSION_1_12_0, ext_zcd),
    ISA_EXT_DATA_ENTRY(zce, PRIV_VERSION_1_12_0, ext_zce),
    ISA_EXT_DATA_ENTRY(zcmop, PRIV_VERSION_1_13_0, ext_zcmop),
    ISA_EXT_DATA_ENTRY(zcmp, PRIV_VERSION_1_12_0, ext_zcmp),
    ISA_EXT_DATA_ENTRY(zcmt, PRIV_VERSION_1_12_0, ext_zcmt),
    ISA_EXT_DATA_ENTRY(zba, PRIV_VERSION_1_12_0, ext_zba),
    ISA_EXT_DATA_ENTRY(zbb, PRIV_VERSION_1_12_0, ext_zbb),
    ISA_EXT_DATA_ENTRY(zbc, PRIV_VERSION_1_12_0, ext_zbc),
    ISA_EXT_DATA_ENTRY(zbkb, PRIV_VERSION_1_12_0, ext_zbkb),
    ISA_EXT_DATA_ENTRY(zbkc, PRIV_VERSION_1_12_0, ext_zbkc),
    ISA_EXT_DATA_ENTRY(zbkx, PRIV_VERSION_1_12_0, ext_zbkx),
    ISA_EXT_DATA_ENTRY(zbs, PRIV_VERSION_1_12_0, ext_zbs),
    ISA_EXT_DATA_ENTRY(zk, PRIV_VERSION_1_12_0, ext_zk),
    ISA_EXT_DATA_ENTRY(zkn, PRIV_VERSION_1_12_0, ext_zkn),
    ISA_EXT_DATA_ENTRY(zknd, PRIV_VERSION_1_12_0, ext_zknd),
    ISA_EXT_DATA_ENTRY(zkne, PRIV_VERSION_1_12_0, ext_zkne),
    ISA_EXT_DATA_ENTRY(zknh, PRIV_VERSION_1_12_0, ext_zknh),
    ISA_EXT_DATA_ENTRY(zkr, PRIV_VERSION_1_12_0, ext_zkr),
    ISA_EXT_DATA_ENTRY(zks, PRIV_VERSION_1_12_0, ext_zks),
    ISA_EXT_DATA_ENTRY(zksed, PRIV_VERSION_1_12_0, ext_zksed),
    ISA_EXT_DATA_ENTRY(zksh, PRIV_VERSION_1_12_0, ext_zksh),
    ISA_EXT_DATA_ENTRY(zkt, PRIV_VERSION_1_12_0, ext_zkt),
    ISA_EXT_DATA_ENTRY(ztso, PRIV_VERSION_1_12_0, ext_ztso),
    ISA_EXT_DATA_ENTRY(zvbb, PRIV_VERSION_1_12_0, ext_zvbb),
    ISA_EXT_DATA_ENTRY(zvbc, PRIV_VERSION_1_12_0, ext_zvbc),
    ISA_EXT_DATA_ENTRY(zve32f, PRIV_VERSION_1_10_0, ext_zve32f),
    ISA_EXT_DATA_ENTRY(zve32x, PRIV_VERSION_1_10_0, ext_zve32x),
    ISA_EXT_DATA_ENTRY(zve64f, PRIV_VERSION_1_10_0, ext_zve64f),
    ISA_EXT_DATA_ENTRY(zve64d, PRIV_VERSION_1_10_0, ext_zve64d),
    ISA_EXT_DATA_ENTRY(zve64x, PRIV_VERSION_1_10_0, ext_zve64x),
    ISA_EXT_DATA_ENTRY(zvfbfmin, PRIV_VERSION_1_12_0, ext_zvfbfmin),
    ISA_EXT_DATA_ENTRY(zvfbfwma, PRIV_VERSION_1_12_0, ext_zvfbfwma),
    ISA_EXT_DATA_ENTRY(zvfh, PRIV_VERSION_1_12_0, ext_zvfh),
    ISA_EXT_DATA_ENTRY(zvfhmin, PRIV_VERSION_1_12_0, ext_zvfhmin),
    ISA_EXT_DATA_ENTRY(zvkb, PRIV_VERSION_1_12_0, ext_zvkb),
    ISA_EXT_DATA_ENTRY(zvkg, PRIV_VERSION_1_12_0, ext_zvkg),
    ISA_EXT_DATA_ENTRY(zvkn, PRIV_VERSION_1_12_0, ext_zvkn),
    ISA_EXT_DATA_ENTRY(zvknc, PRIV_VERSION_1_12_0, ext_zvknc),
    ISA_EXT_DATA_ENTRY(zvkned, PRIV_VERSION_1_12_0, ext_zvkned),
    ISA_EXT_DATA_ENTRY(zvkng, PRIV_VERSION_1_12_0, ext_zvkng),
    ISA_EXT_DATA_ENTRY(zvknha, PRIV_VERSION_1_12_0, ext_zvknha),
    ISA_EXT_DATA_ENTRY(zvknhb, PRIV_VERSION_1_12_0, ext_zvknhb),
    ISA_EXT_DATA_ENTRY(zvks, PRIV_VERSION_1_12_0, ext_zvks),
    ISA_EXT_DATA_ENTRY(zvksc, PRIV_VERSION_1_12_0, ext_zvksc),
    ISA_EXT_DATA_ENTRY(zvksed, PRIV_VERSION_1_12_0, ext_zvksed),
    ISA_EXT_DATA_ENTRY(zvksg, PRIV_VERSION_1_12_0, ext_zvksg),
    ISA_EXT_DATA_ENTRY(zvksh, PRIV_VERSION_1_12_0, ext_zvksh),
    ISA_EXT_DATA_ENTRY(zvkt, PRIV_VERSION_1_12_0, ext_zvkt),
    ISA_EXT_DATA_ENTRY(zhinx, PRIV_VERSION_1_12_0, ext_zhinx),
    ISA_EXT_DATA_ENTRY(zhinxmin, PRIV_VERSION_1_12_0, ext_zhinxmin),
    ISA_EXT_DATA_ENTRY(shcounterenw, PRIV_VERSION_1_12_0, has_priv_1_12),
    ISA_EXT_DATA_ENTRY(sha, PRIV_VERSION_1_12_0, ext_sha),
    ISA_EXT_DATA_ENTRY(shgatpa, PRIV_VERSION_1_12_0, has_priv_1_12),
    ISA_EXT_DATA_ENTRY(shtvala, PRIV_VERSION_1_12_0, has_priv_1_12),
    ISA_EXT_DATA_ENTRY(shvsatpa, PRIV_VERSION_1_12_0, has_priv_1_12),
    ISA_EXT_DATA_ENTRY(shvstvala, PRIV_VERSION_1_12_0, has_priv_1_12),
    ISA_EXT_DATA_ENTRY(shvstvecd, PRIV_VERSION_1_12_0, has_priv_1_12),
    ISA_EXT_DATA_ENTRY(smaia, PRIV_VERSION_1_12_0, ext_smaia),
    ISA_EXT_DATA_ENTRY(smcdeleg, PRIV_VERSION_1_13_0, ext_smcdeleg),
    ISA_EXT_DATA_ENTRY(smcntrpmf, PRIV_VERSION_1_12_0, ext_smcntrpmf),
    ISA_EXT_DATA_ENTRY(smcsrind, PRIV_VERSION_1_13_0, ext_smcsrind),
    ISA_EXT_DATA_ENTRY(smdbltrp, PRIV_VERSION_1_13_0, ext_smdbltrp),
    ISA_EXT_DATA_ENTRY(smepmp, PRIV_VERSION_1_12_0, ext_smepmp),
    ISA_EXT_DATA_ENTRY(smrnmi, PRIV_VERSION_1_12_0, ext_smrnmi),
    ISA_EXT_DATA_ENTRY(smmpm, PRIV_VERSION_1_13_0, ext_smmpm),
    ISA_EXT_DATA_ENTRY(smnpm, PRIV_VERSION_1_13_0, ext_smnpm),
    ISA_EXT_DATA_ENTRY(smstateen, PRIV_VERSION_1_12_0, ext_smstateen),
    ISA_EXT_DATA_ENTRY(ssaia, PRIV_VERSION_1_12_0, ext_ssaia),
    ISA_EXT_DATA_ENTRY(ssccfg, PRIV_VERSION_1_13_0, ext_ssccfg),
    ISA_EXT_DATA_ENTRY(ssccptr, PRIV_VERSION_1_11_0, has_priv_1_11),
    ISA_EXT_DATA_ENTRY(sscofpmf, PRIV_VERSION_1_12_0, ext_sscofpmf),
    ISA_EXT_DATA_ENTRY(sscounterenw, PRIV_VERSION_1_12_0, has_priv_1_12),
    ISA_EXT_DATA_ENTRY(sscsrind, PRIV_VERSION_1_12_0, ext_sscsrind),
    ISA_EXT_DATA_ENTRY(ssdbltrp, PRIV_VERSION_1_13_0, ext_ssdbltrp),
    ISA_EXT_DATA_ENTRY(ssnpm, PRIV_VERSION_1_13_0, ext_ssnpm),
    ISA_EXT_DATA_ENTRY(sspm, PRIV_VERSION_1_13_0, ext_sspm),
    ISA_EXT_DATA_ENTRY(ssstateen, PRIV_VERSION_1_12_0, ext_ssstateen),
    ISA_EXT_DATA_ENTRY(sstc, PRIV_VERSION_1_12_0, ext_sstc),
    ISA_EXT_DATA_ENTRY(sstvala, PRIV_VERSION_1_12_0, has_priv_1_12),
    ISA_EXT_DATA_ENTRY(sstvecd, PRIV_VERSION_1_12_0, has_priv_1_12),
    ISA_EXT_DATA_ENTRY(supm, PRIV_VERSION_1_13_0, ext_supm),
    ISA_EXT_DATA_ENTRY(svade, PRIV_VERSION_1_11_0, ext_svade),
    ISA_EXT_DATA_ENTRY(svadu, PRIV_VERSION_1_12_0, ext_svadu),
    ISA_EXT_DATA_ENTRY(svinval, PRIV_VERSION_1_12_0, ext_svinval),
    ISA_EXT_DATA_ENTRY(svnapot, PRIV_VERSION_1_12_0, ext_svnapot),
    ISA_EXT_DATA_ENTRY(svpbmt, PRIV_VERSION_1_12_0, ext_svpbmt),
    ISA_EXT_DATA_ENTRY(svukte, PRIV_VERSION_1_13_0, ext_svukte),
    ISA_EXT_DATA_ENTRY(svvptc, PRIV_VERSION_1_13_0, ext_svvptc),
    ISA_EXT_DATA_ENTRY(xtheadba, PRIV_VERSION_1_11_0, ext_xtheadba),
    ISA_EXT_DATA_ENTRY(xtheadbb, PRIV_VERSION_1_11_0, ext_xtheadbb),
    ISA_EXT_DATA_ENTRY(xtheadbs, PRIV_VERSION_1_11_0, ext_xtheadbs),
    ISA_EXT_DATA_ENTRY(xtheadcmo, PRIV_VERSION_1_11_0, ext_xtheadcmo),
    ISA_EXT_DATA_ENTRY(xtheadcondmov, PRIV_VERSION_1_11_0, ext_xtheadcondmov),
    ISA_EXT_DATA_ENTRY(xtheadfmemidx, PRIV_VERSION_1_11_0, ext_xtheadfmemidx),
    ISA_EXT_DATA_ENTRY(xtheadfmv, PRIV_VERSION_1_11_0, ext_xtheadfmv),
    ISA_EXT_DATA_ENTRY(xtheadmac, PRIV_VERSION_1_11_0, ext_xtheadmac),
    ISA_EXT_DATA_ENTRY(xtheadmemidx, PRIV_VERSION_1_11_0, ext_xtheadmemidx),
    ISA_EXT_DATA_ENTRY(xtheadmempair, PRIV_VERSION_1_11_0, ext_xtheadmempair),
    ISA_EXT_DATA_ENTRY(xtheadsync, PRIV_VERSION_1_11_0, ext_xtheadsync),
    ISA_EXT_DATA_ENTRY(xventanacondops, PRIV_VERSION_1_12_0, ext_XVentanaCondOps),

    { },
};

bool isa_ext_is_enabled(RISCVCPU *cpu, uint32_t ext_offset)
{
    bool *ext_enabled = (void *)&cpu->cfg + ext_offset;

    return *ext_enabled;
}

void isa_ext_update_enabled(RISCVCPU *cpu, uint32_t ext_offset, bool en)
{
    bool *ext_enabled = (void *)&cpu->cfg + ext_offset;

    *ext_enabled = en;
}

bool riscv_cpu_is_vendor(Object *cpu_obj)
{
    return object_dynamic_cast(cpu_obj, TYPE_RISCV_VENDOR_CPU) != NULL;
}

const char * const riscv_int_regnames[] = {
    "x0/zero", "x1/ra", "x2/sp", "x3/gp", "x4/tp", "x5/t0", "x6/t1",
    "x7/t2", "x8/s0", "x9/s1", "x10/a0", "x11/a1", "x12/a2", "x13/a3",
    "x14/a4", "x15/a5", "x16/a6", "x17/a7", "x18/s2", "x19/s3", "x20/s4",
    "x21/s5", "x22/s6", "x23/s7", "x24/s8", "x25/s9", "x26/s10", "x27/s11",
    "x28/t3", "x29/t4", "x30/t5", "x31/t6"
};

const char * const riscv_int_regnamesh[] = {
    "x0h/zeroh", "x1h/rah", "x2h/sph", "x3h/gph", "x4h/tph", "x5h/t0h",
    "x6h/t1h", "x7h/t2h", "x8h/s0h", "x9h/s1h", "x10h/a0h", "x11h/a1h",
    "x12h/a2h", "x13h/a3h", "x14h/a4h", "x15h/a5h", "x16h/a6h", "x17h/a7h",
    "x18h/s2h", "x19h/s3h", "x20h/s4h", "x21h/s5h", "x22h/s6h", "x23h/s7h",
    "x24h/s8h", "x25h/s9h", "x26h/s10h", "x27h/s11h", "x28h/t3h", "x29h/t4h",
    "x30h/t5h", "x31h/t6h"
};

const char * const riscv_fpr_regnames[] = {
    "f0/ft0", "f1/ft1", "f2/ft2", "f3/ft3", "f4/ft4", "f5/ft5",
    "f6/ft6", "f7/ft7", "f8/fs0", "f9/fs1", "f10/fa0", "f11/fa1",
    "f12/fa2", "f13/fa3", "f14/fa4", "f15/fa5", "f16/fa6", "f17/fa7",
    "f18/fs2", "f19/fs3", "f20/fs4", "f21/fs5", "f22/fs6", "f23/fs7",
    "f24/fs8", "f25/fs9", "f26/fs10", "f27/fs11", "f28/ft8", "f29/ft9",
    "f30/ft10", "f31/ft11"
};

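/*
 * Note on the register-name tables: riscv_int_regnames and
 * riscv_fpr_regnames above, and riscv_rvv_regnames below, are indexed by
 * register number and are consumed by riscv_cpu_dump_state() further down
 * in this file when printing the CPU state. (Descriptive comment only; no
 * functional change.)
 */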
const char * const riscv_rvv_regnames[] = {
    "v0", "v1", "v2", "v3", "v4", "v5", "v6",
    "v7", "v8", "v9", "v10", "v11", "v12", "v13",
    "v14", "v15", "v16", "v17", "v18", "v19", "v20",
    "v21", "v22", "v23", "v24", "v25", "v26", "v27",
    "v28", "v29", "v30", "v31"
};

static const char * const riscv_excp_names[] = {
    "misaligned_fetch",
    "fault_fetch",
    "illegal_instruction",
    "breakpoint",
    "misaligned_load",
    "fault_load",
    "misaligned_store",
    "fault_store",
    "user_ecall",
    "supervisor_ecall",
    "hypervisor_ecall",
    "machine_ecall",
    "exec_page_fault",
    "load_page_fault",
    "reserved",
    "store_page_fault",
    "double_trap",
    "reserved",
    "reserved",
    "reserved",
    "guest_exec_page_fault",
    "guest_load_page_fault",
    "reserved",
    "guest_store_page_fault",
};

static const char * const riscv_intr_names[] = {
    "u_software",
    "s_software",
    "vs_software",
    "m_software",
    "u_timer",
    "s_timer",
    "vs_timer",
    "m_timer",
    "u_external",
    "s_external",
    "vs_external",
    "m_external",
    "reserved",
    "reserved",
    "reserved",
    "reserved"
};

const char *riscv_cpu_get_trap_name(target_ulong cause, bool async)
{
    if (async) {
        return (cause < ARRAY_SIZE(riscv_intr_names)) ?
               riscv_intr_names[cause] : "(unknown)";
    } else {
        return (cause < ARRAY_SIZE(riscv_excp_names)) ?
               riscv_excp_names[cause] : "(unknown)";
    }
}

void riscv_cpu_set_misa_ext(CPURISCVState *env, uint32_t ext)
{
    env->misa_ext_mask = env->misa_ext = ext;
}

int riscv_cpu_max_xlen(RISCVCPUClass *mcc)
{
    return 16 << mcc->misa_mxl_max;
}

#ifndef CONFIG_USER_ONLY
static uint8_t satp_mode_from_str(const char *satp_mode_str)
{
    if (!strncmp(satp_mode_str, "mbare", 5)) {
        return VM_1_10_MBARE;
    }

    if (!strncmp(satp_mode_str, "sv32", 4)) {
        return VM_1_10_SV32;
    }

    if (!strncmp(satp_mode_str, "sv39", 4)) {
        return VM_1_10_SV39;
    }

    if (!strncmp(satp_mode_str, "sv48", 4)) {
        return VM_1_10_SV48;
    }

    if (!strncmp(satp_mode_str, "sv57", 4)) {
        return VM_1_10_SV57;
    }

    if (!strncmp(satp_mode_str, "sv64", 4)) {
        return VM_1_10_SV64;
    }

    g_assert_not_reached();
}

uint8_t satp_mode_max_from_map(uint32_t map)
{
    /*
     * 'map = 0' will make us return (31 - 32), which C will
     * happily overflow to UINT_MAX. There's no good result to
     * return if 'map = 0' (e.g. returning 0 will be ambiguous
     * with the result for 'map = 1').
     *
     * Assert out if map = 0. Callers will have to deal with
     * it outside of this function.
     */
    g_assert(map > 0);

    /* map here has at least one bit set, so no problem with clz */
    return 31 - __builtin_clz(map);
}

const char *satp_mode_str(uint8_t satp_mode, bool is_32_bit)
{
    if (is_32_bit) {
        switch (satp_mode) {
        case VM_1_10_SV32:
            return "sv32";
        case VM_1_10_MBARE:
            return "none";
        }
    } else {
        switch (satp_mode) {
        case VM_1_10_SV64:
            return "sv64";
        case VM_1_10_SV57:
            return "sv57";
        case VM_1_10_SV48:
            return "sv48";
        case VM_1_10_SV39:
            return "sv39";
        case VM_1_10_MBARE:
            return "none";
        }
    }

    g_assert_not_reached();
}

static void set_satp_mode_max_supported(RISCVCPU *cpu,
                                        uint8_t satp_mode)
{
    bool rv32 = riscv_cpu_mxl(&cpu->env) == MXL_RV32;
    const bool *valid_vm = rv32 ? valid_vm_1_10_32 : valid_vm_1_10_64;

    for (int i = 0; i <= satp_mode; ++i) {
        if (valid_vm[i]) {
            cpu->cfg.satp_mode.supported |= (1 << i);
        }
    }
}

/* Set the satp mode to the max supported */
static void set_satp_mode_default_map(RISCVCPU *cpu)
{
    /*
     * Bare CPUs do not default to the max available.
     * Users must set a valid satp_mode in the command
     * line.
     */
    if (object_dynamic_cast(OBJECT(cpu), TYPE_RISCV_BARE_CPU) != NULL) {
        warn_report("No satp mode set. Defaulting to 'bare'");
        cpu->cfg.satp_mode.map = (1 << VM_1_10_MBARE);
        return;
    }

    cpu->cfg.satp_mode.map = cpu->cfg.satp_mode.supported;
}
#endif

static void riscv_max_cpu_init(Object *obj)
{
    RISCVCPU *cpu = RISCV_CPU(obj);
    CPURISCVState *env = &cpu->env;

    cpu->cfg.mmu = true;
    cpu->cfg.pmp = true;

    env->priv_ver = PRIV_VERSION_LATEST;
#ifndef CONFIG_USER_ONLY
    set_satp_mode_max_supported(RISCV_CPU(obj),
        riscv_cpu_mxl(&RISCV_CPU(obj)->env) == MXL_RV32 ?
        VM_1_10_SV32 : VM_1_10_SV57);
#endif
}

#if defined(TARGET_RISCV64)
static void rv64_base_cpu_init(Object *obj)
{
    RISCVCPU *cpu = RISCV_CPU(obj);
    CPURISCVState *env = &cpu->env;

    cpu->cfg.mmu = true;
    cpu->cfg.pmp = true;

    /* Set latest version of privileged specification */
    env->priv_ver = PRIV_VERSION_LATEST;
#ifndef CONFIG_USER_ONLY
    set_satp_mode_max_supported(RISCV_CPU(obj), VM_1_10_SV57);
#endif
}

static void rv64_sifive_u_cpu_init(Object *obj)
{
    RISCVCPU *cpu = RISCV_CPU(obj);
    CPURISCVState *env = &cpu->env;
    riscv_cpu_set_misa_ext(env, RVI | RVM | RVA | RVF | RVD | RVC | RVS | RVU);
    env->priv_ver = PRIV_VERSION_1_10_0;
#ifndef CONFIG_USER_ONLY
    set_satp_mode_max_supported(RISCV_CPU(obj), VM_1_10_SV39);
#endif

    /* inherited from parent obj via riscv_cpu_init() */
    cpu->cfg.ext_zifencei = true;
    cpu->cfg.ext_zicsr = true;
    cpu->cfg.mmu = true;
    cpu->cfg.pmp = true;
}

static void rv64_sifive_e_cpu_init(Object *obj)
{
    CPURISCVState *env = &RISCV_CPU(obj)->env;
    RISCVCPU *cpu = RISCV_CPU(obj);

    riscv_cpu_set_misa_ext(env, RVI | RVM | RVA | RVC | RVU);
    env->priv_ver = PRIV_VERSION_1_10_0;
#ifndef CONFIG_USER_ONLY
    set_satp_mode_max_supported(cpu, VM_1_10_MBARE);
#endif

    /* inherited from parent obj via riscv_cpu_init() */
    cpu->cfg.ext_zifencei = true;
    cpu->cfg.ext_zicsr = true;
    cpu->cfg.pmp = true;
}

static void rv64_thead_c906_cpu_init(Object *obj)
{
    CPURISCVState *env = &RISCV_CPU(obj)->env;
    RISCVCPU *cpu = RISCV_CPU(obj);

    riscv_cpu_set_misa_ext(env, RVG | RVC | RVS | RVU);
    env->priv_ver = PRIV_VERSION_1_11_0;

    cpu->cfg.ext_zfa = true;
    cpu->cfg.ext_zfh = true;
    cpu->cfg.mmu = true;
    cpu->cfg.ext_xtheadba = true;
    cpu->cfg.ext_xtheadbb = true;
    cpu->cfg.ext_xtheadbs = true;
    cpu->cfg.ext_xtheadcmo = true;
    cpu->cfg.ext_xtheadcondmov = true;
    cpu->cfg.ext_xtheadfmemidx = true;
    cpu->cfg.ext_xtheadmac = true;
    cpu->cfg.ext_xtheadmemidx = true;
    cpu->cfg.ext_xtheadmempair = true;
    cpu->cfg.ext_xtheadsync = true;

    cpu->cfg.mvendorid = THEAD_VENDOR_ID;
#ifndef CONFIG_USER_ONLY
    set_satp_mode_max_supported(cpu, VM_1_10_SV39);
    th_register_custom_csrs(cpu);
#endif

    /* inherited from parent obj via riscv_cpu_init() */
    cpu->cfg.pmp = true;
}

static void rv64_veyron_v1_cpu_init(Object *obj)
{
    CPURISCVState *env = &RISCV_CPU(obj)->env;
    RISCVCPU *cpu = RISCV_CPU(obj);

    riscv_cpu_set_misa_ext(env, RVG | RVC | RVS | RVU | RVH);
    env->priv_ver = PRIV_VERSION_1_12_0;

    /* Enable ISA extensions */
    cpu->cfg.mmu = true;
    cpu->cfg.ext_zifencei = true;
    cpu->cfg.ext_zicsr = true;
    cpu->cfg.pmp = true;
    cpu->cfg.ext_zicbom = true;
    cpu->cfg.cbom_blocksize = 64;
    cpu->cfg.cboz_blocksize = 64;
    cpu->cfg.ext_zicboz = true;
    cpu->cfg.ext_smaia = true;
    cpu->cfg.ext_ssaia = true;
    cpu->cfg.ext_sscofpmf = true;
    cpu->cfg.ext_sstc = true;
    cpu->cfg.ext_svinval = true;
    cpu->cfg.ext_svnapot = true;
    cpu->cfg.ext_svpbmt = true;
    cpu->cfg.ext_smstateen = true;
    cpu->cfg.ext_zba = true;
    cpu->cfg.ext_zbb = true;
    cpu->cfg.ext_zbc = true;
    cpu->cfg.ext_zbs = true;
    cpu->cfg.ext_XVentanaCondOps = true;

    cpu->cfg.mvendorid = VEYRON_V1_MVENDORID;
    cpu->cfg.marchid = VEYRON_V1_MARCHID;
    cpu->cfg.mimpid = VEYRON_V1_MIMPID;

#ifndef CONFIG_USER_ONLY
    set_satp_mode_max_supported(cpu, VM_1_10_SV48);
#endif
}

/* Tenstorrent Ascalon */
static void rv64_tt_ascalon_cpu_init(Object *obj)
{
    CPURISCVState *env = &RISCV_CPU(obj)->env;
    RISCVCPU *cpu = RISCV_CPU(obj);

    riscv_cpu_set_misa_ext(env, RVG | RVC | RVS | RVU | RVH | RVV);
    env->priv_ver = PRIV_VERSION_1_13_0;

    /* Enable ISA extensions */
    cpu->cfg.mmu = true;
    cpu->cfg.vlenb = 256 >> 3;
    cpu->cfg.elen = 64;
    cpu->env.vext_ver = VEXT_VERSION_1_00_0;
    cpu->cfg.rvv_ma_all_1s = true;
    cpu->cfg.rvv_ta_all_1s = true;
    cpu->cfg.misa_w = true;
    cpu->cfg.pmp = true;
    cpu->cfg.cbom_blocksize = 64;
    cpu->cfg.cbop_blocksize = 64;
    cpu->cfg.cboz_blocksize = 64;
    cpu->cfg.ext_zic64b = true;
    cpu->cfg.ext_zicbom = true;
    cpu->cfg.ext_zicbop = true;
    cpu->cfg.ext_zicboz = true;
    cpu->cfg.ext_zicntr = true;
    cpu->cfg.ext_zicond = true;
    cpu->cfg.ext_zicsr = true;
    cpu->cfg.ext_zifencei = true;
    cpu->cfg.ext_zihintntl = true;
    cpu->cfg.ext_zihintpause = true;
    cpu->cfg.ext_zihpm = true;
    cpu->cfg.ext_zimop = true;
    cpu->cfg.ext_zawrs = true;
    cpu->cfg.ext_zfa = true;
    cpu->cfg.ext_zfbfmin = true;
    cpu->cfg.ext_zfh = true;
    cpu->cfg.ext_zfhmin = true;
    cpu->cfg.ext_zcb = true;
    cpu->cfg.ext_zcmop = true;
    cpu->cfg.ext_zba = true;
    cpu->cfg.ext_zbb = true;
    cpu->cfg.ext_zbs = true;
    cpu->cfg.ext_zkt = true;
    cpu->cfg.ext_zvbb = true;
    cpu->cfg.ext_zvbc = true;
    cpu->cfg.ext_zvfbfmin = true;
    cpu->cfg.ext_zvfbfwma = true;
    cpu->cfg.ext_zvfh = true;
    cpu->cfg.ext_zvfhmin = true;
    cpu->cfg.ext_zvkng = true;
    cpu->cfg.ext_smaia = true;
    cpu->cfg.ext_smstateen = true;
    cpu->cfg.ext_ssaia = true;
    cpu->cfg.ext_sscofpmf = true;
    cpu->cfg.ext_sstc = true;
    cpu->cfg.ext_svade = true;
    cpu->cfg.ext_svinval = true;
    cpu->cfg.ext_svnapot = true;
    cpu->cfg.ext_svpbmt = true;

#ifndef CONFIG_USER_ONLY
    set_satp_mode_max_supported(cpu, VM_1_10_SV57);
#endif
}

static void rv64_xiangshan_nanhu_cpu_init(Object *obj)
{
    CPURISCVState *env = &RISCV_CPU(obj)->env;
    RISCVCPU *cpu = RISCV_CPU(obj);

    riscv_cpu_set_misa_ext(env, RVG | RVC | RVB | RVS | RVU);
    env->priv_ver = PRIV_VERSION_1_12_0;

    /* Enable ISA extensions */
    cpu->cfg.ext_zbc = true;
    cpu->cfg.ext_zbkb = true;
    cpu->cfg.ext_zbkc = true;
    cpu->cfg.ext_zbkx = true;
    cpu->cfg.ext_zknd = true;
    cpu->cfg.ext_zkne = true;
    cpu->cfg.ext_zknh = true;
    cpu->cfg.ext_zksed = true;
    cpu->cfg.ext_zksh = true;
    cpu->cfg.ext_svinval = true;

    cpu->cfg.mmu = true;
    cpu->cfg.pmp = true;

#ifndef CONFIG_USER_ONLY
    set_satp_mode_max_supported(cpu, VM_1_10_SV39);
#endif
}

#ifdef CONFIG_TCG
static void rv128_base_cpu_init(Object *obj)
{
    RISCVCPU *cpu = RISCV_CPU(obj);
    CPURISCVState *env = &cpu->env;

    cpu->cfg.mmu = true;
    cpu->cfg.pmp = true;

    /* Set latest version of privileged specification */
    env->priv_ver = PRIV_VERSION_LATEST;
#ifndef CONFIG_USER_ONLY
    set_satp_mode_max_supported(RISCV_CPU(obj), VM_1_10_SV57);
#endif
}
#endif /* CONFIG_TCG */

static void rv64i_bare_cpu_init(Object *obj)
{
    CPURISCVState *env = &RISCV_CPU(obj)->env;
    riscv_cpu_set_misa_ext(env, RVI);
}

static void rv64e_bare_cpu_init(Object *obj)
{
    CPURISCVState *env = &RISCV_CPU(obj)->env;
    riscv_cpu_set_misa_ext(env, RVE);
}

#endif /* !TARGET_RISCV64 */

#if defined(TARGET_RISCV32) || \
    (defined(TARGET_RISCV64) && !defined(CONFIG_USER_ONLY))

static void rv32_base_cpu_init(Object *obj)
{
    RISCVCPU *cpu = RISCV_CPU(obj);
    CPURISCVState *env = &cpu->env;

    cpu->cfg.mmu = true;
    cpu->cfg.pmp = true;

    /* Set latest version of privileged specification */
    env->priv_ver = PRIV_VERSION_LATEST;
#ifndef CONFIG_USER_ONLY
    set_satp_mode_max_supported(RISCV_CPU(obj), VM_1_10_SV32);
#endif
}

static void rv32_sifive_u_cpu_init(Object *obj)
{
    RISCVCPU *cpu = RISCV_CPU(obj);
    CPURISCVState *env = &cpu->env;
    riscv_cpu_set_misa_ext(env, RVI | RVM | RVA | RVF | RVD | RVC | RVS | RVU);
    env->priv_ver = PRIV_VERSION_1_10_0;
#ifndef CONFIG_USER_ONLY
    set_satp_mode_max_supported(RISCV_CPU(obj), VM_1_10_SV32);
#endif

    /* inherited from parent obj via riscv_cpu_init() */
    cpu->cfg.ext_zifencei = true;
    cpu->cfg.ext_zicsr = true;
    cpu->cfg.mmu = true;
    cpu->cfg.pmp = true;
}

static void rv32_sifive_e_cpu_init(Object *obj)
{
    CPURISCVState *env = &RISCV_CPU(obj)->env;
    RISCVCPU *cpu = RISCV_CPU(obj);

    riscv_cpu_set_misa_ext(env, RVI | RVM | RVA | RVC | RVU);
    env->priv_ver = PRIV_VERSION_1_10_0;
#ifndef CONFIG_USER_ONLY
    set_satp_mode_max_supported(cpu, VM_1_10_MBARE);
#endif

    /* inherited from parent obj via riscv_cpu_init() */
    cpu->cfg.ext_zifencei = true;
    cpu->cfg.ext_zicsr = true;
    cpu->cfg.pmp = true;
}

static void rv32_ibex_cpu_init(Object *obj)
{
    CPURISCVState *env = &RISCV_CPU(obj)->env;
    RISCVCPU *cpu = RISCV_CPU(obj);

    riscv_cpu_set_misa_ext(env, RVI | RVM | RVC | RVU);
    env->priv_ver = PRIV_VERSION_1_12_0;
#ifndef CONFIG_USER_ONLY
    set_satp_mode_max_supported(cpu, VM_1_10_MBARE);
#endif
    /* inherited from parent obj via riscv_cpu_init() */
    cpu->cfg.ext_zifencei = true;
    cpu->cfg.ext_zicsr = true;
    cpu->cfg.pmp = true;
    cpu->cfg.ext_smepmp = true;

    cpu->cfg.ext_zba = true;
    cpu->cfg.ext_zbb = true;
    cpu->cfg.ext_zbc = true;
    cpu->cfg.ext_zbs = true;
}

static void rv32_imafcu_nommu_cpu_init(Object *obj)
{
    CPURISCVState *env = &RISCV_CPU(obj)->env;
    RISCVCPU *cpu = RISCV_CPU(obj);

    riscv_cpu_set_misa_ext(env, RVI | RVM | RVA | RVF | RVC | RVU);
    env->priv_ver = PRIV_VERSION_1_10_0;
#ifndef CONFIG_USER_ONLY
    set_satp_mode_max_supported(cpu, VM_1_10_MBARE);
#endif

    /* inherited from parent obj via riscv_cpu_init() */
    cpu->cfg.ext_zifencei = true;
    cpu->cfg.ext_zicsr = true;
    cpu->cfg.pmp = true;
}

static void rv32i_bare_cpu_init(Object *obj)
{
    CPURISCVState *env = &RISCV_CPU(obj)->env;
    riscv_cpu_set_misa_ext(env, RVI);
}

static void rv32e_bare_cpu_init(Object *obj)
{
    CPURISCVState *env = &RISCV_CPU(obj)->env;
    riscv_cpu_set_misa_ext(env, RVE);
}
#endif

static ObjectClass *riscv_cpu_class_by_name(const char *cpu_model)
{
    ObjectClass *oc;
    char *typename;
    char **cpuname;

    cpuname = g_strsplit(cpu_model, ",", 1);
    typename = g_strdup_printf(RISCV_CPU_TYPE_NAME("%s"), cpuname[0]);
    oc = object_class_by_name(typename);
    g_strfreev(cpuname);
    g_free(typename);

    return oc;
}

char *riscv_cpu_get_name(RISCVCPU *cpu)
{
    RISCVCPUClass *rcc = RISCV_CPU_GET_CLASS(cpu);
    const char *typename = object_class_get_name(OBJECT_CLASS(rcc));

    g_assert(g_str_has_suffix(typename, RISCV_CPU_TYPE_SUFFIX));

    return cpu_model_from_type(typename);
}

static void riscv_cpu_dump_state(CPUState *cs, FILE *f, int flags)
{
    RISCVCPU *cpu = RISCV_CPU(cs);
    CPURISCVState *env = &cpu->env;
    int i, j;
    uint8_t *p;

#if !defined(CONFIG_USER_ONLY)
    if (riscv_has_ext(env, RVH)) {
        qemu_fprintf(f, " %s %d\n", "V = ", env->virt_enabled);
    }
#endif
    qemu_fprintf(f, " %s " TARGET_FMT_lx "\n", "pc ", env->pc);
#ifndef CONFIG_USER_ONLY
    {
        static const int dump_csrs[] = {
            CSR_MHARTID,
            CSR_MSTATUS,
            CSR_MSTATUSH,
            /*
             * CSR_SSTATUS is intentionally omitted here as its value
             * can be figured out by looking at CSR_MSTATUS
             */
            CSR_HSTATUS,
            CSR_VSSTATUS,
            CSR_MIP,
            CSR_MIE,
            CSR_MIDELEG,
            CSR_HIDELEG,
            CSR_MEDELEG,
            CSR_HEDELEG,
            CSR_MTVEC,
            CSR_STVEC,
            CSR_VSTVEC,
            CSR_MEPC,
            CSR_SEPC,
            CSR_VSEPC,
            CSR_MCAUSE,
            CSR_SCAUSE,
            CSR_VSCAUSE,
            CSR_MTVAL,
            CSR_STVAL,
            CSR_HTVAL,
            CSR_MTVAL2,
            CSR_MSCRATCH,
            CSR_SSCRATCH,
            CSR_SATP,
        };

        for (i = 0; i < ARRAY_SIZE(dump_csrs); ++i) {
            int csrno = dump_csrs[i];
            target_ulong val = 0;
            RISCVException res = riscv_csrrw_debug(env, csrno, &val, 0, 0);

            /*
             * Rely on the smode, hmode, etc, predicates within csr.c
             * to do the filtering of the registers that are present.
             */
            if (res == RISCV_EXCP_NONE) {
                qemu_fprintf(f, " %-8s " TARGET_FMT_lx "\n",
                             csr_ops[csrno].name, val);
            }
        }
    }
#endif

    for (i = 0; i < 32; i++) {
        qemu_fprintf(f, " %-8s " TARGET_FMT_lx,
                     riscv_int_regnames[i], env->gpr[i]);
        if ((i & 3) == 3) {
            qemu_fprintf(f, "\n");
        }
    }
    if (flags & CPU_DUMP_FPU) {
        target_ulong val = 0;
        RISCVException res = riscv_csrrw_debug(env, CSR_FCSR, &val, 0, 0);
        if (res == RISCV_EXCP_NONE) {
            qemu_fprintf(f, " %-8s " TARGET_FMT_lx "\n",
                         csr_ops[CSR_FCSR].name, val);
        }
        for (i = 0; i < 32; i++) {
            qemu_fprintf(f, " %-8s %016" PRIx64,
                         riscv_fpr_regnames[i], env->fpr[i]);
            if ((i & 3) == 3) {
                qemu_fprintf(f, "\n");
            }
        }
    }
    if (riscv_has_ext(env, RVV) && (flags & CPU_DUMP_VPU)) {
        static const int dump_rvv_csrs[] = {
            CSR_VSTART,
            CSR_VXSAT,
            CSR_VXRM,
            CSR_VCSR,
            CSR_VL,
            CSR_VTYPE,
            CSR_VLENB,
        };
        for (i = 0; i < ARRAY_SIZE(dump_rvv_csrs); ++i) {
            int csrno = dump_rvv_csrs[i];
            target_ulong val = 0;
            RISCVException res = riscv_csrrw_debug(env, csrno, &val, 0, 0);

            /*
             * Rely on the smode, hmode, etc, predicates within csr.c
             * to do the filtering of the registers that are present.
             */
            if (res == RISCV_EXCP_NONE) {
                qemu_fprintf(f, " %-8s " TARGET_FMT_lx "\n",
                             csr_ops[csrno].name, val);
            }
        }
        uint16_t vlenb = cpu->cfg.vlenb;

        for (i = 0; i < 32; i++) {
            qemu_fprintf(f, " %-8s ", riscv_rvv_regnames[i]);
            p = (uint8_t *)env->vreg;
            for (j = vlenb - 1; j >= 0; j--) {
                qemu_fprintf(f, "%02x", *(p + i * vlenb + BYTE(j)));
            }
            qemu_fprintf(f, "\n");
        }
    }
}

static void riscv_cpu_set_pc(CPUState *cs, vaddr value)
{
    RISCVCPU *cpu = RISCV_CPU(cs);
    CPURISCVState *env = &cpu->env;

    if (env->xl == MXL_RV32) {
        env->pc = (int32_t)value;
    } else {
        env->pc = value;
    }
}

static vaddr riscv_cpu_get_pc(CPUState *cs)
{
    RISCVCPU *cpu = RISCV_CPU(cs);
    CPURISCVState *env = &cpu->env;

    /* Match cpu_get_tb_cpu_state. */
    if (env->xl == MXL_RV32) {
        return env->pc & UINT32_MAX;
    }
    return env->pc;
}

bool riscv_cpu_has_work(CPUState *cs)
{
#ifndef CONFIG_USER_ONLY
    RISCVCPU *cpu = RISCV_CPU(cs);
    CPURISCVState *env = &cpu->env;
    /*
     * Definition of the WFI instruction requires it to ignore the privilege
     * mode and delegation registers, but respect individual enables
     */
    return riscv_cpu_all_pending(env) != 0 ||
        riscv_cpu_sirq_pending(env) != RISCV_EXCP_NONE ||
        riscv_cpu_vsirq_pending(env) != RISCV_EXCP_NONE;
#else
    return true;
#endif
}

static int riscv_cpu_mmu_index(CPUState *cs, bool ifetch)
{
    return riscv_env_mmu_index(cpu_env(cs), ifetch);
}

static void riscv_cpu_reset_hold(Object *obj, ResetType type)
{
#ifndef CONFIG_USER_ONLY
    uint8_t iprio;
    int i, irq, rdzero;
#endif
    CPUState *cs = CPU(obj);
    RISCVCPU *cpu = RISCV_CPU(cs);
    RISCVCPUClass *mcc = RISCV_CPU_GET_CLASS(obj);
    CPURISCVState *env = &cpu->env;

    if (mcc->parent_phases.hold) {
        mcc->parent_phases.hold(obj, type);
    }
#ifndef CONFIG_USER_ONLY
    env->misa_mxl = mcc->misa_mxl_max;
    env->priv = PRV_M;
    env->mstatus &= ~(MSTATUS_MIE | MSTATUS_MPRV);
    if (env->misa_mxl > MXL_RV32) {
        /*
         * The reset status of SXL/UXL is undefined, but mstatus is WARL
         * and we must ensure that the value after init is valid for read.
         */
        env->mstatus = set_field(env->mstatus, MSTATUS64_SXL, env->misa_mxl);
        env->mstatus = set_field(env->mstatus, MSTATUS64_UXL, env->misa_mxl);
        if (riscv_has_ext(env, RVH)) {
            env->vsstatus = set_field(env->vsstatus,
                                      MSTATUS64_SXL, env->misa_mxl);
            env->vsstatus = set_field(env->vsstatus,
                                      MSTATUS64_UXL, env->misa_mxl);
            env->mstatus_hs = set_field(env->mstatus_hs,
                                        MSTATUS64_SXL, env->misa_mxl);
            env->mstatus_hs = set_field(env->mstatus_hs,
                                        MSTATUS64_UXL, env->misa_mxl);
        }
        if (riscv_cpu_cfg(env)->ext_smdbltrp) {
            env->mstatus = set_field(env->mstatus, MSTATUS_MDT, 1);
        }
    }
    env->mcause = 0;
    env->miclaim = MIP_SGEIP;
    env->pc = env->resetvec;
    env->bins = 0;
    env->two_stage_lookup = false;

    env->menvcfg = (cpu->cfg.ext_svpbmt ? MENVCFG_PBMTE : 0) |
                   (!cpu->cfg.ext_svade && cpu->cfg.ext_svadu ?
                    MENVCFG_ADUE : 0);
    env->henvcfg = 0;

    /* Initialized default priorities of local interrupts. */
    for (i = 0; i < ARRAY_SIZE(env->miprio); i++) {
        iprio = riscv_cpu_default_priority(i);
        env->miprio[i] = (i == IRQ_M_EXT) ? 0 : iprio;
        env->siprio[i] = (i == IRQ_S_EXT) ? 0 : iprio;
        env->hviprio[i] = 0;
    }
    i = 0;
    while (!riscv_cpu_hviprio_index2irq(i, &irq, &rdzero)) {
        if (!rdzero) {
            env->hviprio[irq] = env->miprio[irq];
        }
        i++;
    }

    /*
     * Bits 10, 6, 2 and 12 of mideleg are read only 1 when the Hypervisor
     * extension is enabled.
     */
    if (riscv_has_ext(env, RVH)) {
        env->mideleg |= HS_MODE_INTERRUPTS;
    }

    /*
     * Clear mseccfg and unlock all the PMP entries upon reset.
     * This is allowed as per the priv and smepmp specifications
     * and is needed to clear stale entries across reboots.
     */
    if (riscv_cpu_cfg(env)->ext_smepmp) {
        env->mseccfg = 0;
    }

    pmp_unlock_entries(env);
#else
    env->priv = PRV_U;
    env->senvcfg = 0;
    env->menvcfg = 0;
#endif

    /* on reset elp is clear */
    env->elp = false;
    /* on reset ssp is set to 0 */
    env->ssp = 0;

    env->xl = riscv_cpu_mxl(env);
    cs->exception_index = RISCV_EXCP_NONE;
    env->load_res = -1;
    set_default_nan_mode(1, &env->fp_status);
    /* Default NaN value: sign bit clear, frac msb set */
    set_float_default_nan_pattern(0b01000000, &env->fp_status);
    env->vill = true;

#ifndef CONFIG_USER_ONLY
    if (cpu->cfg.debug) {
        riscv_trigger_reset_hold(env);
    }

    if (cpu->cfg.ext_smrnmi) {
        env->rnmip = 0;
        env->mnstatus = set_field(env->mnstatus, MNSTATUS_NMIE, false);
    }

    if (kvm_enabled()) {
        kvm_riscv_reset_vcpu(cpu);
    }
#endif
}

static void riscv_cpu_disas_set_info(CPUState *s, disassemble_info *info)
{
    RISCVCPU *cpu = RISCV_CPU(s);
    CPURISCVState *env = &cpu->env;
    info->target_info = &cpu->cfg;

    switch (env->xl) {
    case MXL_RV32:
        info->print_insn = print_insn_riscv32;
        break;
    case MXL_RV64:
        info->print_insn = print_insn_riscv64;
        break;
    case MXL_RV128:
        info->print_insn = print_insn_riscv128;
        break;
    default:
        g_assert_not_reached();
    }
}

#ifndef CONFIG_USER_ONLY
static void riscv_cpu_satp_mode_finalize(RISCVCPU *cpu, Error **errp)
{
    bool rv32 = riscv_cpu_is_32bit(cpu);
    uint8_t satp_mode_map_max, satp_mode_supported_max;

    /* The CPU wants the OS to decide which satp mode to use */
    if (cpu->cfg.satp_mode.supported == 0) {
        return;
    }

    satp_mode_supported_max =
        satp_mode_max_from_map(cpu->cfg.satp_mode.supported);

    if (cpu->cfg.satp_mode.map == 0) {
        if (cpu->cfg.satp_mode.init == 0) {
            /* If unset by the user, we fallback to the default satp mode. */
            set_satp_mode_default_map(cpu);
        } else {
            /*
             * Find the lowest level that was disabled and then enable the
             * first valid level below which can be found in
             * valid_vm_1_10_32/64.
             */
            for (int i = 1; i < 16; ++i) {
                if ((cpu->cfg.satp_mode.init & (1 << i)) &&
                    (cpu->cfg.satp_mode.supported & (1 << i))) {
                    for (int j = i - 1; j >= 0; --j) {
                        if (cpu->cfg.satp_mode.supported & (1 << j)) {
                            cpu->cfg.satp_mode.map |= (1 << j);
                            break;
                        }
                    }
                    break;
                }
            }
        }
    }

    satp_mode_map_max = satp_mode_max_from_map(cpu->cfg.satp_mode.map);

    /* Make sure the user asked for a supported configuration (HW and qemu) */
    if (satp_mode_map_max > satp_mode_supported_max) {
        error_setg(errp, "satp_mode %s is higher than hw max capability %s",
                   satp_mode_str(satp_mode_map_max, rv32),
                   satp_mode_str(satp_mode_supported_max, rv32));
        return;
    }

    /*
     * Make sure the user did not ask for an invalid configuration as per
     * the specification.
     */
    if (!rv32) {
        for (int i = satp_mode_map_max - 1; i >= 0; --i) {
            if (!(cpu->cfg.satp_mode.map & (1 << i)) &&
                (cpu->cfg.satp_mode.init & (1 << i)) &&
                (cpu->cfg.satp_mode.supported & (1 << i))) {
                error_setg(errp, "cannot disable %s satp mode if %s "
                           "is enabled", satp_mode_str(i, false),
                           satp_mode_str(satp_mode_map_max, false));
                return;
            }
        }
    }

    /* Finally expand the map so that all valid modes are set */
    for (int i = satp_mode_map_max - 1; i >= 0; --i) {
        if (cpu->cfg.satp_mode.supported & (1 << i)) {
            cpu->cfg.satp_mode.map |= (1 << i);
        }
    }
}
#endif

void riscv_cpu_finalize_features(RISCVCPU *cpu, Error **errp)
{
    Error *local_err = NULL;

#ifndef CONFIG_USER_ONLY
    riscv_cpu_satp_mode_finalize(cpu, &local_err);
    if (local_err != NULL) {
        error_propagate(errp, local_err);
        return;
    }
#endif

    if (tcg_enabled()) {
        riscv_tcg_cpu_finalize_features(cpu, &local_err);
        if (local_err != NULL) {
            error_propagate(errp, local_err);
            return;
        }
        riscv_tcg_cpu_finalize_dynamic_decoder(cpu);
    } else if (kvm_enabled()) {
        riscv_kvm_cpu_finalize_features(cpu, &local_err);
        if (local_err != NULL) {
            error_propagate(errp, local_err);
            return;
        }
    }
}

static void riscv_cpu_realize(DeviceState *dev, Error **errp)
{
    CPUState *cs = CPU(dev);
    RISCVCPU *cpu = RISCV_CPU(dev);
    RISCVCPUClass *mcc = RISCV_CPU_GET_CLASS(dev);
    Error *local_err = NULL;

    cpu_exec_realizefn(cs, &local_err);
    if (local_err != NULL) {
        error_propagate(errp, local_err);
        return;
    }

    riscv_cpu_finalize_features(cpu, &local_err);
    if (local_err != NULL) {
        error_propagate(errp, local_err);
        return;
    }

    riscv_cpu_register_gdb_regs_for_features(cs);

#ifndef CONFIG_USER_ONLY
    if (cpu->cfg.debug) {
        riscv_trigger_realize(&cpu->env);
    }
#endif

    qemu_init_vcpu(cs);
    cpu_reset(cs);

    mcc->parent_realize(dev, errp);
}

bool riscv_cpu_accelerator_compatible(RISCVCPU *cpu)
{
    if (tcg_enabled()) {
        return riscv_cpu_tcg_compatible(cpu);
    }

    return true;
}

#ifndef CONFIG_USER_ONLY
static void cpu_riscv_get_satp(Object *obj, Visitor *v, const char *name,
                               void *opaque, Error **errp)
{
    RISCVSATPMap *satp_map = opaque;
    uint8_t satp = satp_mode_from_str(name);
    bool value;

    value = satp_map->map & (1 << satp);

    visit_type_bool(v, name, &value, errp);
}

static void cpu_riscv_set_satp(Object *obj, Visitor *v, const char *name,
                               void *opaque, Error **errp)
{
    RISCVSATPMap *satp_map = opaque;
    uint8_t satp = satp_mode_from_str(name);
    bool value;

    if (!visit_type_bool(v, name, &value, errp)) {
        return;
    }

    satp_map->map = deposit32(satp_map->map, satp, 1, value);
    satp_map->init |= 1 << satp;
}

void riscv_add_satp_mode_properties(Object *obj)
{
    RISCVCPU *cpu = RISCV_CPU(obj);

    if (cpu->env.misa_mxl == MXL_RV32) {
        object_property_add(obj, "sv32", "bool", cpu_riscv_get_satp,
                            cpu_riscv_set_satp, NULL, &cpu->cfg.satp_mode);
    } else {
        object_property_add(obj, "sv39", "bool", cpu_riscv_get_satp,
                            cpu_riscv_set_satp, NULL, &cpu->cfg.satp_mode);
        object_property_add(obj, "sv48", "bool", cpu_riscv_get_satp,
                            cpu_riscv_set_satp, NULL, &cpu->cfg.satp_mode);
        object_property_add(obj, "sv57", "bool", cpu_riscv_get_satp,
                            cpu_riscv_set_satp, NULL, &cpu->cfg.satp_mode);
        object_property_add(obj, "sv64", "bool", cpu_riscv_get_satp,
                            cpu_riscv_set_satp, NULL, &cpu->cfg.satp_mode);
    }
}

static void riscv_cpu_set_irq(void *opaque, int irq, int level)
{
    RISCVCPU *cpu = RISCV_CPU(opaque);
    CPURISCVState *env = &cpu->env;

    if (irq < IRQ_LOCAL_MAX) {
        switch (irq) {
        case IRQ_U_SOFT:
        case IRQ_S_SOFT:
        case IRQ_VS_SOFT:
        case IRQ_M_SOFT:
        case IRQ_U_TIMER:
        case IRQ_S_TIMER:
        case IRQ_VS_TIMER:
        case IRQ_M_TIMER:
        case IRQ_U_EXT:
        case IRQ_VS_EXT:
        case IRQ_M_EXT:
            if (kvm_enabled()) {
                kvm_riscv_set_irq(cpu, irq, level);
            } else {
                riscv_cpu_update_mip(env, 1 << irq, BOOL_TO_MASK(level));
            }
            break;
        case IRQ_S_EXT:
            if (kvm_enabled()) {
                kvm_riscv_set_irq(cpu, irq, level);
            } else {
                env->external_seip = level;
                riscv_cpu_update_mip(env, 1 << irq,
                                     BOOL_TO_MASK(level | env->software_seip));
            }
            break;
        default:
            g_assert_not_reached();
        }
    } else if (irq < (IRQ_LOCAL_MAX + IRQ_LOCAL_GUEST_MAX)) {
        /* Require H-extension for handling guest local interrupts */
        if (!riscv_has_ext(env, RVH)) {
            g_assert_not_reached();
        }

        /* Compute bit position in HGEIP CSR */
        irq = irq - IRQ_LOCAL_MAX + 1;
        if (env->geilen < irq) {
            g_assert_not_reached();
        }

        /* Update HGEIP CSR */
        env->hgeip &= ~((target_ulong)1 << irq);
        if (level) {
            env->hgeip |= (target_ulong)1 << irq;
        }

        /* Update mip.SGEIP bit */
        riscv_cpu_update_mip(env, MIP_SGEIP,
                             BOOL_TO_MASK(!!(env->hgeie & env->hgeip)));
    } else {
        g_assert_not_reached();
    }
}

static void riscv_cpu_set_nmi(void *opaque, int irq, int level)
{
    riscv_cpu_set_rnmi(RISCV_CPU(opaque), irq, level);
}
#endif /* CONFIG_USER_ONLY */

static bool riscv_cpu_is_dynamic(Object *cpu_obj)
{
    return object_dynamic_cast(cpu_obj, TYPE_RISCV_DYNAMIC_CPU) != NULL;
}

static void riscv_cpu_post_init(Object *obj)
{
    accel_cpu_instance_init(CPU(obj));
}

static void riscv_cpu_init(Object *obj)
{
    RISCVCPUClass *mcc = RISCV_CPU_GET_CLASS(obj);
    RISCVCPU *cpu = RISCV_CPU(obj);
    CPURISCVState *env = &cpu->env;

    env->misa_mxl = mcc->misa_mxl_max;

#ifndef CONFIG_USER_ONLY
    qdev_init_gpio_in(DEVICE(obj), riscv_cpu_set_irq,
                      IRQ_LOCAL_MAX + IRQ_LOCAL_GUEST_MAX);
    qdev_init_gpio_in_named(DEVICE(cpu), riscv_cpu_set_nmi,
                            "riscv.cpu.rnmi", RNMI_MAX);
#endif /* CONFIG_USER_ONLY */

    general_user_opts = g_hash_table_new(g_str_hash, g_str_equal);

    /*
     * The timer and performance counters extensions were supported
     * in QEMU before they were added as discrete extensions in the
     * ISA. To keep compatibility we'll always default them to 'true'
     * for all CPUs. Each accelerator will decide what to do when
     * users disable them.
     */
    RISCV_CPU(obj)->cfg.ext_zicntr = true;
    RISCV_CPU(obj)->cfg.ext_zihpm = true;

    /* Default values for non-bool cpu properties */
    cpu->cfg.pmu_mask = MAKE_64BIT_MASK(3, 16);
    cpu->cfg.vlenb = 128 >> 3;
    cpu->cfg.elen = 64;
    cpu->cfg.cbom_blocksize = 64;
    cpu->cfg.cbop_blocksize = 64;
    cpu->cfg.cboz_blocksize = 64;
    cpu->env.vext_ver = VEXT_VERSION_1_00_0;
}

static void riscv_bare_cpu_init(Object *obj)
{
    RISCVCPU *cpu = RISCV_CPU(obj);

    /*
     * Bare CPUs do not inherit the timer and performance
     * counters from the parent class (see riscv_cpu_init()
     * for info on why the parent enables them).
     *
     * Users have to explicitly enable these counters for
     * bare CPUs.
     */
    cpu->cfg.ext_zicntr = false;
    cpu->cfg.ext_zihpm = false;

    /* Set to QEMU's first supported priv version */
    cpu->env.priv_ver = PRIV_VERSION_1_10_0;

    /*
     * Support all available satp_mode settings. The default
     * value will be set to MBARE if the user doesn't set
     * satp_mode manually (see set_satp_mode_default_map()).
     */
#ifndef CONFIG_USER_ONLY
    set_satp_mode_max_supported(cpu, VM_1_10_SV64);
#endif
}

typedef struct misa_ext_info {
    const char *name;
    const char *description;
} MISAExtInfo;

#define MISA_INFO_IDX(_bit) \
    __builtin_ctz(_bit)

#define MISA_EXT_INFO(_bit, _propname, _descr) \
    [MISA_INFO_IDX(_bit)] = {.name = _propname, .description = _descr}

static const MISAExtInfo misa_ext_info_arr[] = {
    MISA_EXT_INFO(RVA, "a", "Atomic instructions"),
    MISA_EXT_INFO(RVC, "c", "Compressed instructions"),
    MISA_EXT_INFO(RVD, "d", "Double-precision float point"),
    MISA_EXT_INFO(RVF, "f", "Single-precision float point"),
    MISA_EXT_INFO(RVI, "i", "Base integer instruction set"),
    MISA_EXT_INFO(RVE, "e", "Base integer instruction set (embedded)"),
    MISA_EXT_INFO(RVM, "m", "Integer multiplication and division"),
    MISA_EXT_INFO(RVS, "s", "Supervisor-level instructions"),
    MISA_EXT_INFO(RVU, "u", "User-level instructions"),
    MISA_EXT_INFO(RVH, "h", "Hypervisor"),
    MISA_EXT_INFO(RVV, "v", "Vector operations"),
    MISA_EXT_INFO(RVG, "g", "General purpose (IMAFD_Zicsr_Zifencei)"),
    MISA_EXT_INFO(RVB, "b", "Bit manipulation (Zba_Zbb_Zbs)")
};

static void riscv_cpu_validate_misa_mxl(RISCVCPUClass *mcc)
{
    CPUClass *cc = CPU_CLASS(mcc);

    /* Validate that MISA_MXL is set properly. */
    switch (mcc->misa_mxl_max) {
#ifdef TARGET_RISCV64
    case MXL_RV64:
    case MXL_RV128:
        cc->gdb_core_xml_file = "riscv-64bit-cpu.xml";
        break;
#endif
    case MXL_RV32:
        cc->gdb_core_xml_file = "riscv-32bit-cpu.xml";
        break;
    default:
        g_assert_not_reached();
    }
}

static int riscv_validate_misa_info_idx(uint32_t bit)
{
    int idx;

    /*
     * Our lowest valid input (RVA) is 1 and
     * __builtin_ctz() is UB with zero.
     */
    g_assert(bit != 0);
    idx = MISA_INFO_IDX(bit);

    g_assert(idx < ARRAY_SIZE(misa_ext_info_arr));
    return idx;
}

const char *riscv_get_misa_ext_name(uint32_t bit)
{
    int idx = riscv_validate_misa_info_idx(bit);
    const char *val = misa_ext_info_arr[idx].name;

    g_assert(val != NULL);
    return val;
}

const char *riscv_get_misa_ext_description(uint32_t bit)
{
    int idx = riscv_validate_misa_info_idx(bit);
    const char *val = misa_ext_info_arr[idx].description;

    g_assert(val != NULL);
    return val;
}

#define MULTI_EXT_CFG_BOOL(_name, _prop, _defval) \
    {.name = _name, .offset = CPU_CFG_OFFSET(_prop), \
     .enabled = _defval}

const RISCVCPUMultiExtConfig riscv_cpu_extensions[] = {
    /* Defaults for standard extensions */
    MULTI_EXT_CFG_BOOL("sscofpmf", ext_sscofpmf, false),
    MULTI_EXT_CFG_BOOL("smcntrpmf", ext_smcntrpmf, false),
    MULTI_EXT_CFG_BOOL("smcsrind", ext_smcsrind, false),
    MULTI_EXT_CFG_BOOL("smcdeleg", ext_smcdeleg, false),
    MULTI_EXT_CFG_BOOL("sscsrind", ext_sscsrind, false),
    MULTI_EXT_CFG_BOOL("ssccfg", ext_ssccfg, false),
    MULTI_EXT_CFG_BOOL("zifencei", ext_zifencei, true),
    MULTI_EXT_CFG_BOOL("zicfilp", ext_zicfilp, false),
    MULTI_EXT_CFG_BOOL("zicfiss", ext_zicfiss, false),
    MULTI_EXT_CFG_BOOL("zicsr", ext_zicsr, true),
    MULTI_EXT_CFG_BOOL("zihintntl", ext_zihintntl, true),
    MULTI_EXT_CFG_BOOL("zihintpause", ext_zihintpause, true),
    MULTI_EXT_CFG_BOOL("zimop", ext_zimop, false),
    MULTI_EXT_CFG_BOOL("zcmop", ext_zcmop, false),
    MULTI_EXT_CFG_BOOL("zacas", ext_zacas, false),
    MULTI_EXT_CFG_BOOL("zama16b", ext_zama16b, false),
    MULTI_EXT_CFG_BOOL("zabha", ext_zabha, false),
    MULTI_EXT_CFG_BOOL("zaamo", ext_zaamo, false),
    MULTI_EXT_CFG_BOOL("zalrsc", ext_zalrsc, false),
    MULTI_EXT_CFG_BOOL("zawrs", ext_zawrs, true),
    MULTI_EXT_CFG_BOOL("zfa", ext_zfa, true),
    MULTI_EXT_CFG_BOOL("zfbfmin", ext_zfbfmin, false),
    MULTI_EXT_CFG_BOOL("zfh", ext_zfh, false),
    MULTI_EXT_CFG_BOOL("zfhmin", ext_zfhmin, false),
    MULTI_EXT_CFG_BOOL("zve32f", ext_zve32f, false),
    MULTI_EXT_CFG_BOOL("zve32x", ext_zve32x, false),
    MULTI_EXT_CFG_BOOL("zve64f", ext_zve64f, false),
    MULTI_EXT_CFG_BOOL("zve64d", ext_zve64d, false),
    MULTI_EXT_CFG_BOOL("zve64x", ext_zve64x, false),
    MULTI_EXT_CFG_BOOL("zvfbfmin", ext_zvfbfmin, false),
    MULTI_EXT_CFG_BOOL("zvfbfwma", ext_zvfbfwma, false),
    MULTI_EXT_CFG_BOOL("zvfh", ext_zvfh, false),
    MULTI_EXT_CFG_BOOL("zvfhmin", ext_zvfhmin, false),
    MULTI_EXT_CFG_BOOL("sstc", ext_sstc, true),
    MULTI_EXT_CFG_BOOL("ssnpm", ext_ssnpm, false),
    MULTI_EXT_CFG_BOOL("sspm", ext_sspm, false),
    MULTI_EXT_CFG_BOOL("supm", ext_supm, false),

    MULTI_EXT_CFG_BOOL("smaia", ext_smaia, false),
    MULTI_EXT_CFG_BOOL("smdbltrp", ext_smdbltrp, false),
    MULTI_EXT_CFG_BOOL("smepmp", ext_smepmp, false),
MULTI_EXT_CFG_BOOL("smrnmi", ext_smrnmi, false), 1630 MULTI_EXT_CFG_BOOL("smmpm", ext_smmpm, false), 1631 MULTI_EXT_CFG_BOOL("smnpm", ext_smnpm, false), 1632 MULTI_EXT_CFG_BOOL("smstateen", ext_smstateen, false), 1633 MULTI_EXT_CFG_BOOL("ssaia", ext_ssaia, false), 1634 MULTI_EXT_CFG_BOOL("ssdbltrp", ext_ssdbltrp, false), 1635 MULTI_EXT_CFG_BOOL("svade", ext_svade, false), 1636 MULTI_EXT_CFG_BOOL("svadu", ext_svadu, true), 1637 MULTI_EXT_CFG_BOOL("svinval", ext_svinval, false), 1638 MULTI_EXT_CFG_BOOL("svnapot", ext_svnapot, false), 1639 MULTI_EXT_CFG_BOOL("svpbmt", ext_svpbmt, false), 1640 MULTI_EXT_CFG_BOOL("svvptc", ext_svvptc, true), 1641 1642 MULTI_EXT_CFG_BOOL("zicntr", ext_zicntr, true), 1643 MULTI_EXT_CFG_BOOL("zihpm", ext_zihpm, true), 1644 1645 MULTI_EXT_CFG_BOOL("zba", ext_zba, true), 1646 MULTI_EXT_CFG_BOOL("zbb", ext_zbb, true), 1647 MULTI_EXT_CFG_BOOL("zbc", ext_zbc, true), 1648 MULTI_EXT_CFG_BOOL("zbkb", ext_zbkb, false), 1649 MULTI_EXT_CFG_BOOL("zbkc", ext_zbkc, false), 1650 MULTI_EXT_CFG_BOOL("zbkx", ext_zbkx, false), 1651 MULTI_EXT_CFG_BOOL("zbs", ext_zbs, true), 1652 MULTI_EXT_CFG_BOOL("zk", ext_zk, false), 1653 MULTI_EXT_CFG_BOOL("zkn", ext_zkn, false), 1654 MULTI_EXT_CFG_BOOL("zknd", ext_zknd, false), 1655 MULTI_EXT_CFG_BOOL("zkne", ext_zkne, false), 1656 MULTI_EXT_CFG_BOOL("zknh", ext_zknh, false), 1657 MULTI_EXT_CFG_BOOL("zkr", ext_zkr, false), 1658 MULTI_EXT_CFG_BOOL("zks", ext_zks, false), 1659 MULTI_EXT_CFG_BOOL("zksed", ext_zksed, false), 1660 MULTI_EXT_CFG_BOOL("zksh", ext_zksh, false), 1661 MULTI_EXT_CFG_BOOL("zkt", ext_zkt, false), 1662 MULTI_EXT_CFG_BOOL("ztso", ext_ztso, false), 1663 1664 MULTI_EXT_CFG_BOOL("zdinx", ext_zdinx, false), 1665 MULTI_EXT_CFG_BOOL("zfinx", ext_zfinx, false), 1666 MULTI_EXT_CFG_BOOL("zhinx", ext_zhinx, false), 1667 MULTI_EXT_CFG_BOOL("zhinxmin", ext_zhinxmin, false), 1668 1669 MULTI_EXT_CFG_BOOL("zicbom", ext_zicbom, true), 1670 MULTI_EXT_CFG_BOOL("zicbop", ext_zicbop, true), 1671 MULTI_EXT_CFG_BOOL("zicboz", ext_zicboz, true), 1672 1673 MULTI_EXT_CFG_BOOL("zmmul", ext_zmmul, false), 1674 1675 MULTI_EXT_CFG_BOOL("zca", ext_zca, false), 1676 MULTI_EXT_CFG_BOOL("zcb", ext_zcb, false), 1677 MULTI_EXT_CFG_BOOL("zcd", ext_zcd, false), 1678 MULTI_EXT_CFG_BOOL("zce", ext_zce, false), 1679 MULTI_EXT_CFG_BOOL("zcf", ext_zcf, false), 1680 MULTI_EXT_CFG_BOOL("zcmp", ext_zcmp, false), 1681 MULTI_EXT_CFG_BOOL("zcmt", ext_zcmt, false), 1682 MULTI_EXT_CFG_BOOL("zicond", ext_zicond, false), 1683 1684 /* Vector cryptography extensions */ 1685 MULTI_EXT_CFG_BOOL("zvbb", ext_zvbb, false), 1686 MULTI_EXT_CFG_BOOL("zvbc", ext_zvbc, false), 1687 MULTI_EXT_CFG_BOOL("zvkb", ext_zvkb, false), 1688 MULTI_EXT_CFG_BOOL("zvkg", ext_zvkg, false), 1689 MULTI_EXT_CFG_BOOL("zvkned", ext_zvkned, false), 1690 MULTI_EXT_CFG_BOOL("zvknha", ext_zvknha, false), 1691 MULTI_EXT_CFG_BOOL("zvknhb", ext_zvknhb, false), 1692 MULTI_EXT_CFG_BOOL("zvksed", ext_zvksed, false), 1693 MULTI_EXT_CFG_BOOL("zvksh", ext_zvksh, false), 1694 MULTI_EXT_CFG_BOOL("zvkt", ext_zvkt, false), 1695 MULTI_EXT_CFG_BOOL("zvkn", ext_zvkn, false), 1696 MULTI_EXT_CFG_BOOL("zvknc", ext_zvknc, false), 1697 MULTI_EXT_CFG_BOOL("zvkng", ext_zvkng, false), 1698 MULTI_EXT_CFG_BOOL("zvks", ext_zvks, false), 1699 MULTI_EXT_CFG_BOOL("zvksc", ext_zvksc, false), 1700 MULTI_EXT_CFG_BOOL("zvksg", ext_zvksg, false), 1701 1702 { }, 1703 }; 1704 1705 const RISCVCPUMultiExtConfig riscv_cpu_vendor_exts[] = { 1706 MULTI_EXT_CFG_BOOL("xtheadba", ext_xtheadba, false), 1707 MULTI_EXT_CFG_BOOL("xtheadbb", ext_xtheadbb, false), 
    MULTI_EXT_CFG_BOOL("xtheadbs", ext_xtheadbs, false),
    MULTI_EXT_CFG_BOOL("xtheadcmo", ext_xtheadcmo, false),
    MULTI_EXT_CFG_BOOL("xtheadcondmov", ext_xtheadcondmov, false),
    MULTI_EXT_CFG_BOOL("xtheadfmemidx", ext_xtheadfmemidx, false),
    MULTI_EXT_CFG_BOOL("xtheadfmv", ext_xtheadfmv, false),
    MULTI_EXT_CFG_BOOL("xtheadmac", ext_xtheadmac, false),
    MULTI_EXT_CFG_BOOL("xtheadmemidx", ext_xtheadmemidx, false),
    MULTI_EXT_CFG_BOOL("xtheadmempair", ext_xtheadmempair, false),
    MULTI_EXT_CFG_BOOL("xtheadsync", ext_xtheadsync, false),
    MULTI_EXT_CFG_BOOL("xventanacondops", ext_XVentanaCondOps, false),

    { },
};

/* These are experimental so mark with 'x-' */
const RISCVCPUMultiExtConfig riscv_cpu_experimental_exts[] = {
    MULTI_EXT_CFG_BOOL("x-svukte", ext_svukte, false),

    { },
};

/*
 * 'Named features' is the name we give to extensions that we
 * don't want to expose to users. They are either immutable
 * (always enabled/disabled) or they'll vary depending on
 * the resulting CPU state. They have riscv,isa strings
 * and priv_ver like regular extensions.
 */
const RISCVCPUMultiExtConfig riscv_cpu_named_features[] = {
    MULTI_EXT_CFG_BOOL("zic64b", ext_zic64b, true),
    MULTI_EXT_CFG_BOOL("ssstateen", ext_ssstateen, true),
    MULTI_EXT_CFG_BOOL("sha", ext_sha, true),

    { },
};

/* Deprecated entries marked for future removal */
const RISCVCPUMultiExtConfig riscv_cpu_deprecated_exts[] = {
    MULTI_EXT_CFG_BOOL("Zifencei", ext_zifencei, true),
    MULTI_EXT_CFG_BOOL("Zicsr", ext_zicsr, true),
    MULTI_EXT_CFG_BOOL("Zihintntl", ext_zihintntl, true),
    MULTI_EXT_CFG_BOOL("Zihintpause", ext_zihintpause, true),
    MULTI_EXT_CFG_BOOL("Zawrs", ext_zawrs, true),
    MULTI_EXT_CFG_BOOL("Zfa", ext_zfa, true),
    MULTI_EXT_CFG_BOOL("Zfh", ext_zfh, false),
    MULTI_EXT_CFG_BOOL("Zfhmin", ext_zfhmin, false),
    MULTI_EXT_CFG_BOOL("Zve32f", ext_zve32f, false),
    MULTI_EXT_CFG_BOOL("Zve64f", ext_zve64f, false),
    MULTI_EXT_CFG_BOOL("Zve64d", ext_zve64d, false),

    { },
};

static void cpu_set_prop_err(RISCVCPU *cpu, const char *propname,
                             Error **errp)
{
    g_autofree char *cpuname = riscv_cpu_get_name(cpu);
    error_setg(errp, "CPU '%s' does not allow changing the value of '%s'",
               cpuname, propname);
}

static void prop_pmu_num_set(Object *obj, Visitor *v, const char *name,
                             void *opaque, Error **errp)
{
    RISCVCPU *cpu = RISCV_CPU(obj);
    uint8_t pmu_num, curr_pmu_num;
    uint32_t pmu_mask;

    visit_type_uint8(v, name, &pmu_num, errp);

    curr_pmu_num = ctpop32(cpu->cfg.pmu_mask);

    if (pmu_num != curr_pmu_num && riscv_cpu_is_vendor(obj)) {
        cpu_set_prop_err(cpu, name, errp);
        error_append_hint(errp, "Current '%s' val: %u\n",
                          name, curr_pmu_num);
        return;
    }

    if (pmu_num > (RV_MAX_MHPMCOUNTERS - 3)) {
        error_setg(errp, "Number of counters exceeds maximum available");
        return;
    }

    if (pmu_num == 0) {
        pmu_mask = 0;
    } else {
        pmu_mask = MAKE_64BIT_MASK(3, pmu_num);
    }

    warn_report("\"pmu-num\" property is deprecated; use \"pmu-mask\"");
    cpu->cfg.pmu_mask = pmu_mask;
    cpu_option_add_user_setting("pmu-mask", pmu_mask);
}

static void prop_pmu_num_get(Object *obj, Visitor *v, const char *name,
                             void *opaque, Error **errp)
{
    RISCVCPU *cpu = RISCV_CPU(obj);
static void prop_pmu_num_get(Object *obj, Visitor *v, const char *name,
                             void *opaque, Error **errp)
{
    RISCVCPU *cpu = RISCV_CPU(obj);
    uint8_t pmu_num = ctpop32(cpu->cfg.pmu_mask);

    visit_type_uint8(v, name, &pmu_num, errp);
}

static const PropertyInfo prop_pmu_num = {
    .name = "pmu-num",
    .get = prop_pmu_num_get,
    .set = prop_pmu_num_set,
};

static void prop_pmu_mask_set(Object *obj, Visitor *v, const char *name,
                              void *opaque, Error **errp)
{
    RISCVCPU *cpu = RISCV_CPU(obj);
    uint32_t value;
    uint8_t pmu_num;

    if (!visit_type_uint32(v, name, &value, errp)) {
        return;
    }

    if (value != cpu->cfg.pmu_mask && riscv_cpu_is_vendor(obj)) {
        cpu_set_prop_err(cpu, name, errp);
        error_append_hint(errp, "Current '%s' val: %x\n",
                          name, cpu->cfg.pmu_mask);
        return;
    }

    pmu_num = ctpop32(value);

    if (pmu_num > (RV_MAX_MHPMCOUNTERS - 3)) {
        error_setg(errp, "Number of counters exceeds maximum available");
        return;
    }

    cpu_option_add_user_setting(name, value);
    cpu->cfg.pmu_mask = value;
}

static void prop_pmu_mask_get(Object *obj, Visitor *v, const char *name,
                              void *opaque, Error **errp)
{
    uint32_t pmu_mask = RISCV_CPU(obj)->cfg.pmu_mask;

    visit_type_uint32(v, name, &pmu_mask, errp);
}

static const PropertyInfo prop_pmu_mask = {
    .name = "pmu-mask",
    .get = prop_pmu_mask_get,
    .set = prop_pmu_mask_set,
};

static void prop_mmu_set(Object *obj, Visitor *v, const char *name,
                         void *opaque, Error **errp)
{
    RISCVCPU *cpu = RISCV_CPU(obj);
    bool value;

    if (!visit_type_bool(v, name, &value, errp)) {
        return;
    }

    if (cpu->cfg.mmu != value && riscv_cpu_is_vendor(obj)) {
        cpu_set_prop_err(cpu, "mmu", errp);
        return;
    }

    cpu_option_add_user_setting(name, value);
    cpu->cfg.mmu = value;
}

static void prop_mmu_get(Object *obj, Visitor *v, const char *name,
                         void *opaque, Error **errp)
{
    bool value = RISCV_CPU(obj)->cfg.mmu;

    visit_type_bool(v, name, &value, errp);
}

static const PropertyInfo prop_mmu = {
    .name = "mmu",
    .get = prop_mmu_get,
    .set = prop_mmu_set,
};

static void prop_pmp_set(Object *obj, Visitor *v, const char *name,
                         void *opaque, Error **errp)
{
    RISCVCPU *cpu = RISCV_CPU(obj);
    bool value;

    if (!visit_type_bool(v, name, &value, errp)) {
        return;
    }

    if (cpu->cfg.pmp != value && riscv_cpu_is_vendor(obj)) {
        cpu_set_prop_err(cpu, name, errp);
        return;
    }

    cpu_option_add_user_setting(name, value);
    cpu->cfg.pmp = value;
}

static void prop_pmp_get(Object *obj, Visitor *v, const char *name,
                         void *opaque, Error **errp)
{
    bool value = RISCV_CPU(obj)->cfg.pmp;

    visit_type_bool(v, name, &value, errp);
}

static const PropertyInfo prop_pmp = {
    .name = "pmp",
    .get = prop_pmp_get,
    .set = prop_pmp_set,
};

static int priv_spec_from_str(const char *priv_spec_str)
{
    int priv_version = -1;

    if (!g_strcmp0(priv_spec_str, PRIV_VER_1_13_0_STR)) {
        priv_version = PRIV_VERSION_1_13_0;
    } else if (!g_strcmp0(priv_spec_str, PRIV_VER_1_12_0_STR)) {
        priv_version = PRIV_VERSION_1_12_0;
    } else if (!g_strcmp0(priv_spec_str, PRIV_VER_1_11_0_STR)) {
        priv_version = PRIV_VERSION_1_11_0;
    } else if (!g_strcmp0(priv_spec_str, PRIV_VER_1_10_0_STR)) {
        priv_version = PRIV_VERSION_1_10_0;
    }

    return priv_version;
}
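/*
 * Illustrative example (sketch, not from the original source): the
 * "priv_spec" property accepts the strings defined by the PRIV_VER_*_STR
 * macros and maps them to PRIV_VERSION_* values via priv_spec_from_str()
 * above. Assuming PRIV_VER_1_12_0_STR is "v1.12.0", a command line such
 * as "-cpu rv64,priv_spec=v1.12.0" would set env.priv_ver to
 * PRIV_VERSION_1_12_0, while any unrecognized string is rejected with
 * "Unsupported privilege spec version".
 */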
const char *priv_spec_to_str(int priv_version)
{
    switch (priv_version) {
    case PRIV_VERSION_1_10_0:
        return PRIV_VER_1_10_0_STR;
    case PRIV_VERSION_1_11_0:
        return PRIV_VER_1_11_0_STR;
    case PRIV_VERSION_1_12_0:
        return PRIV_VER_1_12_0_STR;
    case PRIV_VERSION_1_13_0:
        return PRIV_VER_1_13_0_STR;
    default:
        return NULL;
    }
}

static void prop_priv_spec_set(Object *obj, Visitor *v, const char *name,
                               void *opaque, Error **errp)
{
    RISCVCPU *cpu = RISCV_CPU(obj);
    g_autofree char *value = NULL;
    int priv_version = -1;

    if (!visit_type_str(v, name, &value, errp)) {
        return;
    }

    priv_version = priv_spec_from_str(value);
    if (priv_version < 0) {
        error_setg(errp, "Unsupported privilege spec version '%s'", value);
        return;
    }

    if (priv_version != cpu->env.priv_ver && riscv_cpu_is_vendor(obj)) {
        cpu_set_prop_err(cpu, name, errp);
        error_append_hint(errp, "Current '%s' val: %s\n", name,
                          object_property_get_str(obj, name, NULL));
        return;
    }

    cpu_option_add_user_setting(name, priv_version);
    cpu->env.priv_ver = priv_version;
}

static void prop_priv_spec_get(Object *obj, Visitor *v, const char *name,
                               void *opaque, Error **errp)
{
    RISCVCPU *cpu = RISCV_CPU(obj);
    const char *value = priv_spec_to_str(cpu->env.priv_ver);

    visit_type_str(v, name, (char **)&value, errp);
}

static const PropertyInfo prop_priv_spec = {
    .name = "priv_spec",
    .get = prop_priv_spec_get,
    .set = prop_priv_spec_set,
};

static void prop_vext_spec_set(Object *obj, Visitor *v, const char *name,
                               void *opaque, Error **errp)
{
    RISCVCPU *cpu = RISCV_CPU(obj);
    g_autofree char *value = NULL;

    if (!visit_type_str(v, name, &value, errp)) {
        return;
    }

    if (g_strcmp0(value, VEXT_VER_1_00_0_STR) != 0) {
        error_setg(errp, "Unsupported vector spec version '%s'", value);
        return;
    }

    cpu_option_add_user_setting(name, VEXT_VERSION_1_00_0);
    cpu->env.vext_ver = VEXT_VERSION_1_00_0;
}

static void prop_vext_spec_get(Object *obj, Visitor *v, const char *name,
                               void *opaque, Error **errp)
{
    const char *value = VEXT_VER_1_00_0_STR;

    visit_type_str(v, name, (char **)&value, errp);
}

static const PropertyInfo prop_vext_spec = {
    .name = "vext_spec",
    .get = prop_vext_spec_get,
    .set = prop_vext_spec_set,
};

static void prop_vlen_set(Object *obj, Visitor *v, const char *name,
                          void *opaque, Error **errp)
{
    RISCVCPU *cpu = RISCV_CPU(obj);
    uint16_t cpu_vlen = cpu->cfg.vlenb << 3;
    uint16_t value;

    if (!visit_type_uint16(v, name, &value, errp)) {
        return;
    }

    if (!is_power_of_2(value)) {
        error_setg(errp, "Vector extension VLEN must be power of 2");
        return;
    }

    if (value != cpu_vlen && riscv_cpu_is_vendor(obj)) {
        cpu_set_prop_err(cpu, name, errp);
        error_append_hint(errp, "Current '%s' val: %u\n",
                          name, cpu_vlen);
        return;
    }

    cpu_option_add_user_setting(name, value);
    cpu->cfg.vlenb = value >> 3;
}

static void prop_vlen_get(Object *obj, Visitor *v, const char *name,
                          void *opaque, Error **errp)
{
    uint16_t value = RISCV_CPU(obj)->cfg.vlenb << 3;

    visit_type_uint16(v, name, &value, errp);
}
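/*
 * Illustrative note (not part of the original code): the user-visible
 * "vlen" property is expressed in bits, while cfg.vlenb stores VLEN in
 * bytes, hence the "<< 3" / ">> 3" conversions in the accessors above.
 * For example, a hypothetical "-cpu rv64,v=true,vlen=256" would store
 * cfg.vlenb = 32, while a non-power-of-2 value such as 384 is rejected.
 */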
static const PropertyInfo prop_vlen = {
    .name = "vlen",
    .get = prop_vlen_get,
    .set = prop_vlen_set,
};

static void prop_elen_set(Object *obj, Visitor *v, const char *name,
                          void *opaque, Error **errp)
{
    RISCVCPU *cpu = RISCV_CPU(obj);
    uint16_t value;

    if (!visit_type_uint16(v, name, &value, errp)) {
        return;
    }

    if (!is_power_of_2(value)) {
        error_setg(errp, "Vector extension ELEN must be power of 2");
        return;
    }

    if (value != cpu->cfg.elen && riscv_cpu_is_vendor(obj)) {
        cpu_set_prop_err(cpu, name, errp);
        error_append_hint(errp, "Current '%s' val: %u\n",
                          name, cpu->cfg.elen);
        return;
    }

    cpu_option_add_user_setting(name, value);
    cpu->cfg.elen = value;
}

static void prop_elen_get(Object *obj, Visitor *v, const char *name,
                          void *opaque, Error **errp)
{
    uint16_t value = RISCV_CPU(obj)->cfg.elen;

    visit_type_uint16(v, name, &value, errp);
}

static const PropertyInfo prop_elen = {
    .name = "elen",
    .get = prop_elen_get,
    .set = prop_elen_set,
};

static void prop_cbom_blksize_set(Object *obj, Visitor *v, const char *name,
                                  void *opaque, Error **errp)
{
    RISCVCPU *cpu = RISCV_CPU(obj);
    uint16_t value;

    if (!visit_type_uint16(v, name, &value, errp)) {
        return;
    }

    if (value != cpu->cfg.cbom_blocksize && riscv_cpu_is_vendor(obj)) {
        cpu_set_prop_err(cpu, name, errp);
        error_append_hint(errp, "Current '%s' val: %u\n",
                          name, cpu->cfg.cbom_blocksize);
        return;
    }

    cpu_option_add_user_setting(name, value);
    cpu->cfg.cbom_blocksize = value;
}

static void prop_cbom_blksize_get(Object *obj, Visitor *v, const char *name,
                                  void *opaque, Error **errp)
{
    uint16_t value = RISCV_CPU(obj)->cfg.cbom_blocksize;

    visit_type_uint16(v, name, &value, errp);
}

static const PropertyInfo prop_cbom_blksize = {
    .name = "cbom_blocksize",
    .get = prop_cbom_blksize_get,
    .set = prop_cbom_blksize_set,
};

static void prop_cbop_blksize_set(Object *obj, Visitor *v, const char *name,
                                  void *opaque, Error **errp)
{
    RISCVCPU *cpu = RISCV_CPU(obj);
    uint16_t value;

    if (!visit_type_uint16(v, name, &value, errp)) {
        return;
    }

    if (value != cpu->cfg.cbop_blocksize && riscv_cpu_is_vendor(obj)) {
        cpu_set_prop_err(cpu, name, errp);
        error_append_hint(errp, "Current '%s' val: %u\n",
                          name, cpu->cfg.cbop_blocksize);
        return;
    }

    cpu_option_add_user_setting(name, value);
    cpu->cfg.cbop_blocksize = value;
}

static void prop_cbop_blksize_get(Object *obj, Visitor *v, const char *name,
                                  void *opaque, Error **errp)
{
    uint16_t value = RISCV_CPU(obj)->cfg.cbop_blocksize;

    visit_type_uint16(v, name, &value, errp);
}

static const PropertyInfo prop_cbop_blksize = {
    .name = "cbop_blocksize",
    .get = prop_cbop_blksize_get,
    .set = prop_cbop_blksize_set,
};
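/*
 * Illustrative note (sketch, not from the original source): the
 * cbom/cbop/cboz block-size properties describe the cache-block sizes
 * reported for the Zicbom, Zicbop and Zicboz extensions. For instance,
 * a hypothetical "-cpu rv64,zicboz=true,cboz_blocksize=64" keeps a
 * 64-byte block, while vendor CPUs refuse a value that differs from
 * their fixed configuration (see the riscv_cpu_is_vendor() checks).
 */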
static void prop_cboz_blksize_set(Object *obj, Visitor *v, const char *name,
                                  void *opaque, Error **errp)
{
    RISCVCPU *cpu = RISCV_CPU(obj);
    uint16_t value;

    if (!visit_type_uint16(v, name, &value, errp)) {
        return;
    }

    if (value != cpu->cfg.cboz_blocksize && riscv_cpu_is_vendor(obj)) {
        cpu_set_prop_err(cpu, name, errp);
        error_append_hint(errp, "Current '%s' val: %u\n",
                          name, cpu->cfg.cboz_blocksize);
        return;
    }

    cpu_option_add_user_setting(name, value);
    cpu->cfg.cboz_blocksize = value;
}

static void prop_cboz_blksize_get(Object *obj, Visitor *v, const char *name,
                                  void *opaque, Error **errp)
{
    uint16_t value = RISCV_CPU(obj)->cfg.cboz_blocksize;

    visit_type_uint16(v, name, &value, errp);
}

static const PropertyInfo prop_cboz_blksize = {
    .name = "cboz_blocksize",
    .get = prop_cboz_blksize_get,
    .set = prop_cboz_blksize_set,
};

static void prop_mvendorid_set(Object *obj, Visitor *v, const char *name,
                               void *opaque, Error **errp)
{
    bool dynamic_cpu = riscv_cpu_is_dynamic(obj);
    RISCVCPU *cpu = RISCV_CPU(obj);
    uint32_t prev_val = cpu->cfg.mvendorid;
    uint32_t value;

    if (!visit_type_uint32(v, name, &value, errp)) {
        return;
    }

    if (!dynamic_cpu && prev_val != value) {
        error_setg(errp, "Unable to change %s mvendorid (0x%x)",
                   object_get_typename(obj), prev_val);
        return;
    }

    cpu->cfg.mvendorid = value;
}

static void prop_mvendorid_get(Object *obj, Visitor *v, const char *name,
                               void *opaque, Error **errp)
{
    uint32_t value = RISCV_CPU(obj)->cfg.mvendorid;

    visit_type_uint32(v, name, &value, errp);
}

static const PropertyInfo prop_mvendorid = {
    .name = "mvendorid",
    .get = prop_mvendorid_get,
    .set = prop_mvendorid_set,
};

static void prop_mimpid_set(Object *obj, Visitor *v, const char *name,
                            void *opaque, Error **errp)
{
    bool dynamic_cpu = riscv_cpu_is_dynamic(obj);
    RISCVCPU *cpu = RISCV_CPU(obj);
    uint64_t prev_val = cpu->cfg.mimpid;
    uint64_t value;

    if (!visit_type_uint64(v, name, &value, errp)) {
        return;
    }

    if (!dynamic_cpu && prev_val != value) {
        error_setg(errp, "Unable to change %s mimpid (0x%" PRIx64 ")",
                   object_get_typename(obj), prev_val);
        return;
    }

    cpu->cfg.mimpid = value;
}

static void prop_mimpid_get(Object *obj, Visitor *v, const char *name,
                            void *opaque, Error **errp)
{
    uint64_t value = RISCV_CPU(obj)->cfg.mimpid;

    visit_type_uint64(v, name, &value, errp);
}

static const PropertyInfo prop_mimpid = {
    .name = "mimpid",
    .get = prop_mimpid_get,
    .set = prop_mimpid_set,
};

static void prop_marchid_set(Object *obj, Visitor *v, const char *name,
                             void *opaque, Error **errp)
{
    bool dynamic_cpu = riscv_cpu_is_dynamic(obj);
    RISCVCPU *cpu = RISCV_CPU(obj);
    uint64_t prev_val = cpu->cfg.marchid;
    uint64_t value, invalid_val;
    uint32_t mxlen = 0;

    if (!visit_type_uint64(v, name, &value, errp)) {
        return;
    }

    if (!dynamic_cpu && prev_val != value) {
        error_setg(errp, "Unable to change %s marchid (0x%" PRIx64 ")",
                   object_get_typename(obj), prev_val);
        return;
    }

    switch (riscv_cpu_mxl(&cpu->env)) {
    case MXL_RV32:
        mxlen = 32;
        break;
    case MXL_RV64:
    case MXL_RV128:
        mxlen = 64;
        break;
    default:
        g_assert_not_reached();
    }

    invalid_val = 1ULL << (mxlen - 1);

    if (value == invalid_val) {
        error_setg(errp, "Unable to set marchid with MSB (%u) bit set "
                         "and the remaining bits zero", mxlen);
        return;
    }

    cpu->cfg.marchid = value;
}
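/*
 * Illustrative note (not part of the original code): the check above
 * rejects a marchid whose only set bit is the MSB. For an RV64 CPU,
 * mxlen is 64 and invalid_val is 1ULL << 63, so a hypothetical
 * "-cpu rv64,marchid=0x8000000000000000" fails, while values with any
 * additional lower bit set are accepted.
 */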
static void prop_marchid_get(Object *obj, Visitor *v, const char *name,
                             void *opaque, Error **errp)
{
    uint64_t value = RISCV_CPU(obj)->cfg.marchid;

    visit_type_uint64(v, name, &value, errp);
}

static const PropertyInfo prop_marchid = {
    .name = "marchid",
    .get = prop_marchid_get,
    .set = prop_marchid_set,
};

/*
 * RVA22U64 defines some 'named features' that are cache
 * related: Za64rs, Zic64b, Ziccif, Ziccrse, Ziccamoa
 * and Zicclsm. They are always implemented in TCG and
 * don't need to be manually enabled by the profile.
 */
static RISCVCPUProfile RVA22U64 = {
    .parent = NULL,
    .name = "rva22u64",
    .misa_ext = RVI | RVM | RVA | RVF | RVD | RVC | RVU,
    .priv_spec = RISCV_PROFILE_ATTR_UNUSED,
    .satp_mode = RISCV_PROFILE_ATTR_UNUSED,
    .ext_offsets = {
        CPU_CFG_OFFSET(ext_zicsr), CPU_CFG_OFFSET(ext_zihintpause),
        CPU_CFG_OFFSET(ext_zba), CPU_CFG_OFFSET(ext_zbb),
        CPU_CFG_OFFSET(ext_zbs), CPU_CFG_OFFSET(ext_zfhmin),
        CPU_CFG_OFFSET(ext_zkt), CPU_CFG_OFFSET(ext_zicntr),
        CPU_CFG_OFFSET(ext_zihpm), CPU_CFG_OFFSET(ext_zicbom),
        CPU_CFG_OFFSET(ext_zicbop), CPU_CFG_OFFSET(ext_zicboz),

        /* mandatory named features for this profile */
        CPU_CFG_OFFSET(ext_zic64b),

        RISCV_PROFILE_EXT_LIST_END
    }
};

/*
 * As with RVA22U64, RVA22S64 also defines 'named features'.
 *
 * Cache related features that we consider enabled since we don't
 * implement cache: Ssccptr
 *
 * Other named features that we already implement: Sstvecd, Sstvala,
 * Sscounterenw
 *
 * The remaining features/extensions come from RVA22U64.
 */
static RISCVCPUProfile RVA22S64 = {
    .parent = &RVA22U64,
    .name = "rva22s64",
    .misa_ext = RVS,
    .priv_spec = PRIV_VERSION_1_12_0,
    .satp_mode = VM_1_10_SV39,
    .ext_offsets = {
        /* rva22s64 exts */
        CPU_CFG_OFFSET(ext_zifencei), CPU_CFG_OFFSET(ext_svpbmt),
        CPU_CFG_OFFSET(ext_svinval), CPU_CFG_OFFSET(ext_svade),

        RISCV_PROFILE_EXT_LIST_END
    }
};

RISCVCPUProfile *riscv_profiles[] = {
    &RVA22U64,
    &RVA22S64,
    NULL,
};

static RISCVCPUImpliedExtsRule RVA_IMPLIED = {
    .is_misa = true,
    .ext = RVA,
    .implied_multi_exts = {
        CPU_CFG_OFFSET(ext_zalrsc), CPU_CFG_OFFSET(ext_zaamo),

        RISCV_IMPLIED_EXTS_RULE_END
    },
};

static RISCVCPUImpliedExtsRule RVD_IMPLIED = {
    .is_misa = true,
    .ext = RVD,
    .implied_misa_exts = RVF,
    .implied_multi_exts = { RISCV_IMPLIED_EXTS_RULE_END },
};

static RISCVCPUImpliedExtsRule RVF_IMPLIED = {
    .is_misa = true,
    .ext = RVF,
    .implied_multi_exts = {
        CPU_CFG_OFFSET(ext_zicsr),

        RISCV_IMPLIED_EXTS_RULE_END
    },
};

static RISCVCPUImpliedExtsRule RVM_IMPLIED = {
    .is_misa = true,
    .ext = RVM,
    .implied_multi_exts = {
        CPU_CFG_OFFSET(ext_zmmul),

        RISCV_IMPLIED_EXTS_RULE_END
    },
};

static RISCVCPUImpliedExtsRule RVV_IMPLIED = {
    .is_misa = true,
    .ext = RVV,
    .implied_multi_exts = {
        CPU_CFG_OFFSET(ext_zve64d),

        RISCV_IMPLIED_EXTS_RULE_END
    },
};

static RISCVCPUImpliedExtsRule ZCB_IMPLIED = {
    .ext = CPU_CFG_OFFSET(ext_zcb),
    .implied_multi_exts = {
        CPU_CFG_OFFSET(ext_zca),

        RISCV_IMPLIED_EXTS_RULE_END
    },
};
static RISCVCPUImpliedExtsRule ZCD_IMPLIED = {
    .ext = CPU_CFG_OFFSET(ext_zcd),
    .implied_misa_exts = RVD,
    .implied_multi_exts = {
        CPU_CFG_OFFSET(ext_zca),

        RISCV_IMPLIED_EXTS_RULE_END
    },
};

static RISCVCPUImpliedExtsRule ZCE_IMPLIED = {
    .ext = CPU_CFG_OFFSET(ext_zce),
    .implied_multi_exts = {
        CPU_CFG_OFFSET(ext_zcb), CPU_CFG_OFFSET(ext_zcmp),
        CPU_CFG_OFFSET(ext_zcmt),

        RISCV_IMPLIED_EXTS_RULE_END
    },
};

static RISCVCPUImpliedExtsRule ZCF_IMPLIED = {
    .ext = CPU_CFG_OFFSET(ext_zcf),
    .implied_misa_exts = RVF,
    .implied_multi_exts = {
        CPU_CFG_OFFSET(ext_zca),

        RISCV_IMPLIED_EXTS_RULE_END
    },
};

static RISCVCPUImpliedExtsRule ZCMP_IMPLIED = {
    .ext = CPU_CFG_OFFSET(ext_zcmp),
    .implied_multi_exts = {
        CPU_CFG_OFFSET(ext_zca),

        RISCV_IMPLIED_EXTS_RULE_END
    },
};

static RISCVCPUImpliedExtsRule ZCMT_IMPLIED = {
    .ext = CPU_CFG_OFFSET(ext_zcmt),
    .implied_multi_exts = {
        CPU_CFG_OFFSET(ext_zca), CPU_CFG_OFFSET(ext_zicsr),

        RISCV_IMPLIED_EXTS_RULE_END
    },
};

static RISCVCPUImpliedExtsRule ZDINX_IMPLIED = {
    .ext = CPU_CFG_OFFSET(ext_zdinx),
    .implied_multi_exts = {
        CPU_CFG_OFFSET(ext_zfinx),

        RISCV_IMPLIED_EXTS_RULE_END
    },
};

static RISCVCPUImpliedExtsRule ZFA_IMPLIED = {
    .ext = CPU_CFG_OFFSET(ext_zfa),
    .implied_misa_exts = RVF,
    .implied_multi_exts = { RISCV_IMPLIED_EXTS_RULE_END },
};

static RISCVCPUImpliedExtsRule ZFBFMIN_IMPLIED = {
    .ext = CPU_CFG_OFFSET(ext_zfbfmin),
    .implied_misa_exts = RVF,
    .implied_multi_exts = { RISCV_IMPLIED_EXTS_RULE_END },
};

static RISCVCPUImpliedExtsRule ZFH_IMPLIED = {
    .ext = CPU_CFG_OFFSET(ext_zfh),
    .implied_multi_exts = {
        CPU_CFG_OFFSET(ext_zfhmin),

        RISCV_IMPLIED_EXTS_RULE_END
    },
};

static RISCVCPUImpliedExtsRule ZFHMIN_IMPLIED = {
    .ext = CPU_CFG_OFFSET(ext_zfhmin),
    .implied_misa_exts = RVF,
    .implied_multi_exts = { RISCV_IMPLIED_EXTS_RULE_END },
};

static RISCVCPUImpliedExtsRule ZFINX_IMPLIED = {
    .ext = CPU_CFG_OFFSET(ext_zfinx),
    .implied_multi_exts = {
        CPU_CFG_OFFSET(ext_zicsr),

        RISCV_IMPLIED_EXTS_RULE_END
    },
};

static RISCVCPUImpliedExtsRule ZHINX_IMPLIED = {
    .ext = CPU_CFG_OFFSET(ext_zhinx),
    .implied_multi_exts = {
        CPU_CFG_OFFSET(ext_zhinxmin),

        RISCV_IMPLIED_EXTS_RULE_END
    },
};

static RISCVCPUImpliedExtsRule ZHINXMIN_IMPLIED = {
    .ext = CPU_CFG_OFFSET(ext_zhinxmin),
    .implied_multi_exts = {
        CPU_CFG_OFFSET(ext_zfinx),

        RISCV_IMPLIED_EXTS_RULE_END
    },
};

static RISCVCPUImpliedExtsRule ZICNTR_IMPLIED = {
    .ext = CPU_CFG_OFFSET(ext_zicntr),
    .implied_multi_exts = {
        CPU_CFG_OFFSET(ext_zicsr),

        RISCV_IMPLIED_EXTS_RULE_END
    },
};

static RISCVCPUImpliedExtsRule ZIHPM_IMPLIED = {
    .ext = CPU_CFG_OFFSET(ext_zihpm),
    .implied_multi_exts = {
        CPU_CFG_OFFSET(ext_zicsr),

        RISCV_IMPLIED_EXTS_RULE_END
    },
};

static RISCVCPUImpliedExtsRule ZK_IMPLIED = {
    .ext = CPU_CFG_OFFSET(ext_zk),
    .implied_multi_exts = {
        CPU_CFG_OFFSET(ext_zkn), CPU_CFG_OFFSET(ext_zkr),
        CPU_CFG_OFFSET(ext_zkt),

        RISCV_IMPLIED_EXTS_RULE_END
    },
};
static RISCVCPUImpliedExtsRule ZKN_IMPLIED = {
    .ext = CPU_CFG_OFFSET(ext_zkn),
    .implied_multi_exts = {
        CPU_CFG_OFFSET(ext_zbkb), CPU_CFG_OFFSET(ext_zbkc),
        CPU_CFG_OFFSET(ext_zbkx), CPU_CFG_OFFSET(ext_zkne),
        CPU_CFG_OFFSET(ext_zknd), CPU_CFG_OFFSET(ext_zknh),

        RISCV_IMPLIED_EXTS_RULE_END
    },
};

static RISCVCPUImpliedExtsRule ZKS_IMPLIED = {
    .ext = CPU_CFG_OFFSET(ext_zks),
    .implied_multi_exts = {
        CPU_CFG_OFFSET(ext_zbkb), CPU_CFG_OFFSET(ext_zbkc),
        CPU_CFG_OFFSET(ext_zbkx), CPU_CFG_OFFSET(ext_zksed),
        CPU_CFG_OFFSET(ext_zksh),

        RISCV_IMPLIED_EXTS_RULE_END
    },
};

static RISCVCPUImpliedExtsRule ZVBB_IMPLIED = {
    .ext = CPU_CFG_OFFSET(ext_zvbb),
    .implied_multi_exts = {
        CPU_CFG_OFFSET(ext_zvkb),

        RISCV_IMPLIED_EXTS_RULE_END
    },
};

static RISCVCPUImpliedExtsRule ZVE32F_IMPLIED = {
    .ext = CPU_CFG_OFFSET(ext_zve32f),
    .implied_misa_exts = RVF,
    .implied_multi_exts = {
        CPU_CFG_OFFSET(ext_zve32x),

        RISCV_IMPLIED_EXTS_RULE_END
    },
};

static RISCVCPUImpliedExtsRule ZVE32X_IMPLIED = {
    .ext = CPU_CFG_OFFSET(ext_zve32x),
    .implied_multi_exts = {
        CPU_CFG_OFFSET(ext_zicsr),

        RISCV_IMPLIED_EXTS_RULE_END
    },
};

static RISCVCPUImpliedExtsRule ZVE64D_IMPLIED = {
    .ext = CPU_CFG_OFFSET(ext_zve64d),
    .implied_misa_exts = RVD,
    .implied_multi_exts = {
        CPU_CFG_OFFSET(ext_zve64f),

        RISCV_IMPLIED_EXTS_RULE_END
    },
};

static RISCVCPUImpliedExtsRule ZVE64F_IMPLIED = {
    .ext = CPU_CFG_OFFSET(ext_zve64f),
    .implied_misa_exts = RVF,
    .implied_multi_exts = {
        CPU_CFG_OFFSET(ext_zve32f), CPU_CFG_OFFSET(ext_zve64x),

        RISCV_IMPLIED_EXTS_RULE_END
    },
};

static RISCVCPUImpliedExtsRule ZVE64X_IMPLIED = {
    .ext = CPU_CFG_OFFSET(ext_zve64x),
    .implied_multi_exts = {
        CPU_CFG_OFFSET(ext_zve32x),

        RISCV_IMPLIED_EXTS_RULE_END
    },
};

static RISCVCPUImpliedExtsRule ZVFBFMIN_IMPLIED = {
    .ext = CPU_CFG_OFFSET(ext_zvfbfmin),
    .implied_multi_exts = {
        CPU_CFG_OFFSET(ext_zve32f),

        RISCV_IMPLIED_EXTS_RULE_END
    },
};

static RISCVCPUImpliedExtsRule ZVFBFWMA_IMPLIED = {
    .ext = CPU_CFG_OFFSET(ext_zvfbfwma),
    .implied_multi_exts = {
        CPU_CFG_OFFSET(ext_zvfbfmin), CPU_CFG_OFFSET(ext_zfbfmin),

        RISCV_IMPLIED_EXTS_RULE_END
    },
};

static RISCVCPUImpliedExtsRule ZVFH_IMPLIED = {
    .ext = CPU_CFG_OFFSET(ext_zvfh),
    .implied_multi_exts = {
        CPU_CFG_OFFSET(ext_zvfhmin), CPU_CFG_OFFSET(ext_zfhmin),

        RISCV_IMPLIED_EXTS_RULE_END
    },
};

static RISCVCPUImpliedExtsRule ZVFHMIN_IMPLIED = {
    .ext = CPU_CFG_OFFSET(ext_zvfhmin),
    .implied_multi_exts = {
        CPU_CFG_OFFSET(ext_zve32f),

        RISCV_IMPLIED_EXTS_RULE_END
    },
};

static RISCVCPUImpliedExtsRule ZVKN_IMPLIED = {
    .ext = CPU_CFG_OFFSET(ext_zvkn),
    .implied_multi_exts = {
        CPU_CFG_OFFSET(ext_zvkned), CPU_CFG_OFFSET(ext_zvknhb),
        CPU_CFG_OFFSET(ext_zvkb), CPU_CFG_OFFSET(ext_zvkt),

        RISCV_IMPLIED_EXTS_RULE_END
    },
};

static RISCVCPUImpliedExtsRule ZVKNC_IMPLIED = {
    .ext = CPU_CFG_OFFSET(ext_zvknc),
    .implied_multi_exts = {
        CPU_CFG_OFFSET(ext_zvkn), CPU_CFG_OFFSET(ext_zvbc),

        RISCV_IMPLIED_EXTS_RULE_END
    },
};
static RISCVCPUImpliedExtsRule ZVKNG_IMPLIED = {
    .ext = CPU_CFG_OFFSET(ext_zvkng),
    .implied_multi_exts = {
        CPU_CFG_OFFSET(ext_zvkn), CPU_CFG_OFFSET(ext_zvkg),

        RISCV_IMPLIED_EXTS_RULE_END
    },
};

static RISCVCPUImpliedExtsRule ZVKNHB_IMPLIED = {
    .ext = CPU_CFG_OFFSET(ext_zvknhb),
    .implied_multi_exts = {
        CPU_CFG_OFFSET(ext_zve64x),

        RISCV_IMPLIED_EXTS_RULE_END
    },
};

static RISCVCPUImpliedExtsRule ZVKS_IMPLIED = {
    .ext = CPU_CFG_OFFSET(ext_zvks),
    .implied_multi_exts = {
        CPU_CFG_OFFSET(ext_zvksed), CPU_CFG_OFFSET(ext_zvksh),
        CPU_CFG_OFFSET(ext_zvkb), CPU_CFG_OFFSET(ext_zvkt),

        RISCV_IMPLIED_EXTS_RULE_END
    },
};

static RISCVCPUImpliedExtsRule ZVKSC_IMPLIED = {
    .ext = CPU_CFG_OFFSET(ext_zvksc),
    .implied_multi_exts = {
        CPU_CFG_OFFSET(ext_zvks), CPU_CFG_OFFSET(ext_zvbc),

        RISCV_IMPLIED_EXTS_RULE_END
    },
};

static RISCVCPUImpliedExtsRule ZVKSG_IMPLIED = {
    .ext = CPU_CFG_OFFSET(ext_zvksg),
    .implied_multi_exts = {
        CPU_CFG_OFFSET(ext_zvks), CPU_CFG_OFFSET(ext_zvkg),

        RISCV_IMPLIED_EXTS_RULE_END
    },
};

static RISCVCPUImpliedExtsRule SSCFG_IMPLIED = {
    .ext = CPU_CFG_OFFSET(ext_ssccfg),
    .implied_multi_exts = {
        CPU_CFG_OFFSET(ext_smcsrind), CPU_CFG_OFFSET(ext_sscsrind),
        CPU_CFG_OFFSET(ext_smcdeleg),

        RISCV_IMPLIED_EXTS_RULE_END
    },
};

static RISCVCPUImpliedExtsRule SUPM_IMPLIED = {
    .ext = CPU_CFG_OFFSET(ext_supm),
    .implied_multi_exts = {
        CPU_CFG_OFFSET(ext_ssnpm), CPU_CFG_OFFSET(ext_smnpm),

        RISCV_IMPLIED_EXTS_RULE_END
    },
};

static RISCVCPUImpliedExtsRule SSPM_IMPLIED = {
    .ext = CPU_CFG_OFFSET(ext_sspm),
    .implied_multi_exts = {
        CPU_CFG_OFFSET(ext_smnpm),

        RISCV_IMPLIED_EXTS_RULE_END
    },
};

RISCVCPUImpliedExtsRule *riscv_misa_ext_implied_rules[] = {
    &RVA_IMPLIED, &RVD_IMPLIED, &RVF_IMPLIED,
    &RVM_IMPLIED, &RVV_IMPLIED, NULL
};

RISCVCPUImpliedExtsRule *riscv_multi_ext_implied_rules[] = {
    &ZCB_IMPLIED, &ZCD_IMPLIED, &ZCE_IMPLIED,
    &ZCF_IMPLIED, &ZCMP_IMPLIED, &ZCMT_IMPLIED,
    &ZDINX_IMPLIED, &ZFA_IMPLIED, &ZFBFMIN_IMPLIED,
    &ZFH_IMPLIED, &ZFHMIN_IMPLIED, &ZFINX_IMPLIED,
    &ZHINX_IMPLIED, &ZHINXMIN_IMPLIED, &ZICNTR_IMPLIED,
    &ZIHPM_IMPLIED, &ZK_IMPLIED, &ZKN_IMPLIED,
    &ZKS_IMPLIED, &ZVBB_IMPLIED, &ZVE32F_IMPLIED,
    &ZVE32X_IMPLIED, &ZVE64D_IMPLIED, &ZVE64F_IMPLIED,
    &ZVE64X_IMPLIED, &ZVFBFMIN_IMPLIED, &ZVFBFWMA_IMPLIED,
    &ZVFH_IMPLIED, &ZVFHMIN_IMPLIED, &ZVKN_IMPLIED,
    &ZVKNC_IMPLIED, &ZVKNG_IMPLIED, &ZVKNHB_IMPLIED,
    &ZVKS_IMPLIED, &ZVKSC_IMPLIED, &ZVKSG_IMPLIED, &SSCFG_IMPLIED,
    &SUPM_IMPLIED, &SSPM_IMPLIED,
    NULL
};
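/*
 * Illustrative example (not part of the original code): the implied
 * extension handling elsewhere in the target code walks these tables
 * transitively, so enabling a single extension can pull in a chain of
 * others. For instance, a hypothetical "-cpu rv64,zce=true" would, per
 * ZCE_IMPLIED, enable Zcb, Zcmp and Zcmt, and ZCB_IMPLIED/ZCMT_IMPLIED
 * would in turn enable Zca and Zicsr; likewise Zcd implies both the D
 * misa bit and Zca.
 */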
"cboz_blocksize", .info = &prop_cboz_blksize}, 2840 2841 {.name = "mvendorid", .info = &prop_mvendorid}, 2842 {.name = "mimpid", .info = &prop_mimpid}, 2843 {.name = "marchid", .info = &prop_marchid}, 2844 2845 #ifndef CONFIG_USER_ONLY 2846 DEFINE_PROP_UINT64("resetvec", RISCVCPU, env.resetvec, DEFAULT_RSTVEC), 2847 DEFINE_PROP_UINT64("rnmi-interrupt-vector", RISCVCPU, env.rnmi_irqvec, 2848 DEFAULT_RNMI_IRQVEC), 2849 DEFINE_PROP_UINT64("rnmi-exception-vector", RISCVCPU, env.rnmi_excpvec, 2850 DEFAULT_RNMI_EXCPVEC), 2851 #endif 2852 2853 DEFINE_PROP_BOOL("short-isa-string", RISCVCPU, cfg.short_isa_string, false), 2854 2855 DEFINE_PROP_BOOL("rvv_ta_all_1s", RISCVCPU, cfg.rvv_ta_all_1s, false), 2856 DEFINE_PROP_BOOL("rvv_ma_all_1s", RISCVCPU, cfg.rvv_ma_all_1s, false), 2857 DEFINE_PROP_BOOL("rvv_vl_half_avl", RISCVCPU, cfg.rvv_vl_half_avl, false), 2858 2859 /* 2860 * write_misa() is marked as experimental for now so mark 2861 * it with -x and default to 'false'. 2862 */ 2863 DEFINE_PROP_BOOL("x-misa-w", RISCVCPU, cfg.misa_w, false), 2864 }; 2865 2866 #if defined(TARGET_RISCV64) 2867 static void rva22u64_profile_cpu_init(Object *obj) 2868 { 2869 rv64i_bare_cpu_init(obj); 2870 2871 RVA22U64.enabled = true; 2872 } 2873 2874 static void rva22s64_profile_cpu_init(Object *obj) 2875 { 2876 rv64i_bare_cpu_init(obj); 2877 2878 RVA22S64.enabled = true; 2879 } 2880 #endif 2881 2882 static const gchar *riscv_gdb_arch_name(CPUState *cs) 2883 { 2884 RISCVCPU *cpu = RISCV_CPU(cs); 2885 CPURISCVState *env = &cpu->env; 2886 2887 switch (riscv_cpu_mxl(env)) { 2888 case MXL_RV32: 2889 return "riscv:rv32"; 2890 case MXL_RV64: 2891 case MXL_RV128: 2892 return "riscv:rv64"; 2893 default: 2894 g_assert_not_reached(); 2895 } 2896 } 2897 2898 #ifndef CONFIG_USER_ONLY 2899 static int64_t riscv_get_arch_id(CPUState *cs) 2900 { 2901 RISCVCPU *cpu = RISCV_CPU(cs); 2902 2903 return cpu->env.mhartid; 2904 } 2905 2906 #include "hw/core/sysemu-cpu-ops.h" 2907 2908 static const struct SysemuCPUOps riscv_sysemu_ops = { 2909 .get_phys_page_debug = riscv_cpu_get_phys_page_debug, 2910 .write_elf64_note = riscv_cpu_write_elf64_note, 2911 .write_elf32_note = riscv_cpu_write_elf32_note, 2912 .legacy_vmsd = &vmstate_riscv_cpu, 2913 }; 2914 #endif 2915 2916 static void riscv_cpu_common_class_init(ObjectClass *c, void *data) 2917 { 2918 RISCVCPUClass *mcc = RISCV_CPU_CLASS(c); 2919 CPUClass *cc = CPU_CLASS(c); 2920 DeviceClass *dc = DEVICE_CLASS(c); 2921 ResettableClass *rc = RESETTABLE_CLASS(c); 2922 2923 device_class_set_parent_realize(dc, riscv_cpu_realize, 2924 &mcc->parent_realize); 2925 2926 resettable_class_set_parent_phases(rc, NULL, riscv_cpu_reset_hold, NULL, 2927 &mcc->parent_phases); 2928 2929 cc->class_by_name = riscv_cpu_class_by_name; 2930 cc->has_work = riscv_cpu_has_work; 2931 cc->mmu_index = riscv_cpu_mmu_index; 2932 cc->dump_state = riscv_cpu_dump_state; 2933 cc->set_pc = riscv_cpu_set_pc; 2934 cc->get_pc = riscv_cpu_get_pc; 2935 cc->gdb_read_register = riscv_cpu_gdb_read_register; 2936 cc->gdb_write_register = riscv_cpu_gdb_write_register; 2937 cc->gdb_stop_before_watchpoint = true; 2938 cc->disas_set_info = riscv_cpu_disas_set_info; 2939 #ifndef CONFIG_USER_ONLY 2940 cc->sysemu_ops = &riscv_sysemu_ops; 2941 cc->get_arch_id = riscv_get_arch_id; 2942 #endif 2943 cc->gdb_arch_name = riscv_gdb_arch_name; 2944 2945 device_class_set_props(dc, riscv_cpu_properties); 2946 } 2947 2948 static void riscv_cpu_class_init(ObjectClass *c, void *data) 2949 { 2950 RISCVCPUClass *mcc = RISCV_CPU_CLASS(c); 2951 2952 mcc->misa_mxl_max 
static void riscv_cpu_class_init(ObjectClass *c, void *data)
{
    RISCVCPUClass *mcc = RISCV_CPU_CLASS(c);

    mcc->misa_mxl_max = (uint32_t)(uintptr_t)data;
    riscv_cpu_validate_misa_mxl(mcc);
}

static void riscv_isa_string_ext(RISCVCPU *cpu, char **isa_str,
                                 int max_str_len)
{
    const RISCVIsaExtData *edata;
    char *old = *isa_str;
    char *new = *isa_str;

    for (edata = isa_edata_arr; edata && edata->name; edata++) {
        if (isa_ext_is_enabled(cpu, edata->ext_enable_offset)) {
            new = g_strconcat(old, "_", edata->name, NULL);
            g_free(old);
            old = new;
        }
    }

    *isa_str = new;
}

char *riscv_isa_string(RISCVCPU *cpu)
{
    RISCVCPUClass *mcc = RISCV_CPU_GET_CLASS(cpu);
    int i;
    const size_t maxlen = sizeof("rv128") + sizeof(riscv_single_letter_exts);
    char *isa_str = g_new(char, maxlen);
    int xlen = riscv_cpu_max_xlen(mcc);
    char *p = isa_str + snprintf(isa_str, maxlen, "rv%d", xlen);

    for (i = 0; i < sizeof(riscv_single_letter_exts) - 1; i++) {
        if (cpu->env.misa_ext & RV(riscv_single_letter_exts[i])) {
            *p++ = qemu_tolower(riscv_single_letter_exts[i]);
        }
    }
    *p = '\0';
    if (!cpu->cfg.short_isa_string) {
        riscv_isa_string_ext(cpu, &isa_str, maxlen);
    }
    return isa_str;
}

#ifndef CONFIG_USER_ONLY
static char **riscv_isa_extensions_list(RISCVCPU *cpu, int *count)
{
    int maxlen = ARRAY_SIZE(riscv_single_letter_exts) + ARRAY_SIZE(isa_edata_arr);
    char **extensions = g_new(char *, maxlen);

    for (int i = 0; i < sizeof(riscv_single_letter_exts) - 1; i++) {
        if (cpu->env.misa_ext & RV(riscv_single_letter_exts[i])) {
            extensions[*count] = g_new(char, 2);
            snprintf(extensions[*count], 2, "%c",
                     qemu_tolower(riscv_single_letter_exts[i]));
            (*count)++;
        }
    }

    for (const RISCVIsaExtData *edata = isa_edata_arr; edata->name; edata++) {
        if (isa_ext_is_enabled(cpu, edata->ext_enable_offset)) {
            extensions[*count] = g_strdup(edata->name);
            (*count)++;
        }
    }

    return extensions;
}

void riscv_isa_write_fdt(RISCVCPU *cpu, void *fdt, char *nodename)
{
    RISCVCPUClass *mcc = RISCV_CPU_GET_CLASS(cpu);
    const size_t maxlen = sizeof("rv128i");
    g_autofree char *isa_base = g_new(char, maxlen);
    g_autofree char *riscv_isa;
    char **isa_extensions;
    int count = 0;
    int xlen = riscv_cpu_max_xlen(mcc);

    riscv_isa = riscv_isa_string(cpu);
    qemu_fdt_setprop_string(fdt, nodename, "riscv,isa", riscv_isa);

    snprintf(isa_base, maxlen, "rv%di", xlen);
    qemu_fdt_setprop_string(fdt, nodename, "riscv,isa-base", isa_base);

    isa_extensions = riscv_isa_extensions_list(cpu, &count);
    qemu_fdt_setprop_string_array(fdt, nodename, "riscv,isa-extensions",
                                  isa_extensions, count);

    for (int i = 0; i < count; i++) {
        g_free(isa_extensions[i]);
    }

    g_free(isa_extensions);
}
#endif

#define DEFINE_DYNAMIC_CPU(type_name, misa_mxl_max, initfn) \
    { \
        .name = (type_name), \
        .parent = TYPE_RISCV_DYNAMIC_CPU, \
        .instance_init = (initfn), \
        .class_init = riscv_cpu_class_init, \
        .class_data = (void *)(misa_mxl_max) \
    }

#define DEFINE_VENDOR_CPU(type_name, misa_mxl_max, initfn) \
    { \
        .name = (type_name), \
        .parent = TYPE_RISCV_VENDOR_CPU, \
        .instance_init = (initfn), \
        .class_init = riscv_cpu_class_init, \
        .class_data = (void *)(misa_mxl_max) \
    }
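/*
 * Illustrative expansion (sketch, not part of the original code):
 * DEFINE_DYNAMIC_CPU(TYPE_RISCV_CPU_MAX, MXL_RV64, riscv_max_cpu_init)
 * produces a TypeInfo entry equivalent to:
 *
 *   {
 *       .name = TYPE_RISCV_CPU_MAX,
 *       .parent = TYPE_RISCV_DYNAMIC_CPU,
 *       .instance_init = riscv_max_cpu_init,
 *       .class_init = riscv_cpu_class_init,
 *       .class_data = (void *)(MXL_RV64)
 *   }
 *
 * The bare and profile variants below differ only in the .parent type
 * they use.
 */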
#define DEFINE_BARE_CPU(type_name, misa_mxl_max, initfn) \
    { \
        .name = (type_name), \
        .parent = TYPE_RISCV_BARE_CPU, \
        .instance_init = (initfn), \
        .class_init = riscv_cpu_class_init, \
        .class_data = (void *)(misa_mxl_max) \
    }

#define DEFINE_PROFILE_CPU(type_name, misa_mxl_max, initfn) \
    { \
        .name = (type_name), \
        .parent = TYPE_RISCV_BARE_CPU, \
        .instance_init = (initfn), \
        .class_init = riscv_cpu_class_init, \
        .class_data = (void *)(misa_mxl_max) \
    }

static const TypeInfo riscv_cpu_type_infos[] = {
    {
        .name = TYPE_RISCV_CPU,
        .parent = TYPE_CPU,
        .instance_size = sizeof(RISCVCPU),
        .instance_align = __alignof(RISCVCPU),
        .instance_init = riscv_cpu_init,
        .instance_post_init = riscv_cpu_post_init,
        .abstract = true,
        .class_size = sizeof(RISCVCPUClass),
        .class_init = riscv_cpu_common_class_init,
    },
    {
        .name = TYPE_RISCV_DYNAMIC_CPU,
        .parent = TYPE_RISCV_CPU,
        .abstract = true,
    },
    {
        .name = TYPE_RISCV_VENDOR_CPU,
        .parent = TYPE_RISCV_CPU,
        .abstract = true,
    },
    {
        .name = TYPE_RISCV_BARE_CPU,
        .parent = TYPE_RISCV_CPU,
        .instance_init = riscv_bare_cpu_init,
        .abstract = true,
    },
#if defined(TARGET_RISCV32)
    DEFINE_DYNAMIC_CPU(TYPE_RISCV_CPU_MAX, MXL_RV32, riscv_max_cpu_init),
#elif defined(TARGET_RISCV64)
    DEFINE_DYNAMIC_CPU(TYPE_RISCV_CPU_MAX, MXL_RV64, riscv_max_cpu_init),
#endif

#if defined(TARGET_RISCV32) || \
    (defined(TARGET_RISCV64) && !defined(CONFIG_USER_ONLY))
    DEFINE_DYNAMIC_CPU(TYPE_RISCV_CPU_BASE32, MXL_RV32, rv32_base_cpu_init),
    DEFINE_VENDOR_CPU(TYPE_RISCV_CPU_IBEX, MXL_RV32, rv32_ibex_cpu_init),
    DEFINE_VENDOR_CPU(TYPE_RISCV_CPU_SIFIVE_E31, MXL_RV32, rv32_sifive_e_cpu_init),
    DEFINE_VENDOR_CPU(TYPE_RISCV_CPU_SIFIVE_E34, MXL_RV32, rv32_imafcu_nommu_cpu_init),
    DEFINE_VENDOR_CPU(TYPE_RISCV_CPU_SIFIVE_U34, MXL_RV32, rv32_sifive_u_cpu_init),
    DEFINE_BARE_CPU(TYPE_RISCV_CPU_RV32I, MXL_RV32, rv32i_bare_cpu_init),
    DEFINE_BARE_CPU(TYPE_RISCV_CPU_RV32E, MXL_RV32, rv32e_bare_cpu_init),
#endif

#if (defined(TARGET_RISCV64) && !defined(CONFIG_USER_ONLY))
    DEFINE_DYNAMIC_CPU(TYPE_RISCV_CPU_MAX32, MXL_RV32, riscv_max_cpu_init),
#endif

#if defined(TARGET_RISCV64)
    DEFINE_DYNAMIC_CPU(TYPE_RISCV_CPU_BASE64, MXL_RV64, rv64_base_cpu_init),
    DEFINE_VENDOR_CPU(TYPE_RISCV_CPU_SIFIVE_E51, MXL_RV64, rv64_sifive_e_cpu_init),
    DEFINE_VENDOR_CPU(TYPE_RISCV_CPU_SIFIVE_U54, MXL_RV64, rv64_sifive_u_cpu_init),
    DEFINE_VENDOR_CPU(TYPE_RISCV_CPU_SHAKTI_C, MXL_RV64, rv64_sifive_u_cpu_init),
    DEFINE_VENDOR_CPU(TYPE_RISCV_CPU_THEAD_C906, MXL_RV64, rv64_thead_c906_cpu_init),
    DEFINE_VENDOR_CPU(TYPE_RISCV_CPU_TT_ASCALON, MXL_RV64, rv64_tt_ascalon_cpu_init),
    DEFINE_VENDOR_CPU(TYPE_RISCV_CPU_VEYRON_V1, MXL_RV64, rv64_veyron_v1_cpu_init),
    DEFINE_VENDOR_CPU(TYPE_RISCV_CPU_XIANGSHAN_NANHU,
                      MXL_RV64, rv64_xiangshan_nanhu_cpu_init),
#ifdef CONFIG_TCG
    DEFINE_DYNAMIC_CPU(TYPE_RISCV_CPU_BASE128, MXL_RV128, rv128_base_cpu_init),
#endif /* CONFIG_TCG */
    DEFINE_BARE_CPU(TYPE_RISCV_CPU_RV64I, MXL_RV64, rv64i_bare_cpu_init),
    DEFINE_BARE_CPU(TYPE_RISCV_CPU_RV64E, MXL_RV64, rv64e_bare_cpu_init),
    DEFINE_PROFILE_CPU(TYPE_RISCV_CPU_RVA22U64, MXL_RV64, rva22u64_profile_cpu_init),
    DEFINE_PROFILE_CPU(TYPE_RISCV_CPU_RVA22S64, MXL_RV64, rva22s64_profile_cpu_init),
#endif /* TARGET_RISCV64 */
};
DEFINE_TYPES(riscv_cpu_type_infos)