/*
 * QEMU RISC-V CPU
 *
 * Copyright (c) 2016-2017 Sagar Karandikar, sagark@eecs.berkeley.edu
 * Copyright (c) 2017-2018 SiFive, Inc.
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms and conditions of the GNU General Public License,
 * version 2 or later, as published by the Free Software Foundation.
 *
 * This program is distributed in the hope it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
 * more details.
 *
 * You should have received a copy of the GNU General Public License along with
 * this program. If not, see <http://www.gnu.org/licenses/>.
 */

#include "qemu/osdep.h"
#include "qemu/qemu-print.h"
#include "qemu/ctype.h"
#include "qemu/log.h"
#include "cpu.h"
#include "cpu_vendorid.h"
#include "internals.h"
#include "exec/exec-all.h"
#include "qapi/error.h"
#include "qapi/visitor.h"
#include "qemu/error-report.h"
#include "hw/qdev-properties.h"
#include "hw/core/qdev-prop-internal.h"
#include "migration/vmstate.h"
#include "fpu/softfloat-helpers.h"
#include "system/device_tree.h"
#include "system/kvm.h"
#include "system/tcg.h"
#include "kvm/kvm_riscv.h"
#include "tcg/tcg-cpu.h"
#include "tcg/tcg.h"

/* RISC-V CPU definitions */
static const char riscv_single_letter_exts[] = "IEMAFDQCBPVH";
const uint32_t misa_bits[] = {RVI, RVE, RVM, RVA, RVF, RVD, RVV,
                              RVC, RVS, RVU, RVH, RVG, RVB, 0};

/*
 * From vector_helper.c
 * Note that vector data is stored in host-endian 64-bit chunks,
 * so addressing bytes needs a host-endian fixup.
 */
#if HOST_BIG_ENDIAN
#define BYTE(x)   ((x) ^ 7)
#else
#define BYTE(x)   (x)
#endif

bool riscv_cpu_is_32bit(RISCVCPU *cpu)
{
    return riscv_cpu_mxl(&cpu->env) == MXL_RV32;
}

/* Hash that stores general user set numeric options */
static GHashTable *general_user_opts;

static void cpu_option_add_user_setting(const char *optname, uint32_t value)
{
    g_hash_table_insert(general_user_opts, (gpointer)optname,
                        GUINT_TO_POINTER(value));
}

bool riscv_cpu_option_set(const char *optname)
{
    return g_hash_table_contains(general_user_opts, optname);
}

#define ISA_EXT_DATA_ENTRY(_name, _min_ver, _prop) \
    {#_name, _min_ver, CPU_CFG_OFFSET(_prop)}

/*
 * Here are the ordering rules of extension naming defined by RISC-V
 * specification :
 * 1. All extensions should be separated from other multi-letter extensions
 *    by an underscore.
 * 2. The first letter following the 'Z' conventionally indicates the most
 *    closely related alphabetical extension category, IMAFDQLCBKJTPVH.
 *    If multiple 'Z' extensions are named, they should be ordered first
 *    by category, then alphabetically within a category.
 * 3. Standard supervisor-level extensions (starts with 'S') should be
 *    listed after standard unprivileged extensions. If multiple
 *    supervisor-level extensions are listed, they should be ordered
 *    alphabetically.
 * 4. Non-standard extensions (starts with 'X') must be listed after all
 *    standard extensions. They must be separated from other multi-letter
 *    extensions by an underscore.
 *
 * Single letter extensions are checked in riscv_cpu_validate_misa_priv()
 * instead.
 */
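/*
 * As an illustration of the rules above (example string, not taken from the
 * code), a conforming riscv,isa string could look like
 * "rv64imafdc_zicsr_zifencei_zba_zbb_svinval_xtheadba": single-letter
 * extensions first, then 'Z' extensions grouped by category and sorted
 * alphabetically within it, then 'S' extensions, then 'X' vendor extensions,
 * all multi-letter names separated by underscores.
 */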
const RISCVIsaExtData isa_edata_arr[] = {
    ISA_EXT_DATA_ENTRY(zic64b, PRIV_VERSION_1_12_0, ext_zic64b),
    ISA_EXT_DATA_ENTRY(zicbom, PRIV_VERSION_1_12_0, ext_zicbom),
    ISA_EXT_DATA_ENTRY(zicbop, PRIV_VERSION_1_12_0, ext_zicbop),
    ISA_EXT_DATA_ENTRY(zicboz, PRIV_VERSION_1_12_0, ext_zicboz),
    ISA_EXT_DATA_ENTRY(ziccamoa, PRIV_VERSION_1_11_0, has_priv_1_11),
    ISA_EXT_DATA_ENTRY(ziccif, PRIV_VERSION_1_11_0, has_priv_1_11),
    ISA_EXT_DATA_ENTRY(zicclsm, PRIV_VERSION_1_11_0, has_priv_1_11),
    ISA_EXT_DATA_ENTRY(ziccrse, PRIV_VERSION_1_11_0, has_priv_1_11),
    ISA_EXT_DATA_ENTRY(zicfilp, PRIV_VERSION_1_12_0, ext_zicfilp),
    ISA_EXT_DATA_ENTRY(zicfiss, PRIV_VERSION_1_13_0, ext_zicfiss),
    ISA_EXT_DATA_ENTRY(zicond, PRIV_VERSION_1_12_0, ext_zicond),
    ISA_EXT_DATA_ENTRY(zicntr, PRIV_VERSION_1_12_0, ext_zicntr),
    ISA_EXT_DATA_ENTRY(zicsr, PRIV_VERSION_1_10_0, ext_zicsr),
    ISA_EXT_DATA_ENTRY(zifencei, PRIV_VERSION_1_10_0, ext_zifencei),
    ISA_EXT_DATA_ENTRY(zihintntl, PRIV_VERSION_1_10_0, ext_zihintntl),
    ISA_EXT_DATA_ENTRY(zihintpause, PRIV_VERSION_1_10_0, ext_zihintpause),
    ISA_EXT_DATA_ENTRY(zihpm, PRIV_VERSION_1_12_0, ext_zihpm),
    ISA_EXT_DATA_ENTRY(zimop, PRIV_VERSION_1_13_0, ext_zimop),
    ISA_EXT_DATA_ENTRY(zmmul, PRIV_VERSION_1_12_0, ext_zmmul),
    ISA_EXT_DATA_ENTRY(za64rs, PRIV_VERSION_1_12_0, has_priv_1_12),
    ISA_EXT_DATA_ENTRY(zaamo, PRIV_VERSION_1_12_0, ext_zaamo),
    ISA_EXT_DATA_ENTRY(zabha, PRIV_VERSION_1_13_0, ext_zabha),
    ISA_EXT_DATA_ENTRY(zacas, PRIV_VERSION_1_12_0, ext_zacas),
    ISA_EXT_DATA_ENTRY(zama16b, PRIV_VERSION_1_13_0, ext_zama16b),
    ISA_EXT_DATA_ENTRY(zalrsc, PRIV_VERSION_1_12_0, ext_zalrsc),
    ISA_EXT_DATA_ENTRY(zawrs, PRIV_VERSION_1_12_0, ext_zawrs),
    ISA_EXT_DATA_ENTRY(zfa, PRIV_VERSION_1_12_0, ext_zfa),
    ISA_EXT_DATA_ENTRY(zfbfmin, PRIV_VERSION_1_12_0, ext_zfbfmin),
    ISA_EXT_DATA_ENTRY(zfh, PRIV_VERSION_1_11_0, ext_zfh),
    ISA_EXT_DATA_ENTRY(zfhmin, PRIV_VERSION_1_11_0, ext_zfhmin),
    ISA_EXT_DATA_ENTRY(zfinx, PRIV_VERSION_1_12_0, ext_zfinx),
    ISA_EXT_DATA_ENTRY(zdinx, PRIV_VERSION_1_12_0, ext_zdinx),
    ISA_EXT_DATA_ENTRY(zca, PRIV_VERSION_1_12_0, ext_zca),
    ISA_EXT_DATA_ENTRY(zcb, PRIV_VERSION_1_12_0, ext_zcb),
    ISA_EXT_DATA_ENTRY(zcf, PRIV_VERSION_1_12_0, ext_zcf),
    ISA_EXT_DATA_ENTRY(zcd, PRIV_VERSION_1_12_0, ext_zcd),
    ISA_EXT_DATA_ENTRY(zce, PRIV_VERSION_1_12_0, ext_zce),
    ISA_EXT_DATA_ENTRY(zcmop, PRIV_VERSION_1_13_0, ext_zcmop),
    ISA_EXT_DATA_ENTRY(zcmp, PRIV_VERSION_1_12_0, ext_zcmp),
    ISA_EXT_DATA_ENTRY(zcmt, PRIV_VERSION_1_12_0, ext_zcmt),
    ISA_EXT_DATA_ENTRY(zba, PRIV_VERSION_1_12_0, ext_zba),
    ISA_EXT_DATA_ENTRY(zbb, PRIV_VERSION_1_12_0, ext_zbb),
    ISA_EXT_DATA_ENTRY(zbc, PRIV_VERSION_1_12_0, ext_zbc),
    ISA_EXT_DATA_ENTRY(zbkb, PRIV_VERSION_1_12_0, ext_zbkb),
    ISA_EXT_DATA_ENTRY(zbkc, PRIV_VERSION_1_12_0, ext_zbkc),
    ISA_EXT_DATA_ENTRY(zbkx, PRIV_VERSION_1_12_0, ext_zbkx),
    ISA_EXT_DATA_ENTRY(zbs, PRIV_VERSION_1_12_0, ext_zbs),
    ISA_EXT_DATA_ENTRY(zk, PRIV_VERSION_1_12_0, ext_zk),
    ISA_EXT_DATA_ENTRY(zkn, PRIV_VERSION_1_12_0, ext_zkn),
    ISA_EXT_DATA_ENTRY(zknd, PRIV_VERSION_1_12_0, ext_zknd),
    ISA_EXT_DATA_ENTRY(zkne, PRIV_VERSION_1_12_0, ext_zkne),
    ISA_EXT_DATA_ENTRY(zknh, PRIV_VERSION_1_12_0, ext_zknh),
    ISA_EXT_DATA_ENTRY(zkr, PRIV_VERSION_1_12_0, ext_zkr),
    ISA_EXT_DATA_ENTRY(zks, PRIV_VERSION_1_12_0, ext_zks),
    ISA_EXT_DATA_ENTRY(zksed, PRIV_VERSION_1_12_0, ext_zksed),
    ISA_EXT_DATA_ENTRY(zksh, PRIV_VERSION_1_12_0, ext_zksh),
    ISA_EXT_DATA_ENTRY(zkt, PRIV_VERSION_1_12_0, ext_zkt),
    ISA_EXT_DATA_ENTRY(ztso, PRIV_VERSION_1_12_0, ext_ztso),
    ISA_EXT_DATA_ENTRY(zvbb, PRIV_VERSION_1_12_0, ext_zvbb),
    ISA_EXT_DATA_ENTRY(zvbc, PRIV_VERSION_1_12_0, ext_zvbc),
    ISA_EXT_DATA_ENTRY(zve32f, PRIV_VERSION_1_10_0, ext_zve32f),
    ISA_EXT_DATA_ENTRY(zve32x, PRIV_VERSION_1_10_0, ext_zve32x),
    ISA_EXT_DATA_ENTRY(zve64f, PRIV_VERSION_1_10_0, ext_zve64f),
    ISA_EXT_DATA_ENTRY(zve64d, PRIV_VERSION_1_10_0, ext_zve64d),
    ISA_EXT_DATA_ENTRY(zve64x, PRIV_VERSION_1_10_0, ext_zve64x),
    ISA_EXT_DATA_ENTRY(zvfbfmin, PRIV_VERSION_1_12_0, ext_zvfbfmin),
    ISA_EXT_DATA_ENTRY(zvfbfwma, PRIV_VERSION_1_12_0, ext_zvfbfwma),
    ISA_EXT_DATA_ENTRY(zvfh, PRIV_VERSION_1_12_0, ext_zvfh),
    ISA_EXT_DATA_ENTRY(zvfhmin, PRIV_VERSION_1_12_0, ext_zvfhmin),
    ISA_EXT_DATA_ENTRY(zvkb, PRIV_VERSION_1_12_0, ext_zvkb),
    ISA_EXT_DATA_ENTRY(zvkg, PRIV_VERSION_1_12_0, ext_zvkg),
    ISA_EXT_DATA_ENTRY(zvkn, PRIV_VERSION_1_12_0, ext_zvkn),
    ISA_EXT_DATA_ENTRY(zvknc, PRIV_VERSION_1_12_0, ext_zvknc),
    ISA_EXT_DATA_ENTRY(zvkned, PRIV_VERSION_1_12_0, ext_zvkned),
    ISA_EXT_DATA_ENTRY(zvkng, PRIV_VERSION_1_12_0, ext_zvkng),
    ISA_EXT_DATA_ENTRY(zvknha, PRIV_VERSION_1_12_0, ext_zvknha),
    ISA_EXT_DATA_ENTRY(zvknhb, PRIV_VERSION_1_12_0, ext_zvknhb),
    ISA_EXT_DATA_ENTRY(zvks, PRIV_VERSION_1_12_0, ext_zvks),
    ISA_EXT_DATA_ENTRY(zvksc, PRIV_VERSION_1_12_0, ext_zvksc),
    ISA_EXT_DATA_ENTRY(zvksed, PRIV_VERSION_1_12_0, ext_zvksed),
    ISA_EXT_DATA_ENTRY(zvksg, PRIV_VERSION_1_12_0, ext_zvksg),
    ISA_EXT_DATA_ENTRY(zvksh, PRIV_VERSION_1_12_0, ext_zvksh),
    ISA_EXT_DATA_ENTRY(zvkt, PRIV_VERSION_1_12_0, ext_zvkt),
    ISA_EXT_DATA_ENTRY(zhinx, PRIV_VERSION_1_12_0, ext_zhinx),
    ISA_EXT_DATA_ENTRY(zhinxmin, PRIV_VERSION_1_12_0, ext_zhinxmin),
    ISA_EXT_DATA_ENTRY(shcounterenw, PRIV_VERSION_1_12_0, has_priv_1_12),
    ISA_EXT_DATA_ENTRY(sha, PRIV_VERSION_1_12_0, ext_sha),
    ISA_EXT_DATA_ENTRY(shgatpa, PRIV_VERSION_1_12_0, has_priv_1_12),
    ISA_EXT_DATA_ENTRY(shtvala, PRIV_VERSION_1_12_0, has_priv_1_12),
    ISA_EXT_DATA_ENTRY(shvsatpa, PRIV_VERSION_1_12_0, has_priv_1_12),
    ISA_EXT_DATA_ENTRY(shvstvala, PRIV_VERSION_1_12_0, has_priv_1_12),
    ISA_EXT_DATA_ENTRY(shvstvecd, PRIV_VERSION_1_12_0, has_priv_1_12),
    ISA_EXT_DATA_ENTRY(smaia, PRIV_VERSION_1_12_0, ext_smaia),
    ISA_EXT_DATA_ENTRY(smcdeleg, PRIV_VERSION_1_13_0, ext_smcdeleg),
    ISA_EXT_DATA_ENTRY(smcntrpmf, PRIV_VERSION_1_12_0, ext_smcntrpmf),
    ISA_EXT_DATA_ENTRY(smcsrind, PRIV_VERSION_1_13_0, ext_smcsrind),
    ISA_EXT_DATA_ENTRY(smdbltrp, PRIV_VERSION_1_13_0, ext_smdbltrp),
    ISA_EXT_DATA_ENTRY(smepmp, PRIV_VERSION_1_12_0, ext_smepmp),
    ISA_EXT_DATA_ENTRY(smrnmi, PRIV_VERSION_1_12_0, ext_smrnmi),
    ISA_EXT_DATA_ENTRY(smmpm, PRIV_VERSION_1_13_0, ext_smmpm),
    ISA_EXT_DATA_ENTRY(smnpm, PRIV_VERSION_1_13_0, ext_smnpm),
    ISA_EXT_DATA_ENTRY(smstateen, PRIV_VERSION_1_12_0, ext_smstateen),
    ISA_EXT_DATA_ENTRY(ssaia, PRIV_VERSION_1_12_0, ext_ssaia),
    ISA_EXT_DATA_ENTRY(ssccfg, PRIV_VERSION_1_13_0, ext_ssccfg),
    ISA_EXT_DATA_ENTRY(ssccptr, PRIV_VERSION_1_11_0, has_priv_1_11),
    ISA_EXT_DATA_ENTRY(sscofpmf, PRIV_VERSION_1_12_0, ext_sscofpmf),
    ISA_EXT_DATA_ENTRY(sscounterenw, PRIV_VERSION_1_12_0, has_priv_1_12),
    ISA_EXT_DATA_ENTRY(sscsrind, PRIV_VERSION_1_12_0, ext_sscsrind),
    ISA_EXT_DATA_ENTRY(ssdbltrp, PRIV_VERSION_1_13_0, ext_ssdbltrp),
    ISA_EXT_DATA_ENTRY(ssnpm, PRIV_VERSION_1_13_0, ext_ssnpm),
    ISA_EXT_DATA_ENTRY(sspm, PRIV_VERSION_1_13_0, ext_sspm),
    ISA_EXT_DATA_ENTRY(ssstateen, PRIV_VERSION_1_12_0, ext_ssstateen),
    ISA_EXT_DATA_ENTRY(sstc, PRIV_VERSION_1_12_0, ext_sstc),
    ISA_EXT_DATA_ENTRY(sstvala, PRIV_VERSION_1_12_0, has_priv_1_12),
    ISA_EXT_DATA_ENTRY(sstvecd, PRIV_VERSION_1_12_0, has_priv_1_12),
    ISA_EXT_DATA_ENTRY(supm, PRIV_VERSION_1_13_0, ext_supm),
    ISA_EXT_DATA_ENTRY(svade, PRIV_VERSION_1_11_0, ext_svade),
    ISA_EXT_DATA_ENTRY(svadu, PRIV_VERSION_1_12_0, ext_svadu),
    ISA_EXT_DATA_ENTRY(svinval, PRIV_VERSION_1_12_0, ext_svinval),
    ISA_EXT_DATA_ENTRY(svnapot, PRIV_VERSION_1_12_0, ext_svnapot),
    ISA_EXT_DATA_ENTRY(svpbmt, PRIV_VERSION_1_12_0, ext_svpbmt),
    ISA_EXT_DATA_ENTRY(svukte, PRIV_VERSION_1_13_0, ext_svukte),
    ISA_EXT_DATA_ENTRY(svvptc, PRIV_VERSION_1_13_0, ext_svvptc),
    ISA_EXT_DATA_ENTRY(xtheadba, PRIV_VERSION_1_11_0, ext_xtheadba),
    ISA_EXT_DATA_ENTRY(xtheadbb, PRIV_VERSION_1_11_0, ext_xtheadbb),
    ISA_EXT_DATA_ENTRY(xtheadbs, PRIV_VERSION_1_11_0, ext_xtheadbs),
    ISA_EXT_DATA_ENTRY(xtheadcmo, PRIV_VERSION_1_11_0, ext_xtheadcmo),
    ISA_EXT_DATA_ENTRY(xtheadcondmov, PRIV_VERSION_1_11_0, ext_xtheadcondmov),
    ISA_EXT_DATA_ENTRY(xtheadfmemidx, PRIV_VERSION_1_11_0, ext_xtheadfmemidx),
    ISA_EXT_DATA_ENTRY(xtheadfmv, PRIV_VERSION_1_11_0, ext_xtheadfmv),
    ISA_EXT_DATA_ENTRY(xtheadmac, PRIV_VERSION_1_11_0, ext_xtheadmac),
    ISA_EXT_DATA_ENTRY(xtheadmemidx, PRIV_VERSION_1_11_0, ext_xtheadmemidx),
    ISA_EXT_DATA_ENTRY(xtheadmempair, PRIV_VERSION_1_11_0, ext_xtheadmempair),
    ISA_EXT_DATA_ENTRY(xtheadsync, PRIV_VERSION_1_11_0, ext_xtheadsync),
    ISA_EXT_DATA_ENTRY(xventanacondops, PRIV_VERSION_1_12_0, ext_XVentanaCondOps),

    { },
};

bool isa_ext_is_enabled(RISCVCPU *cpu, uint32_t ext_offset)
{
    bool *ext_enabled = (void *)&cpu->cfg + ext_offset;

    return *ext_enabled;
}

void isa_ext_update_enabled(RISCVCPU *cpu, uint32_t ext_offset, bool en)
{
    bool *ext_enabled = (void *)&cpu->cfg + ext_offset;

    *ext_enabled = en;
}

bool riscv_cpu_is_vendor(Object *cpu_obj)
{
    return object_dynamic_cast(cpu_obj, TYPE_RISCV_VENDOR_CPU) != NULL;
}

const char * const riscv_int_regnames[] = {
    "x0/zero", "x1/ra", "x2/sp", "x3/gp", "x4/tp", "x5/t0", "x6/t1",
    "x7/t2", "x8/s0", "x9/s1", "x10/a0", "x11/a1", "x12/a2", "x13/a3",
    "x14/a4", "x15/a5", "x16/a6", "x17/a7", "x18/s2", "x19/s3", "x20/s4",
    "x21/s5", "x22/s6", "x23/s7", "x24/s8", "x25/s9", "x26/s10", "x27/s11",
    "x28/t3", "x29/t4", "x30/t5", "x31/t6"
};

const char * const riscv_int_regnamesh[] = {
    "x0h/zeroh", "x1h/rah", "x2h/sph", "x3h/gph", "x4h/tph", "x5h/t0h",
    "x6h/t1h", "x7h/t2h", "x8h/s0h", "x9h/s1h", "x10h/a0h", "x11h/a1h",
    "x12h/a2h", "x13h/a3h", "x14h/a4h", "x15h/a5h", "x16h/a6h", "x17h/a7h",
    "x18h/s2h", "x19h/s3h", "x20h/s4h", "x21h/s5h", "x22h/s6h", "x23h/s7h",
    "x24h/s8h", "x25h/s9h", "x26h/s10h", "x27h/s11h", "x28h/t3h", "x29h/t4h",
    "x30h/t5h", "x31h/t6h"
};

const char * const riscv_fpr_regnames[] = {
    "f0/ft0", "f1/ft1", "f2/ft2", "f3/ft3", "f4/ft4", "f5/ft5",
    "f6/ft6", "f7/ft7", "f8/fs0", "f9/fs1", "f10/fa0", "f11/fa1",
    "f12/fa2", "f13/fa3", "f14/fa4", "f15/fa5", "f16/fa6", "f17/fa7",
    "f18/fs2", "f19/fs3", "f20/fs4", "f21/fs5", "f22/fs6", "f23/fs7",
    "f24/fs8", "f25/fs9", "f26/fs10", "f27/fs11", "f28/ft8", "f29/ft9",
    "f30/ft10", "f31/ft11"
};

const char * const riscv_rvv_regnames[] = {
    "v0", "v1", "v2", "v3", "v4", "v5", "v6",
    "v7", "v8", "v9", "v10", "v11", "v12", "v13",
    "v14", "v15", "v16", "v17", "v18", "v19", "v20",
    "v21", "v22", "v23", "v24", "v25", "v26", "v27",
    "v28", "v29", "v30", "v31"
};

static const char * const riscv_excp_names[] = {
    "misaligned_fetch",
    "fault_fetch",
    "illegal_instruction",
    "breakpoint",
    "misaligned_load",
    "fault_load",
    "misaligned_store",
    "fault_store",
    "user_ecall",
    "supervisor_ecall",
    "hypervisor_ecall",
    "machine_ecall",
    "exec_page_fault",
    "load_page_fault",
    "reserved",
    "store_page_fault",
    "double_trap",
    "reserved",
    "reserved",
    "reserved",
    "guest_exec_page_fault",
    "guest_load_page_fault",
    "reserved",
    "guest_store_page_fault",
};

static const char * const riscv_intr_names[] = {
    "u_software",
    "s_software",
    "vs_software",
    "m_software",
    "u_timer",
    "s_timer",
    "vs_timer",
    "m_timer",
    "u_external",
    "s_external",
    "vs_external",
    "m_external",
    "reserved",
    "reserved",
    "reserved",
    "reserved"
};

const char *riscv_cpu_get_trap_name(target_ulong cause, bool async)
{
    if (async) {
        return (cause < ARRAY_SIZE(riscv_intr_names)) ?
               riscv_intr_names[cause] : "(unknown)";
    } else {
        return (cause < ARRAY_SIZE(riscv_excp_names)) ?
               riscv_excp_names[cause] : "(unknown)";
    }
}

void riscv_cpu_set_misa_ext(CPURISCVState *env, uint32_t ext)
{
    env->misa_ext_mask = env->misa_ext = ext;
}

int riscv_cpu_max_xlen(RISCVCPUClass *mcc)
{
    return 16 << mcc->misa_mxl_max;
}

#ifndef CONFIG_USER_ONLY
static uint8_t satp_mode_from_str(const char *satp_mode_str)
{
    if (!strncmp(satp_mode_str, "mbare", 5)) {
        return VM_1_10_MBARE;
    }

    if (!strncmp(satp_mode_str, "sv32", 4)) {
        return VM_1_10_SV32;
    }

    if (!strncmp(satp_mode_str, "sv39", 4)) {
        return VM_1_10_SV39;
    }

    if (!strncmp(satp_mode_str, "sv48", 4)) {
        return VM_1_10_SV48;
    }

    if (!strncmp(satp_mode_str, "sv57", 4)) {
        return VM_1_10_SV57;
    }

    if (!strncmp(satp_mode_str, "sv64", 4)) {
        return VM_1_10_SV64;
    }

    g_assert_not_reached();
}

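/*
 * The helper below returns the index of the highest bit set in 'map', i.e.
 * the largest satp mode present in the map. For example, a map with both
 * the SV39 and SV48 bits set yields VM_1_10_SV48, since SV48 has the larger
 * satp mode encoding.
 */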
uint8_t satp_mode_max_from_map(uint32_t map)
{
    /*
     * 'map = 0' will make us return (31 - 32), which C will
     * happily overflow to UINT_MAX. There's no good result to
     * return if 'map = 0' (e.g. returning 0 will be ambiguous
     * with the result for 'map = 1').
     *
     * Assert out if map = 0. Callers will have to deal with
     * it outside of this function.
     */
    g_assert(map > 0);

    /* map here has at least one bit set, so no problem with clz */
    return 31 - __builtin_clz(map);
}

const char *satp_mode_str(uint8_t satp_mode, bool is_32_bit)
{
    if (is_32_bit) {
        switch (satp_mode) {
        case VM_1_10_SV32:
            return "sv32";
        case VM_1_10_MBARE:
            return "none";
        }
    } else {
        switch (satp_mode) {
        case VM_1_10_SV64:
            return "sv64";
        case VM_1_10_SV57:
            return "sv57";
        case VM_1_10_SV48:
            return "sv48";
        case VM_1_10_SV39:
            return "sv39";
        case VM_1_10_MBARE:
            return "none";
        }
    }

    g_assert_not_reached();
}

static void set_satp_mode_max_supported(RISCVCPU *cpu,
                                        uint8_t satp_mode)
{
    bool rv32 = riscv_cpu_mxl(&cpu->env) == MXL_RV32;
    const bool *valid_vm = rv32 ? valid_vm_1_10_32 : valid_vm_1_10_64;

    for (int i = 0; i <= satp_mode; ++i) {
        if (valid_vm[i]) {
            cpu->cfg.satp_mode.supported |= (1 << i);
        }
    }
}

/* Set the satp mode to the max supported */
static void set_satp_mode_default_map(RISCVCPU *cpu)
{
    /*
     * Bare CPUs do not default to the max available.
     * Users must set a valid satp_mode in the command
     * line.
     */
    if (object_dynamic_cast(OBJECT(cpu), TYPE_RISCV_BARE_CPU) != NULL) {
        warn_report("No satp mode set. Defaulting to 'bare'");
        cpu->cfg.satp_mode.map = (1 << VM_1_10_MBARE);
        return;
    }

    cpu->cfg.satp_mode.map = cpu->cfg.satp_mode.supported;
}
#endif

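/*
 * Instance init for the 'max' CPU model. Only the baseline (MMU, PMP, latest
 * priv spec, widest supported satp mode) is set here; presumably the
 * accelerator-specific code enables the remaining extensions when the CPU is
 * finalized, so "-cpu max" (illustrative usage) ends up with everything the
 * chosen accelerator supports.
 */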
static void riscv_max_cpu_init(Object *obj)
{
    RISCVCPU *cpu = RISCV_CPU(obj);
    CPURISCVState *env = &cpu->env;

    cpu->cfg.mmu = true;
    cpu->cfg.pmp = true;

    env->priv_ver = PRIV_VERSION_LATEST;
#ifndef CONFIG_USER_ONLY
    set_satp_mode_max_supported(RISCV_CPU(obj),
        riscv_cpu_mxl(&RISCV_CPU(obj)->env) == MXL_RV32 ?
        VM_1_10_SV32 : VM_1_10_SV57);
#endif
}

#if defined(TARGET_RISCV64)
static void rv64_base_cpu_init(Object *obj)
{
    RISCVCPU *cpu = RISCV_CPU(obj);
    CPURISCVState *env = &cpu->env;

    cpu->cfg.mmu = true;
    cpu->cfg.pmp = true;

    /* Set latest version of privileged specification */
    env->priv_ver = PRIV_VERSION_LATEST;
#ifndef CONFIG_USER_ONLY
    set_satp_mode_max_supported(RISCV_CPU(obj), VM_1_10_SV57);
#endif
}

static void rv64_sifive_u_cpu_init(Object *obj)
{
    RISCVCPU *cpu = RISCV_CPU(obj);
    CPURISCVState *env = &cpu->env;
    riscv_cpu_set_misa_ext(env, RVI | RVM | RVA | RVF | RVD | RVC | RVS | RVU);
    env->priv_ver = PRIV_VERSION_1_10_0;
#ifndef CONFIG_USER_ONLY
    set_satp_mode_max_supported(RISCV_CPU(obj), VM_1_10_SV39);
#endif

    /* inherited from parent obj via riscv_cpu_init() */
    cpu->cfg.ext_zifencei = true;
    cpu->cfg.ext_zicsr = true;
    cpu->cfg.mmu = true;
    cpu->cfg.pmp = true;
}

static void rv64_sifive_e_cpu_init(Object *obj)
{
    CPURISCVState *env = &RISCV_CPU(obj)->env;
    RISCVCPU *cpu = RISCV_CPU(obj);

    riscv_cpu_set_misa_ext(env, RVI | RVM | RVA | RVC | RVU);
    env->priv_ver = PRIV_VERSION_1_10_0;
#ifndef CONFIG_USER_ONLY
    set_satp_mode_max_supported(cpu, VM_1_10_MBARE);
#endif

    /* inherited from parent obj via riscv_cpu_init() */
    cpu->cfg.ext_zifencei = true;
    cpu->cfg.ext_zicsr = true;
    cpu->cfg.pmp = true;
}

static void rv64_thead_c906_cpu_init(Object *obj)
{
    CPURISCVState *env = &RISCV_CPU(obj)->env;
    RISCVCPU *cpu = RISCV_CPU(obj);

    riscv_cpu_set_misa_ext(env, RVG | RVC | RVS | RVU);
    env->priv_ver = PRIV_VERSION_1_11_0;

    cpu->cfg.ext_zfa = true;
    cpu->cfg.ext_zfh = true;
    cpu->cfg.mmu = true;
    cpu->cfg.ext_xtheadba = true;
    cpu->cfg.ext_xtheadbb = true;
    cpu->cfg.ext_xtheadbs = true;
    cpu->cfg.ext_xtheadcmo = true;
    cpu->cfg.ext_xtheadcondmov = true;
    cpu->cfg.ext_xtheadfmemidx = true;
    cpu->cfg.ext_xtheadmac = true;
    cpu->cfg.ext_xtheadmemidx = true;
    cpu->cfg.ext_xtheadmempair = true;
    cpu->cfg.ext_xtheadsync = true;

    cpu->cfg.mvendorid = THEAD_VENDOR_ID;
#ifndef CONFIG_USER_ONLY
    set_satp_mode_max_supported(cpu, VM_1_10_SV39);
    th_register_custom_csrs(cpu);
#endif

    /* inherited from parent obj via riscv_cpu_init() */
    cpu->cfg.pmp = true;
}

static void rv64_veyron_v1_cpu_init(Object *obj)
{
    CPURISCVState *env = &RISCV_CPU(obj)->env;
    RISCVCPU *cpu = RISCV_CPU(obj);

    riscv_cpu_set_misa_ext(env, RVG | RVC | RVS | RVU | RVH);
    env->priv_ver = PRIV_VERSION_1_12_0;

    /* Enable ISA extensions */
    cpu->cfg.mmu = true;
    cpu->cfg.ext_zifencei = true;
    cpu->cfg.ext_zicsr = true;
    cpu->cfg.pmp = true;
    cpu->cfg.ext_zicbom = true;
    cpu->cfg.cbom_blocksize = 64;
    cpu->cfg.cboz_blocksize = 64;
    cpu->cfg.ext_zicboz = true;
    cpu->cfg.ext_smaia = true;
    cpu->cfg.ext_ssaia = true;
    cpu->cfg.ext_sscofpmf = true;
    cpu->cfg.ext_sstc = true;
    cpu->cfg.ext_svinval = true;
    cpu->cfg.ext_svnapot = true;
    cpu->cfg.ext_svpbmt = true;
    cpu->cfg.ext_smstateen = true;
    cpu->cfg.ext_zba = true;
    cpu->cfg.ext_zbb = true;
    cpu->cfg.ext_zbc = true;
    cpu->cfg.ext_zbs = true;
    cpu->cfg.ext_XVentanaCondOps = true;

    cpu->cfg.mvendorid = VEYRON_V1_MVENDORID;
    cpu->cfg.marchid = VEYRON_V1_MARCHID;
    cpu->cfg.mimpid = VEYRON_V1_MIMPID;

#ifndef CONFIG_USER_ONLY
    set_satp_mode_max_supported(cpu, VM_1_10_SV48);
#endif
}

/* Tenstorrent Ascalon */
static void rv64_tt_ascalon_cpu_init(Object *obj)
{
    CPURISCVState *env = &RISCV_CPU(obj)->env;
    RISCVCPU *cpu = RISCV_CPU(obj);

    riscv_cpu_set_misa_ext(env, RVG | RVC | RVS | RVU | RVH | RVV);
    env->priv_ver = PRIV_VERSION_1_13_0;

    /* Enable ISA extensions */
    cpu->cfg.mmu = true;
    cpu->cfg.vlenb = 256 >> 3;
    cpu->cfg.elen = 64;
    cpu->env.vext_ver = VEXT_VERSION_1_00_0;
    cpu->cfg.rvv_ma_all_1s = true;
    cpu->cfg.rvv_ta_all_1s = true;
    cpu->cfg.misa_w = true;
    cpu->cfg.pmp = true;
    cpu->cfg.cbom_blocksize = 64;
    cpu->cfg.cbop_blocksize = 64;
    cpu->cfg.cboz_blocksize = 64;
    cpu->cfg.ext_zic64b = true;
    cpu->cfg.ext_zicbom = true;
    cpu->cfg.ext_zicbop = true;
    cpu->cfg.ext_zicboz = true;
    cpu->cfg.ext_zicntr = true;
    cpu->cfg.ext_zicond = true;
    cpu->cfg.ext_zicsr = true;
    cpu->cfg.ext_zifencei = true;
    cpu->cfg.ext_zihintntl = true;
    cpu->cfg.ext_zihintpause = true;
    cpu->cfg.ext_zihpm = true;
    cpu->cfg.ext_zimop = true;
    cpu->cfg.ext_zawrs = true;
    cpu->cfg.ext_zfa = true;
    cpu->cfg.ext_zfbfmin = true;
    cpu->cfg.ext_zfh = true;
    cpu->cfg.ext_zfhmin = true;
    cpu->cfg.ext_zcb = true;
    cpu->cfg.ext_zcmop = true;
    cpu->cfg.ext_zba = true;
    cpu->cfg.ext_zbb = true;
    cpu->cfg.ext_zbs = true;
    cpu->cfg.ext_zkt = true;
    cpu->cfg.ext_zvbb = true;
    cpu->cfg.ext_zvbc = true;
    cpu->cfg.ext_zvfbfmin = true;
    cpu->cfg.ext_zvfbfwma = true;
    cpu->cfg.ext_zvfh = true;
    cpu->cfg.ext_zvfhmin = true;
    cpu->cfg.ext_zvkng = true;
    cpu->cfg.ext_smaia = true;
    cpu->cfg.ext_smstateen = true;
    cpu->cfg.ext_ssaia = true;
    cpu->cfg.ext_sscofpmf = true;
    cpu->cfg.ext_sstc = true;
    cpu->cfg.ext_svade = true;
    cpu->cfg.ext_svinval = true;
    cpu->cfg.ext_svnapot = true;
    cpu->cfg.ext_svpbmt = true;

#ifndef CONFIG_USER_ONLY
    set_satp_mode_max_supported(cpu, VM_1_10_SV57);
#endif
}

static void rv64_xiangshan_nanhu_cpu_init(Object *obj)
{
    CPURISCVState *env = &RISCV_CPU(obj)->env;
    RISCVCPU *cpu = RISCV_CPU(obj);

    riscv_cpu_set_misa_ext(env, RVG | RVC | RVB | RVS | RVU);
    env->priv_ver = PRIV_VERSION_1_12_0;

    /* Enable ISA extensions */
    cpu->cfg.ext_zbc = true;
    cpu->cfg.ext_zbkb = true;
    cpu->cfg.ext_zbkc = true;
    cpu->cfg.ext_zbkx = true;
    cpu->cfg.ext_zknd = true;
    cpu->cfg.ext_zkne = true;
    cpu->cfg.ext_zknh = true;
    cpu->cfg.ext_zksed = true;
    cpu->cfg.ext_zksh = true;
    cpu->cfg.ext_svinval = true;

    cpu->cfg.mmu = true;
    cpu->cfg.pmp = true;

#ifndef CONFIG_USER_ONLY
    set_satp_mode_max_supported(cpu, VM_1_10_SV39);
#endif
}

#ifdef CONFIG_TCG
static void rv128_base_cpu_init(Object *obj)
{
    RISCVCPU *cpu = RISCV_CPU(obj);
    CPURISCVState *env = &cpu->env;

    cpu->cfg.mmu = true;
    cpu->cfg.pmp = true;

    /* Set latest version of privileged specification */
    env->priv_ver = PRIV_VERSION_LATEST;
#ifndef CONFIG_USER_ONLY
    set_satp_mode_max_supported(RISCV_CPU(obj), VM_1_10_SV57);
#endif
}
#endif /* CONFIG_TCG */

static void rv64i_bare_cpu_init(Object *obj)
{
    CPURISCVState *env = &RISCV_CPU(obj)->env;
    riscv_cpu_set_misa_ext(env, RVI);
}

static void rv64e_bare_cpu_init(Object *obj)
{
    CPURISCVState *env = &RISCV_CPU(obj)->env;
    riscv_cpu_set_misa_ext(env, RVE);
}

#endif /* !TARGET_RISCV64 */

#if defined(TARGET_RISCV32) || \
    (defined(TARGET_RISCV64) && !defined(CONFIG_USER_ONLY))

static void rv32_base_cpu_init(Object *obj)
{
    RISCVCPU *cpu = RISCV_CPU(obj);
    CPURISCVState *env = &cpu->env;

    cpu->cfg.mmu = true;
    cpu->cfg.pmp = true;

    /* Set latest version of privileged specification */
    env->priv_ver = PRIV_VERSION_LATEST;
#ifndef CONFIG_USER_ONLY
    set_satp_mode_max_supported(RISCV_CPU(obj), VM_1_10_SV32);
#endif
}

static void rv32_sifive_u_cpu_init(Object *obj)
{
    RISCVCPU *cpu = RISCV_CPU(obj);
    CPURISCVState *env = &cpu->env;
    riscv_cpu_set_misa_ext(env, RVI | RVM | RVA | RVF | RVD | RVC | RVS | RVU);
    env->priv_ver = PRIV_VERSION_1_10_0;
#ifndef CONFIG_USER_ONLY
    set_satp_mode_max_supported(RISCV_CPU(obj), VM_1_10_SV32);
#endif

    /* inherited from parent obj via riscv_cpu_init() */
    cpu->cfg.ext_zifencei = true;
    cpu->cfg.ext_zicsr = true;
    cpu->cfg.mmu = true;
    cpu->cfg.pmp = true;
}

static void rv32_sifive_e_cpu_init(Object *obj)
{
    CPURISCVState *env = &RISCV_CPU(obj)->env;
    RISCVCPU *cpu = RISCV_CPU(obj);

    riscv_cpu_set_misa_ext(env, RVI | RVM | RVA | RVC | RVU);
    env->priv_ver = PRIV_VERSION_1_10_0;
#ifndef CONFIG_USER_ONLY
    set_satp_mode_max_supported(cpu, VM_1_10_MBARE);
#endif

    /* inherited from parent obj via riscv_cpu_init() */
    cpu->cfg.ext_zifencei = true;
    cpu->cfg.ext_zicsr = true;
    cpu->cfg.pmp = true;
}

static void rv32_ibex_cpu_init(Object *obj)
{
    CPURISCVState *env = &RISCV_CPU(obj)->env;
    RISCVCPU *cpu = RISCV_CPU(obj);

    riscv_cpu_set_misa_ext(env, RVI | RVM | RVC | RVU);
    env->priv_ver = PRIV_VERSION_1_12_0;
#ifndef CONFIG_USER_ONLY
    set_satp_mode_max_supported(cpu, VM_1_10_MBARE);
#endif
    /* inherited from parent obj via riscv_cpu_init() */
    cpu->cfg.ext_zifencei = true;
    cpu->cfg.ext_zicsr = true;
    cpu->cfg.pmp = true;
    cpu->cfg.ext_smepmp = true;

    cpu->cfg.ext_zba = true;
    cpu->cfg.ext_zbb = true;
    cpu->cfg.ext_zbc = true;
    cpu->cfg.ext_zbs = true;
}

static void rv32_imafcu_nommu_cpu_init(Object *obj)
{
    CPURISCVState *env = &RISCV_CPU(obj)->env;
    RISCVCPU *cpu = RISCV_CPU(obj);

    riscv_cpu_set_misa_ext(env, RVI | RVM | RVA | RVF | RVC | RVU);
    env->priv_ver = PRIV_VERSION_1_10_0;
#ifndef CONFIG_USER_ONLY
    set_satp_mode_max_supported(cpu, VM_1_10_MBARE);
#endif

    /* inherited from parent obj via riscv_cpu_init() */
    cpu->cfg.ext_zifencei = true;
    cpu->cfg.ext_zicsr = true;
    cpu->cfg.pmp = true;
}

static void rv32i_bare_cpu_init(Object *obj)
{
    CPURISCVState *env = &RISCV_CPU(obj)->env;
    riscv_cpu_set_misa_ext(env, RVI);
}

static void rv32e_bare_cpu_init(Object *obj)
{
    CPURISCVState *env = &RISCV_CPU(obj)->env;
    riscv_cpu_set_misa_ext(env, RVE);
}
#endif

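/*
 * Resolve a CPU model name to its QOM class: the model name is expanded with
 * RISCV_CPU_TYPE_NAME() (e.g., illustratively, "rv64" becomes the
 * "rv64-riscv-cpu" type, assuming the usual type-name suffix) and then looked
 * up by name; per-CPU options are handled elsewhere.
 */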
static ObjectClass *riscv_cpu_class_by_name(const char *cpu_model)
{
    ObjectClass *oc;
    char *typename;
    char **cpuname;

    cpuname = g_strsplit(cpu_model, ",", 1);
    typename = g_strdup_printf(RISCV_CPU_TYPE_NAME("%s"), cpuname[0]);
    oc = object_class_by_name(typename);
    g_strfreev(cpuname);
    g_free(typename);

    return oc;
}

char *riscv_cpu_get_name(RISCVCPU *cpu)
{
    RISCVCPUClass *rcc = RISCV_CPU_GET_CLASS(cpu);
    const char *typename = object_class_get_name(OBJECT_CLASS(rcc));

    g_assert(g_str_has_suffix(typename, RISCV_CPU_TYPE_SUFFIX));

    return cpu_model_from_type(typename);
}

static void riscv_cpu_dump_state(CPUState *cs, FILE *f, int flags)
{
    RISCVCPU *cpu = RISCV_CPU(cs);
    CPURISCVState *env = &cpu->env;
    int i, j;
    uint8_t *p;

#if !defined(CONFIG_USER_ONLY)
    if (riscv_has_ext(env, RVH)) {
        qemu_fprintf(f, " %s %d\n", "V = ", env->virt_enabled);
    }
#endif
    qemu_fprintf(f, " %s " TARGET_FMT_lx "\n", "pc ", env->pc);
#ifndef CONFIG_USER_ONLY
    {
        static const int dump_csrs[] = {
            CSR_MHARTID,
            CSR_MSTATUS,
            CSR_MSTATUSH,
            /*
             * CSR_SSTATUS is intentionally omitted here as its value
             * can be figured out by looking at CSR_MSTATUS
             */
            CSR_HSTATUS,
            CSR_VSSTATUS,
            CSR_MIP,
            CSR_MIE,
            CSR_MIDELEG,
            CSR_HIDELEG,
            CSR_MEDELEG,
            CSR_HEDELEG,
            CSR_MTVEC,
            CSR_STVEC,
            CSR_VSTVEC,
            CSR_MEPC,
            CSR_SEPC,
            CSR_VSEPC,
            CSR_MCAUSE,
            CSR_SCAUSE,
            CSR_VSCAUSE,
            CSR_MTVAL,
            CSR_STVAL,
            CSR_HTVAL,
            CSR_MTVAL2,
            CSR_MSCRATCH,
            CSR_SSCRATCH,
            CSR_SATP,
        };

        for (i = 0; i < ARRAY_SIZE(dump_csrs); ++i) {
            int csrno = dump_csrs[i];
            target_ulong val = 0;
            RISCVException res = riscv_csrrw_debug(env, csrno, &val, 0, 0);

            /*
             * Rely on the smode, hmode, etc, predicates within csr.c
             * to do the filtering of the registers that are present.
             */
            if (res == RISCV_EXCP_NONE) {
                qemu_fprintf(f, " %-8s " TARGET_FMT_lx "\n",
                             csr_ops[csrno].name, val);
            }
        }
    }
#endif

    for (i = 0; i < 32; i++) {
        qemu_fprintf(f, " %-8s " TARGET_FMT_lx,
                     riscv_int_regnames[i], env->gpr[i]);
        if ((i & 3) == 3) {
            qemu_fprintf(f, "\n");
        }
    }
    if (flags & CPU_DUMP_FPU) {
        target_ulong val = 0;
        RISCVException res = riscv_csrrw_debug(env, CSR_FCSR, &val, 0, 0);
        if (res == RISCV_EXCP_NONE) {
            qemu_fprintf(f, " %-8s " TARGET_FMT_lx "\n",
                         csr_ops[CSR_FCSR].name, val);
        }
        for (i = 0; i < 32; i++) {
            qemu_fprintf(f, " %-8s %016" PRIx64,
                         riscv_fpr_regnames[i], env->fpr[i]);
            if ((i & 3) == 3) {
                qemu_fprintf(f, "\n");
            }
        }
    }
    if (riscv_has_ext(env, RVV) && (flags & CPU_DUMP_VPU)) {
        static const int dump_rvv_csrs[] = {
            CSR_VSTART,
            CSR_VXSAT,
            CSR_VXRM,
            CSR_VCSR,
            CSR_VL,
            CSR_VTYPE,
            CSR_VLENB,
        };
        for (i = 0; i < ARRAY_SIZE(dump_rvv_csrs); ++i) {
            int csrno = dump_rvv_csrs[i];
            target_ulong val = 0;
            RISCVException res = riscv_csrrw_debug(env, csrno, &val, 0, 0);

            /*
             * Rely on the smode, hmode, etc, predicates within csr.c
             * to do the filtering of the registers that are present.
             */
            if (res == RISCV_EXCP_NONE) {
                qemu_fprintf(f, " %-8s " TARGET_FMT_lx "\n",
                             csr_ops[csrno].name, val);
            }
        }
        uint16_t vlenb = cpu->cfg.vlenb;

        for (i = 0; i < 32; i++) {
            qemu_fprintf(f, " %-8s ", riscv_rvv_regnames[i]);
            p = (uint8_t *)env->vreg;
            for (j = vlenb - 1 ; j >= 0; j--) {
                qemu_fprintf(f, "%02x", *(p + i * vlenb + BYTE(j)));
            }
            qemu_fprintf(f, "\n");
        }
    }
}

static void riscv_cpu_set_pc(CPUState *cs, vaddr value)
{
    RISCVCPU *cpu = RISCV_CPU(cs);
    CPURISCVState *env = &cpu->env;

    if (env->xl == MXL_RV32) {
        env->pc = (int32_t)value;
    } else {
        env->pc = value;
    }
}

static vaddr riscv_cpu_get_pc(CPUState *cs)
{
    RISCVCPU *cpu = RISCV_CPU(cs);
    CPURISCVState *env = &cpu->env;

    /* Match cpu_get_tb_cpu_state. */
    if (env->xl == MXL_RV32) {
        return env->pc & UINT32_MAX;
    }
    return env->pc;
}

bool riscv_cpu_has_work(CPUState *cs)
{
#ifndef CONFIG_USER_ONLY
    RISCVCPU *cpu = RISCV_CPU(cs);
    CPURISCVState *env = &cpu->env;
    /*
     * Definition of the WFI instruction requires it to ignore the privilege
     * mode and delegation registers, but respect individual enables
     */
    return riscv_cpu_all_pending(env) != 0 ||
           riscv_cpu_sirq_pending(env) != RISCV_EXCP_NONE ||
           riscv_cpu_vsirq_pending(env) != RISCV_EXCP_NONE;
#else
    return true;
#endif
}

static int riscv_cpu_mmu_index(CPUState *cs, bool ifetch)
{
    return riscv_env_mmu_index(cpu_env(cs), ifetch);
}

static void riscv_cpu_reset_hold(Object *obj, ResetType type)
{
#ifndef CONFIG_USER_ONLY
    uint8_t iprio;
    int i, irq, rdzero;
#endif
    CPUState *cs = CPU(obj);
    RISCVCPU *cpu = RISCV_CPU(cs);
    RISCVCPUClass *mcc = RISCV_CPU_GET_CLASS(obj);
    CPURISCVState *env = &cpu->env;

    if (mcc->parent_phases.hold) {
        mcc->parent_phases.hold(obj, type);
    }
#ifndef CONFIG_USER_ONLY
    env->misa_mxl = mcc->misa_mxl_max;
    env->priv = PRV_M;
    env->mstatus &= ~(MSTATUS_MIE | MSTATUS_MPRV);
    if (env->misa_mxl > MXL_RV32) {
        /*
         * The reset status of SXL/UXL is undefined, but mstatus is WARL
         * and we must ensure that the value after init is valid for read.
         */
        env->mstatus = set_field(env->mstatus, MSTATUS64_SXL, env->misa_mxl);
        env->mstatus = set_field(env->mstatus, MSTATUS64_UXL, env->misa_mxl);
        if (riscv_has_ext(env, RVH)) {
            env->vsstatus = set_field(env->vsstatus,
                                      MSTATUS64_SXL, env->misa_mxl);
            env->vsstatus = set_field(env->vsstatus,
                                      MSTATUS64_UXL, env->misa_mxl);
            env->mstatus_hs = set_field(env->mstatus_hs,
                                        MSTATUS64_SXL, env->misa_mxl);
            env->mstatus_hs = set_field(env->mstatus_hs,
                                        MSTATUS64_UXL, env->misa_mxl);
        }
        if (riscv_cpu_cfg(env)->ext_smdbltrp) {
            env->mstatus = set_field(env->mstatus, MSTATUS_MDT, 1);
        }
    }
    env->mcause = 0;
    env->miclaim = MIP_SGEIP;
    env->pc = env->resetvec;
    env->bins = 0;
    env->two_stage_lookup = false;

    env->menvcfg = (cpu->cfg.ext_svpbmt ? MENVCFG_PBMTE : 0) |
                   (!cpu->cfg.ext_svade && cpu->cfg.ext_svadu ?
                    MENVCFG_ADUE : 0);
    env->henvcfg = 0;

    /* Initialize default priorities of local interrupts. */
    for (i = 0; i < ARRAY_SIZE(env->miprio); i++) {
        iprio = riscv_cpu_default_priority(i);
        env->miprio[i] = (i == IRQ_M_EXT) ? 0 : iprio;
        env->siprio[i] = (i == IRQ_S_EXT) ? 0 : iprio;
        env->hviprio[i] = 0;
    }
    i = 0;
    while (!riscv_cpu_hviprio_index2irq(i, &irq, &rdzero)) {
        if (!rdzero) {
            env->hviprio[irq] = env->miprio[irq];
        }
        i++;
    }

    /*
     * Bits 10, 6, 2 and 12 of mideleg are read only 1 when the Hypervisor
     * extension is enabled.
     */
    if (riscv_has_ext(env, RVH)) {
        env->mideleg |= HS_MODE_INTERRUPTS;
    }

    /*
     * Clear mseccfg and unlock all the PMP entries upon reset.
     * This is allowed as per the priv and smepmp specifications
     * and is needed to clear stale entries across reboots.
     */
    if (riscv_cpu_cfg(env)->ext_smepmp) {
        env->mseccfg = 0;
    }

    pmp_unlock_entries(env);
#else
    env->priv = PRV_U;
    env->senvcfg = 0;
    env->menvcfg = 0;
#endif

    /* on reset elp is clear */
    env->elp = false;
    /* on reset ssp is set to 0 */
    env->ssp = 0;

    env->xl = riscv_cpu_mxl(env);
    cs->exception_index = RISCV_EXCP_NONE;
    env->load_res = -1;
    set_default_nan_mode(1, &env->fp_status);
    /* Default NaN value: sign bit clear, frac msb set */
    set_float_default_nan_pattern(0b01000000, &env->fp_status);
    env->vill = true;

#ifndef CONFIG_USER_ONLY
    if (cpu->cfg.debug) {
        riscv_trigger_reset_hold(env);
    }

    if (cpu->cfg.ext_smrnmi) {
        env->rnmip = 0;
        env->mnstatus = set_field(env->mnstatus, MNSTATUS_NMIE, false);
    }

    if (kvm_enabled()) {
        kvm_riscv_reset_vcpu(cpu);
    }
#endif
}

static void riscv_cpu_disas_set_info(CPUState *s, disassemble_info *info)
{
    RISCVCPU *cpu = RISCV_CPU(s);
    CPURISCVState *env = &cpu->env;
    info->target_info = &cpu->cfg;

    switch (env->xl) {
    case MXL_RV32:
        info->print_insn = print_insn_riscv32;
        break;
    case MXL_RV64:
        info->print_insn = print_insn_riscv64;
        break;
    case MXL_RV128:
        info->print_insn = print_insn_riscv128;
        break;
    default:
        g_assert_not_reached();
    }
}

#ifndef CONFIG_USER_ONLY
static void riscv_cpu_satp_mode_finalize(RISCVCPU *cpu, Error **errp)
{
    bool rv32 = riscv_cpu_is_32bit(cpu);
    uint8_t satp_mode_map_max, satp_mode_supported_max;

    /* The CPU wants the OS to decide which satp mode to use */
    if (cpu->cfg.satp_mode.supported == 0) {
        return;
    }

    satp_mode_supported_max =
        satp_mode_max_from_map(cpu->cfg.satp_mode.supported);

    if (cpu->cfg.satp_mode.map == 0) {
        if (cpu->cfg.satp_mode.init == 0) {
            /* If unset by the user, we fallback to the default satp mode. */
            set_satp_mode_default_map(cpu);
        } else {
            /*
             * Find the lowest level that was disabled and then enable the
             * first valid level below which can be found in
             * valid_vm_1_10_32/64.
             */
            for (int i = 1; i < 16; ++i) {
                if ((cpu->cfg.satp_mode.init & (1 << i)) &&
                    (cpu->cfg.satp_mode.supported & (1 << i))) {
                    for (int j = i - 1; j >= 0; --j) {
                        if (cpu->cfg.satp_mode.supported & (1 << j)) {
                            cpu->cfg.satp_mode.map |= (1 << j);
                            break;
                        }
                    }
                    break;
                }
            }
        }
    }

    satp_mode_map_max = satp_mode_max_from_map(cpu->cfg.satp_mode.map);

    /* Make sure the user asked for a supported configuration (HW and qemu) */
    if (satp_mode_map_max > satp_mode_supported_max) {
        error_setg(errp, "satp_mode %s is higher than hw max capability %s",
                   satp_mode_str(satp_mode_map_max, rv32),
                   satp_mode_str(satp_mode_supported_max, rv32));
        return;
    }

    /*
     * Make sure the user did not ask for an invalid configuration as per
     * the specification.
     */
    if (!rv32) {
        for (int i = satp_mode_map_max - 1; i >= 0; --i) {
            if (!(cpu->cfg.satp_mode.map & (1 << i)) &&
                (cpu->cfg.satp_mode.init & (1 << i)) &&
                (cpu->cfg.satp_mode.supported & (1 << i))) {
                error_setg(errp, "cannot disable %s satp mode if %s "
                           "is enabled", satp_mode_str(i, false),
                           satp_mode_str(satp_mode_map_max, false));
                return;
            }
        }
    }

    /* Finally expand the map so that all valid modes are set */
    for (int i = satp_mode_map_max - 1; i >= 0; --i) {
        if (cpu->cfg.satp_mode.supported & (1 << i)) {
            cpu->cfg.satp_mode.map |= (1 << i);
        }
    }
}
#endif

void riscv_cpu_finalize_features(RISCVCPU *cpu, Error **errp)
{
    Error *local_err = NULL;

#ifndef CONFIG_USER_ONLY
    riscv_cpu_satp_mode_finalize(cpu, &local_err);
    if (local_err != NULL) {
        error_propagate(errp, local_err);
        return;
    }
#endif

    if (tcg_enabled()) {
        riscv_tcg_cpu_finalize_features(cpu, &local_err);
        if (local_err != NULL) {
            error_propagate(errp, local_err);
            return;
        }
        riscv_tcg_cpu_finalize_dynamic_decoder(cpu);
    } else if (kvm_enabled()) {
        riscv_kvm_cpu_finalize_features(cpu, &local_err);
        if (local_err != NULL) {
            error_propagate(errp, local_err);
            return;
        }
    }
}

static void riscv_cpu_realize(DeviceState *dev, Error **errp)
{
    CPUState *cs = CPU(dev);
    RISCVCPU *cpu = RISCV_CPU(dev);
    RISCVCPUClass *mcc = RISCV_CPU_GET_CLASS(dev);
    Error *local_err = NULL;

    cpu_exec_realizefn(cs, &local_err);
    if (local_err != NULL) {
        error_propagate(errp, local_err);
        return;
    }

    riscv_cpu_finalize_features(cpu, &local_err);
    if (local_err != NULL) {
        error_propagate(errp, local_err);
        return;
    }

    riscv_cpu_register_gdb_regs_for_features(cs);

#ifndef CONFIG_USER_ONLY
    if (cpu->cfg.debug) {
        riscv_trigger_realize(&cpu->env);
    }
#endif

    qemu_init_vcpu(cs);
    cpu_reset(cs);

    mcc->parent_realize(dev, errp);
}

bool riscv_cpu_accelerator_compatible(RISCVCPU *cpu)
{
    if (tcg_enabled()) {
        return riscv_cpu_tcg_compatible(cpu);
    }

    return true;
}

#ifndef CONFIG_USER_ONLY
static void cpu_riscv_get_satp(Object *obj, Visitor *v, const char *name,
                               void *opaque, Error **errp)
{
    RISCVSATPMap *satp_map = opaque;
    uint8_t satp = satp_mode_from_str(name);
    bool value;

    value = satp_map->map & (1 << satp);

    visit_type_bool(v, name, &value, errp);
}

static void cpu_riscv_set_satp(Object *obj, Visitor *v, const char *name,
                               void *opaque, Error **errp)
{
    RISCVSATPMap *satp_map = opaque;
    uint8_t satp = satp_mode_from_str(name);
    bool value;

    if (!visit_type_bool(v, name, &value, errp)) {
        return;
    }

    satp_map->map = deposit32(satp_map->map, satp, 1, value);
    satp_map->init |= 1 << satp;
}

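/*
 * Expose each satp mode as a boolean CPU property, so it can be toggled from
 * the command line, e.g. (illustrative) "-cpu rv64,sv39=on,sv57=off". RV32
 * CPUs only get "sv32"; the 64-bit modes are registered otherwise.
 */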
void riscv_add_satp_mode_properties(Object *obj)
{
    RISCVCPU *cpu = RISCV_CPU(obj);

    if (cpu->env.misa_mxl == MXL_RV32) {
        object_property_add(obj, "sv32", "bool", cpu_riscv_get_satp,
                            cpu_riscv_set_satp, NULL, &cpu->cfg.satp_mode);
    } else {
        object_property_add(obj, "sv39", "bool", cpu_riscv_get_satp,
                            cpu_riscv_set_satp, NULL, &cpu->cfg.satp_mode);
        object_property_add(obj, "sv48", "bool", cpu_riscv_get_satp,
                            cpu_riscv_set_satp, NULL, &cpu->cfg.satp_mode);
        object_property_add(obj, "sv57", "bool", cpu_riscv_get_satp,
                            cpu_riscv_set_satp, NULL, &cpu->cfg.satp_mode);
        object_property_add(obj, "sv64", "bool", cpu_riscv_get_satp,
                            cpu_riscv_set_satp, NULL, &cpu->cfg.satp_mode);
    }
}

static void riscv_cpu_set_irq(void *opaque, int irq, int level)
{
    RISCVCPU *cpu = RISCV_CPU(opaque);
    CPURISCVState *env = &cpu->env;

    if (irq < IRQ_LOCAL_MAX) {
        switch (irq) {
        case IRQ_U_SOFT:
        case IRQ_S_SOFT:
        case IRQ_VS_SOFT:
        case IRQ_M_SOFT:
        case IRQ_U_TIMER:
        case IRQ_S_TIMER:
        case IRQ_VS_TIMER:
        case IRQ_M_TIMER:
        case IRQ_U_EXT:
        case IRQ_VS_EXT:
        case IRQ_M_EXT:
            if (kvm_enabled()) {
                kvm_riscv_set_irq(cpu, irq, level);
            } else {
                riscv_cpu_update_mip(env, 1 << irq, BOOL_TO_MASK(level));
            }
            break;
        case IRQ_S_EXT:
            if (kvm_enabled()) {
                kvm_riscv_set_irq(cpu, irq, level);
            } else {
                env->external_seip = level;
                riscv_cpu_update_mip(env, 1 << irq,
                                     BOOL_TO_MASK(level | env->software_seip));
            }
            break;
        default:
            g_assert_not_reached();
        }
    } else if (irq < (IRQ_LOCAL_MAX + IRQ_LOCAL_GUEST_MAX)) {
        /* Require H-extension for handling guest local interrupts */
        if (!riscv_has_ext(env, RVH)) {
            g_assert_not_reached();
        }

        /* Compute bit position in HGEIP CSR */
        irq = irq - IRQ_LOCAL_MAX + 1;
        if (env->geilen < irq) {
            g_assert_not_reached();
        }

        /* Update HGEIP CSR */
        env->hgeip &= ~((target_ulong)1 << irq);
        if (level) {
            env->hgeip |= (target_ulong)1 << irq;
        }

        /* Update mip.SGEIP bit */
        riscv_cpu_update_mip(env, MIP_SGEIP,
                             BOOL_TO_MASK(!!(env->hgeie & env->hgeip)));
    } else {
        g_assert_not_reached();
    }
}

static void riscv_cpu_set_nmi(void *opaque, int irq, int level)
{
    riscv_cpu_set_rnmi(RISCV_CPU(opaque), irq, level);
}
#endif /* CONFIG_USER_ONLY */

static bool riscv_cpu_is_dynamic(Object *cpu_obj)
{
    return object_dynamic_cast(cpu_obj, TYPE_RISCV_DYNAMIC_CPU) != NULL;
}

static void riscv_cpu_post_init(Object *obj)
{
    accel_cpu_instance_init(CPU(obj));
}

static void riscv_cpu_init(Object *obj)
{
    RISCVCPUClass *mcc = RISCV_CPU_GET_CLASS(obj);
    RISCVCPU *cpu = RISCV_CPU(obj);
    CPURISCVState *env = &cpu->env;

    env->misa_mxl = mcc->misa_mxl_max;

#ifndef CONFIG_USER_ONLY
    qdev_init_gpio_in(DEVICE(obj), riscv_cpu_set_irq,
                      IRQ_LOCAL_MAX + IRQ_LOCAL_GUEST_MAX);
    qdev_init_gpio_in_named(DEVICE(cpu), riscv_cpu_set_nmi,
                            "riscv.cpu.rnmi", RNMI_MAX);
#endif /* CONFIG_USER_ONLY */

    general_user_opts = g_hash_table_new(g_str_hash, g_str_equal);

    /*
     * The timer and performance counters extensions were supported
     * in QEMU before they were added as discrete extensions in the
     * ISA. To keep compatibility we'll always default them to 'true'
     * for all CPUs. Each accelerator will decide what to do when
     * users disable them.
     */
    RISCV_CPU(obj)->cfg.ext_zicntr = true;
    RISCV_CPU(obj)->cfg.ext_zihpm = true;

    /* Default values for non-bool cpu properties */
    cpu->cfg.pmu_mask = MAKE_64BIT_MASK(3, 16);
    cpu->cfg.vlenb = 128 >> 3;
    cpu->cfg.elen = 64;
    cpu->cfg.cbom_blocksize = 64;
    cpu->cfg.cbop_blocksize = 64;
    cpu->cfg.cboz_blocksize = 64;
    cpu->env.vext_ver = VEXT_VERSION_1_00_0;
}

static void riscv_bare_cpu_init(Object *obj)
{
    RISCVCPU *cpu = RISCV_CPU(obj);

    /*
     * Bare CPUs do not inherit the timer and performance
     * counters from the parent class (see riscv_cpu_init()
     * for info on why the parent enables them).
     *
     * Users have to explicitly enable these counters for
     * bare CPUs.
     */
    cpu->cfg.ext_zicntr = false;
    cpu->cfg.ext_zihpm = false;

    /* Set to QEMU's first supported priv version */
    cpu->env.priv_ver = PRIV_VERSION_1_10_0;

    /*
     * Support all available satp_mode settings. The default
     * value will be set to MBARE if the user doesn't set
     * satp_mode manually (see set_satp_mode_default_map()).
     */
#ifndef CONFIG_USER_ONLY
    set_satp_mode_max_supported(cpu, VM_1_10_SV64);
#endif
}

typedef struct misa_ext_info {
    const char *name;
    const char *description;
} MISAExtInfo;

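/*
 * The table below is indexed by the bit position of each misa bit, e.g.
 * MISA_INFO_IDX(RVC) is __builtin_ctz(RVC), so a single-letter extension's
 * property name and description can be looked up directly from its misa bit.
 */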
#define MISA_INFO_IDX(_bit) \
    __builtin_ctz(_bit)

#define MISA_EXT_INFO(_bit, _propname, _descr) \
    [MISA_INFO_IDX(_bit)] = {.name = _propname, .description = _descr}

static const MISAExtInfo misa_ext_info_arr[] = {
    MISA_EXT_INFO(RVA, "a", "Atomic instructions"),
    MISA_EXT_INFO(RVC, "c", "Compressed instructions"),
    MISA_EXT_INFO(RVD, "d", "Double-precision floating point"),
    MISA_EXT_INFO(RVF, "f", "Single-precision floating point"),
    MISA_EXT_INFO(RVI, "i", "Base integer instruction set"),
    MISA_EXT_INFO(RVE, "e", "Base integer instruction set (embedded)"),
    MISA_EXT_INFO(RVM, "m", "Integer multiplication and division"),
    MISA_EXT_INFO(RVS, "s", "Supervisor-level instructions"),
    MISA_EXT_INFO(RVU, "u", "User-level instructions"),
    MISA_EXT_INFO(RVH, "h", "Hypervisor"),
    MISA_EXT_INFO(RVV, "v", "Vector operations"),
    MISA_EXT_INFO(RVG, "g", "General purpose (IMAFD_Zicsr_Zifencei)"),
    MISA_EXT_INFO(RVB, "b", "Bit manipulation (Zba_Zbb_Zbs)")
};

static void riscv_cpu_validate_misa_mxl(RISCVCPUClass *mcc)
{
    CPUClass *cc = CPU_CLASS(mcc);

    /* Validate that MISA_MXL is set properly. */
    switch (mcc->misa_mxl_max) {
#ifdef TARGET_RISCV64
    case MXL_RV64:
    case MXL_RV128:
        cc->gdb_core_xml_file = "riscv-64bit-cpu.xml";
        break;
#endif
    case MXL_RV32:
        cc->gdb_core_xml_file = "riscv-32bit-cpu.xml";
        break;
    default:
        g_assert_not_reached();
    }
}

static int riscv_validate_misa_info_idx(uint32_t bit)
{
    int idx;

    /*
     * Our lowest valid input (RVA) is 1 and
     * __builtin_ctz() is UB with zero.
     */
    g_assert(bit != 0);
    idx = MISA_INFO_IDX(bit);

    g_assert(idx < ARRAY_SIZE(misa_ext_info_arr));
    return idx;
}

const char *riscv_get_misa_ext_name(uint32_t bit)
{
    int idx = riscv_validate_misa_info_idx(bit);
    const char *val = misa_ext_info_arr[idx].name;

    g_assert(val != NULL);
    return val;
}

const char *riscv_get_misa_ext_description(uint32_t bit)
{
    int idx = riscv_validate_misa_info_idx(bit);
    const char *val = misa_ext_info_arr[idx].description;

    g_assert(val != NULL);
    return val;
}

#define MULTI_EXT_CFG_BOOL(_name, _prop, _defval) \
    {.name = _name, .offset = CPU_CFG_OFFSET(_prop), \
     .enabled = _defval}

const RISCVCPUMultiExtConfig riscv_cpu_extensions[] = {
    /* Defaults for standard extensions */
    MULTI_EXT_CFG_BOOL("sscofpmf", ext_sscofpmf, false),
    MULTI_EXT_CFG_BOOL("smcntrpmf", ext_smcntrpmf, false),
    MULTI_EXT_CFG_BOOL("smcsrind", ext_smcsrind, false),
    MULTI_EXT_CFG_BOOL("smcdeleg", ext_smcdeleg, false),
    MULTI_EXT_CFG_BOOL("sscsrind", ext_sscsrind, false),
    MULTI_EXT_CFG_BOOL("ssccfg", ext_ssccfg, false),
    MULTI_EXT_CFG_BOOL("zifencei", ext_zifencei, true),
    MULTI_EXT_CFG_BOOL("zicfilp", ext_zicfilp, false),
    MULTI_EXT_CFG_BOOL("zicfiss", ext_zicfiss, false),
    MULTI_EXT_CFG_BOOL("zicsr", ext_zicsr, true),
    MULTI_EXT_CFG_BOOL("zihintntl", ext_zihintntl, true),
    MULTI_EXT_CFG_BOOL("zihintpause", ext_zihintpause, true),
    MULTI_EXT_CFG_BOOL("zimop", ext_zimop, false),
    MULTI_EXT_CFG_BOOL("zcmop", ext_zcmop, false),
    MULTI_EXT_CFG_BOOL("zacas", ext_zacas, false),
    MULTI_EXT_CFG_BOOL("zama16b", ext_zama16b, false),
    MULTI_EXT_CFG_BOOL("zabha", ext_zabha, false),
    MULTI_EXT_CFG_BOOL("zaamo", ext_zaamo, false),
    MULTI_EXT_CFG_BOOL("zalrsc", ext_zalrsc, false),
    MULTI_EXT_CFG_BOOL("zawrs", ext_zawrs, true),
    MULTI_EXT_CFG_BOOL("zfa", ext_zfa, true),
    MULTI_EXT_CFG_BOOL("zfbfmin", ext_zfbfmin, false),
    MULTI_EXT_CFG_BOOL("zfh", ext_zfh, false),
    MULTI_EXT_CFG_BOOL("zfhmin", ext_zfhmin, false),
    MULTI_EXT_CFG_BOOL("zve32f", ext_zve32f, false),
    MULTI_EXT_CFG_BOOL("zve32x", ext_zve32x, false),
    MULTI_EXT_CFG_BOOL("zve64f", ext_zve64f, false),
    MULTI_EXT_CFG_BOOL("zve64d", ext_zve64d, false),
    MULTI_EXT_CFG_BOOL("zve64x", ext_zve64x, false),
    MULTI_EXT_CFG_BOOL("zvfbfmin", ext_zvfbfmin, false),
    MULTI_EXT_CFG_BOOL("zvfbfwma", ext_zvfbfwma, false),
    MULTI_EXT_CFG_BOOL("zvfh", ext_zvfh, false),
    MULTI_EXT_CFG_BOOL("zvfhmin", ext_zvfhmin, false),
    MULTI_EXT_CFG_BOOL("sstc", ext_sstc, true),
    MULTI_EXT_CFG_BOOL("ssnpm", ext_ssnpm, false),
    MULTI_EXT_CFG_BOOL("sspm", ext_sspm, false),
    MULTI_EXT_CFG_BOOL("supm", ext_supm, false),

    MULTI_EXT_CFG_BOOL("smaia", ext_smaia, false),
    MULTI_EXT_CFG_BOOL("smdbltrp", ext_smdbltrp, false),
    MULTI_EXT_CFG_BOOL("smepmp", ext_smepmp, false),
MULTI_EXT_CFG_BOOL("smrnmi", ext_smrnmi, false), 1630 MULTI_EXT_CFG_BOOL("smmpm", ext_smmpm, false), 1631 MULTI_EXT_CFG_BOOL("smnpm", ext_smnpm, false), 1632 MULTI_EXT_CFG_BOOL("smstateen", ext_smstateen, false), 1633 MULTI_EXT_CFG_BOOL("ssaia", ext_ssaia, false), 1634 MULTI_EXT_CFG_BOOL("ssdbltrp", ext_ssdbltrp, false), 1635 MULTI_EXT_CFG_BOOL("svade", ext_svade, false), 1636 MULTI_EXT_CFG_BOOL("svadu", ext_svadu, true), 1637 MULTI_EXT_CFG_BOOL("svinval", ext_svinval, false), 1638 MULTI_EXT_CFG_BOOL("svnapot", ext_svnapot, false), 1639 MULTI_EXT_CFG_BOOL("svpbmt", ext_svpbmt, false), 1640 MULTI_EXT_CFG_BOOL("svvptc", ext_svvptc, true), 1641 1642 MULTI_EXT_CFG_BOOL("zicntr", ext_zicntr, true), 1643 MULTI_EXT_CFG_BOOL("zihpm", ext_zihpm, true), 1644 1645 MULTI_EXT_CFG_BOOL("zba", ext_zba, true), 1646 MULTI_EXT_CFG_BOOL("zbb", ext_zbb, true), 1647 MULTI_EXT_CFG_BOOL("zbc", ext_zbc, true), 1648 MULTI_EXT_CFG_BOOL("zbkb", ext_zbkb, false), 1649 MULTI_EXT_CFG_BOOL("zbkc", ext_zbkc, false), 1650 MULTI_EXT_CFG_BOOL("zbkx", ext_zbkx, false), 1651 MULTI_EXT_CFG_BOOL("zbs", ext_zbs, true), 1652 MULTI_EXT_CFG_BOOL("zk", ext_zk, false), 1653 MULTI_EXT_CFG_BOOL("zkn", ext_zkn, false), 1654 MULTI_EXT_CFG_BOOL("zknd", ext_zknd, false), 1655 MULTI_EXT_CFG_BOOL("zkne", ext_zkne, false), 1656 MULTI_EXT_CFG_BOOL("zknh", ext_zknh, false), 1657 MULTI_EXT_CFG_BOOL("zkr", ext_zkr, false), 1658 MULTI_EXT_CFG_BOOL("zks", ext_zks, false), 1659 MULTI_EXT_CFG_BOOL("zksed", ext_zksed, false), 1660 MULTI_EXT_CFG_BOOL("zksh", ext_zksh, false), 1661 MULTI_EXT_CFG_BOOL("zkt", ext_zkt, false), 1662 MULTI_EXT_CFG_BOOL("ztso", ext_ztso, false), 1663 1664 MULTI_EXT_CFG_BOOL("zdinx", ext_zdinx, false), 1665 MULTI_EXT_CFG_BOOL("zfinx", ext_zfinx, false), 1666 MULTI_EXT_CFG_BOOL("zhinx", ext_zhinx, false), 1667 MULTI_EXT_CFG_BOOL("zhinxmin", ext_zhinxmin, false), 1668 1669 MULTI_EXT_CFG_BOOL("zicbom", ext_zicbom, true), 1670 MULTI_EXT_CFG_BOOL("zicbop", ext_zicbop, true), 1671 MULTI_EXT_CFG_BOOL("zicboz", ext_zicboz, true), 1672 1673 MULTI_EXT_CFG_BOOL("zmmul", ext_zmmul, false), 1674 1675 MULTI_EXT_CFG_BOOL("zca", ext_zca, false), 1676 MULTI_EXT_CFG_BOOL("zcb", ext_zcb, false), 1677 MULTI_EXT_CFG_BOOL("zcd", ext_zcd, false), 1678 MULTI_EXT_CFG_BOOL("zce", ext_zce, false), 1679 MULTI_EXT_CFG_BOOL("zcf", ext_zcf, false), 1680 MULTI_EXT_CFG_BOOL("zcmp", ext_zcmp, false), 1681 MULTI_EXT_CFG_BOOL("zcmt", ext_zcmt, false), 1682 MULTI_EXT_CFG_BOOL("zicond", ext_zicond, false), 1683 1684 /* Vector cryptography extensions */ 1685 MULTI_EXT_CFG_BOOL("zvbb", ext_zvbb, false), 1686 MULTI_EXT_CFG_BOOL("zvbc", ext_zvbc, false), 1687 MULTI_EXT_CFG_BOOL("zvkb", ext_zvkb, false), 1688 MULTI_EXT_CFG_BOOL("zvkg", ext_zvkg, false), 1689 MULTI_EXT_CFG_BOOL("zvkned", ext_zvkned, false), 1690 MULTI_EXT_CFG_BOOL("zvknha", ext_zvknha, false), 1691 MULTI_EXT_CFG_BOOL("zvknhb", ext_zvknhb, false), 1692 MULTI_EXT_CFG_BOOL("zvksed", ext_zvksed, false), 1693 MULTI_EXT_CFG_BOOL("zvksh", ext_zvksh, false), 1694 MULTI_EXT_CFG_BOOL("zvkt", ext_zvkt, false), 1695 MULTI_EXT_CFG_BOOL("zvkn", ext_zvkn, false), 1696 MULTI_EXT_CFG_BOOL("zvknc", ext_zvknc, false), 1697 MULTI_EXT_CFG_BOOL("zvkng", ext_zvkng, false), 1698 MULTI_EXT_CFG_BOOL("zvks", ext_zvks, false), 1699 MULTI_EXT_CFG_BOOL("zvksc", ext_zvksc, false), 1700 MULTI_EXT_CFG_BOOL("zvksg", ext_zvksg, false), 1701 1702 { }, 1703 }; 1704 1705 const RISCVCPUMultiExtConfig riscv_cpu_vendor_exts[] = { 1706 MULTI_EXT_CFG_BOOL("xtheadba", ext_xtheadba, false), 1707 MULTI_EXT_CFG_BOOL("xtheadbb", ext_xtheadbb, false), 
1708 MULTI_EXT_CFG_BOOL("xtheadbs", ext_xtheadbs, false), 1709 MULTI_EXT_CFG_BOOL("xtheadcmo", ext_xtheadcmo, false), 1710 MULTI_EXT_CFG_BOOL("xtheadcondmov", ext_xtheadcondmov, false), 1711 MULTI_EXT_CFG_BOOL("xtheadfmemidx", ext_xtheadfmemidx, false), 1712 MULTI_EXT_CFG_BOOL("xtheadfmv", ext_xtheadfmv, false), 1713 MULTI_EXT_CFG_BOOL("xtheadmac", ext_xtheadmac, false), 1714 MULTI_EXT_CFG_BOOL("xtheadmemidx", ext_xtheadmemidx, false), 1715 MULTI_EXT_CFG_BOOL("xtheadmempair", ext_xtheadmempair, false), 1716 MULTI_EXT_CFG_BOOL("xtheadsync", ext_xtheadsync, false), 1717 MULTI_EXT_CFG_BOOL("xventanacondops", ext_XVentanaCondOps, false), 1718 1719 { }, 1720 }; 1721 1722 /* These are experimental so mark with 'x-' */ 1723 const RISCVCPUMultiExtConfig riscv_cpu_experimental_exts[] = { 1724 MULTI_EXT_CFG_BOOL("x-svukte", ext_svukte, false), 1725 1726 { }, 1727 }; 1728 1729 /* 1730 * 'Named features' is the name we give to extensions that we 1731 * don't want to expose to users. They are either immutable 1732 * (always enabled/disable) or they'll vary depending on 1733 * the resulting CPU state. They have riscv,isa strings 1734 * and priv_ver like regular extensions. 1735 */ 1736 const RISCVCPUMultiExtConfig riscv_cpu_named_features[] = { 1737 MULTI_EXT_CFG_BOOL("zic64b", ext_zic64b, true), 1738 MULTI_EXT_CFG_BOOL("ssstateen", ext_ssstateen, true), 1739 MULTI_EXT_CFG_BOOL("sha", ext_sha, true), 1740 1741 { }, 1742 }; 1743 1744 /* Deprecated entries marked for future removal */ 1745 const RISCVCPUMultiExtConfig riscv_cpu_deprecated_exts[] = { 1746 MULTI_EXT_CFG_BOOL("Zifencei", ext_zifencei, true), 1747 MULTI_EXT_CFG_BOOL("Zicsr", ext_zicsr, true), 1748 MULTI_EXT_CFG_BOOL("Zihintntl", ext_zihintntl, true), 1749 MULTI_EXT_CFG_BOOL("Zihintpause", ext_zihintpause, true), 1750 MULTI_EXT_CFG_BOOL("Zawrs", ext_zawrs, true), 1751 MULTI_EXT_CFG_BOOL("Zfa", ext_zfa, true), 1752 MULTI_EXT_CFG_BOOL("Zfh", ext_zfh, false), 1753 MULTI_EXT_CFG_BOOL("Zfhmin", ext_zfhmin, false), 1754 MULTI_EXT_CFG_BOOL("Zve32f", ext_zve32f, false), 1755 MULTI_EXT_CFG_BOOL("Zve64f", ext_zve64f, false), 1756 MULTI_EXT_CFG_BOOL("Zve64d", ext_zve64d, false), 1757 1758 { }, 1759 }; 1760 1761 static void cpu_set_prop_err(RISCVCPU *cpu, const char *propname, 1762 Error **errp) 1763 { 1764 g_autofree char *cpuname = riscv_cpu_get_name(cpu); 1765 error_setg(errp, "CPU '%s' does not allow changing the value of '%s'", 1766 cpuname, propname); 1767 } 1768 1769 static void prop_pmu_num_set(Object *obj, Visitor *v, const char *name, 1770 void *opaque, Error **errp) 1771 { 1772 RISCVCPU *cpu = RISCV_CPU(obj); 1773 uint8_t pmu_num, curr_pmu_num; 1774 uint32_t pmu_mask; 1775 1776 visit_type_uint8(v, name, &pmu_num, errp); 1777 1778 curr_pmu_num = ctpop32(cpu->cfg.pmu_mask); 1779 1780 if (pmu_num != curr_pmu_num && riscv_cpu_is_vendor(obj)) { 1781 cpu_set_prop_err(cpu, name, errp); 1782 error_append_hint(errp, "Current '%s' val: %u\n", 1783 name, curr_pmu_num); 1784 return; 1785 } 1786 1787 if (pmu_num > (RV_MAX_MHPMCOUNTERS - 3)) { 1788 error_setg(errp, "Number of counters exceeds maximum available"); 1789 return; 1790 } 1791 1792 if (pmu_num == 0) { 1793 pmu_mask = 0; 1794 } else { 1795 pmu_mask = MAKE_64BIT_MASK(3, pmu_num); 1796 } 1797 1798 warn_report("\"pmu-num\" property is deprecated; use \"pmu-mask\""); 1799 cpu->cfg.pmu_mask = pmu_mask; 1800 cpu_option_add_user_setting("pmu-mask", pmu_mask); 1801 } 1802 1803 static void prop_pmu_num_get(Object *obj, Visitor *v, const char *name, 1804 void *opaque, Error **errp) 1805 { 1806 RISCVCPU *cpu 
= RISCV_CPU(obj); 1807 uint8_t pmu_num = ctpop32(cpu->cfg.pmu_mask); 1808 1809 visit_type_uint8(v, name, &pmu_num, errp); 1810 } 1811 1812 static const PropertyInfo prop_pmu_num = { 1813 .name = "pmu-num", 1814 .get = prop_pmu_num_get, 1815 .set = prop_pmu_num_set, 1816 }; 1817 1818 static void prop_pmu_mask_set(Object *obj, Visitor *v, const char *name, 1819 void *opaque, Error **errp) 1820 { 1821 RISCVCPU *cpu = RISCV_CPU(obj); 1822 uint32_t value; 1823 uint8_t pmu_num; 1824 1825 visit_type_uint32(v, name, &value, errp); 1826 1827 if (value != cpu->cfg.pmu_mask && riscv_cpu_is_vendor(obj)) { 1828 cpu_set_prop_err(cpu, name, errp); 1829 error_append_hint(errp, "Current '%s' val: %x\n", 1830 name, cpu->cfg.pmu_mask); 1831 return; 1832 } 1833 1834 pmu_num = ctpop32(value); 1835 1836 if (pmu_num > (RV_MAX_MHPMCOUNTERS - 3)) { 1837 error_setg(errp, "Number of counters exceeds maximum available"); 1838 return; 1839 } 1840 1841 cpu_option_add_user_setting(name, value); 1842 cpu->cfg.pmu_mask = value; 1843 } 1844 1845 static void prop_pmu_mask_get(Object *obj, Visitor *v, const char *name, 1846 void *opaque, Error **errp) 1847 { 1848 uint32_t pmu_mask = RISCV_CPU(obj)->cfg.pmu_mask; 1849 1850 visit_type_uint32(v, name, &pmu_mask, errp); 1851 } 1852 1853 static const PropertyInfo prop_pmu_mask = { 1854 .name = "pmu-mask", 1855 .get = prop_pmu_mask_get, 1856 .set = prop_pmu_mask_set, 1857 }; 1858 1859 static void prop_mmu_set(Object *obj, Visitor *v, const char *name, 1860 void *opaque, Error **errp) 1861 { 1862 RISCVCPU *cpu = RISCV_CPU(obj); 1863 bool value; 1864 1865 visit_type_bool(v, name, &value, errp); 1866 1867 if (cpu->cfg.mmu != value && riscv_cpu_is_vendor(obj)) { 1868 cpu_set_prop_err(cpu, "mmu", errp); 1869 return; 1870 } 1871 1872 cpu_option_add_user_setting(name, value); 1873 cpu->cfg.mmu = value; 1874 } 1875 1876 static void prop_mmu_get(Object *obj, Visitor *v, const char *name, 1877 void *opaque, Error **errp) 1878 { 1879 bool value = RISCV_CPU(obj)->cfg.mmu; 1880 1881 visit_type_bool(v, name, &value, errp); 1882 } 1883 1884 static const PropertyInfo prop_mmu = { 1885 .name = "mmu", 1886 .get = prop_mmu_get, 1887 .set = prop_mmu_set, 1888 }; 1889 1890 static void prop_pmp_set(Object *obj, Visitor *v, const char *name, 1891 void *opaque, Error **errp) 1892 { 1893 RISCVCPU *cpu = RISCV_CPU(obj); 1894 bool value; 1895 1896 visit_type_bool(v, name, &value, errp); 1897 1898 if (cpu->cfg.pmp != value && riscv_cpu_is_vendor(obj)) { 1899 cpu_set_prop_err(cpu, name, errp); 1900 return; 1901 } 1902 1903 cpu_option_add_user_setting(name, value); 1904 cpu->cfg.pmp = value; 1905 } 1906 1907 static void prop_pmp_get(Object *obj, Visitor *v, const char *name, 1908 void *opaque, Error **errp) 1909 { 1910 bool value = RISCV_CPU(obj)->cfg.pmp; 1911 1912 visit_type_bool(v, name, &value, errp); 1913 } 1914 1915 static const PropertyInfo prop_pmp = { 1916 .name = "pmp", 1917 .get = prop_pmp_get, 1918 .set = prop_pmp_set, 1919 }; 1920 1921 static int priv_spec_from_str(const char *priv_spec_str) 1922 { 1923 int priv_version = -1; 1924 1925 if (!g_strcmp0(priv_spec_str, PRIV_VER_1_13_0_STR)) { 1926 priv_version = PRIV_VERSION_1_13_0; 1927 } else if (!g_strcmp0(priv_spec_str, PRIV_VER_1_12_0_STR)) { 1928 priv_version = PRIV_VERSION_1_12_0; 1929 } else if (!g_strcmp0(priv_spec_str, PRIV_VER_1_11_0_STR)) { 1930 priv_version = PRIV_VERSION_1_11_0; 1931 } else if (!g_strcmp0(priv_spec_str, PRIV_VER_1_10_0_STR)) { 1932 priv_version = PRIV_VERSION_1_10_0; 1933 } 1934 1935 return priv_version; 1936 } 1937 1938
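/*
 * Worked example (illustrative only, not part of the upstream source): the
 * deprecated "pmu-num" setter above derives the replacement "pmu-mask"
 * value by reserving 'pmu_num' programmable counters starting at
 * mhpmcounter3, e.g.:
 *
 *   -cpu rv64,pmu-num=8      =>  pmu_mask = MAKE_64BIT_MASK(3, 8) = 0x7f8
 *   -cpu rv64,pmu-mask=0x7f8     selects the same counters directly
 *
 * Counters 0-2 (cycle, time, instret) are never part of the mask, which is
 * why both setters cap the count at RV_MAX_MHPMCOUNTERS - 3.
 */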
const char *priv_spec_to_str(int priv_version) 1939 { 1940 switch (priv_version) { 1941 case PRIV_VERSION_1_10_0: 1942 return PRIV_VER_1_10_0_STR; 1943 case PRIV_VERSION_1_11_0: 1944 return PRIV_VER_1_11_0_STR; 1945 case PRIV_VERSION_1_12_0: 1946 return PRIV_VER_1_12_0_STR; 1947 case PRIV_VERSION_1_13_0: 1948 return PRIV_VER_1_13_0_STR; 1949 default: 1950 return NULL; 1951 } 1952 } 1953 1954 static void prop_priv_spec_set(Object *obj, Visitor *v, const char *name, 1955 void *opaque, Error **errp) 1956 { 1957 RISCVCPU *cpu = RISCV_CPU(obj); 1958 g_autofree char *value = NULL; 1959 int priv_version = -1; 1960 1961 visit_type_str(v, name, &value, errp); 1962 1963 priv_version = priv_spec_from_str(value); 1964 if (priv_version < 0) { 1965 error_setg(errp, "Unsupported privilege spec version '%s'", value); 1966 return; 1967 } 1968 1969 if (priv_version != cpu->env.priv_ver && riscv_cpu_is_vendor(obj)) { 1970 cpu_set_prop_err(cpu, name, errp); 1971 error_append_hint(errp, "Current '%s' val: %s\n", name, 1972 object_property_get_str(obj, name, NULL)); 1973 return; 1974 } 1975 1976 cpu_option_add_user_setting(name, priv_version); 1977 cpu->env.priv_ver = priv_version; 1978 } 1979 1980 static void prop_priv_spec_get(Object *obj, Visitor *v, const char *name, 1981 void *opaque, Error **errp) 1982 { 1983 RISCVCPU *cpu = RISCV_CPU(obj); 1984 const char *value = priv_spec_to_str(cpu->env.priv_ver); 1985 1986 visit_type_str(v, name, (char **)&value, errp); 1987 } 1988 1989 static const PropertyInfo prop_priv_spec = { 1990 .name = "priv_spec", 1991 .get = prop_priv_spec_get, 1992 .set = prop_priv_spec_set, 1993 }; 1994 1995 static void prop_vext_spec_set(Object *obj, Visitor *v, const char *name, 1996 void *opaque, Error **errp) 1997 { 1998 RISCVCPU *cpu = RISCV_CPU(obj); 1999 g_autofree char *value = NULL; 2000 2001 visit_type_str(v, name, &value, errp); 2002 2003 if (g_strcmp0(value, VEXT_VER_1_00_0_STR) != 0) { 2004 error_setg(errp, "Unsupported vector spec version '%s'", value); 2005 return; 2006 } 2007 2008 cpu_option_add_user_setting(name, VEXT_VERSION_1_00_0); 2009 cpu->env.vext_ver = VEXT_VERSION_1_00_0; 2010 } 2011 2012 static void prop_vext_spec_get(Object *obj, Visitor *v, const char *name, 2013 void *opaque, Error **errp) 2014 { 2015 const char *value = VEXT_VER_1_00_0_STR; 2016 2017 visit_type_str(v, name, (char **)&value, errp); 2018 } 2019 2020 static const PropertyInfo prop_vext_spec = { 2021 .name = "vext_spec", 2022 .get = prop_vext_spec_get, 2023 .set = prop_vext_spec_set, 2024 }; 2025 2026 static void prop_vlen_set(Object *obj, Visitor *v, const char *name, 2027 void *opaque, Error **errp) 2028 { 2029 RISCVCPU *cpu = RISCV_CPU(obj); 2030 uint16_t value; 2031 2032 if (!visit_type_uint16(v, name, &value, errp)) { 2033 return; 2034 } 2035 2036 if (!is_power_of_2(value)) { 2037 error_setg(errp, "Vector extension VLEN must be power of 2"); 2038 return; 2039 } 2040 2041 if (value != cpu->cfg.vlenb << 3 && riscv_cpu_is_vendor(obj)) { 2042 cpu_set_prop_err(cpu, name, errp); 2043 error_append_hint(errp, "Current '%s' val: %u\n", 2044 name, cpu->cfg.vlenb << 3); 2045 return; 2046 } 2047 2048 cpu_option_add_user_setting(name, value); 2049 cpu->cfg.vlenb = value >> 3; 2050 } 2051 2052 static void prop_vlen_get(Object *obj, Visitor *v, const char *name, 2053 void *opaque, Error **errp) 2054 { 2055 uint16_t value = RISCV_CPU(obj)->cfg.vlenb << 3; 2056 2057 visit_type_uint16(v, name, &value, errp); 2058 } 2059 2060 static const PropertyInfo prop_vlen = { 2061 .name = "vlen", 2062 .get = prop_vlen_get,
2063 .set = prop_vlen_set, 2064 }; 2065 2066 static void prop_elen_set(Object *obj, Visitor *v, const char *name, 2067 void *opaque, Error **errp) 2068 { 2069 RISCVCPU *cpu = RISCV_CPU(obj); 2070 uint16_t value; 2071 2072 if (!visit_type_uint16(v, name, &value, errp)) { 2073 return; 2074 } 2075 2076 if (!is_power_of_2(value)) { 2077 error_setg(errp, "Vector extension ELEN must be power of 2"); 2078 return; 2079 } 2080 2081 if (value != cpu->cfg.elen && riscv_cpu_is_vendor(obj)) { 2082 cpu_set_prop_err(cpu, name, errp); 2083 error_append_hint(errp, "Current '%s' val: %u\n", 2084 name, cpu->cfg.elen); 2085 return; 2086 } 2087 2088 cpu_option_add_user_setting(name, value); 2089 cpu->cfg.elen = value; 2090 } 2091 2092 static void prop_elen_get(Object *obj, Visitor *v, const char *name, 2093 void *opaque, Error **errp) 2094 { 2095 uint16_t value = RISCV_CPU(obj)->cfg.elen; 2096 2097 visit_type_uint16(v, name, &value, errp); 2098 } 2099 2100 static const PropertyInfo prop_elen = { 2101 .name = "elen", 2102 .get = prop_elen_get, 2103 .set = prop_elen_set, 2104 }; 2105 2106 static void prop_cbom_blksize_set(Object *obj, Visitor *v, const char *name, 2107 void *opaque, Error **errp) 2108 { 2109 RISCVCPU *cpu = RISCV_CPU(obj); 2110 uint16_t value; 2111 2112 if (!visit_type_uint16(v, name, &value, errp)) { 2113 return; 2114 } 2115 2116 if (value != cpu->cfg.cbom_blocksize && riscv_cpu_is_vendor(obj)) { 2117 cpu_set_prop_err(cpu, name, errp); 2118 error_append_hint(errp, "Current '%s' val: %u\n", 2119 name, cpu->cfg.cbom_blocksize); 2120 return; 2121 } 2122 2123 cpu_option_add_user_setting(name, value); 2124 cpu->cfg.cbom_blocksize = value; 2125 } 2126 2127 static void prop_cbom_blksize_get(Object *obj, Visitor *v, const char *name, 2128 void *opaque, Error **errp) 2129 { 2130 uint16_t value = RISCV_CPU(obj)->cfg.cbom_blocksize; 2131 2132 visit_type_uint16(v, name, &value, errp); 2133 } 2134 2135 static const PropertyInfo prop_cbom_blksize = { 2136 .name = "cbom_blocksize", 2137 .get = prop_cbom_blksize_get, 2138 .set = prop_cbom_blksize_set, 2139 }; 2140 2141 static void prop_cbop_blksize_set(Object *obj, Visitor *v, const char *name, 2142 void *opaque, Error **errp) 2143 { 2144 RISCVCPU *cpu = RISCV_CPU(obj); 2145 uint16_t value; 2146 2147 if (!visit_type_uint16(v, name, &value, errp)) { 2148 return; 2149 } 2150 2151 if (value != cpu->cfg.cbop_blocksize && riscv_cpu_is_vendor(obj)) { 2152 cpu_set_prop_err(cpu, name, errp); 2153 error_append_hint(errp, "Current '%s' val: %u\n", 2154 name, cpu->cfg.cbop_blocksize); 2155 return; 2156 } 2157 2158 cpu_option_add_user_setting(name, value); 2159 cpu->cfg.cbop_blocksize = value; 2160 } 2161 2162 static void prop_cbop_blksize_get(Object *obj, Visitor *v, const char *name, 2163 void *opaque, Error **errp) 2164 { 2165 uint16_t value = RISCV_CPU(obj)->cfg.cbop_blocksize; 2166 2167 visit_type_uint16(v, name, &value, errp); 2168 } 2169 2170 static const PropertyInfo prop_cbop_blksize = { 2171 .name = "cbop_blocksize", 2172 .get = prop_cbop_blksize_get, 2173 .set = prop_cbop_blksize_set, 2174 }; 2175 2176 static void prop_cboz_blksize_set(Object *obj, Visitor *v, const char *name, 2177 void *opaque, Error **errp) 2178 { 2179 RISCVCPU *cpu = RISCV_CPU(obj); 2180 uint16_t value; 2181 2182 if (!visit_type_uint16(v, name, &value, errp)) { 2183 return; 2184 } 2185 2186 if (value != cpu->cfg.cboz_blocksize && riscv_cpu_is_vendor(obj)) { 2187 cpu_set_prop_err(cpu, name, errp); 2188 error_append_hint(errp, "Current '%s' val: %u\n", 2189 name, cpu->cfg.cboz_blocksize); 
2190 return; 2191 } 2192 2193 cpu_option_add_user_setting(name, value); 2194 cpu->cfg.cboz_blocksize = value; 2195 } 2196 2197 static void prop_cboz_blksize_get(Object *obj, Visitor *v, const char *name, 2198 void *opaque, Error **errp) 2199 { 2200 uint16_t value = RISCV_CPU(obj)->cfg.cboz_blocksize; 2201 2202 visit_type_uint16(v, name, &value, errp); 2203 } 2204 2205 static const PropertyInfo prop_cboz_blksize = { 2206 .name = "cboz_blocksize", 2207 .get = prop_cboz_blksize_get, 2208 .set = prop_cboz_blksize_set, 2209 }; 2210 2211 static void prop_mvendorid_set(Object *obj, Visitor *v, const char *name, 2212 void *opaque, Error **errp) 2213 { 2214 bool dynamic_cpu = riscv_cpu_is_dynamic(obj); 2215 RISCVCPU *cpu = RISCV_CPU(obj); 2216 uint32_t prev_val = cpu->cfg.mvendorid; 2217 uint32_t value; 2218 2219 if (!visit_type_uint32(v, name, &value, errp)) { 2220 return; 2221 } 2222 2223 if (!dynamic_cpu && prev_val != value) { 2224 error_setg(errp, "Unable to change %s mvendorid (0x%x)", 2225 object_get_typename(obj), prev_val); 2226 return; 2227 } 2228 2229 cpu->cfg.mvendorid = value; 2230 } 2231 2232 static void prop_mvendorid_get(Object *obj, Visitor *v, const char *name, 2233 void *opaque, Error **errp) 2234 { 2235 uint32_t value = RISCV_CPU(obj)->cfg.mvendorid; 2236 2237 visit_type_uint32(v, name, &value, errp); 2238 } 2239 2240 static const PropertyInfo prop_mvendorid = { 2241 .name = "mvendorid", 2242 .get = prop_mvendorid_get, 2243 .set = prop_mvendorid_set, 2244 }; 2245 2246 static void prop_mimpid_set(Object *obj, Visitor *v, const char *name, 2247 void *opaque, Error **errp) 2248 { 2249 bool dynamic_cpu = riscv_cpu_is_dynamic(obj); 2250 RISCVCPU *cpu = RISCV_CPU(obj); 2251 uint64_t prev_val = cpu->cfg.mimpid; 2252 uint64_t value; 2253 2254 if (!visit_type_uint64(v, name, &value, errp)) { 2255 return; 2256 } 2257 2258 if (!dynamic_cpu && prev_val != value) { 2259 error_setg(errp, "Unable to change %s mimpid (0x%" PRIu64 ")", 2260 object_get_typename(obj), prev_val); 2261 return; 2262 } 2263 2264 cpu->cfg.mimpid = value; 2265 } 2266 2267 static void prop_mimpid_get(Object *obj, Visitor *v, const char *name, 2268 void *opaque, Error **errp) 2269 { 2270 uint64_t value = RISCV_CPU(obj)->cfg.mimpid; 2271 2272 visit_type_uint64(v, name, &value, errp); 2273 } 2274 2275 static const PropertyInfo prop_mimpid = { 2276 .name = "mimpid", 2277 .get = prop_mimpid_get, 2278 .set = prop_mimpid_set, 2279 }; 2280 2281 static void prop_marchid_set(Object *obj, Visitor *v, const char *name, 2282 void *opaque, Error **errp) 2283 { 2284 bool dynamic_cpu = riscv_cpu_is_dynamic(obj); 2285 RISCVCPU *cpu = RISCV_CPU(obj); 2286 uint64_t prev_val = cpu->cfg.marchid; 2287 uint64_t value, invalid_val; 2288 uint32_t mxlen = 0; 2289 2290 if (!visit_type_uint64(v, name, &value, errp)) { 2291 return; 2292 } 2293 2294 if (!dynamic_cpu && prev_val != value) { 2295 error_setg(errp, "Unable to change %s marchid (0x%" PRIu64 ")", 2296 object_get_typename(obj), prev_val); 2297 return; 2298 } 2299 2300 switch (riscv_cpu_mxl(&cpu->env)) { 2301 case MXL_RV32: 2302 mxlen = 32; 2303 break; 2304 case MXL_RV64: 2305 case MXL_RV128: 2306 mxlen = 64; 2307 break; 2308 default: 2309 g_assert_not_reached(); 2310 } 2311 2312 invalid_val = 1LL << (mxlen - 1); 2313 2314 if (value == invalid_val) { 2315 error_setg(errp, "Unable to set marchid with MSB (%u) bit set " 2316 "and the remaining bits zero", mxlen); 2317 return; 2318 } 2319 2320 cpu->cfg.marchid = value; 2321 } 2322 2323 static void prop_marchid_get(Object *obj, Visitor *v, const 
char *name, 2324 void *opaque, Error **errp) 2325 { 2326 uint64_t value = RISCV_CPU(obj)->cfg.marchid; 2327 2328 visit_type_uint64(v, name, &value, errp); 2329 } 2330 2331 static const PropertyInfo prop_marchid = { 2332 .name = "marchid", 2333 .get = prop_marchid_get, 2334 .set = prop_marchid_set, 2335 }; 2336 2337 /* 2338 * RVA22U64 defines some 'named features' that are cache 2339 * related: Za64rs, Zic64b, Ziccif, Ziccrse, Ziccamoa 2340 * and Zicclsm. They are always implemented in TCG and 2341 * doesn't need to be manually enabled by the profile. 2342 */ 2343 static RISCVCPUProfile RVA22U64 = { 2344 .parent = NULL, 2345 .name = "rva22u64", 2346 .misa_ext = RVI | RVM | RVA | RVF | RVD | RVC | RVU, 2347 .priv_spec = RISCV_PROFILE_ATTR_UNUSED, 2348 .satp_mode = RISCV_PROFILE_ATTR_UNUSED, 2349 .ext_offsets = { 2350 CPU_CFG_OFFSET(ext_zicsr), CPU_CFG_OFFSET(ext_zihintpause), 2351 CPU_CFG_OFFSET(ext_zba), CPU_CFG_OFFSET(ext_zbb), 2352 CPU_CFG_OFFSET(ext_zbs), CPU_CFG_OFFSET(ext_zfhmin), 2353 CPU_CFG_OFFSET(ext_zkt), CPU_CFG_OFFSET(ext_zicntr), 2354 CPU_CFG_OFFSET(ext_zihpm), CPU_CFG_OFFSET(ext_zicbom), 2355 CPU_CFG_OFFSET(ext_zicbop), CPU_CFG_OFFSET(ext_zicboz), 2356 2357 /* mandatory named features for this profile */ 2358 CPU_CFG_OFFSET(ext_zic64b), 2359 2360 RISCV_PROFILE_EXT_LIST_END 2361 } 2362 }; 2363 2364 /* 2365 * As with RVA22U64, RVA22S64 also defines 'named features'. 2366 * 2367 * Cache related features that we consider enabled since we don't 2368 * implement cache: Ssccptr 2369 * 2370 * Other named features that we already implement: Sstvecd, Sstvala, 2371 * Sscounterenw 2372 * 2373 * The remaining features/extensions comes from RVA22U64. 2374 */ 2375 static RISCVCPUProfile RVA22S64 = { 2376 .parent = &RVA22U64, 2377 .name = "rva22s64", 2378 .misa_ext = RVS, 2379 .priv_spec = PRIV_VERSION_1_12_0, 2380 .satp_mode = VM_1_10_SV39, 2381 .ext_offsets = { 2382 /* rva22s64 exts */ 2383 CPU_CFG_OFFSET(ext_zifencei), CPU_CFG_OFFSET(ext_svpbmt), 2384 CPU_CFG_OFFSET(ext_svinval), CPU_CFG_OFFSET(ext_svade), 2385 2386 RISCV_PROFILE_EXT_LIST_END 2387 } 2388 }; 2389 2390 RISCVCPUProfile *riscv_profiles[] = { 2391 &RVA22U64, 2392 &RVA22S64, 2393 NULL, 2394 }; 2395 2396 static RISCVCPUImpliedExtsRule RVA_IMPLIED = { 2397 .is_misa = true, 2398 .ext = RVA, 2399 .implied_multi_exts = { 2400 CPU_CFG_OFFSET(ext_zalrsc), CPU_CFG_OFFSET(ext_zaamo), 2401 2402 RISCV_IMPLIED_EXTS_RULE_END 2403 }, 2404 }; 2405 2406 static RISCVCPUImpliedExtsRule RVD_IMPLIED = { 2407 .is_misa = true, 2408 .ext = RVD, 2409 .implied_misa_exts = RVF, 2410 .implied_multi_exts = { RISCV_IMPLIED_EXTS_RULE_END }, 2411 }; 2412 2413 static RISCVCPUImpliedExtsRule RVF_IMPLIED = { 2414 .is_misa = true, 2415 .ext = RVF, 2416 .implied_multi_exts = { 2417 CPU_CFG_OFFSET(ext_zicsr), 2418 2419 RISCV_IMPLIED_EXTS_RULE_END 2420 }, 2421 }; 2422 2423 static RISCVCPUImpliedExtsRule RVM_IMPLIED = { 2424 .is_misa = true, 2425 .ext = RVM, 2426 .implied_multi_exts = { 2427 CPU_CFG_OFFSET(ext_zmmul), 2428 2429 RISCV_IMPLIED_EXTS_RULE_END 2430 }, 2431 }; 2432 2433 static RISCVCPUImpliedExtsRule RVV_IMPLIED = { 2434 .is_misa = true, 2435 .ext = RVV, 2436 .implied_multi_exts = { 2437 CPU_CFG_OFFSET(ext_zve64d), 2438 2439 RISCV_IMPLIED_EXTS_RULE_END 2440 }, 2441 }; 2442 2443 static RISCVCPUImpliedExtsRule ZCB_IMPLIED = { 2444 .ext = CPU_CFG_OFFSET(ext_zcb), 2445 .implied_multi_exts = { 2446 CPU_CFG_OFFSET(ext_zca), 2447 2448 RISCV_IMPLIED_EXTS_RULE_END 2449 }, 2450 }; 2451 2452 static RISCVCPUImpliedExtsRule ZCD_IMPLIED = { 2453 .ext = 
CPU_CFG_OFFSET(ext_zcd), 2454 .implied_misa_exts = RVD, 2455 .implied_multi_exts = { 2456 CPU_CFG_OFFSET(ext_zca), 2457 2458 RISCV_IMPLIED_EXTS_RULE_END 2459 }, 2460 }; 2461 2462 static RISCVCPUImpliedExtsRule ZCE_IMPLIED = { 2463 .ext = CPU_CFG_OFFSET(ext_zce), 2464 .implied_multi_exts = { 2465 CPU_CFG_OFFSET(ext_zcb), CPU_CFG_OFFSET(ext_zcmp), 2466 CPU_CFG_OFFSET(ext_zcmt), 2467 2468 RISCV_IMPLIED_EXTS_RULE_END 2469 }, 2470 }; 2471 2472 static RISCVCPUImpliedExtsRule ZCF_IMPLIED = { 2473 .ext = CPU_CFG_OFFSET(ext_zcf), 2474 .implied_misa_exts = RVF, 2475 .implied_multi_exts = { 2476 CPU_CFG_OFFSET(ext_zca), 2477 2478 RISCV_IMPLIED_EXTS_RULE_END 2479 }, 2480 }; 2481 2482 static RISCVCPUImpliedExtsRule ZCMP_IMPLIED = { 2483 .ext = CPU_CFG_OFFSET(ext_zcmp), 2484 .implied_multi_exts = { 2485 CPU_CFG_OFFSET(ext_zca), 2486 2487 RISCV_IMPLIED_EXTS_RULE_END 2488 }, 2489 }; 2490 2491 static RISCVCPUImpliedExtsRule ZCMT_IMPLIED = { 2492 .ext = CPU_CFG_OFFSET(ext_zcmt), 2493 .implied_multi_exts = { 2494 CPU_CFG_OFFSET(ext_zca), CPU_CFG_OFFSET(ext_zicsr), 2495 2496 RISCV_IMPLIED_EXTS_RULE_END 2497 }, 2498 }; 2499 2500 static RISCVCPUImpliedExtsRule ZDINX_IMPLIED = { 2501 .ext = CPU_CFG_OFFSET(ext_zdinx), 2502 .implied_multi_exts = { 2503 CPU_CFG_OFFSET(ext_zfinx), 2504 2505 RISCV_IMPLIED_EXTS_RULE_END 2506 }, 2507 }; 2508 2509 static RISCVCPUImpliedExtsRule ZFA_IMPLIED = { 2510 .ext = CPU_CFG_OFFSET(ext_zfa), 2511 .implied_misa_exts = RVF, 2512 .implied_multi_exts = { RISCV_IMPLIED_EXTS_RULE_END }, 2513 }; 2514 2515 static RISCVCPUImpliedExtsRule ZFBFMIN_IMPLIED = { 2516 .ext = CPU_CFG_OFFSET(ext_zfbfmin), 2517 .implied_misa_exts = RVF, 2518 .implied_multi_exts = { RISCV_IMPLIED_EXTS_RULE_END }, 2519 }; 2520 2521 static RISCVCPUImpliedExtsRule ZFH_IMPLIED = { 2522 .ext = CPU_CFG_OFFSET(ext_zfh), 2523 .implied_multi_exts = { 2524 CPU_CFG_OFFSET(ext_zfhmin), 2525 2526 RISCV_IMPLIED_EXTS_RULE_END 2527 }, 2528 }; 2529 2530 static RISCVCPUImpliedExtsRule ZFHMIN_IMPLIED = { 2531 .ext = CPU_CFG_OFFSET(ext_zfhmin), 2532 .implied_misa_exts = RVF, 2533 .implied_multi_exts = { RISCV_IMPLIED_EXTS_RULE_END }, 2534 }; 2535 2536 static RISCVCPUImpliedExtsRule ZFINX_IMPLIED = { 2537 .ext = CPU_CFG_OFFSET(ext_zfinx), 2538 .implied_multi_exts = { 2539 CPU_CFG_OFFSET(ext_zicsr), 2540 2541 RISCV_IMPLIED_EXTS_RULE_END 2542 }, 2543 }; 2544 2545 static RISCVCPUImpliedExtsRule ZHINX_IMPLIED = { 2546 .ext = CPU_CFG_OFFSET(ext_zhinx), 2547 .implied_multi_exts = { 2548 CPU_CFG_OFFSET(ext_zhinxmin), 2549 2550 RISCV_IMPLIED_EXTS_RULE_END 2551 }, 2552 }; 2553 2554 static RISCVCPUImpliedExtsRule ZHINXMIN_IMPLIED = { 2555 .ext = CPU_CFG_OFFSET(ext_zhinxmin), 2556 .implied_multi_exts = { 2557 CPU_CFG_OFFSET(ext_zfinx), 2558 2559 RISCV_IMPLIED_EXTS_RULE_END 2560 }, 2561 }; 2562 2563 static RISCVCPUImpliedExtsRule ZICNTR_IMPLIED = { 2564 .ext = CPU_CFG_OFFSET(ext_zicntr), 2565 .implied_multi_exts = { 2566 CPU_CFG_OFFSET(ext_zicsr), 2567 2568 RISCV_IMPLIED_EXTS_RULE_END 2569 }, 2570 }; 2571 2572 static RISCVCPUImpliedExtsRule ZIHPM_IMPLIED = { 2573 .ext = CPU_CFG_OFFSET(ext_zihpm), 2574 .implied_multi_exts = { 2575 CPU_CFG_OFFSET(ext_zicsr), 2576 2577 RISCV_IMPLIED_EXTS_RULE_END 2578 }, 2579 }; 2580 2581 static RISCVCPUImpliedExtsRule ZK_IMPLIED = { 2582 .ext = CPU_CFG_OFFSET(ext_zk), 2583 .implied_multi_exts = { 2584 CPU_CFG_OFFSET(ext_zkn), CPU_CFG_OFFSET(ext_zkr), 2585 CPU_CFG_OFFSET(ext_zkt), 2586 2587 RISCV_IMPLIED_EXTS_RULE_END 2588 }, 2589 }; 2590 2591 static RISCVCPUImpliedExtsRule ZKN_IMPLIED = { 2592 .ext = 
CPU_CFG_OFFSET(ext_zkn), 2593 .implied_multi_exts = { 2594 CPU_CFG_OFFSET(ext_zbkb), CPU_CFG_OFFSET(ext_zbkc), 2595 CPU_CFG_OFFSET(ext_zbkx), CPU_CFG_OFFSET(ext_zkne), 2596 CPU_CFG_OFFSET(ext_zknd), CPU_CFG_OFFSET(ext_zknh), 2597 2598 RISCV_IMPLIED_EXTS_RULE_END 2599 }, 2600 }; 2601 2602 static RISCVCPUImpliedExtsRule ZKS_IMPLIED = { 2603 .ext = CPU_CFG_OFFSET(ext_zks), 2604 .implied_multi_exts = { 2605 CPU_CFG_OFFSET(ext_zbkb), CPU_CFG_OFFSET(ext_zbkc), 2606 CPU_CFG_OFFSET(ext_zbkx), CPU_CFG_OFFSET(ext_zksed), 2607 CPU_CFG_OFFSET(ext_zksh), 2608 2609 RISCV_IMPLIED_EXTS_RULE_END 2610 }, 2611 }; 2612 2613 static RISCVCPUImpliedExtsRule ZVBB_IMPLIED = { 2614 .ext = CPU_CFG_OFFSET(ext_zvbb), 2615 .implied_multi_exts = { 2616 CPU_CFG_OFFSET(ext_zvkb), 2617 2618 RISCV_IMPLIED_EXTS_RULE_END 2619 }, 2620 }; 2621 2622 static RISCVCPUImpliedExtsRule ZVE32F_IMPLIED = { 2623 .ext = CPU_CFG_OFFSET(ext_zve32f), 2624 .implied_misa_exts = RVF, 2625 .implied_multi_exts = { 2626 CPU_CFG_OFFSET(ext_zve32x), 2627 2628 RISCV_IMPLIED_EXTS_RULE_END 2629 }, 2630 }; 2631 2632 static RISCVCPUImpliedExtsRule ZVE32X_IMPLIED = { 2633 .ext = CPU_CFG_OFFSET(ext_zve32x), 2634 .implied_multi_exts = { 2635 CPU_CFG_OFFSET(ext_zicsr), 2636 2637 RISCV_IMPLIED_EXTS_RULE_END 2638 }, 2639 }; 2640 2641 static RISCVCPUImpliedExtsRule ZVE64D_IMPLIED = { 2642 .ext = CPU_CFG_OFFSET(ext_zve64d), 2643 .implied_misa_exts = RVD, 2644 .implied_multi_exts = { 2645 CPU_CFG_OFFSET(ext_zve64f), 2646 2647 RISCV_IMPLIED_EXTS_RULE_END 2648 }, 2649 }; 2650 2651 static RISCVCPUImpliedExtsRule ZVE64F_IMPLIED = { 2652 .ext = CPU_CFG_OFFSET(ext_zve64f), 2653 .implied_misa_exts = RVF, 2654 .implied_multi_exts = { 2655 CPU_CFG_OFFSET(ext_zve32f), CPU_CFG_OFFSET(ext_zve64x), 2656 2657 RISCV_IMPLIED_EXTS_RULE_END 2658 }, 2659 }; 2660 2661 static RISCVCPUImpliedExtsRule ZVE64X_IMPLIED = { 2662 .ext = CPU_CFG_OFFSET(ext_zve64x), 2663 .implied_multi_exts = { 2664 CPU_CFG_OFFSET(ext_zve32x), 2665 2666 RISCV_IMPLIED_EXTS_RULE_END 2667 }, 2668 }; 2669 2670 static RISCVCPUImpliedExtsRule ZVFBFMIN_IMPLIED = { 2671 .ext = CPU_CFG_OFFSET(ext_zvfbfmin), 2672 .implied_multi_exts = { 2673 CPU_CFG_OFFSET(ext_zve32f), 2674 2675 RISCV_IMPLIED_EXTS_RULE_END 2676 }, 2677 }; 2678 2679 static RISCVCPUImpliedExtsRule ZVFBFWMA_IMPLIED = { 2680 .ext = CPU_CFG_OFFSET(ext_zvfbfwma), 2681 .implied_multi_exts = { 2682 CPU_CFG_OFFSET(ext_zvfbfmin), CPU_CFG_OFFSET(ext_zfbfmin), 2683 2684 RISCV_IMPLIED_EXTS_RULE_END 2685 }, 2686 }; 2687 2688 static RISCVCPUImpliedExtsRule ZVFH_IMPLIED = { 2689 .ext = CPU_CFG_OFFSET(ext_zvfh), 2690 .implied_multi_exts = { 2691 CPU_CFG_OFFSET(ext_zvfhmin), CPU_CFG_OFFSET(ext_zfhmin), 2692 2693 RISCV_IMPLIED_EXTS_RULE_END 2694 }, 2695 }; 2696 2697 static RISCVCPUImpliedExtsRule ZVFHMIN_IMPLIED = { 2698 .ext = CPU_CFG_OFFSET(ext_zvfhmin), 2699 .implied_multi_exts = { 2700 CPU_CFG_OFFSET(ext_zve32f), 2701 2702 RISCV_IMPLIED_EXTS_RULE_END 2703 }, 2704 }; 2705 2706 static RISCVCPUImpliedExtsRule ZVKN_IMPLIED = { 2707 .ext = CPU_CFG_OFFSET(ext_zvkn), 2708 .implied_multi_exts = { 2709 CPU_CFG_OFFSET(ext_zvkned), CPU_CFG_OFFSET(ext_zvknhb), 2710 CPU_CFG_OFFSET(ext_zvkb), CPU_CFG_OFFSET(ext_zvkt), 2711 2712 RISCV_IMPLIED_EXTS_RULE_END 2713 }, 2714 }; 2715 2716 static RISCVCPUImpliedExtsRule ZVKNC_IMPLIED = { 2717 .ext = CPU_CFG_OFFSET(ext_zvknc), 2718 .implied_multi_exts = { 2719 CPU_CFG_OFFSET(ext_zvkn), CPU_CFG_OFFSET(ext_zvbc), 2720 2721 RISCV_IMPLIED_EXTS_RULE_END 2722 }, 2723 }; 2724 2725 static RISCVCPUImpliedExtsRule ZVKNG_IMPLIED = { 2726 .ext = 
CPU_CFG_OFFSET(ext_zvkng), 2727 .implied_multi_exts = { 2728 CPU_CFG_OFFSET(ext_zvkn), CPU_CFG_OFFSET(ext_zvkg), 2729 2730 RISCV_IMPLIED_EXTS_RULE_END 2731 }, 2732 }; 2733 2734 static RISCVCPUImpliedExtsRule ZVKNHB_IMPLIED = { 2735 .ext = CPU_CFG_OFFSET(ext_zvknhb), 2736 .implied_multi_exts = { 2737 CPU_CFG_OFFSET(ext_zve64x), 2738 2739 RISCV_IMPLIED_EXTS_RULE_END 2740 }, 2741 }; 2742 2743 static RISCVCPUImpliedExtsRule ZVKS_IMPLIED = { 2744 .ext = CPU_CFG_OFFSET(ext_zvks), 2745 .implied_multi_exts = { 2746 CPU_CFG_OFFSET(ext_zvksed), CPU_CFG_OFFSET(ext_zvksh), 2747 CPU_CFG_OFFSET(ext_zvkb), CPU_CFG_OFFSET(ext_zvkt), 2748 2749 RISCV_IMPLIED_EXTS_RULE_END 2750 }, 2751 }; 2752 2753 static RISCVCPUImpliedExtsRule ZVKSC_IMPLIED = { 2754 .ext = CPU_CFG_OFFSET(ext_zvksc), 2755 .implied_multi_exts = { 2756 CPU_CFG_OFFSET(ext_zvks), CPU_CFG_OFFSET(ext_zvbc), 2757 2758 RISCV_IMPLIED_EXTS_RULE_END 2759 }, 2760 }; 2761 2762 static RISCVCPUImpliedExtsRule ZVKSG_IMPLIED = { 2763 .ext = CPU_CFG_OFFSET(ext_zvksg), 2764 .implied_multi_exts = { 2765 CPU_CFG_OFFSET(ext_zvks), CPU_CFG_OFFSET(ext_zvkg), 2766 2767 RISCV_IMPLIED_EXTS_RULE_END 2768 }, 2769 }; 2770 2771 static RISCVCPUImpliedExtsRule SSCFG_IMPLIED = { 2772 .ext = CPU_CFG_OFFSET(ext_ssccfg), 2773 .implied_multi_exts = { 2774 CPU_CFG_OFFSET(ext_smcsrind), CPU_CFG_OFFSET(ext_sscsrind), 2775 CPU_CFG_OFFSET(ext_smcdeleg), 2776 2777 RISCV_IMPLIED_EXTS_RULE_END 2778 }, 2779 }; 2780 2781 static RISCVCPUImpliedExtsRule SUPM_IMPLIED = { 2782 .ext = CPU_CFG_OFFSET(ext_supm), 2783 .implied_multi_exts = { 2784 CPU_CFG_OFFSET(ext_ssnpm), CPU_CFG_OFFSET(ext_smnpm), 2785 2786 RISCV_IMPLIED_EXTS_RULE_END 2787 }, 2788 }; 2789 2790 static RISCVCPUImpliedExtsRule SSPM_IMPLIED = { 2791 .ext = CPU_CFG_OFFSET(ext_sspm), 2792 .implied_multi_exts = { 2793 CPU_CFG_OFFSET(ext_smnpm), 2794 2795 RISCV_IMPLIED_EXTS_RULE_END 2796 }, 2797 }; 2798 2799 RISCVCPUImpliedExtsRule *riscv_misa_ext_implied_rules[] = { 2800 &RVA_IMPLIED, &RVD_IMPLIED, &RVF_IMPLIED, 2801 &RVM_IMPLIED, &RVV_IMPLIED, NULL 2802 }; 2803 2804 RISCVCPUImpliedExtsRule *riscv_multi_ext_implied_rules[] = { 2805 &ZCB_IMPLIED, &ZCD_IMPLIED, &ZCE_IMPLIED, 2806 &ZCF_IMPLIED, &ZCMP_IMPLIED, &ZCMT_IMPLIED, 2807 &ZDINX_IMPLIED, &ZFA_IMPLIED, &ZFBFMIN_IMPLIED, 2808 &ZFH_IMPLIED, &ZFHMIN_IMPLIED, &ZFINX_IMPLIED, 2809 &ZHINX_IMPLIED, &ZHINXMIN_IMPLIED, &ZICNTR_IMPLIED, 2810 &ZIHPM_IMPLIED, &ZK_IMPLIED, &ZKN_IMPLIED, 2811 &ZKS_IMPLIED, &ZVBB_IMPLIED, &ZVE32F_IMPLIED, 2812 &ZVE32X_IMPLIED, &ZVE64D_IMPLIED, &ZVE64F_IMPLIED, 2813 &ZVE64X_IMPLIED, &ZVFBFMIN_IMPLIED, &ZVFBFWMA_IMPLIED, 2814 &ZVFH_IMPLIED, &ZVFHMIN_IMPLIED, &ZVKN_IMPLIED, 2815 &ZVKNC_IMPLIED, &ZVKNG_IMPLIED, &ZVKNHB_IMPLIED, 2816 &ZVKS_IMPLIED, &ZVKSC_IMPLIED, &ZVKSG_IMPLIED, &SSCFG_IMPLIED, 2817 &SUPM_IMPLIED, &SSPM_IMPLIED, 2818 NULL 2819 }; 2820 2821 static const Property riscv_cpu_properties[] = { 2822 DEFINE_PROP_BOOL("debug", RISCVCPU, cfg.debug, true), 2823 2824 {.name = "pmu-mask", .info = &prop_pmu_mask}, 2825 {.name = "pmu-num", .info = &prop_pmu_num}, /* Deprecated */ 2826 2827 {.name = "mmu", .info = &prop_mmu}, 2828 {.name = "pmp", .info = &prop_pmp}, 2829 2830 {.name = "priv_spec", .info = &prop_priv_spec}, 2831 {.name = "vext_spec", .info = &prop_vext_spec}, 2832 2833 {.name = "vlen", .info = &prop_vlen}, 2834 {.name = "elen", .info = &prop_elen}, 2835 2836 {.name = "cbom_blocksize", .info = &prop_cbom_blksize}, 2837 {.name = "cbop_blocksize", .info = &prop_cbop_blksize}, 2838 {.name = "cboz_blocksize", .info = &prop_cboz_blksize}, 2839 2840 {.name 
= "mvendorid", .info = &prop_mvendorid}, 2841 {.name = "mimpid", .info = &prop_mimpid}, 2842 {.name = "marchid", .info = &prop_marchid}, 2843 2844 #ifndef CONFIG_USER_ONLY 2845 DEFINE_PROP_UINT64("resetvec", RISCVCPU, env.resetvec, DEFAULT_RSTVEC), 2846 DEFINE_PROP_UINT64("rnmi-interrupt-vector", RISCVCPU, env.rnmi_irqvec, 2847 DEFAULT_RNMI_IRQVEC), 2848 DEFINE_PROP_UINT64("rnmi-exception-vector", RISCVCPU, env.rnmi_excpvec, 2849 DEFAULT_RNMI_EXCPVEC), 2850 #endif 2851 2852 DEFINE_PROP_BOOL("short-isa-string", RISCVCPU, cfg.short_isa_string, false), 2853 2854 DEFINE_PROP_BOOL("rvv_ta_all_1s", RISCVCPU, cfg.rvv_ta_all_1s, false), 2855 DEFINE_PROP_BOOL("rvv_ma_all_1s", RISCVCPU, cfg.rvv_ma_all_1s, false), 2856 DEFINE_PROP_BOOL("rvv_vl_half_avl", RISCVCPU, cfg.rvv_vl_half_avl, false), 2857 2858 /* 2859 * write_misa() is marked as experimental for now so mark 2860 * it with -x and default to 'false'. 2861 */ 2862 DEFINE_PROP_BOOL("x-misa-w", RISCVCPU, cfg.misa_w, false), 2863 }; 2864 2865 #if defined(TARGET_RISCV64) 2866 static void rva22u64_profile_cpu_init(Object *obj) 2867 { 2868 rv64i_bare_cpu_init(obj); 2869 2870 RVA22U64.enabled = true; 2871 } 2872 2873 static void rva22s64_profile_cpu_init(Object *obj) 2874 { 2875 rv64i_bare_cpu_init(obj); 2876 2877 RVA22S64.enabled = true; 2878 } 2879 #endif 2880 2881 static const gchar *riscv_gdb_arch_name(CPUState *cs) 2882 { 2883 RISCVCPU *cpu = RISCV_CPU(cs); 2884 CPURISCVState *env = &cpu->env; 2885 2886 switch (riscv_cpu_mxl(env)) { 2887 case MXL_RV32: 2888 return "riscv:rv32"; 2889 case MXL_RV64: 2890 case MXL_RV128: 2891 return "riscv:rv64"; 2892 default: 2893 g_assert_not_reached(); 2894 } 2895 } 2896 2897 #ifndef CONFIG_USER_ONLY 2898 static int64_t riscv_get_arch_id(CPUState *cs) 2899 { 2900 RISCVCPU *cpu = RISCV_CPU(cs); 2901 2902 return cpu->env.mhartid; 2903 } 2904 2905 #include "hw/core/sysemu-cpu-ops.h" 2906 2907 static const struct SysemuCPUOps riscv_sysemu_ops = { 2908 .get_phys_page_debug = riscv_cpu_get_phys_page_debug, 2909 .write_elf64_note = riscv_cpu_write_elf64_note, 2910 .write_elf32_note = riscv_cpu_write_elf32_note, 2911 .legacy_vmsd = &vmstate_riscv_cpu, 2912 }; 2913 #endif 2914 2915 static void riscv_cpu_common_class_init(ObjectClass *c, void *data) 2916 { 2917 RISCVCPUClass *mcc = RISCV_CPU_CLASS(c); 2918 CPUClass *cc = CPU_CLASS(c); 2919 DeviceClass *dc = DEVICE_CLASS(c); 2920 ResettableClass *rc = RESETTABLE_CLASS(c); 2921 2922 device_class_set_parent_realize(dc, riscv_cpu_realize, 2923 &mcc->parent_realize); 2924 2925 resettable_class_set_parent_phases(rc, NULL, riscv_cpu_reset_hold, NULL, 2926 &mcc->parent_phases); 2927 2928 cc->class_by_name = riscv_cpu_class_by_name; 2929 cc->has_work = riscv_cpu_has_work; 2930 cc->mmu_index = riscv_cpu_mmu_index; 2931 cc->dump_state = riscv_cpu_dump_state; 2932 cc->set_pc = riscv_cpu_set_pc; 2933 cc->get_pc = riscv_cpu_get_pc; 2934 cc->gdb_read_register = riscv_cpu_gdb_read_register; 2935 cc->gdb_write_register = riscv_cpu_gdb_write_register; 2936 cc->gdb_stop_before_watchpoint = true; 2937 cc->disas_set_info = riscv_cpu_disas_set_info; 2938 #ifndef CONFIG_USER_ONLY 2939 cc->sysemu_ops = &riscv_sysemu_ops; 2940 cc->get_arch_id = riscv_get_arch_id; 2941 #endif 2942 cc->gdb_arch_name = riscv_gdb_arch_name; 2943 2944 device_class_set_props(dc, riscv_cpu_properties); 2945 } 2946 2947 static void riscv_cpu_class_init(ObjectClass *c, void *data) 2948 { 2949 RISCVCPUClass *mcc = RISCV_CPU_CLASS(c); 2950 2951 mcc->misa_mxl_max = (uint32_t)(uintptr_t)data; 2952 
riscv_cpu_validate_misa_mxl(mcc); 2953 } 2954 2955 static void riscv_isa_string_ext(RISCVCPU *cpu, char **isa_str, 2956 int max_str_len) 2957 { 2958 const RISCVIsaExtData *edata; 2959 char *old = *isa_str; 2960 char *new = *isa_str; 2961 2962 for (edata = isa_edata_arr; edata && edata->name; edata++) { 2963 if (isa_ext_is_enabled(cpu, edata->ext_enable_offset)) { 2964 new = g_strconcat(old, "_", edata->name, NULL); 2965 g_free(old); 2966 old = new; 2967 } 2968 } 2969 2970 *isa_str = new; 2971 } 2972 2973 char *riscv_isa_string(RISCVCPU *cpu) 2974 { 2975 RISCVCPUClass *mcc = RISCV_CPU_GET_CLASS(cpu); 2976 int i; 2977 const size_t maxlen = sizeof("rv128") + sizeof(riscv_single_letter_exts); 2978 char *isa_str = g_new(char, maxlen); 2979 int xlen = riscv_cpu_max_xlen(mcc); 2980 char *p = isa_str + snprintf(isa_str, maxlen, "rv%d", xlen); 2981 2982 for (i = 0; i < sizeof(riscv_single_letter_exts) - 1; i++) { 2983 if (cpu->env.misa_ext & RV(riscv_single_letter_exts[i])) { 2984 *p++ = qemu_tolower(riscv_single_letter_exts[i]); 2985 } 2986 } 2987 *p = '\0'; 2988 if (!cpu->cfg.short_isa_string) { 2989 riscv_isa_string_ext(cpu, &isa_str, maxlen); 2990 } 2991 return isa_str; 2992 } 2993 2994 #ifndef CONFIG_USER_ONLY 2995 static char **riscv_isa_extensions_list(RISCVCPU *cpu, int *count) 2996 { 2997 int maxlen = ARRAY_SIZE(riscv_single_letter_exts) + ARRAY_SIZE(isa_edata_arr); 2998 char **extensions = g_new(char *, maxlen); 2999 3000 for (int i = 0; i < sizeof(riscv_single_letter_exts) - 1; i++) { 3001 if (cpu->env.misa_ext & RV(riscv_single_letter_exts[i])) { 3002 extensions[*count] = g_new(char, 2); 3003 snprintf(extensions[*count], 2, "%c", 3004 qemu_tolower(riscv_single_letter_exts[i])); 3005 (*count)++; 3006 } 3007 } 3008 3009 for (const RISCVIsaExtData *edata = isa_edata_arr; edata->name; edata++) { 3010 if (isa_ext_is_enabled(cpu, edata->ext_enable_offset)) { 3011 extensions[*count] = g_strdup(edata->name); 3012 (*count)++; 3013 } 3014 } 3015 3016 return extensions; 3017 } 3018 3019 void riscv_isa_write_fdt(RISCVCPU *cpu, void *fdt, char *nodename) 3020 { 3021 RISCVCPUClass *mcc = RISCV_CPU_GET_CLASS(cpu); 3022 const size_t maxlen = sizeof("rv128i"); 3023 g_autofree char *isa_base = g_new(char, maxlen); 3024 g_autofree char *riscv_isa; 3025 char **isa_extensions; 3026 int count = 0; 3027 int xlen = riscv_cpu_max_xlen(mcc); 3028 3029 riscv_isa = riscv_isa_string(cpu); 3030 qemu_fdt_setprop_string(fdt, nodename, "riscv,isa", riscv_isa); 3031 3032 snprintf(isa_base, maxlen, "rv%di", xlen); 3033 qemu_fdt_setprop_string(fdt, nodename, "riscv,isa-base", isa_base); 3034 3035 isa_extensions = riscv_isa_extensions_list(cpu, &count); 3036 qemu_fdt_setprop_string_array(fdt, nodename, "riscv,isa-extensions", 3037 isa_extensions, count); 3038 3039 for (int i = 0; i < count; i++) { 3040 g_free(isa_extensions[i]); 3041 } 3042 3043 g_free(isa_extensions); 3044 } 3045 #endif 3046 3047 #define DEFINE_DYNAMIC_CPU(type_name, misa_mxl_max, initfn) \ 3048 { \ 3049 .name = (type_name), \ 3050 .parent = TYPE_RISCV_DYNAMIC_CPU, \ 3051 .instance_init = (initfn), \ 3052 .class_init = riscv_cpu_class_init, \ 3053 .class_data = (void *)(misa_mxl_max) \ 3054 } 3055 3056 #define DEFINE_VENDOR_CPU(type_name, misa_mxl_max, initfn) \ 3057 { \ 3058 .name = (type_name), \ 3059 .parent = TYPE_RISCV_VENDOR_CPU, \ 3060 .instance_init = (initfn), \ 3061 .class_init = riscv_cpu_class_init, \ 3062 .class_data = (void *)(misa_mxl_max) \ 3063 } 3064 3065 #define DEFINE_BARE_CPU(type_name, misa_mxl_max, initfn) \ 3066 { \ 3067 .name = 
(type_name), \ 3068 .parent = TYPE_RISCV_BARE_CPU, \ 3069 .instance_init = (initfn), \ 3070 .class_init = riscv_cpu_class_init, \ 3071 .class_data = (void *)(misa_mxl_max) \ 3072 } 3073 3074 #define DEFINE_PROFILE_CPU(type_name, misa_mxl_max, initfn) \ 3075 { \ 3076 .name = (type_name), \ 3077 .parent = TYPE_RISCV_BARE_CPU, \ 3078 .instance_init = (initfn), \ 3079 .class_init = riscv_cpu_class_init, \ 3080 .class_data = (void *)(misa_mxl_max) \ 3081 } 3082 3083 static const TypeInfo riscv_cpu_type_infos[] = { 3084 { 3085 .name = TYPE_RISCV_CPU, 3086 .parent = TYPE_CPU, 3087 .instance_size = sizeof(RISCVCPU), 3088 .instance_align = __alignof(RISCVCPU), 3089 .instance_init = riscv_cpu_init, 3090 .instance_post_init = riscv_cpu_post_init, 3091 .abstract = true, 3092 .class_size = sizeof(RISCVCPUClass), 3093 .class_init = riscv_cpu_common_class_init, 3094 }, 3095 { 3096 .name = TYPE_RISCV_DYNAMIC_CPU, 3097 .parent = TYPE_RISCV_CPU, 3098 .abstract = true, 3099 }, 3100 { 3101 .name = TYPE_RISCV_VENDOR_CPU, 3102 .parent = TYPE_RISCV_CPU, 3103 .abstract = true, 3104 }, 3105 { 3106 .name = TYPE_RISCV_BARE_CPU, 3107 .parent = TYPE_RISCV_CPU, 3108 .instance_init = riscv_bare_cpu_init, 3109 .abstract = true, 3110 }, 3111 #if defined(TARGET_RISCV32) 3112 DEFINE_DYNAMIC_CPU(TYPE_RISCV_CPU_MAX, MXL_RV32, riscv_max_cpu_init), 3113 #elif defined(TARGET_RISCV64) 3114 DEFINE_DYNAMIC_CPU(TYPE_RISCV_CPU_MAX, MXL_RV64, riscv_max_cpu_init), 3115 #endif 3116 3117 #if defined(TARGET_RISCV32) || \ 3118 (defined(TARGET_RISCV64) && !defined(CONFIG_USER_ONLY)) 3119 DEFINE_DYNAMIC_CPU(TYPE_RISCV_CPU_BASE32, MXL_RV32, rv32_base_cpu_init), 3120 DEFINE_VENDOR_CPU(TYPE_RISCV_CPU_IBEX, MXL_RV32, rv32_ibex_cpu_init), 3121 DEFINE_VENDOR_CPU(TYPE_RISCV_CPU_SIFIVE_E31, MXL_RV32, rv32_sifive_e_cpu_init), 3122 DEFINE_VENDOR_CPU(TYPE_RISCV_CPU_SIFIVE_E34, MXL_RV32, rv32_imafcu_nommu_cpu_init), 3123 DEFINE_VENDOR_CPU(TYPE_RISCV_CPU_SIFIVE_U34, MXL_RV32, rv32_sifive_u_cpu_init), 3124 DEFINE_BARE_CPU(TYPE_RISCV_CPU_RV32I, MXL_RV32, rv32i_bare_cpu_init), 3125 DEFINE_BARE_CPU(TYPE_RISCV_CPU_RV32E, MXL_RV32, rv32e_bare_cpu_init), 3126 #endif 3127 3128 #if (defined(TARGET_RISCV64) && !defined(CONFIG_USER_ONLY)) 3129 DEFINE_DYNAMIC_CPU(TYPE_RISCV_CPU_MAX32, MXL_RV32, riscv_max_cpu_init), 3130 #endif 3131 3132 #if defined(TARGET_RISCV64) 3133 DEFINE_DYNAMIC_CPU(TYPE_RISCV_CPU_BASE64, MXL_RV64, rv64_base_cpu_init), 3134 DEFINE_VENDOR_CPU(TYPE_RISCV_CPU_SIFIVE_E51, MXL_RV64, rv64_sifive_e_cpu_init), 3135 DEFINE_VENDOR_CPU(TYPE_RISCV_CPU_SIFIVE_U54, MXL_RV64, rv64_sifive_u_cpu_init), 3136 DEFINE_VENDOR_CPU(TYPE_RISCV_CPU_SHAKTI_C, MXL_RV64, rv64_sifive_u_cpu_init), 3137 DEFINE_VENDOR_CPU(TYPE_RISCV_CPU_THEAD_C906, MXL_RV64, rv64_thead_c906_cpu_init), 3138 DEFINE_VENDOR_CPU(TYPE_RISCV_CPU_TT_ASCALON, MXL_RV64, rv64_tt_ascalon_cpu_init), 3139 DEFINE_VENDOR_CPU(TYPE_RISCV_CPU_VEYRON_V1, MXL_RV64, rv64_veyron_v1_cpu_init), 3140 DEFINE_VENDOR_CPU(TYPE_RISCV_CPU_XIANGSHAN_NANHU, 3141 MXL_RV64, rv64_xiangshan_nanhu_cpu_init), 3142 #ifdef CONFIG_TCG 3143 DEFINE_DYNAMIC_CPU(TYPE_RISCV_CPU_BASE128, MXL_RV128, rv128_base_cpu_init), 3144 #endif /* CONFIG_TCG */ 3145 DEFINE_BARE_CPU(TYPE_RISCV_CPU_RV64I, MXL_RV64, rv64i_bare_cpu_init), 3146 DEFINE_BARE_CPU(TYPE_RISCV_CPU_RV64E, MXL_RV64, rv64e_bare_cpu_init), 3147 DEFINE_PROFILE_CPU(TYPE_RISCV_CPU_RVA22U64, MXL_RV64, rva22u64_profile_cpu_init), 3148 DEFINE_PROFILE_CPU(TYPE_RISCV_CPU_RVA22S64, MXL_RV64, rva22s64_profile_cpu_init), 3149 #endif /* TARGET_RISCV64 */ 3150 }; 3151 3152 
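/*
 * Illustrative usage (assumed mapping, for reference only): each entry above
 * defines a QOM type, registered by DEFINE_TYPES() just below, whose type
 * name is built with RISCV_CPU_TYPE_NAME() in cpu.h ("<model>-riscv-cpu"),
 * so the models defined here are selected by their short name on the
 * command line, e.g.:
 *
 *   qemu-system-riscv64 -cpu rv64,v=on,vlen=256 ...
 *   qemu-system-riscv64 -cpu rva22s64 ...
 *
 * riscv_cpu_class_by_name() (earlier in this file) performs the reverse
 * lookup from the short model name to the QOM type.
 */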
DEFINE_TYPES(riscv_cpu_type_infos) 3153
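/*
 * Editor's sketch (illustration only, not QEMU's implementation): the
 * implied-extension tables defined earlier in this file are consumed outside
 * this file when the CPU features are finalized; the hypothetical helper
 * below only shows how the data is laid out. Each rule names a parent
 * extension (a MISA bit when .is_misa is set, otherwise a cpu->cfg offset)
 * plus the MISA bits and cfg offsets it implies, with the offset list
 * terminated by RISCV_IMPLIED_EXTS_RULE_END.
 */
#if 0 /* illustration only, never compiled */
static void example_apply_multi_ext_rules(RISCVCPU *cpu)
{
    for (RISCVCPUImpliedExtsRule **rule = riscv_multi_ext_implied_rules;
         *rule; rule++) {
        /* Only expand rules whose parent multi-letter extension is on. */
        if (!isa_ext_is_enabled(cpu, (*rule)->ext)) {
            continue;
        }

        /* Turn on every multi-letter extension the parent implies. */
        for (const uint32_t *offset = (*rule)->implied_multi_exts;
             *offset != RISCV_IMPLIED_EXTS_RULE_END; offset++) {
            bool *ext_enabled = (void *)&cpu->cfg + *offset;
            *ext_enabled = true;
        }

        /* Implied MISA bits (e.g. Zcd -> D) are OR-ed into misa_ext. */
        cpu->env.misa_ext |= (*rule)->implied_misa_exts;
    }
}
#endif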