/*
 * QEMU RISC-V CPU
 *
 * Copyright (c) 2016-2017 Sagar Karandikar, sagark@eecs.berkeley.edu
 * Copyright (c) 2017-2018 SiFive, Inc.
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms and conditions of the GNU General Public License,
 * version 2 or later, as published by the Free Software Foundation.
 *
 * This program is distributed in the hope it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
 * more details.
 *
 * You should have received a copy of the GNU General Public License along with
 * this program. If not, see <http://www.gnu.org/licenses/>.
 */

#include "qemu/osdep.h"
#include "qemu/qemu-print.h"
#include "qemu/ctype.h"
#include "qemu/log.h"
#include "cpu.h"
#include "cpu_vendorid.h"
#include "internals.h"
#include "exec/exec-all.h"
#include "qapi/error.h"
#include "qapi/visitor.h"
#include "qemu/error-report.h"
#include "hw/qdev-properties.h"
#include "hw/core/qdev-prop-internal.h"
#include "migration/vmstate.h"
#include "fpu/softfloat-helpers.h"
#include "system/device_tree.h"
#include "system/kvm.h"
#include "system/tcg.h"
#include "kvm/kvm_riscv.h"
#include "tcg/tcg-cpu.h"
#include "tcg/tcg.h"

/* RISC-V CPU definitions */

/* Canonical ordering of the single-letter extensions in an ISA string */
static const char riscv_single_letter_exts[] = "IEMAFDQCBPVH";

/* All single-letter misa bits known to QEMU; the list is zero-terminated */
const uint32_t misa_bits[] = {RVI, RVE, RVM, RVA, RVF, RVD, RVV,
                              RVC, RVS, RVU, RVH, RVG, RVB, 0};

/*
 * From vector_helper.c
 * Note that vector data is stored in host-endian 64-bit chunks,
 * so addressing bytes needs a host-endian fixup.
 */
#if HOST_BIG_ENDIAN
#define BYTE(x) ((x) ^ 7)
#else
#define BYTE(x) (x)
#endif

/* True when the CPU's machine-level XLEN (misa.MXL) is 32 bits */
bool riscv_cpu_is_32bit(RISCVCPU *cpu)
{
    return riscv_cpu_mxl(&cpu->env) == MXL_RV32;
}

/* Hash that stores general user set numeric options */
static GHashTable *general_user_opts;

/*
 * Record that the user explicitly set numeric option @optname to @value.
 * NOTE(review): the key pointer is stored as-is, not copied — this assumes
 * @optname has static storage duration; confirm at call sites.
 */
static void cpu_option_add_user_setting(const char *optname, uint32_t value)
{
    g_hash_table_insert(general_user_opts, (gpointer)optname,
                        GUINT_TO_POINTER(value));
}

/* True if the user explicitly set option @optname (see hash above) */
bool riscv_cpu_option_set(const char *optname)
{
    return g_hash_table_contains(general_user_opts, optname);
}

/* Build one isa_edata_arr row: name string, min priv version, cfg offset */
#define ISA_EXT_DATA_ENTRY(_name, _min_ver, _prop) \
    {#_name, _min_ver, CPU_CFG_OFFSET(_prop)}

/*
 * Here are the ordering rules of extension naming defined by RISC-V
 * specification :
 * 1. All extensions should be separated from other multi-letter extensions
 *    by an underscore.
 * 2. The first letter following the 'Z' conventionally indicates the most
 *    closely related alphabetical extension category, IMAFDQLCBKJTPVH.
 *    If multiple 'Z' extensions are named, they should be ordered first
 *    by category, then alphabetically within a category.
 * 3. Standard supervisor-level extensions (starts with 'S') should be
 *    listed after standard unprivileged extensions. If multiple
 *    supervisor-level extensions are listed, they should be ordered
 *    alphabetically.
 * 4. Non-standard extensions (starts with 'X') must be listed after all
 *    standard extensions. They must be separated from other multi-letter
 *    extensions by an underscore.
 *
 * Single letter extensions are checked in riscv_cpu_validate_misa_priv()
 * instead.
 */
const RISCVIsaExtData isa_edata_arr[] = {
    ISA_EXT_DATA_ENTRY(zic64b, PRIV_VERSION_1_12_0, ext_zic64b),
    ISA_EXT_DATA_ENTRY(zicbom, PRIV_VERSION_1_12_0, ext_zicbom),
    ISA_EXT_DATA_ENTRY(zicbop, PRIV_VERSION_1_12_0, ext_zicbop),
    ISA_EXT_DATA_ENTRY(zicboz, PRIV_VERSION_1_12_0, ext_zicboz),
    /*
     * The has_priv_1_11 / has_priv_1_12 entries below are profile
     * extensions that are implied by supporting that priv spec version
     * rather than by a dedicated cfg flag.
     */
    ISA_EXT_DATA_ENTRY(ziccamoa, PRIV_VERSION_1_11_0, has_priv_1_11),
    ISA_EXT_DATA_ENTRY(ziccif, PRIV_VERSION_1_11_0, has_priv_1_11),
    ISA_EXT_DATA_ENTRY(zicclsm, PRIV_VERSION_1_11_0, has_priv_1_11),
    ISA_EXT_DATA_ENTRY(ziccrse, PRIV_VERSION_1_11_0, has_priv_1_11),
    ISA_EXT_DATA_ENTRY(zicfilp, PRIV_VERSION_1_12_0, ext_zicfilp),
    ISA_EXT_DATA_ENTRY(zicfiss, PRIV_VERSION_1_13_0, ext_zicfiss),
    ISA_EXT_DATA_ENTRY(zicond, PRIV_VERSION_1_12_0, ext_zicond),
    ISA_EXT_DATA_ENTRY(zicntr, PRIV_VERSION_1_12_0, ext_zicntr),
    ISA_EXT_DATA_ENTRY(zicsr, PRIV_VERSION_1_10_0, ext_zicsr),
    ISA_EXT_DATA_ENTRY(zifencei, PRIV_VERSION_1_10_0, ext_zifencei),
    ISA_EXT_DATA_ENTRY(zihintntl, PRIV_VERSION_1_10_0, ext_zihintntl),
    ISA_EXT_DATA_ENTRY(zihintpause, PRIV_VERSION_1_10_0, ext_zihintpause),
    ISA_EXT_DATA_ENTRY(zihpm, PRIV_VERSION_1_12_0, ext_zihpm),
    ISA_EXT_DATA_ENTRY(zimop, PRIV_VERSION_1_13_0, ext_zimop),
    ISA_EXT_DATA_ENTRY(zmmul, PRIV_VERSION_1_12_0, ext_zmmul),
    ISA_EXT_DATA_ENTRY(za64rs, PRIV_VERSION_1_12_0, has_priv_1_12),
    ISA_EXT_DATA_ENTRY(zaamo, PRIV_VERSION_1_12_0, ext_zaamo),
    ISA_EXT_DATA_ENTRY(zabha, PRIV_VERSION_1_13_0, ext_zabha),
    ISA_EXT_DATA_ENTRY(zacas, PRIV_VERSION_1_12_0, ext_zacas),
    ISA_EXT_DATA_ENTRY(zama16b, PRIV_VERSION_1_13_0, ext_zama16b),
    ISA_EXT_DATA_ENTRY(zalrsc, PRIV_VERSION_1_12_0, ext_zalrsc),
    ISA_EXT_DATA_ENTRY(zawrs, PRIV_VERSION_1_12_0, ext_zawrs),
    ISA_EXT_DATA_ENTRY(zfa, PRIV_VERSION_1_12_0, ext_zfa),
    ISA_EXT_DATA_ENTRY(zfbfmin, PRIV_VERSION_1_12_0, ext_zfbfmin),
    ISA_EXT_DATA_ENTRY(zfh, PRIV_VERSION_1_11_0, ext_zfh),
    ISA_EXT_DATA_ENTRY(zfhmin, PRIV_VERSION_1_11_0, ext_zfhmin),
    ISA_EXT_DATA_ENTRY(zfinx, PRIV_VERSION_1_12_0, ext_zfinx),
    ISA_EXT_DATA_ENTRY(zdinx, PRIV_VERSION_1_12_0, ext_zdinx),
    ISA_EXT_DATA_ENTRY(zca, PRIV_VERSION_1_12_0, ext_zca),
    ISA_EXT_DATA_ENTRY(zcb, PRIV_VERSION_1_12_0, ext_zcb),
    ISA_EXT_DATA_ENTRY(zcf, PRIV_VERSION_1_12_0, ext_zcf),
    ISA_EXT_DATA_ENTRY(zcd, PRIV_VERSION_1_12_0, ext_zcd),
    ISA_EXT_DATA_ENTRY(zce, PRIV_VERSION_1_12_0, ext_zce),
    ISA_EXT_DATA_ENTRY(zcmop, PRIV_VERSION_1_13_0, ext_zcmop),
    ISA_EXT_DATA_ENTRY(zcmp, PRIV_VERSION_1_12_0, ext_zcmp),
    ISA_EXT_DATA_ENTRY(zcmt, PRIV_VERSION_1_12_0, ext_zcmt),
    ISA_EXT_DATA_ENTRY(zba, PRIV_VERSION_1_12_0, ext_zba),
    ISA_EXT_DATA_ENTRY(zbb, PRIV_VERSION_1_12_0, ext_zbb),
    ISA_EXT_DATA_ENTRY(zbc, PRIV_VERSION_1_12_0, ext_zbc),
    ISA_EXT_DATA_ENTRY(zbkb, PRIV_VERSION_1_12_0, ext_zbkb),
    ISA_EXT_DATA_ENTRY(zbkc, PRIV_VERSION_1_12_0, ext_zbkc),
    ISA_EXT_DATA_ENTRY(zbkx, PRIV_VERSION_1_12_0, ext_zbkx),
    ISA_EXT_DATA_ENTRY(zbs, PRIV_VERSION_1_12_0, ext_zbs),
    ISA_EXT_DATA_ENTRY(zk, PRIV_VERSION_1_12_0, ext_zk),
    ISA_EXT_DATA_ENTRY(zkn, PRIV_VERSION_1_12_0, ext_zkn),
    ISA_EXT_DATA_ENTRY(zknd, PRIV_VERSION_1_12_0, ext_zknd),
    ISA_EXT_DATA_ENTRY(zkne, PRIV_VERSION_1_12_0, ext_zkne),
    ISA_EXT_DATA_ENTRY(zknh, PRIV_VERSION_1_12_0, ext_zknh),
    ISA_EXT_DATA_ENTRY(zkr, PRIV_VERSION_1_12_0, ext_zkr),
    ISA_EXT_DATA_ENTRY(zks, PRIV_VERSION_1_12_0, ext_zks),
    ISA_EXT_DATA_ENTRY(zksed, PRIV_VERSION_1_12_0, ext_zksed),
    ISA_EXT_DATA_ENTRY(zksh, PRIV_VERSION_1_12_0, ext_zksh),
    ISA_EXT_DATA_ENTRY(zkt, PRIV_VERSION_1_12_0, ext_zkt),
    ISA_EXT_DATA_ENTRY(ztso, PRIV_VERSION_1_12_0, ext_ztso),
    ISA_EXT_DATA_ENTRY(zvbb, PRIV_VERSION_1_12_0, ext_zvbb),
    ISA_EXT_DATA_ENTRY(zvbc, PRIV_VERSION_1_12_0, ext_zvbc),
    ISA_EXT_DATA_ENTRY(zve32f, PRIV_VERSION_1_10_0, ext_zve32f),
    ISA_EXT_DATA_ENTRY(zve32x, PRIV_VERSION_1_10_0, ext_zve32x),
    ISA_EXT_DATA_ENTRY(zve64f, PRIV_VERSION_1_10_0, ext_zve64f),
    ISA_EXT_DATA_ENTRY(zve64d, PRIV_VERSION_1_10_0, ext_zve64d),
    ISA_EXT_DATA_ENTRY(zve64x, PRIV_VERSION_1_10_0, ext_zve64x),
    ISA_EXT_DATA_ENTRY(zvfbfmin, PRIV_VERSION_1_12_0, ext_zvfbfmin),
    ISA_EXT_DATA_ENTRY(zvfbfwma, PRIV_VERSION_1_12_0, ext_zvfbfwma),
    ISA_EXT_DATA_ENTRY(zvfh, PRIV_VERSION_1_12_0, ext_zvfh),
    ISA_EXT_DATA_ENTRY(zvfhmin, PRIV_VERSION_1_12_0, ext_zvfhmin),
    ISA_EXT_DATA_ENTRY(zvkb, PRIV_VERSION_1_12_0, ext_zvkb),
    ISA_EXT_DATA_ENTRY(zvkg, PRIV_VERSION_1_12_0, ext_zvkg),
    ISA_EXT_DATA_ENTRY(zvkn, PRIV_VERSION_1_12_0, ext_zvkn),
    ISA_EXT_DATA_ENTRY(zvknc, PRIV_VERSION_1_12_0, ext_zvknc),
    ISA_EXT_DATA_ENTRY(zvkned, PRIV_VERSION_1_12_0, ext_zvkned),
    ISA_EXT_DATA_ENTRY(zvkng, PRIV_VERSION_1_12_0, ext_zvkng),
    ISA_EXT_DATA_ENTRY(zvknha, PRIV_VERSION_1_12_0, ext_zvknha),
    ISA_EXT_DATA_ENTRY(zvknhb, PRIV_VERSION_1_12_0, ext_zvknhb),
    ISA_EXT_DATA_ENTRY(zvks, PRIV_VERSION_1_12_0, ext_zvks),
    ISA_EXT_DATA_ENTRY(zvksc, PRIV_VERSION_1_12_0, ext_zvksc),
    ISA_EXT_DATA_ENTRY(zvksed, PRIV_VERSION_1_12_0, ext_zvksed),
    ISA_EXT_DATA_ENTRY(zvksg, PRIV_VERSION_1_12_0, ext_zvksg),
    ISA_EXT_DATA_ENTRY(zvksh, PRIV_VERSION_1_12_0, ext_zvksh),
    ISA_EXT_DATA_ENTRY(zvkt, PRIV_VERSION_1_12_0, ext_zvkt),
    ISA_EXT_DATA_ENTRY(zhinx, PRIV_VERSION_1_12_0, ext_zhinx),
    ISA_EXT_DATA_ENTRY(zhinxmin, PRIV_VERSION_1_12_0, ext_zhinxmin),
    ISA_EXT_DATA_ENTRY(shcounterenw, PRIV_VERSION_1_12_0, has_priv_1_12),
    ISA_EXT_DATA_ENTRY(sha, PRIV_VERSION_1_12_0, ext_sha),
    ISA_EXT_DATA_ENTRY(shgatpa, PRIV_VERSION_1_12_0, has_priv_1_12),
    ISA_EXT_DATA_ENTRY(shtvala, PRIV_VERSION_1_12_0, has_priv_1_12),
    ISA_EXT_DATA_ENTRY(shvsatpa, PRIV_VERSION_1_12_0, has_priv_1_12),
    ISA_EXT_DATA_ENTRY(shvstvala, PRIV_VERSION_1_12_0, has_priv_1_12),
    ISA_EXT_DATA_ENTRY(shvstvecd, PRIV_VERSION_1_12_0, has_priv_1_12),
    ISA_EXT_DATA_ENTRY(smaia, PRIV_VERSION_1_12_0, ext_smaia),
    ISA_EXT_DATA_ENTRY(smcdeleg, PRIV_VERSION_1_13_0, ext_smcdeleg),
    ISA_EXT_DATA_ENTRY(smcntrpmf, PRIV_VERSION_1_12_0, ext_smcntrpmf),
    ISA_EXT_DATA_ENTRY(smcsrind, PRIV_VERSION_1_13_0, ext_smcsrind),
    ISA_EXT_DATA_ENTRY(smdbltrp, PRIV_VERSION_1_13_0, ext_smdbltrp),
    ISA_EXT_DATA_ENTRY(smepmp, PRIV_VERSION_1_12_0, ext_smepmp),
    /*
     * NOTE(review): smrnmi precedes smmpm/smnpm, and smctr/ssctr (further
     * below) sit between svade and svadu — both look out of the
     * alphabetical order required by rule 3 in the comment above. Confirm
     * against upstream before reordering: this array's order drives the
     * generated ISA-string layout.
     */
    ISA_EXT_DATA_ENTRY(smrnmi, PRIV_VERSION_1_12_0, ext_smrnmi),
    ISA_EXT_DATA_ENTRY(smmpm, PRIV_VERSION_1_13_0, ext_smmpm),
    ISA_EXT_DATA_ENTRY(smnpm, PRIV_VERSION_1_13_0, ext_smnpm),
    ISA_EXT_DATA_ENTRY(smstateen, PRIV_VERSION_1_12_0, ext_smstateen),
    ISA_EXT_DATA_ENTRY(ssaia, PRIV_VERSION_1_12_0, ext_ssaia),
    ISA_EXT_DATA_ENTRY(ssccfg, PRIV_VERSION_1_13_0, ext_ssccfg),
    ISA_EXT_DATA_ENTRY(ssccptr, PRIV_VERSION_1_11_0, has_priv_1_11),
    ISA_EXT_DATA_ENTRY(sscofpmf, PRIV_VERSION_1_12_0, ext_sscofpmf),
    ISA_EXT_DATA_ENTRY(sscounterenw, PRIV_VERSION_1_12_0, has_priv_1_12),
    ISA_EXT_DATA_ENTRY(sscsrind, PRIV_VERSION_1_12_0, ext_sscsrind),
    ISA_EXT_DATA_ENTRY(ssdbltrp, PRIV_VERSION_1_13_0, ext_ssdbltrp),
    ISA_EXT_DATA_ENTRY(ssnpm, PRIV_VERSION_1_13_0, ext_ssnpm),
    ISA_EXT_DATA_ENTRY(sspm, PRIV_VERSION_1_13_0, ext_sspm),
    ISA_EXT_DATA_ENTRY(ssstateen, PRIV_VERSION_1_12_0, ext_ssstateen),
    ISA_EXT_DATA_ENTRY(sstc, PRIV_VERSION_1_12_0, ext_sstc),
    ISA_EXT_DATA_ENTRY(sstvala, PRIV_VERSION_1_12_0, has_priv_1_12),
    ISA_EXT_DATA_ENTRY(sstvecd, PRIV_VERSION_1_12_0, has_priv_1_12),
    ISA_EXT_DATA_ENTRY(ssu64xl, PRIV_VERSION_1_12_0, has_priv_1_12),
    ISA_EXT_DATA_ENTRY(supm, PRIV_VERSION_1_13_0, ext_supm),
    ISA_EXT_DATA_ENTRY(svade, PRIV_VERSION_1_11_0, ext_svade),
    ISA_EXT_DATA_ENTRY(smctr, PRIV_VERSION_1_12_0, ext_smctr),
    ISA_EXT_DATA_ENTRY(ssctr, PRIV_VERSION_1_12_0, ext_ssctr),
    ISA_EXT_DATA_ENTRY(svadu, PRIV_VERSION_1_12_0, ext_svadu),
    ISA_EXT_DATA_ENTRY(svinval, PRIV_VERSION_1_12_0, ext_svinval),
    ISA_EXT_DATA_ENTRY(svnapot, PRIV_VERSION_1_12_0, ext_svnapot),
    ISA_EXT_DATA_ENTRY(svpbmt, PRIV_VERSION_1_12_0, ext_svpbmt),
    ISA_EXT_DATA_ENTRY(svukte, PRIV_VERSION_1_13_0, ext_svukte),
    ISA_EXT_DATA_ENTRY(svvptc, PRIV_VERSION_1_13_0, ext_svvptc),
    /* Non-standard (X) extensions — listed last per rule 4 above */
    ISA_EXT_DATA_ENTRY(xtheadba, PRIV_VERSION_1_11_0, ext_xtheadba),
    ISA_EXT_DATA_ENTRY(xtheadbb, PRIV_VERSION_1_11_0, ext_xtheadbb),
    ISA_EXT_DATA_ENTRY(xtheadbs, PRIV_VERSION_1_11_0, ext_xtheadbs),
    ISA_EXT_DATA_ENTRY(xtheadcmo, PRIV_VERSION_1_11_0, ext_xtheadcmo),
    ISA_EXT_DATA_ENTRY(xtheadcondmov, PRIV_VERSION_1_11_0, ext_xtheadcondmov),
    ISA_EXT_DATA_ENTRY(xtheadfmemidx, PRIV_VERSION_1_11_0, ext_xtheadfmemidx),
    ISA_EXT_DATA_ENTRY(xtheadfmv, PRIV_VERSION_1_11_0, ext_xtheadfmv),
    ISA_EXT_DATA_ENTRY(xtheadmac, PRIV_VERSION_1_11_0, ext_xtheadmac),
    ISA_EXT_DATA_ENTRY(xtheadmemidx, PRIV_VERSION_1_11_0, ext_xtheadmemidx),
    ISA_EXT_DATA_ENTRY(xtheadmempair, PRIV_VERSION_1_11_0, ext_xtheadmempair),
    ISA_EXT_DATA_ENTRY(xtheadsync, PRIV_VERSION_1_11_0, ext_xtheadsync),
    ISA_EXT_DATA_ENTRY(xventanacondops, PRIV_VERSION_1_12_0, ext_XVentanaCondOps),

    { },
};

/* Read the bool cfg flag stored at byte offset @ext_offset inside cpu->cfg */
bool isa_ext_is_enabled(RISCVCPU *cpu, uint32_t ext_offset)
{
    bool *ext_enabled = (void *)&cpu->cfg + ext_offset;

    return *ext_enabled;
}

/* Write the bool cfg flag stored at byte offset @ext_offset inside cpu->cfg */
void isa_ext_update_enabled(RISCVCPU *cpu, uint32_t ext_offset, bool en)
{
    bool *ext_enabled = (void *)&cpu->cfg + ext_offset;

    *ext_enabled = en;
}

/* True if @cpu_obj is an instance of a vendor CPU type */
bool riscv_cpu_is_vendor(Object *cpu_obj)
{
    return object_dynamic_cast(cpu_obj, TYPE_RISCV_VENDOR_CPU) != NULL;
}

/* Printable names of the 32 integer registers: "xN/<ABI name>" */
const char * const riscv_int_regnames[] = {
    "x0/zero", "x1/ra", "x2/sp", "x3/gp", "x4/tp", "x5/t0", "x6/t1",
    "x7/t2", "x8/s0", "x9/s1", "x10/a0", "x11/a1", "x12/a2", "x13/a3",
    "x14/a4", "x15/a5", "x16/a6", "x17/a7", "x18/s2", "x19/s3", "x20/s4",
    "x21/s5", "x22/s6", "x23/s7", "x24/s8", "x25/s9", "x26/s10", "x27/s11",
    "x28/t3", "x29/t4", "x30/t5", "x31/t6"
};

/* Printable names of the high ("h") halves of the integer registers */
const char * const riscv_int_regnamesh[] = {
    "x0h/zeroh", "x1h/rah", "x2h/sph", "x3h/gph", "x4h/tph", "x5h/t0h",
    "x6h/t1h", "x7h/t2h", "x8h/s0h", "x9h/s1h", "x10h/a0h", "x11h/a1h",
    "x12h/a2h", "x13h/a3h", "x14h/a4h", "x15h/a5h", "x16h/a6h", "x17h/a7h",
    "x18h/s2h", "x19h/s3h", "x20h/s4h", "x21h/s5h", "x22h/s6h", "x23h/s7h",
    "x24h/s8h", "x25h/s9h", "x26h/s10h", "x27h/s11h", "x28h/t3h", "x29h/t4h",
    "x30h/t5h", "x31h/t6h"
};

/* Printable names of the 32 floating-point registers: "fN/<ABI name>" */
const char * const riscv_fpr_regnames[] = {
    "f0/ft0", "f1/ft1", "f2/ft2", "f3/ft3", "f4/ft4", "f5/ft5",
    "f6/ft6", "f7/ft7", "f8/fs0", "f9/fs1", "f10/fa0", "f11/fa1",
    "f12/fa2", "f13/fa3", "f14/fa4", "f15/fa5", "f16/fa6", "f17/fa7",
    "f18/fs2", "f19/fs3", "f20/fs4", "f21/fs5", "f22/fs6", "f23/fs7",
    "f24/fs8", "f25/fs9", "f26/fs10", "f27/fs11", "f28/ft8", "f29/ft9",
    "f30/ft10", "f31/ft11"
};

/* Printable names of the 32 vector registers */
const char * const riscv_rvv_regnames[] = {
    "v0", "v1", "v2", "v3", "v4", "v5", "v6",
    "v7", "v8", "v9", "v10", "v11", "v12", "v13",
    "v14", "v15", "v16", "v17", "v18", "v19", "v20",
    "v21", "v22", "v23", "v24", "v25", "v26", "v27",
    "v28", "v29", "v30", "v31"
};

/* Synchronous exception names, indexed by cause number */
static const char * const riscv_excp_names[] = {
    "misaligned_fetch",
    "fault_fetch",
    "illegal_instruction",
    "breakpoint",
    "misaligned_load",
    "fault_load",
    "misaligned_store",
    "fault_store",
    "user_ecall",
    "supervisor_ecall",
    "hypervisor_ecall",
    "machine_ecall",
    "exec_page_fault",
    "load_page_fault",
    "reserved",
    "store_page_fault",
    "double_trap",
    "reserved",
    "reserved",
    "reserved",
    "guest_exec_page_fault",
    "guest_load_page_fault",
    "reserved",
    "guest_store_page_fault",
};

/* Asynchronous interrupt names, indexed by cause number */
static const char * const riscv_intr_names[] = {
    "u_software",
    "s_software",
    "vs_software",
    "m_software",
    "u_timer",
    "s_timer",
    "vs_timer",
    "m_timer",
    "u_external",
    "s_external",
    "vs_external",
    "m_external",
    "reserved",
    "reserved",
    "reserved",
    "reserved"
};

/*
 * Return a human-readable name for trap @cause. @async selects the
 * interrupt table over the exception table; out-of-range causes map
 * to "(unknown)".
 */
const char *riscv_cpu_get_trap_name(target_ulong cause, bool async)
{
    if (async) {
        return (cause < ARRAY_SIZE(riscv_intr_names)) ?
               riscv_intr_names[cause] : "(unknown)";
    } else {
        return (cause < ARRAY_SIZE(riscv_excp_names)) ?
               riscv_excp_names[cause] : "(unknown)";
    }
}

/* Set both the live misa extension bits and their writable mask to @ext */
void riscv_cpu_set_misa_ext(CPURISCVState *env, uint32_t ext)
{
    env->misa_ext_mask = env->misa_ext = ext;
}

/* Widest XLEN the class supports: 16 << MXL (RV32 -> 32, RV64 -> 64, ...) */
int riscv_cpu_max_xlen(RISCVCPUClass *mcc)
{
    return 16 << mcc->misa_mxl_max;
}

#ifndef CONFIG_USER_ONLY
/*
 * Translate a satp mode name ("mbare", "sv32", ... "sv64") into its
 * VM_1_10_* constant. Aborts on an unrecognized string.
 * NOTE(review): strncmp() compares only the prefix, so e.g. "sv32x"
 * also matches "sv32" — presumably the input is validated upstream;
 * confirm at call sites.
 */
static uint8_t satp_mode_from_str(const char *satp_mode_str)
{
    if (!strncmp(satp_mode_str, "mbare", 5)) {
        return VM_1_10_MBARE;
    }

    if (!strncmp(satp_mode_str, "sv32", 4)) {
        return VM_1_10_SV32;
    }

    if (!strncmp(satp_mode_str, "sv39", 4)) {
        return VM_1_10_SV39;
    }

    if (!strncmp(satp_mode_str, "sv48", 4)) {
        return VM_1_10_SV48;
    }

    if (!strncmp(satp_mode_str, "sv57", 4)) {
        return VM_1_10_SV57;
    }

    if (!strncmp(satp_mode_str, "sv64", 4)) {
        return VM_1_10_SV64;
    }

    g_assert_not_reached();
}

/* Return the index of the highest bit set in @map (the max satp mode) */
uint8_t satp_mode_max_from_map(uint32_t map)
{
    /*
     * 'map = 0' will make us return (31 - 32), which C will
     * happily overflow to UINT_MAX. There's no good result to
     * return if 'map = 0' (e.g. returning 0 will be ambiguous
     * with the result for 'map = 1').
     *
     * Assert out if map = 0. Callers will have to deal with
     * it outside of this function.
     */
    g_assert(map > 0);

    /* map here has at least one bit set, so no problem with clz */
    return 31 - __builtin_clz(map);
}

/*
 * Map a VM_1_10_* satp mode constant back to its printable name.
 * @is_32_bit selects the RV32 name space; aborts on a mode that is
 * invalid for the selected XLEN.
 */
const char *satp_mode_str(uint8_t satp_mode, bool is_32_bit)
{
    if (is_32_bit) {
        switch (satp_mode) {
        case VM_1_10_SV32:
            return "sv32";
        case VM_1_10_MBARE:
            return "none";
        }
    } else {
        switch (satp_mode) {
        case VM_1_10_SV64:
            return "sv64";
        case VM_1_10_SV57:
            return "sv57";
        case VM_1_10_SV48:
            return "sv48";
        case VM_1_10_SV39:
            return "sv39";
        case VM_1_10_MBARE:
            return "none";
        }
    }

    g_assert_not_reached();
}

/*
 * Mark every valid satp mode up to and including @satp_mode as supported
 * in cpu->cfg.satp_mode.supported, using the XLEN-specific validity table.
 */
static void set_satp_mode_max_supported(RISCVCPU *cpu,
                                        uint8_t satp_mode)
{
    bool rv32 = riscv_cpu_mxl(&cpu->env) == MXL_RV32;
    const bool *valid_vm = rv32 ? valid_vm_1_10_32 : valid_vm_1_10_64;

    for (int i = 0; i <= satp_mode; ++i) {
        if (valid_vm[i]) {
            cpu->cfg.satp_mode.supported |= (1 << i);
        }
    }
}

/* Set the satp mode to the max supported */
static void set_satp_mode_default_map(RISCVCPU *cpu)
{
    /*
     * Bare CPUs do not default to the max available.
     * Users must set a valid satp_mode in the command
     * line.
     */
    if (object_dynamic_cast(OBJECT(cpu), TYPE_RISCV_BARE_CPU) != NULL) {
        warn_report("No satp mode set. Defaulting to 'bare'");
        cpu->cfg.satp_mode.map = (1 << VM_1_10_MBARE);
        return;
    }

    cpu->cfg.satp_mode.map = cpu->cfg.satp_mode.supported;
}
#endif

/* "max" CPU: MMU + PMP, latest priv spec, widest satp mode for the XLEN */
static void riscv_max_cpu_init(Object *obj)
{
    RISCVCPU *cpu = RISCV_CPU(obj);
    CPURISCVState *env = &cpu->env;

    cpu->cfg.mmu = true;
    cpu->cfg.pmp = true;

    env->priv_ver = PRIV_VERSION_LATEST;
#ifndef CONFIG_USER_ONLY
    set_satp_mode_max_supported(RISCV_CPU(obj),
        riscv_cpu_mxl(&RISCV_CPU(obj)->env) == MXL_RV32 ?
        VM_1_10_SV32 : VM_1_10_SV57);
#endif
}

#if defined(TARGET_RISCV64)
/* Generic rv64 base CPU: MMU + PMP, latest priv spec, up to Sv57 */
static void rv64_base_cpu_init(Object *obj)
{
    RISCVCPU *cpu = RISCV_CPU(obj);
    CPURISCVState *env = &cpu->env;

    cpu->cfg.mmu = true;
    cpu->cfg.pmp = true;

    /* Set latest version of privileged specification */
    env->priv_ver = PRIV_VERSION_LATEST;
#ifndef CONFIG_USER_ONLY
    set_satp_mode_max_supported(RISCV_CPU(obj), VM_1_10_SV57);
#endif
}

/* SiFive U series (rv64): RV64GC-like with S/U modes, priv 1.10, Sv39 */
static void rv64_sifive_u_cpu_init(Object *obj)
{
    RISCVCPU *cpu = RISCV_CPU(obj);
    CPURISCVState *env = &cpu->env;
    riscv_cpu_set_misa_ext(env, RVI | RVM | RVA | RVF | RVD | RVC | RVS | RVU);
    env->priv_ver = PRIV_VERSION_1_10_0;
#ifndef CONFIG_USER_ONLY
    set_satp_mode_max_supported(RISCV_CPU(obj), VM_1_10_SV39);
#endif

    /* inherited from parent obj via riscv_cpu_init() */
    cpu->cfg.ext_zifencei = true;
    cpu->cfg.ext_zicsr = true;
    cpu->cfg.mmu = true;
    cpu->cfg.pmp = true;
}

/* SiFive E series (rv64): embedded, no MMU (bare satp), priv 1.10 */
static void rv64_sifive_e_cpu_init(Object *obj)
{
    CPURISCVState *env = &RISCV_CPU(obj)->env;
    RISCVCPU *cpu = RISCV_CPU(obj);

    riscv_cpu_set_misa_ext(env, RVI | RVM | RVA | RVC | RVU);
    env->priv_ver = PRIV_VERSION_1_10_0;
#ifndef CONFIG_USER_ONLY
    set_satp_mode_max_supported(cpu, VM_1_10_MBARE);
#endif

    /* inherited from parent obj via riscv_cpu_init() */
    cpu->cfg.ext_zifencei = true;
    cpu->cfg.ext_zicsr = true;
    cpu->cfg.pmp = true;
}

/* T-Head C906: RV64GCSU, priv 1.11, plus the XThead* custom extensions */
static void rv64_thead_c906_cpu_init(Object *obj)
{
    CPURISCVState *env = &RISCV_CPU(obj)->env;
    RISCVCPU *cpu = RISCV_CPU(obj);

    riscv_cpu_set_misa_ext(env, RVG | RVC | RVS | RVU);
    env->priv_ver = PRIV_VERSION_1_11_0;

    cpu->cfg.ext_zfa = true;
    cpu->cfg.ext_zfh = true;
    cpu->cfg.mmu = true;
    cpu->cfg.ext_xtheadba = true;
    cpu->cfg.ext_xtheadbb = true;
    cpu->cfg.ext_xtheadbs = true;
    cpu->cfg.ext_xtheadcmo = true;
    cpu->cfg.ext_xtheadcondmov = true;
    cpu->cfg.ext_xtheadfmemidx = true;
    cpu->cfg.ext_xtheadmac = true;
    cpu->cfg.ext_xtheadmemidx = true;
    cpu->cfg.ext_xtheadmempair = true;
    cpu->cfg.ext_xtheadsync = true;

    cpu->cfg.mvendorid = THEAD_VENDOR_ID;
#ifndef CONFIG_USER_ONLY
    set_satp_mode_max_supported(cpu, VM_1_10_SV39);
    /* C906 also has vendor-specific CSRs registered separately */
    th_register_custom_csrs(cpu);
#endif

    /* inherited from parent obj via riscv_cpu_init() */
    cpu->cfg.pmp = true;
}

/* Ventana Veyron V1: RV64GCH with S/U, priv 1.12, vendor IDs set, Sv48 */
static void rv64_veyron_v1_cpu_init(Object *obj)
{
    CPURISCVState *env = &RISCV_CPU(obj)->env;
    RISCVCPU *cpu = RISCV_CPU(obj);

    riscv_cpu_set_misa_ext(env, RVG | RVC | RVS | RVU | RVH);
    env->priv_ver = PRIV_VERSION_1_12_0;

    /* Enable ISA extensions */
    cpu->cfg.mmu = true;
    cpu->cfg.ext_zifencei = true;
    cpu->cfg.ext_zicsr = true;
    cpu->cfg.pmp = true;
    cpu->cfg.ext_zicbom = true;
    cpu->cfg.cbom_blocksize = 64;
    cpu->cfg.cboz_blocksize = 64;
    cpu->cfg.ext_zicboz = true;
    cpu->cfg.ext_smaia = true;
    cpu->cfg.ext_ssaia = true;
    cpu->cfg.ext_sscofpmf = true;
    cpu->cfg.ext_sstc = true;
    cpu->cfg.ext_svinval = true;
    cpu->cfg.ext_svnapot = true;
    cpu->cfg.ext_svpbmt = true;
    cpu->cfg.ext_smstateen = true;
    cpu->cfg.ext_zba = true;
    cpu->cfg.ext_zbb = true;
    cpu->cfg.ext_zbc = true;
    cpu->cfg.ext_zbs = true;
    cpu->cfg.ext_XVentanaCondOps = true;

    cpu->cfg.mvendorid = VEYRON_V1_MVENDORID;
    cpu->cfg.marchid = VEYRON_V1_MARCHID;
    cpu->cfg.mimpid = VEYRON_V1_MIMPID;

#ifndef CONFIG_USER_ONLY
    set_satp_mode_max_supported(cpu, VM_1_10_SV48);
#endif
}

/* Tenstorrent Ascalon */
/* RV64GCHV with S/U, priv 1.13, 256-bit vectors (vlenb = 32), Sv57 */
static void rv64_tt_ascalon_cpu_init(Object *obj)
{
    CPURISCVState *env = &RISCV_CPU(obj)->env;
    RISCVCPU *cpu = RISCV_CPU(obj);

    riscv_cpu_set_misa_ext(env, RVG | RVC | RVS | RVU | RVH | RVV);
    env->priv_ver = PRIV_VERSION_1_13_0;

    /* Enable ISA extensions */
    cpu->cfg.mmu = true;
    cpu->cfg.vlenb = 256 >> 3;
    cpu->cfg.elen = 64;
    cpu->env.vext_ver = VEXT_VERSION_1_00_0;
    cpu->cfg.rvv_ma_all_1s = true;
    cpu->cfg.rvv_ta_all_1s = true;
    cpu->cfg.misa_w = true;
    cpu->cfg.pmp = true;
    cpu->cfg.cbom_blocksize = 64;
    cpu->cfg.cbop_blocksize = 64;
    cpu->cfg.cboz_blocksize = 64;
    cpu->cfg.ext_zic64b = true;
    cpu->cfg.ext_zicbom = true;
    cpu->cfg.ext_zicbop = true;
    cpu->cfg.ext_zicboz = true;
    cpu->cfg.ext_zicntr = true;
    cpu->cfg.ext_zicond = true;
    cpu->cfg.ext_zicsr = true;
    cpu->cfg.ext_zifencei = true;
    cpu->cfg.ext_zihintntl = true;
    cpu->cfg.ext_zihintpause = true;
    cpu->cfg.ext_zihpm = true;
    cpu->cfg.ext_zimop = true;
    cpu->cfg.ext_zawrs = true;
    cpu->cfg.ext_zfa = true;
    cpu->cfg.ext_zfbfmin = true;
    cpu->cfg.ext_zfh = true;
    cpu->cfg.ext_zfhmin = true;
    cpu->cfg.ext_zcb = true;
    cpu->cfg.ext_zcmop = true;
    cpu->cfg.ext_zba = true;
    cpu->cfg.ext_zbb = true;
    cpu->cfg.ext_zbs = true;
    cpu->cfg.ext_zkt = true;
    cpu->cfg.ext_zvbb = true;
    cpu->cfg.ext_zvbc = true;
    cpu->cfg.ext_zvfbfmin = true;
    cpu->cfg.ext_zvfbfwma = true;
    cpu->cfg.ext_zvfh = true;
    cpu->cfg.ext_zvfhmin = true;
    cpu->cfg.ext_zvkng = true;
    cpu->cfg.ext_smaia = true;
    cpu->cfg.ext_smstateen = true;
    cpu->cfg.ext_ssaia = true;
    cpu->cfg.ext_sscofpmf = true;
    cpu->cfg.ext_sstc = true;
    cpu->cfg.ext_svade = true;
    cpu->cfg.ext_svinval = true;
    cpu->cfg.ext_svnapot = true;
    cpu->cfg.ext_svpbmt = true;

#ifndef CONFIG_USER_ONLY
    set_satp_mode_max_supported(cpu, VM_1_10_SV57);
#endif
}

/* XiangShan NanHu: RV64GCB with S/U, priv 1.12, crypto scalar exts, Sv39 */
static void rv64_xiangshan_nanhu_cpu_init(Object *obj)
{
    CPURISCVState *env = &RISCV_CPU(obj)->env;
    RISCVCPU *cpu = RISCV_CPU(obj);

    riscv_cpu_set_misa_ext(env, RVG | RVC | RVB | RVS | RVU);
    env->priv_ver = PRIV_VERSION_1_12_0;

    /* Enable ISA extensions */
cpu->cfg.ext_zbc = true; 682 cpu->cfg.ext_zbkb = true; 683 cpu->cfg.ext_zbkc = true; 684 cpu->cfg.ext_zbkx = true; 685 cpu->cfg.ext_zknd = true; 686 cpu->cfg.ext_zkne = true; 687 cpu->cfg.ext_zknh = true; 688 cpu->cfg.ext_zksed = true; 689 cpu->cfg.ext_zksh = true; 690 cpu->cfg.ext_svinval = true; 691 692 cpu->cfg.mmu = true; 693 cpu->cfg.pmp = true; 694 695 #ifndef CONFIG_USER_ONLY 696 set_satp_mode_max_supported(cpu, VM_1_10_SV39); 697 #endif 698 } 699 700 #ifdef CONFIG_TCG 701 static void rv128_base_cpu_init(Object *obj) 702 { 703 RISCVCPU *cpu = RISCV_CPU(obj); 704 CPURISCVState *env = &cpu->env; 705 706 cpu->cfg.mmu = true; 707 cpu->cfg.pmp = true; 708 709 /* Set latest version of privileged specification */ 710 env->priv_ver = PRIV_VERSION_LATEST; 711 #ifndef CONFIG_USER_ONLY 712 set_satp_mode_max_supported(RISCV_CPU(obj), VM_1_10_SV57); 713 #endif 714 } 715 #endif /* CONFIG_TCG */ 716 717 static void rv64i_bare_cpu_init(Object *obj) 718 { 719 CPURISCVState *env = &RISCV_CPU(obj)->env; 720 riscv_cpu_set_misa_ext(env, RVI); 721 } 722 723 static void rv64e_bare_cpu_init(Object *obj) 724 { 725 CPURISCVState *env = &RISCV_CPU(obj)->env; 726 riscv_cpu_set_misa_ext(env, RVE); 727 } 728 729 #endif /* !TARGET_RISCV64 */ 730 731 #if defined(TARGET_RISCV32) || \ 732 (defined(TARGET_RISCV64) && !defined(CONFIG_USER_ONLY)) 733 734 static void rv32_base_cpu_init(Object *obj) 735 { 736 RISCVCPU *cpu = RISCV_CPU(obj); 737 CPURISCVState *env = &cpu->env; 738 739 cpu->cfg.mmu = true; 740 cpu->cfg.pmp = true; 741 742 /* Set latest version of privileged specification */ 743 env->priv_ver = PRIV_VERSION_LATEST; 744 #ifndef CONFIG_USER_ONLY 745 set_satp_mode_max_supported(RISCV_CPU(obj), VM_1_10_SV32); 746 #endif 747 } 748 749 static void rv32_sifive_u_cpu_init(Object *obj) 750 { 751 RISCVCPU *cpu = RISCV_CPU(obj); 752 CPURISCVState *env = &cpu->env; 753 riscv_cpu_set_misa_ext(env, RVI | RVM | RVA | RVF | RVD | RVC | RVS | RVU); 754 env->priv_ver = PRIV_VERSION_1_10_0; 755 
#ifndef CONFIG_USER_ONLY
    set_satp_mode_max_supported(RISCV_CPU(obj), VM_1_10_SV32);
#endif

    /* inherited from parent obj via riscv_cpu_init() */
    cpu->cfg.ext_zifencei = true;
    cpu->cfg.ext_zicsr = true;
    cpu->cfg.mmu = true;
    cpu->cfg.pmp = true;
}

/* SiFive E-series: RV32 IMACU, no MMU (bare satp), priv spec 1.10. */
static void rv32_sifive_e_cpu_init(Object *obj)
{
    CPURISCVState *env = &RISCV_CPU(obj)->env;
    RISCVCPU *cpu = RISCV_CPU(obj);

    riscv_cpu_set_misa_ext(env, RVI | RVM | RVA | RVC | RVU);
    env->priv_ver = PRIV_VERSION_1_10_0;
#ifndef CONFIG_USER_ONLY
    set_satp_mode_max_supported(cpu, VM_1_10_MBARE);
#endif

    /* inherited from parent obj via riscv_cpu_init() */
    cpu->cfg.ext_zifencei = true;
    cpu->cfg.ext_zicsr = true;
    cpu->cfg.pmp = true;
}

/*
 * lowRISC Ibex: RV32 IMCU with bitmanip and Smepmp, no MMU,
 * priv spec 1.12.
 */
static void rv32_ibex_cpu_init(Object *obj)
{
    CPURISCVState *env = &RISCV_CPU(obj)->env;
    RISCVCPU *cpu = RISCV_CPU(obj);

    riscv_cpu_set_misa_ext(env, RVI | RVM | RVC | RVU);
    env->priv_ver = PRIV_VERSION_1_12_0;
#ifndef CONFIG_USER_ONLY
    set_satp_mode_max_supported(cpu, VM_1_10_MBARE);
#endif
    /* inherited from parent obj via riscv_cpu_init() */
    cpu->cfg.ext_zifencei = true;
    cpu->cfg.ext_zicsr = true;
    cpu->cfg.pmp = true;
    cpu->cfg.ext_smepmp = true;

    cpu->cfg.ext_zba = true;
    cpu->cfg.ext_zbb = true;
    cpu->cfg.ext_zbc = true;
    cpu->cfg.ext_zbs = true;
}

/* Generic RV32 IMAFCU machine-mode model without an MMU. */
static void rv32_imafcu_nommu_cpu_init(Object *obj)
{
    CPURISCVState *env = &RISCV_CPU(obj)->env;
    RISCVCPU *cpu = RISCV_CPU(obj);

    riscv_cpu_set_misa_ext(env, RVI | RVM | RVA | RVF | RVC | RVU);
    env->priv_ver = PRIV_VERSION_1_10_0;
#ifndef CONFIG_USER_ONLY
    set_satp_mode_max_supported(cpu, VM_1_10_MBARE);
#endif

    /* inherited from parent obj via riscv_cpu_init() */
    cpu->cfg.ext_zifencei = true;
    cpu->cfg.ext_zicsr = true;
    cpu->cfg.pmp = true;
}

/* Bare RV32I: only the base integer ISA; everything else is opt-in. */
static void rv32i_bare_cpu_init(Object *obj)
{
    CPURISCVState *env = &RISCV_CPU(obj)->env;
    riscv_cpu_set_misa_ext(env, RVI);
}

/* Bare RV32E: only the embedded base integer ISA. */
static void rv32e_bare_cpu_init(Object *obj)
{
    CPURISCVState *env = &RISCV_CPU(obj)->env;
    riscv_cpu_set_misa_ext(env, RVE);
}
#endif

/*
 * Resolve a "-cpu" model string to its QOM class.
 *
 * Only the part before the first ',' is the model name; any trailing
 * feature options are handled elsewhere. Returns NULL if no class with
 * the RISCV_CPU_TYPE_NAME()-mangled name exists.
 */
static ObjectClass *riscv_cpu_class_by_name(const char *cpu_model)
{
    ObjectClass *oc;
    char *typename;
    char **cpuname;

    cpuname = g_strsplit(cpu_model, ",", 1);
    typename = g_strdup_printf(RISCV_CPU_TYPE_NAME("%s"), cpuname[0]);
    oc = object_class_by_name(typename);
    g_strfreev(cpuname);
    g_free(typename);

    return oc;
}

/*
 * Return the user-visible model name for @cpu, derived from its QOM
 * type name. Caller frees the returned string.
 */
char *riscv_cpu_get_name(RISCVCPU *cpu)
{
    RISCVCPUClass *rcc = RISCV_CPU_GET_CLASS(cpu);
    const char *typename = object_class_get_name(OBJECT_CLASS(rcc));

    g_assert(g_str_has_suffix(typename, RISCV_CPU_TYPE_SUFFIX));

    return cpu_model_from_type(typename);
}

/*
 * "info registers" style dump: virtualization state, pc, a fixed list
 * of M/S/HS/VS CSRs (system emulation only), GPRs, and optionally the
 * FP and vector state depending on @flags.
 */
static void riscv_cpu_dump_state(CPUState *cs, FILE *f, int flags)
{
    RISCVCPU *cpu = RISCV_CPU(cs);
    CPURISCVState *env = &cpu->env;
    int i, j;
    uint8_t *p;

#if !defined(CONFIG_USER_ONLY)
    if (riscv_has_ext(env, RVH)) {
        qemu_fprintf(f, " %s %d\n", "V = ", env->virt_enabled);
    }
#endif
    qemu_fprintf(f, " %s " TARGET_FMT_lx "\n", "pc ", env->pc);
#ifndef CONFIG_USER_ONLY
    {
        static const int dump_csrs[] = {
            CSR_MHARTID,
            CSR_MSTATUS,
            CSR_MSTATUSH,
            /*
             * CSR_SSTATUS is intentionally omitted here as its value
             * can be figured out by looking at CSR_MSTATUS
             */
            CSR_HSTATUS,
            CSR_VSSTATUS,
            CSR_MIP,
            CSR_MIE,
            CSR_MIDELEG,
            CSR_HIDELEG,
            CSR_MEDELEG,
            CSR_HEDELEG,
            CSR_MTVEC,
            CSR_STVEC,
            CSR_VSTVEC,
            CSR_MEPC,
            CSR_SEPC,
            CSR_VSEPC,
            CSR_MCAUSE,
            CSR_SCAUSE,
            CSR_VSCAUSE,
            CSR_MTVAL,
            CSR_STVAL,
            CSR_HTVAL,
            CSR_MTVAL2,
            CSR_MSCRATCH,
            CSR_SSCRATCH,
            CSR_SATP,
        };

        for (i = 0; i < ARRAY_SIZE(dump_csrs);
             ++i) {
            int csrno = dump_csrs[i];
            target_ulong val = 0;
            RISCVException res = riscv_csrrw_debug(env, csrno, &val, 0, 0);

            /*
             * Rely on the smode, hmode, etc, predicates within csr.c
             * to do the filtering of the registers that are present.
             */
            if (res == RISCV_EXCP_NONE) {
                qemu_fprintf(f, " %-8s " TARGET_FMT_lx "\n",
                             csr_ops[csrno].name, val);
            }
        }
    }
#endif

    /* GPRs, four per output row */
    for (i = 0; i < 32; i++) {
        qemu_fprintf(f, " %-8s " TARGET_FMT_lx,
                     riscv_int_regnames[i], env->gpr[i]);
        if ((i & 3) == 3) {
            qemu_fprintf(f, "\n");
        }
    }
    if (flags & CPU_DUMP_FPU) {
        target_ulong val = 0;
        RISCVException res = riscv_csrrw_debug(env, CSR_FCSR, &val, 0, 0);
        if (res == RISCV_EXCP_NONE) {
            qemu_fprintf(f, " %-8s " TARGET_FMT_lx "\n",
                         csr_ops[CSR_FCSR].name, val);
        }
        for (i = 0; i < 32; i++) {
            qemu_fprintf(f, " %-8s %016" PRIx64,
                         riscv_fpr_regnames[i], env->fpr[i]);
            if ((i & 3) == 3) {
                qemu_fprintf(f, "\n");
            }
        }
    }
    if (riscv_has_ext(env, RVV) && (flags & CPU_DUMP_VPU)) {
        static const int dump_rvv_csrs[] = {
            CSR_VSTART,
            CSR_VXSAT,
            CSR_VXRM,
            CSR_VCSR,
            CSR_VL,
            CSR_VTYPE,
            CSR_VLENB,
        };
        for (i = 0; i < ARRAY_SIZE(dump_rvv_csrs); ++i) {
            int csrno = dump_rvv_csrs[i];
            target_ulong val = 0;
            RISCVException res = riscv_csrrw_debug(env, csrno, &val, 0, 0);

            /*
             * Rely on the smode, hmode, etc, predicates within csr.c
             * to do the filtering of the registers that are present.
             */
            if (res == RISCV_EXCP_NONE) {
                qemu_fprintf(f, " %-8s " TARGET_FMT_lx "\n",
                             csr_ops[csrno].name, val);
            }
        }
        uint16_t vlenb = cpu->cfg.vlenb;

        /*
         * Vector registers, most-significant byte first; BYTE() fixes
         * up indexing on big-endian hosts (vreg is host-endian u64s).
         */
        for (i = 0; i < 32; i++) {
            qemu_fprintf(f, " %-8s ", riscv_rvv_regnames[i]);
            p = (uint8_t *)env->vreg;
            for (j = vlenb - 1 ; j >= 0; j--) {
                qemu_fprintf(f, "%02x", *(p + i * vlenb + BYTE(j)));
            }
            qemu_fprintf(f, "\n");
        }
    }
}

/* Set pc, sign-extending to 64 bits when running in RV32 mode. */
static void riscv_cpu_set_pc(CPUState *cs, vaddr value)
{
    RISCVCPU *cpu = RISCV_CPU(cs);
    CPURISCVState *env = &cpu->env;

    if (env->xl == MXL_RV32) {
        env->pc = (int32_t)value;
    } else {
        env->pc = value;
    }
}

/* Read pc, truncated to 32 bits in RV32 mode. */
static vaddr riscv_cpu_get_pc(CPUState *cs)
{
    RISCVCPU *cpu = RISCV_CPU(cs);
    CPURISCVState *env = &cpu->env;

    /* Match cpu_get_tb_cpu_state. */
    if (env->xl == MXL_RV32) {
        return env->pc & UINT32_MAX;
    }
    return env->pc;
}

/*
 * WFI wakeup predicate: true if any M/S/VS-level interrupt is pending,
 * regardless of privilege mode and delegation (user emulation always
 * has work).
 */
bool riscv_cpu_has_work(CPUState *cs)
{
#ifndef CONFIG_USER_ONLY
    RISCVCPU *cpu = RISCV_CPU(cs);
    CPURISCVState *env = &cpu->env;
    /*
     * Definition of the WFI instruction requires it to ignore the privilege
     * mode and delegation registers, but respect individual enables
     */
    return riscv_cpu_all_pending(env) != 0 ||
        riscv_cpu_sirq_pending(env) != RISCV_EXCP_NONE ||
        riscv_cpu_vsirq_pending(env) != RISCV_EXCP_NONE;
#else
    return true;
#endif
}

/* CPUClass::mmu_index hook; defers to the env-level helper. */
static int riscv_cpu_mmu_index(CPUState *cs, bool ifetch)
{
    return riscv_env_mmu_index(cpu_env(cs), ifetch);
}

/*
 * "hold" phase of CPU reset: put the core in the architectural reset
 * state (M-mode, pc = resetvec, interrupts disabled, PMP unlocked,
 * default irq priorities), then let the accelerator finish up.
 */
static void riscv_cpu_reset_hold(Object *obj, ResetType type)
{
#ifndef CONFIG_USER_ONLY
    uint8_t iprio;
    int i, irq, rdzero;
#endif
    CPUState *cs = CPU(obj);
    RISCVCPU *cpu = RISCV_CPU(cs);
    RISCVCPUClass *mcc = RISCV_CPU_GET_CLASS(obj);
    CPURISCVState *env = &cpu->env;

    if (mcc->parent_phases.hold) {
        mcc->parent_phases.hold(obj, type);
    }
#ifndef CONFIG_USER_ONLY
    env->misa_mxl = mcc->misa_mxl_max;
    env->priv = PRV_M;
    env->mstatus &= ~(MSTATUS_MIE | MSTATUS_MPRV);
    if (env->misa_mxl > MXL_RV32) {
        /*
         * The reset status of SXL/UXL is undefined, but mstatus is WARL
         * and we must ensure that the value after init is valid for read.
         */
        env->mstatus = set_field(env->mstatus, MSTATUS64_SXL, env->misa_mxl);
        env->mstatus = set_field(env->mstatus, MSTATUS64_UXL, env->misa_mxl);
        if (riscv_has_ext(env, RVH)) {
            env->vsstatus = set_field(env->vsstatus,
                                      MSTATUS64_SXL, env->misa_mxl);
            env->vsstatus = set_field(env->vsstatus,
                                      MSTATUS64_UXL, env->misa_mxl);
            env->mstatus_hs = set_field(env->mstatus_hs,
                                        MSTATUS64_SXL, env->misa_mxl);
            env->mstatus_hs = set_field(env->mstatus_hs,
                                        MSTATUS64_UXL, env->misa_mxl);
        }
        /* Smdbltrp: start with the M-mode-disable-trap bit set */
        if (riscv_cpu_cfg(env)->ext_smdbltrp) {
            env->mstatus = set_field(env->mstatus, MSTATUS_MDT, 1);
        }
    }
    env->mcause = 0;
    env->miclaim = MIP_SGEIP;
    env->pc = env->resetvec;
    env->bins = 0;
    env->two_stage_lookup = false;

    /* menvcfg reflects the configured Svpbmt/Svade/Svadu support */
    env->menvcfg = (cpu->cfg.ext_svpbmt ? MENVCFG_PBMTE : 0) |
                   (!cpu->cfg.ext_svade && cpu->cfg.ext_svadu ?
                    MENVCFG_ADUE : 0);
    env->henvcfg = 0;

    /* Initialized default priorities of local interrupts. */
    for (i = 0; i < ARRAY_SIZE(env->miprio); i++) {
        iprio = riscv_cpu_default_priority(i);
        env->miprio[i] = (i == IRQ_M_EXT) ? 0 : iprio;
        env->siprio[i] = (i == IRQ_S_EXT) ? 0 : iprio;
        env->hviprio[i] = 0;
    }
    i = 0;
    while (!riscv_cpu_hviprio_index2irq(i, &irq, &rdzero)) {
        if (!rdzero) {
            env->hviprio[irq] = env->miprio[irq];
        }
        i++;
    }

    /*
     * Bits 10, 6, 2 and 12 of mideleg are read only 1 when the Hypervisor
     * extension is enabled.
     */
    if (riscv_has_ext(env, RVH)) {
        env->mideleg |= HS_MODE_INTERRUPTS;
    }

    /*
     * Clear mseccfg and unlock all the PMP entries upon reset.
     * This is allowed as per the priv and smepmp specifications
     * and is needed to clear stale entries across reboots.
     */
    if (riscv_cpu_cfg(env)->ext_smepmp) {
        env->mseccfg = 0;
    }

    pmp_unlock_entries(env);
#else
    env->priv = PRV_U;
    env->senvcfg = 0;
    env->menvcfg = 0;
#endif

    /* on reset elp is clear */
    env->elp = false;
    /* on reset ssp is set to 0 */
    env->ssp = 0;

    env->xl = riscv_cpu_mxl(env);
    cs->exception_index = RISCV_EXCP_NONE;
    env->load_res = -1;
    set_default_nan_mode(1, &env->fp_status);
    /* Default NaN value: sign bit clear, frac msb set */
    set_float_default_nan_pattern(0b01000000, &env->fp_status);
    env->vill = true;

#ifndef CONFIG_USER_ONLY
    if (cpu->cfg.debug) {
        riscv_trigger_reset_hold(env);
    }

    /* Smrnmi: resumable NMIs start masked until mnret */
    if (cpu->cfg.ext_smrnmi) {
        env->rnmip = 0;
        env->mnstatus = set_field(env->mnstatus, MNSTATUS_NMIE, false);
    }

    if (kvm_enabled()) {
        kvm_riscv_reset_vcpu(cpu);
    }
#endif
}

/*
 * Pick the disassembler matching the current XLEN and expose the CPU
 * config to it (for extension-dependent decoding).
 */
static void riscv_cpu_disas_set_info(CPUState *s, disassemble_info *info)
{
    RISCVCPU *cpu = RISCV_CPU(s);
    CPURISCVState *env = &cpu->env;
    info->target_info = &cpu->cfg;

    switch (env->xl) {
    case MXL_RV32:
        info->print_insn = print_insn_riscv32;
        break;
    case MXL_RV64:
        info->print_insn = print_insn_riscv64;
        break;
    case MXL_RV128:
        info->print_insn = print_insn_riscv128;
        break;
    default:
        g_assert_not_reached();
    }
}

#ifndef CONFIG_USER_ONLY
/*
 * Reconcile the user-requested satp modes (sv32/sv39/... properties)
 * with what the model supports, reporting conflicts via @errp and
 * expanding cfg.satp_mode.map to all modes at or below the maximum.
 */
static void riscv_cpu_satp_mode_finalize(RISCVCPU *cpu, Error **errp)
{
    bool rv32 = riscv_cpu_is_32bit(cpu);
    uint8_t satp_mode_map_max, satp_mode_supported_max;

    /* The
     CPU wants the OS to decide which satp mode to use */
    if (cpu->cfg.satp_mode.supported == 0) {
        return;
    }

    satp_mode_supported_max =
        satp_mode_max_from_map(cpu->cfg.satp_mode.supported);

    if (cpu->cfg.satp_mode.map == 0) {
        if (cpu->cfg.satp_mode.init == 0) {
            /* If unset by the user, we fallback to the default satp mode. */
            set_satp_mode_default_map(cpu);
        } else {
            /*
             * Find the lowest level that was disabled and then enable the
             * first valid level below which can be found in
             * valid_vm_1_10_32/64.
             */
            for (int i = 1; i < 16; ++i) {
                if ((cpu->cfg.satp_mode.init & (1 << i)) &&
                    (cpu->cfg.satp_mode.supported & (1 << i))) {
                    for (int j = i - 1; j >= 0; --j) {
                        if (cpu->cfg.satp_mode.supported & (1 << j)) {
                            cpu->cfg.satp_mode.map |= (1 << j);
                            break;
                        }
                    }
                    break;
                }
            }
        }
    }

    satp_mode_map_max = satp_mode_max_from_map(cpu->cfg.satp_mode.map);

    /* Make sure the user asked for a supported configuration (HW and qemu) */
    if (satp_mode_map_max > satp_mode_supported_max) {
        error_setg(errp, "satp_mode %s is higher than hw max capability %s",
                   satp_mode_str(satp_mode_map_max, rv32),
                   satp_mode_str(satp_mode_supported_max, rv32));
        return;
    }

    /*
     * Make sure the user did not ask for an invalid configuration as per
     * the specification.
     */
    if (!rv32) {
        for (int i = satp_mode_map_max - 1; i >= 0; --i) {
            if (!(cpu->cfg.satp_mode.map & (1 << i)) &&
                (cpu->cfg.satp_mode.init & (1 << i)) &&
                (cpu->cfg.satp_mode.supported & (1 << i))) {
                error_setg(errp, "cannot disable %s satp mode if %s "
                           "is enabled", satp_mode_str(i, false),
                           satp_mode_str(satp_mode_map_max, false));
                return;
            }
        }
    }

    /* Finally expand the map so that all valid modes are set */
    for (int i = satp_mode_map_max - 1; i >= 0; --i) {
        if (cpu->cfg.satp_mode.supported & (1 << i)) {
            cpu->cfg.satp_mode.map |= (1 << i);
        }
    }
}
#endif

/*
 * Validate and finalize the CPU configuration: satp modes first
 * (system emulation only), then the accelerator-specific feature
 * finalization (TCG or KVM). Errors are reported through @errp and
 * abort the remaining steps.
 */
void riscv_cpu_finalize_features(RISCVCPU *cpu, Error **errp)
{
    Error *local_err = NULL;

#ifndef CONFIG_USER_ONLY
    riscv_cpu_satp_mode_finalize(cpu, &local_err);
    if (local_err != NULL) {
        error_propagate(errp, local_err);
        return;
    }
#endif

    if (tcg_enabled()) {
        riscv_tcg_cpu_finalize_features(cpu, &local_err);
        if (local_err != NULL) {
            error_propagate(errp, local_err);
            return;
        }
        riscv_tcg_cpu_finalize_dynamic_decoder(cpu);
    } else if (kvm_enabled()) {
        riscv_kvm_cpu_finalize_features(cpu, &local_err);
        if (local_err != NULL) {
            error_propagate(errp, local_err);
            return;
        }
    }
}

/*
 * DeviceClass::realize: run the common cpu_exec realize, finalize
 * features, register gdbstub registers, create the vcpu and reset it,
 * then chain to the parent realize.
 */
static void riscv_cpu_realize(DeviceState *dev, Error **errp)
{
    CPUState *cs = CPU(dev);
    RISCVCPU *cpu = RISCV_CPU(dev);
    RISCVCPUClass *mcc = RISCV_CPU_GET_CLASS(dev);
    Error *local_err = NULL;

    cpu_exec_realizefn(cs, &local_err);
    if (local_err != NULL) {
        error_propagate(errp, local_err);
        return;
    }

    riscv_cpu_finalize_features(cpu, &local_err);
    if (local_err != NULL) {
        error_propagate(errp, local_err);
        return;
    }

    riscv_cpu_register_gdb_regs_for_features(cs);

#ifndef CONFIG_USER_ONLY
    if (cpu->cfg.debug)
    {
        riscv_trigger_realize(&cpu->env);
    }
#endif

    qemu_init_vcpu(cs);
    cpu_reset(cs);

    mcc->parent_realize(dev, errp);
}

/* True if the current accelerator can run this CPU model. */
bool riscv_cpu_accelerator_compatible(RISCVCPU *cpu)
{
    if (tcg_enabled()) {
        return riscv_cpu_tcg_compatible(cpu);
    }

    return true;
}

#ifndef CONFIG_USER_ONLY
/*
 * QOM getter for an "svNN" property: reports whether that satp mode
 * bit is set in the map. @opaque is the CPU's RISCVSATPMap.
 */
static void cpu_riscv_get_satp(Object *obj, Visitor *v, const char *name,
                               void *opaque, Error **errp)
{
    RISCVSATPMap *satp_map = opaque;
    uint8_t satp = satp_mode_from_str(name);
    bool value;

    value = satp_map->map & (1 << satp);

    visit_type_bool(v, name, &value, errp);
}

/*
 * QOM setter for an "svNN" property: records the requested value in
 * the map and remembers in 'init' that the user touched this mode
 * (consumed later by riscv_cpu_satp_mode_finalize()).
 */
static void cpu_riscv_set_satp(Object *obj, Visitor *v, const char *name,
                               void *opaque, Error **errp)
{
    RISCVSATPMap *satp_map = opaque;
    uint8_t satp = satp_mode_from_str(name);
    bool value;

    if (!visit_type_bool(v, name, &value, errp)) {
        return;
    }

    satp_map->map = deposit32(satp_map->map, satp, 1, value);
    satp_map->init |= 1 << satp;
}

/*
 * Expose the satp-mode booleans matching the CPU's XLEN: only "sv32"
 * on RV32, "sv39".."sv64" otherwise.
 */
void riscv_add_satp_mode_properties(Object *obj)
{
    RISCVCPU *cpu = RISCV_CPU(obj);

    if (cpu->env.misa_mxl == MXL_RV32) {
        object_property_add(obj, "sv32", "bool", cpu_riscv_get_satp,
                            cpu_riscv_set_satp, NULL, &cpu->cfg.satp_mode);
    } else {
        object_property_add(obj, "sv39", "bool", cpu_riscv_get_satp,
                            cpu_riscv_set_satp, NULL, &cpu->cfg.satp_mode);
        object_property_add(obj, "sv48", "bool", cpu_riscv_get_satp,
                            cpu_riscv_set_satp, NULL, &cpu->cfg.satp_mode);
        object_property_add(obj, "sv57", "bool", cpu_riscv_get_satp,
                            cpu_riscv_set_satp, NULL, &cpu->cfg.satp_mode);
        object_property_add(obj, "sv64", "bool", cpu_riscv_get_satp,
                            cpu_riscv_set_satp, NULL, &cpu->cfg.satp_mode);
    }
}

/*
 * GPIO irq handler. Local interrupts (< IRQ_LOCAL_MAX) update mip (or
 * go through KVM); the guest-external range updates hgeip/mip.SGEIP
 * and requires the H extension.
 */
static void riscv_cpu_set_irq(void *opaque, int irq, int level)
{
    RISCVCPU *cpu = RISCV_CPU(opaque);
    CPURISCVState *env = &cpu->env;

    if (irq < IRQ_LOCAL_MAX) {
        switch (irq) {
        case IRQ_U_SOFT:
        case IRQ_S_SOFT:
        case IRQ_VS_SOFT:
        case IRQ_M_SOFT:
        case IRQ_U_TIMER:
        case IRQ_S_TIMER:
        case IRQ_VS_TIMER:
        case IRQ_M_TIMER:
        case IRQ_U_EXT:
        case IRQ_VS_EXT:
        case IRQ_M_EXT:
            if (kvm_enabled()) {
                kvm_riscv_set_irq(cpu, irq, level);
            } else {
                riscv_cpu_update_mip(env, 1 << irq, BOOL_TO_MASK(level));
            }
            break;
        case IRQ_S_EXT:
            if (kvm_enabled()) {
                kvm_riscv_set_irq(cpu, irq, level);
            } else {
                /* mip.SEIP is the OR of the wire level and software seip */
                env->external_seip = level;
                riscv_cpu_update_mip(env, 1 << irq,
                                     BOOL_TO_MASK(level | env->software_seip));
            }
            break;
        default:
            g_assert_not_reached();
        }
    } else if (irq < (IRQ_LOCAL_MAX + IRQ_LOCAL_GUEST_MAX)) {
        /* Require H-extension for handling guest local interrupts */
        if (!riscv_has_ext(env, RVH)) {
            g_assert_not_reached();
        }

        /* Compute bit position in HGEIP CSR */
        irq = irq - IRQ_LOCAL_MAX + 1;
        if (env->geilen < irq) {
            g_assert_not_reached();
        }

        /* Update HGEIP CSR */
        env->hgeip &= ~((target_ulong)1 << irq);
        if (level) {
            env->hgeip |= (target_ulong)1 << irq;
        }

        /* Update mip.SGEIP bit */
        riscv_cpu_update_mip(env, MIP_SGEIP,
                             BOOL_TO_MASK(!!(env->hgeie & env->hgeip)));
    } else {
        g_assert_not_reached();
    }
}

/* GPIO handler for the resumable-NMI (Smrnmi) input lines. */
static void riscv_cpu_set_nmi(void *opaque, int irq, int level)
{
    riscv_cpu_set_rnmi(RISCV_CPU(opaque), irq, level);
}
#endif /* CONFIG_USER_ONLY */

/* True for "dynamic" CPU models whose extension set the user may edit. */
static bool riscv_cpu_is_dynamic(Object *cpu_obj)
{
    return object_dynamic_cast(cpu_obj, TYPE_RISCV_DYNAMIC_CPU) != NULL;
}

/* instance_post_init: let the accelerator hook into instance init. */
static void riscv_cpu_post_init(Object *obj)
{
    accel_cpu_instance_init(CPU(obj));
}

/*
 * Common instance_init for all RISC-V CPU models: wires up irq/NMI
 * GPIOs, seeds the user-option hash table, and sets the baseline
 * extension and property defaults that model-specific inits refine.
 */
static void riscv_cpu_init(Object *obj)
{
    RISCVCPUClass *mcc
        = RISCV_CPU_GET_CLASS(obj);
    RISCVCPU *cpu = RISCV_CPU(obj);
    CPURISCVState *env = &cpu->env;

    env->misa_mxl = mcc->misa_mxl_max;

#ifndef CONFIG_USER_ONLY
    qdev_init_gpio_in(DEVICE(obj), riscv_cpu_set_irq,
                      IRQ_LOCAL_MAX + IRQ_LOCAL_GUEST_MAX);
    qdev_init_gpio_in_named(DEVICE(cpu), riscv_cpu_set_nmi,
                            "riscv.cpu.rnmi", RNMI_MAX);
#endif /* CONFIG_USER_ONLY */

    general_user_opts = g_hash_table_new(g_str_hash, g_str_equal);

    /*
     * The timer and performance counters extensions were supported
     * in QEMU before they were added as discrete extensions in the
     * ISA. To keep compatibility we'll always default them to 'true'
     * for all CPUs. Each accelerator will decide what to do when
     * users disable them.
     */
    RISCV_CPU(obj)->cfg.ext_zicntr = true;
    RISCV_CPU(obj)->cfg.ext_zihpm = true;

    /* Default values for non-bool cpu properties */
    cpu->cfg.pmu_mask = MAKE_64BIT_MASK(3, 16);
    cpu->cfg.vlenb = 128 >> 3;          /* VLEN = 128 bits, stored in bytes */
    cpu->cfg.elen = 64;
    cpu->cfg.cbom_blocksize = 64;
    cpu->cfg.cbop_blocksize = 64;
    cpu->cfg.cboz_blocksize = 64;
    cpu->env.vext_ver = VEXT_VERSION_1_00_0;
}

/* instance_init shared by the bare (rv32i/rv64i/...) CPU models. */
static void riscv_bare_cpu_init(Object *obj)
{
    RISCVCPU *cpu = RISCV_CPU(obj);

    /*
     * Bare CPUs do not inherit the timer and performance
     * counters from the parent class (see riscv_cpu_init()
     * for info on why the parent enables them).
     *
     * Users have to explicitly enable these counters for
     * bare CPUs.
     */
    cpu->cfg.ext_zicntr = false;
    cpu->cfg.ext_zihpm = false;

    /* Set to QEMU's first supported priv version */
    cpu->env.priv_ver = PRIV_VERSION_1_10_0;

    /*
     * Support all available satp_mode settings. The default
     * value will be set to MBARE if the user doesn't set
     * satp_mode manually (see set_satp_mode_default()).
     */
#ifndef CONFIG_USER_ONLY
    set_satp_mode_max_supported(cpu, VM_1_10_SV64);
#endif
}

/* Property name and human-readable description of one MISA bit. */
typedef struct misa_ext_info {
    const char *name;
    const char *description;
} MISAExtInfo;

/* Index into misa_ext_info_arr[]: bit position of the (single) misa bit */
#define MISA_INFO_IDX(_bit) \
    __builtin_ctz(_bit)

#define MISA_EXT_INFO(_bit, _propname, _descr) \
    [MISA_INFO_IDX(_bit)] = {.name = _propname, .description = _descr}

static const MISAExtInfo misa_ext_info_arr[] = {
    MISA_EXT_INFO(RVA, "a", "Atomic instructions"),
    MISA_EXT_INFO(RVC, "c", "Compressed instructions"),
    MISA_EXT_INFO(RVD, "d", "Double-precision float point"),
    MISA_EXT_INFO(RVF, "f", "Single-precision float point"),
    MISA_EXT_INFO(RVI, "i", "Base integer instruction set"),
    MISA_EXT_INFO(RVE, "e", "Base integer instruction set (embedded)"),
    MISA_EXT_INFO(RVM, "m", "Integer multiplication and division"),
    MISA_EXT_INFO(RVS, "s", "Supervisor-level instructions"),
    MISA_EXT_INFO(RVU, "u", "User-level instructions"),
    MISA_EXT_INFO(RVH, "h", "Hypervisor"),
    MISA_EXT_INFO(RVV, "v", "Vector operations"),
    MISA_EXT_INFO(RVG, "g", "General purpose (IMAFD_Zicsr_Zifencei)"),
    MISA_EXT_INFO(RVB, "b", "Bit manipulation (Zba_Zbb_Zbs)")
};

/*
 * Sanity-check the class's max MXL and pick the matching gdbstub
 * core XML description.
 */
static void riscv_cpu_validate_misa_mxl(RISCVCPUClass *mcc)
{
    CPUClass *cc = CPU_CLASS(mcc);

    /* Validate that MISA_MXL is set properly. */
    switch (mcc->misa_mxl_max) {
#ifdef TARGET_RISCV64
    case MXL_RV64:
    case MXL_RV128:
        cc->gdb_core_xml_file = "riscv-64bit-cpu.xml";
        break;
#endif
    case MXL_RV32:
        cc->gdb_core_xml_file = "riscv-32bit-cpu.xml";
        break;
    default:
        g_assert_not_reached();
    }
}

/* Assert @bit is a valid single misa bit and return its table index. */
static int riscv_validate_misa_info_idx(uint32_t bit)
{
    int idx;

    /*
     * Our lowest valid input (RVA) is 1 and
     * __builtin_ctz() is UB with zero.
     */
    g_assert(bit != 0);
    idx = MISA_INFO_IDX(bit);

    g_assert(idx < ARRAY_SIZE(misa_ext_info_arr));
    return idx;
}

/* Property-name letter for a misa bit (e.g. RVA -> "a"); never NULL. */
const char *riscv_get_misa_ext_name(uint32_t bit)
{
    int idx = riscv_validate_misa_info_idx(bit);
    const char *val = misa_ext_info_arr[idx].name;

    g_assert(val != NULL);
    return val;
}

/* Human-readable description for a misa bit; never NULL. */
const char *riscv_get_misa_ext_description(uint32_t bit)
{
    int idx = riscv_validate_misa_info_idx(bit);
    const char *val = misa_ext_info_arr[idx].description;

    g_assert(val != NULL);
    return val;
}

#define MULTI_EXT_CFG_BOOL(_name, _prop, _defval) \
    {.name = _name, .offset = CPU_CFG_OFFSET(_prop), \
     .enabled = _defval}

const RISCVCPUMultiExtConfig riscv_cpu_extensions[] = {
    /* Defaults for standard extensions */
    MULTI_EXT_CFG_BOOL("sscofpmf", ext_sscofpmf, false),
    MULTI_EXT_CFG_BOOL("smcntrpmf", ext_smcntrpmf, false),
    MULTI_EXT_CFG_BOOL("smcsrind", ext_smcsrind, false),
    MULTI_EXT_CFG_BOOL("smcdeleg", ext_smcdeleg, false),
    MULTI_EXT_CFG_BOOL("sscsrind", ext_sscsrind, false),
    MULTI_EXT_CFG_BOOL("ssccfg", ext_ssccfg, false),
    MULTI_EXT_CFG_BOOL("smctr", ext_smctr, false),
    MULTI_EXT_CFG_BOOL("ssctr", ext_ssctr, false),
    MULTI_EXT_CFG_BOOL("zifencei", ext_zifencei, true),
    MULTI_EXT_CFG_BOOL("zicfilp", ext_zicfilp, false),
    MULTI_EXT_CFG_BOOL("zicfiss", ext_zicfiss, false),
    MULTI_EXT_CFG_BOOL("zicsr", ext_zicsr, true),
    MULTI_EXT_CFG_BOOL("zihintntl", ext_zihintntl, true),
    MULTI_EXT_CFG_BOOL("zihintpause", ext_zihintpause, true),
    MULTI_EXT_CFG_BOOL("zimop", ext_zimop, false),
    MULTI_EXT_CFG_BOOL("zcmop", ext_zcmop, false),
    MULTI_EXT_CFG_BOOL("zacas", ext_zacas, false),
    MULTI_EXT_CFG_BOOL("zama16b", ext_zama16b, false),
    MULTI_EXT_CFG_BOOL("zabha", ext_zabha, false),
    MULTI_EXT_CFG_BOOL("zaamo", ext_zaamo, false),
    MULTI_EXT_CFG_BOOL("zalrsc", ext_zalrsc, false),
    MULTI_EXT_CFG_BOOL("zawrs", ext_zawrs, true),
    MULTI_EXT_CFG_BOOL("zfa", ext_zfa, true),
    MULTI_EXT_CFG_BOOL("zfbfmin", ext_zfbfmin, false),
    MULTI_EXT_CFG_BOOL("zfh", ext_zfh, false),
    MULTI_EXT_CFG_BOOL("zfhmin", ext_zfhmin, false),
    MULTI_EXT_CFG_BOOL("zve32f", ext_zve32f, false),
    MULTI_EXT_CFG_BOOL("zve32x", ext_zve32x, false),
    MULTI_EXT_CFG_BOOL("zve64f", ext_zve64f, false),
    MULTI_EXT_CFG_BOOL("zve64d", ext_zve64d, false),
    MULTI_EXT_CFG_BOOL("zve64x", ext_zve64x, false),
    MULTI_EXT_CFG_BOOL("zvfbfmin", ext_zvfbfmin, false),
    MULTI_EXT_CFG_BOOL("zvfbfwma", ext_zvfbfwma, false),
    MULTI_EXT_CFG_BOOL("zvfh", ext_zvfh, false),
    MULTI_EXT_CFG_BOOL("zvfhmin", ext_zvfhmin, false),
    MULTI_EXT_CFG_BOOL("sstc", ext_sstc, true),
    MULTI_EXT_CFG_BOOL("ssnpm", ext_ssnpm, false),
    MULTI_EXT_CFG_BOOL("sspm", ext_sspm, false),
    MULTI_EXT_CFG_BOOL("supm", ext_supm, false),

    MULTI_EXT_CFG_BOOL("smaia", ext_smaia, false),
    MULTI_EXT_CFG_BOOL("smdbltrp", ext_smdbltrp, false),
    MULTI_EXT_CFG_BOOL("smepmp", ext_smepmp, false),
    MULTI_EXT_CFG_BOOL("smrnmi", ext_smrnmi, false),
    MULTI_EXT_CFG_BOOL("smmpm", ext_smmpm, false),
    MULTI_EXT_CFG_BOOL("smnpm", ext_smnpm, false),
    MULTI_EXT_CFG_BOOL("smstateen", ext_smstateen, false),
    MULTI_EXT_CFG_BOOL("ssaia", ext_ssaia, false),
    MULTI_EXT_CFG_BOOL("ssdbltrp", ext_ssdbltrp, false),
    MULTI_EXT_CFG_BOOL("svade", ext_svade, false),
    MULTI_EXT_CFG_BOOL("svadu", ext_svadu, true),
    MULTI_EXT_CFG_BOOL("svinval", ext_svinval, false),
    MULTI_EXT_CFG_BOOL("svnapot", ext_svnapot, false),
    MULTI_EXT_CFG_BOOL("svpbmt", ext_svpbmt, false),
    MULTI_EXT_CFG_BOOL("svvptc", ext_svvptc, true),

    MULTI_EXT_CFG_BOOL("zicntr", ext_zicntr, true),
    MULTI_EXT_CFG_BOOL("zihpm", ext_zihpm, true),

    MULTI_EXT_CFG_BOOL("zba", ext_zba, true),
    MULTI_EXT_CFG_BOOL("zbb", ext_zbb, true),
    MULTI_EXT_CFG_BOOL("zbc", ext_zbc, true),
    MULTI_EXT_CFG_BOOL("zbkb", ext_zbkb, false),
    MULTI_EXT_CFG_BOOL("zbkc", ext_zbkc, false),
    MULTI_EXT_CFG_BOOL("zbkx", ext_zbkx, false),
    MULTI_EXT_CFG_BOOL("zbs", ext_zbs, true),
    MULTI_EXT_CFG_BOOL("zk", ext_zk, false),
    MULTI_EXT_CFG_BOOL("zkn", ext_zkn, false),
    MULTI_EXT_CFG_BOOL("zknd", ext_zknd, false),
    MULTI_EXT_CFG_BOOL("zkne", ext_zkne, false),
    MULTI_EXT_CFG_BOOL("zknh", ext_zknh, false),
    MULTI_EXT_CFG_BOOL("zkr", ext_zkr, false),
    MULTI_EXT_CFG_BOOL("zks", ext_zks, false),
    MULTI_EXT_CFG_BOOL("zksed", ext_zksed, false),
    MULTI_EXT_CFG_BOOL("zksh", ext_zksh, false),
    MULTI_EXT_CFG_BOOL("zkt", ext_zkt, false),
    MULTI_EXT_CFG_BOOL("ztso", ext_ztso, false),

    MULTI_EXT_CFG_BOOL("zdinx", ext_zdinx, false),
    MULTI_EXT_CFG_BOOL("zfinx", ext_zfinx, false),
    MULTI_EXT_CFG_BOOL("zhinx", ext_zhinx, false),
    MULTI_EXT_CFG_BOOL("zhinxmin", ext_zhinxmin, false),

    MULTI_EXT_CFG_BOOL("zicbom", ext_zicbom, true),
    MULTI_EXT_CFG_BOOL("zicbop", ext_zicbop, true),
    MULTI_EXT_CFG_BOOL("zicboz", ext_zicboz, true),

    MULTI_EXT_CFG_BOOL("zmmul", ext_zmmul, false),

    MULTI_EXT_CFG_BOOL("zca", ext_zca, false),
    MULTI_EXT_CFG_BOOL("zcb", ext_zcb, false),
    MULTI_EXT_CFG_BOOL("zcd", ext_zcd, false),
    MULTI_EXT_CFG_BOOL("zce", ext_zce, false),
    MULTI_EXT_CFG_BOOL("zcf", ext_zcf, false),
    MULTI_EXT_CFG_BOOL("zcmp", ext_zcmp, false),
    MULTI_EXT_CFG_BOOL("zcmt", ext_zcmt, false),
    MULTI_EXT_CFG_BOOL("zicond", ext_zicond, false),

    /* Vector cryptography extensions */
    MULTI_EXT_CFG_BOOL("zvbb", ext_zvbb, false),
    MULTI_EXT_CFG_BOOL("zvbc", ext_zvbc, false),
    MULTI_EXT_CFG_BOOL("zvkb", ext_zvkb, false),
    MULTI_EXT_CFG_BOOL("zvkg", ext_zvkg, false),
    MULTI_EXT_CFG_BOOL("zvkned", ext_zvkned, false),
    MULTI_EXT_CFG_BOOL("zvknha", ext_zvknha, false),
    MULTI_EXT_CFG_BOOL("zvknhb", ext_zvknhb, false),
    MULTI_EXT_CFG_BOOL("zvksed", ext_zvksed, false),
    MULTI_EXT_CFG_BOOL("zvksh", ext_zvksh, false),
    MULTI_EXT_CFG_BOOL("zvkt", ext_zvkt, false),
    MULTI_EXT_CFG_BOOL("zvkn", ext_zvkn, false),
    MULTI_EXT_CFG_BOOL("zvknc", ext_zvknc, false),
    MULTI_EXT_CFG_BOOL("zvkng", ext_zvkng, false),
    MULTI_EXT_CFG_BOOL("zvks", ext_zvks, false),
    MULTI_EXT_CFG_BOOL("zvksc", ext_zvksc, false),
    MULTI_EXT_CFG_BOOL("zvksg", ext_zvksg, false),

    { },
};

const RISCVCPUMultiExtConfig riscv_cpu_vendor_exts[] = {
    MULTI_EXT_CFG_BOOL("xtheadba", ext_xtheadba, false),
    MULTI_EXT_CFG_BOOL("xtheadbb", ext_xtheadbb, false),
    MULTI_EXT_CFG_BOOL("xtheadbs", ext_xtheadbs, false),
    MULTI_EXT_CFG_BOOL("xtheadcmo", ext_xtheadcmo, false),
    MULTI_EXT_CFG_BOOL("xtheadcondmov", ext_xtheadcondmov, false),
    MULTI_EXT_CFG_BOOL("xtheadfmemidx", ext_xtheadfmemidx, false),
    MULTI_EXT_CFG_BOOL("xtheadfmv", ext_xtheadfmv, false),
    MULTI_EXT_CFG_BOOL("xtheadmac", ext_xtheadmac, false),
    MULTI_EXT_CFG_BOOL("xtheadmemidx", ext_xtheadmemidx, false),
    MULTI_EXT_CFG_BOOL("xtheadmempair", ext_xtheadmempair, false),
    MULTI_EXT_CFG_BOOL("xtheadsync", ext_xtheadsync, false),
    MULTI_EXT_CFG_BOOL("xventanacondops", ext_XVentanaCondOps, false),

    { },
};

/* These are experimental so mark with 'x-' */
const RISCVCPUMultiExtConfig riscv_cpu_experimental_exts[] = {
    MULTI_EXT_CFG_BOOL("x-svukte", ext_svukte, false),

    { },
};

/*
 * 'Named features' is the name we give to extensions that we
 * don't want to expose to users. They are either immutable
 * (always enabled/disable) or they'll vary depending on
 * the resulting CPU state. They have riscv,isa strings
 * and priv_ver like regular extensions.
 */
const RISCVCPUMultiExtConfig riscv_cpu_named_features[] = {
    MULTI_EXT_CFG_BOOL("zic64b", ext_zic64b, true),
    MULTI_EXT_CFG_BOOL("ssstateen", ext_ssstateen, true),
    MULTI_EXT_CFG_BOOL("sha", ext_sha, true),

    { },
};

/* Deprecated entries marked for future removal */
const RISCVCPUMultiExtConfig riscv_cpu_deprecated_exts[] = {
    MULTI_EXT_CFG_BOOL("Zifencei", ext_zifencei, true),
    MULTI_EXT_CFG_BOOL("Zicsr", ext_zicsr, true),
    MULTI_EXT_CFG_BOOL("Zihintntl", ext_zihintntl, true),
    MULTI_EXT_CFG_BOOL("Zihintpause", ext_zihintpause, true),
    MULTI_EXT_CFG_BOOL("Zawrs", ext_zawrs, true),
    MULTI_EXT_CFG_BOOL("Zfa", ext_zfa, true),
    MULTI_EXT_CFG_BOOL("Zfh", ext_zfh, false),
    MULTI_EXT_CFG_BOOL("Zfhmin", ext_zfhmin, false),
    MULTI_EXT_CFG_BOOL("Zve32f", ext_zve32f, false),
    MULTI_EXT_CFG_BOOL("Zve64f", ext_zve64f, false),
    MULTI_EXT_CFG_BOOL("Zve64d", ext_zve64d, false),

    { },
};

/*
 * Report through @errp that @propname is fixed on vendor CPU models
 * and cannot be changed.
 */
static void cpu_set_prop_err(RISCVCPU *cpu, const char *propname,
                             Error **errp)
{
    g_autofree char *cpuname = riscv_cpu_get_name(cpu);
    error_setg(errp, "CPU '%s' does not allow changing the value of '%s'",
               cpuname, propname);
}

/*
 * QOM setter for the deprecated "pmu-num" property: converts a counter
 * count into the equivalent "pmu-mask" (counters start at bit 3 since
 * mhpmcounter0-2 are fixed-purpose).
 */
static void prop_pmu_num_set(Object *obj, Visitor *v, const char *name,
                             void *opaque, Error **errp)
{
    RISCVCPU *cpu = RISCV_CPU(obj);
    uint8_t pmu_num, curr_pmu_num;
    uint32_t pmu_mask;

    visit_type_uint8(v, name, &pmu_num, errp);

    curr_pmu_num = ctpop32(cpu->cfg.pmu_mask);

    if (pmu_num != curr_pmu_num && riscv_cpu_is_vendor(obj)) {
        cpu_set_prop_err(cpu, name, errp);
        error_append_hint(errp, "Current '%s' val: %u\n",
                          name, curr_pmu_num);
        return;
    }

    if (pmu_num > (RV_MAX_MHPMCOUNTERS - 3)) {
        error_setg(errp, "Number of counters exceeds maximum available");
        return;
    }

    if (pmu_num == 0) {
        pmu_mask = 0;
    }
else { 1800 pmu_mask = MAKE_64BIT_MASK(3, pmu_num); 1801 } 1802 1803 warn_report("\"pmu-num\" property is deprecated; use \"pmu-mask\""); 1804 cpu->cfg.pmu_mask = pmu_mask; 1805 cpu_option_add_user_setting("pmu-mask", pmu_mask); 1806 } 1807 1808 static void prop_pmu_num_get(Object *obj, Visitor *v, const char *name, 1809 void *opaque, Error **errp) 1810 { 1811 RISCVCPU *cpu = RISCV_CPU(obj); 1812 uint8_t pmu_num = ctpop32(cpu->cfg.pmu_mask); 1813 1814 visit_type_uint8(v, name, &pmu_num, errp); 1815 } 1816 1817 static const PropertyInfo prop_pmu_num = { 1818 .name = "pmu-num", 1819 .get = prop_pmu_num_get, 1820 .set = prop_pmu_num_set, 1821 }; 1822 1823 static void prop_pmu_mask_set(Object *obj, Visitor *v, const char *name, 1824 void *opaque, Error **errp) 1825 { 1826 RISCVCPU *cpu = RISCV_CPU(obj); 1827 uint32_t value; 1828 uint8_t pmu_num; 1829 1830 visit_type_uint32(v, name, &value, errp); 1831 1832 if (value != cpu->cfg.pmu_mask && riscv_cpu_is_vendor(obj)) { 1833 cpu_set_prop_err(cpu, name, errp); 1834 error_append_hint(errp, "Current '%s' val: %x\n", 1835 name, cpu->cfg.pmu_mask); 1836 return; 1837 } 1838 1839 pmu_num = ctpop32(value); 1840 1841 if (pmu_num > (RV_MAX_MHPMCOUNTERS - 3)) { 1842 error_setg(errp, "Number of counters exceeds maximum available"); 1843 return; 1844 } 1845 1846 cpu_option_add_user_setting(name, value); 1847 cpu->cfg.pmu_mask = value; 1848 } 1849 1850 static void prop_pmu_mask_get(Object *obj, Visitor *v, const char *name, 1851 void *opaque, Error **errp) 1852 { 1853 uint8_t pmu_mask = RISCV_CPU(obj)->cfg.pmu_mask; 1854 1855 visit_type_uint8(v, name, &pmu_mask, errp); 1856 } 1857 1858 static const PropertyInfo prop_pmu_mask = { 1859 .name = "pmu-mask", 1860 .get = prop_pmu_mask_get, 1861 .set = prop_pmu_mask_set, 1862 }; 1863 1864 static void prop_mmu_set(Object *obj, Visitor *v, const char *name, 1865 void *opaque, Error **errp) 1866 { 1867 RISCVCPU *cpu = RISCV_CPU(obj); 1868 bool value; 1869 1870 visit_type_bool(v, name, &value, 
errp); 1871 1872 if (cpu->cfg.mmu != value && riscv_cpu_is_vendor(obj)) { 1873 cpu_set_prop_err(cpu, "mmu", errp); 1874 return; 1875 } 1876 1877 cpu_option_add_user_setting(name, value); 1878 cpu->cfg.mmu = value; 1879 } 1880 1881 static void prop_mmu_get(Object *obj, Visitor *v, const char *name, 1882 void *opaque, Error **errp) 1883 { 1884 bool value = RISCV_CPU(obj)->cfg.mmu; 1885 1886 visit_type_bool(v, name, &value, errp); 1887 } 1888 1889 static const PropertyInfo prop_mmu = { 1890 .name = "mmu", 1891 .get = prop_mmu_get, 1892 .set = prop_mmu_set, 1893 }; 1894 1895 static void prop_pmp_set(Object *obj, Visitor *v, const char *name, 1896 void *opaque, Error **errp) 1897 { 1898 RISCVCPU *cpu = RISCV_CPU(obj); 1899 bool value; 1900 1901 visit_type_bool(v, name, &value, errp); 1902 1903 if (cpu->cfg.pmp != value && riscv_cpu_is_vendor(obj)) { 1904 cpu_set_prop_err(cpu, name, errp); 1905 return; 1906 } 1907 1908 cpu_option_add_user_setting(name, value); 1909 cpu->cfg.pmp = value; 1910 } 1911 1912 static void prop_pmp_get(Object *obj, Visitor *v, const char *name, 1913 void *opaque, Error **errp) 1914 { 1915 bool value = RISCV_CPU(obj)->cfg.pmp; 1916 1917 visit_type_bool(v, name, &value, errp); 1918 } 1919 1920 static const PropertyInfo prop_pmp = { 1921 .name = "pmp", 1922 .get = prop_pmp_get, 1923 .set = prop_pmp_set, 1924 }; 1925 1926 static int priv_spec_from_str(const char *priv_spec_str) 1927 { 1928 int priv_version = -1; 1929 1930 if (!g_strcmp0(priv_spec_str, PRIV_VER_1_13_0_STR)) { 1931 priv_version = PRIV_VERSION_1_13_0; 1932 } else if (!g_strcmp0(priv_spec_str, PRIV_VER_1_12_0_STR)) { 1933 priv_version = PRIV_VERSION_1_12_0; 1934 } else if (!g_strcmp0(priv_spec_str, PRIV_VER_1_11_0_STR)) { 1935 priv_version = PRIV_VERSION_1_11_0; 1936 } else if (!g_strcmp0(priv_spec_str, PRIV_VER_1_10_0_STR)) { 1937 priv_version = PRIV_VERSION_1_10_0; 1938 } 1939 1940 return priv_version; 1941 } 1942 1943 const char *priv_spec_to_str(int priv_version) 1944 { 1945 switch 
(priv_version) { 1946 case PRIV_VERSION_1_10_0: 1947 return PRIV_VER_1_10_0_STR; 1948 case PRIV_VERSION_1_11_0: 1949 return PRIV_VER_1_11_0_STR; 1950 case PRIV_VERSION_1_12_0: 1951 return PRIV_VER_1_12_0_STR; 1952 case PRIV_VERSION_1_13_0: 1953 return PRIV_VER_1_13_0_STR; 1954 default: 1955 return NULL; 1956 } 1957 } 1958 1959 static void prop_priv_spec_set(Object *obj, Visitor *v, const char *name, 1960 void *opaque, Error **errp) 1961 { 1962 RISCVCPU *cpu = RISCV_CPU(obj); 1963 g_autofree char *value = NULL; 1964 int priv_version = -1; 1965 1966 visit_type_str(v, name, &value, errp); 1967 1968 priv_version = priv_spec_from_str(value); 1969 if (priv_version < 0) { 1970 error_setg(errp, "Unsupported privilege spec version '%s'", value); 1971 return; 1972 } 1973 1974 if (priv_version != cpu->env.priv_ver && riscv_cpu_is_vendor(obj)) { 1975 cpu_set_prop_err(cpu, name, errp); 1976 error_append_hint(errp, "Current '%s' val: %s\n", name, 1977 object_property_get_str(obj, name, NULL)); 1978 return; 1979 } 1980 1981 cpu_option_add_user_setting(name, priv_version); 1982 cpu->env.priv_ver = priv_version; 1983 } 1984 1985 static void prop_priv_spec_get(Object *obj, Visitor *v, const char *name, 1986 void *opaque, Error **errp) 1987 { 1988 RISCVCPU *cpu = RISCV_CPU(obj); 1989 const char *value = priv_spec_to_str(cpu->env.priv_ver); 1990 1991 visit_type_str(v, name, (char **)&value, errp); 1992 } 1993 1994 static const PropertyInfo prop_priv_spec = { 1995 .name = "priv_spec", 1996 .get = prop_priv_spec_get, 1997 .set = prop_priv_spec_set, 1998 }; 1999 2000 static void prop_vext_spec_set(Object *obj, Visitor *v, const char *name, 2001 void *opaque, Error **errp) 2002 { 2003 RISCVCPU *cpu = RISCV_CPU(obj); 2004 g_autofree char *value = NULL; 2005 2006 visit_type_str(v, name, &value, errp); 2007 2008 if (g_strcmp0(value, VEXT_VER_1_00_0_STR) != 0) { 2009 error_setg(errp, "Unsupported vector spec version '%s'", value); 2010 return; 2011 } 2012 2013 
cpu_option_add_user_setting(name, VEXT_VERSION_1_00_0); 2014 cpu->env.vext_ver = VEXT_VERSION_1_00_0; 2015 } 2016 2017 static void prop_vext_spec_get(Object *obj, Visitor *v, const char *name, 2018 void *opaque, Error **errp) 2019 { 2020 const char *value = VEXT_VER_1_00_0_STR; 2021 2022 visit_type_str(v, name, (char **)&value, errp); 2023 } 2024 2025 static const PropertyInfo prop_vext_spec = { 2026 .name = "vext_spec", 2027 .get = prop_vext_spec_get, 2028 .set = prop_vext_spec_set, 2029 }; 2030 2031 static void prop_vlen_set(Object *obj, Visitor *v, const char *name, 2032 void *opaque, Error **errp) 2033 { 2034 RISCVCPU *cpu = RISCV_CPU(obj); 2035 uint16_t cpu_vlen = cpu->cfg.vlenb << 3; 2036 uint16_t value; 2037 2038 if (!visit_type_uint16(v, name, &value, errp)) { 2039 return; 2040 } 2041 2042 if (!is_power_of_2(value)) { 2043 error_setg(errp, "Vector extension VLEN must be power of 2"); 2044 return; 2045 } 2046 2047 if (value != cpu_vlen && riscv_cpu_is_vendor(obj)) { 2048 cpu_set_prop_err(cpu, name, errp); 2049 error_append_hint(errp, "Current '%s' val: %u\n", 2050 name, cpu_vlen); 2051 return; 2052 } 2053 2054 cpu_option_add_user_setting(name, value); 2055 cpu->cfg.vlenb = value >> 3; 2056 } 2057 2058 static void prop_vlen_get(Object *obj, Visitor *v, const char *name, 2059 void *opaque, Error **errp) 2060 { 2061 uint16_t value = RISCV_CPU(obj)->cfg.vlenb << 3; 2062 2063 visit_type_uint16(v, name, &value, errp); 2064 } 2065 2066 static const PropertyInfo prop_vlen = { 2067 .name = "vlen", 2068 .get = prop_vlen_get, 2069 .set = prop_vlen_set, 2070 }; 2071 2072 static void prop_elen_set(Object *obj, Visitor *v, const char *name, 2073 void *opaque, Error **errp) 2074 { 2075 RISCVCPU *cpu = RISCV_CPU(obj); 2076 uint16_t value; 2077 2078 if (!visit_type_uint16(v, name, &value, errp)) { 2079 return; 2080 } 2081 2082 if (!is_power_of_2(value)) { 2083 error_setg(errp, "Vector extension ELEN must be power of 2"); 2084 return; 2085 } 2086 2087 if (value != 
cpu->cfg.elen && riscv_cpu_is_vendor(obj)) { 2088 cpu_set_prop_err(cpu, name, errp); 2089 error_append_hint(errp, "Current '%s' val: %u\n", 2090 name, cpu->cfg.elen); 2091 return; 2092 } 2093 2094 cpu_option_add_user_setting(name, value); 2095 cpu->cfg.elen = value; 2096 } 2097 2098 static void prop_elen_get(Object *obj, Visitor *v, const char *name, 2099 void *opaque, Error **errp) 2100 { 2101 uint16_t value = RISCV_CPU(obj)->cfg.elen; 2102 2103 visit_type_uint16(v, name, &value, errp); 2104 } 2105 2106 static const PropertyInfo prop_elen = { 2107 .name = "elen", 2108 .get = prop_elen_get, 2109 .set = prop_elen_set, 2110 }; 2111 2112 static void prop_cbom_blksize_set(Object *obj, Visitor *v, const char *name, 2113 void *opaque, Error **errp) 2114 { 2115 RISCVCPU *cpu = RISCV_CPU(obj); 2116 uint16_t value; 2117 2118 if (!visit_type_uint16(v, name, &value, errp)) { 2119 return; 2120 } 2121 2122 if (value != cpu->cfg.cbom_blocksize && riscv_cpu_is_vendor(obj)) { 2123 cpu_set_prop_err(cpu, name, errp); 2124 error_append_hint(errp, "Current '%s' val: %u\n", 2125 name, cpu->cfg.cbom_blocksize); 2126 return; 2127 } 2128 2129 cpu_option_add_user_setting(name, value); 2130 cpu->cfg.cbom_blocksize = value; 2131 } 2132 2133 static void prop_cbom_blksize_get(Object *obj, Visitor *v, const char *name, 2134 void *opaque, Error **errp) 2135 { 2136 uint16_t value = RISCV_CPU(obj)->cfg.cbom_blocksize; 2137 2138 visit_type_uint16(v, name, &value, errp); 2139 } 2140 2141 static const PropertyInfo prop_cbom_blksize = { 2142 .name = "cbom_blocksize", 2143 .get = prop_cbom_blksize_get, 2144 .set = prop_cbom_blksize_set, 2145 }; 2146 2147 static void prop_cbop_blksize_set(Object *obj, Visitor *v, const char *name, 2148 void *opaque, Error **errp) 2149 { 2150 RISCVCPU *cpu = RISCV_CPU(obj); 2151 uint16_t value; 2152 2153 if (!visit_type_uint16(v, name, &value, errp)) { 2154 return; 2155 } 2156 2157 if (value != cpu->cfg.cbop_blocksize && riscv_cpu_is_vendor(obj)) { 2158 
cpu_set_prop_err(cpu, name, errp); 2159 error_append_hint(errp, "Current '%s' val: %u\n", 2160 name, cpu->cfg.cbop_blocksize); 2161 return; 2162 } 2163 2164 cpu_option_add_user_setting(name, value); 2165 cpu->cfg.cbop_blocksize = value; 2166 } 2167 2168 static void prop_cbop_blksize_get(Object *obj, Visitor *v, const char *name, 2169 void *opaque, Error **errp) 2170 { 2171 uint16_t value = RISCV_CPU(obj)->cfg.cbop_blocksize; 2172 2173 visit_type_uint16(v, name, &value, errp); 2174 } 2175 2176 static const PropertyInfo prop_cbop_blksize = { 2177 .name = "cbop_blocksize", 2178 .get = prop_cbop_blksize_get, 2179 .set = prop_cbop_blksize_set, 2180 }; 2181 2182 static void prop_cboz_blksize_set(Object *obj, Visitor *v, const char *name, 2183 void *opaque, Error **errp) 2184 { 2185 RISCVCPU *cpu = RISCV_CPU(obj); 2186 uint16_t value; 2187 2188 if (!visit_type_uint16(v, name, &value, errp)) { 2189 return; 2190 } 2191 2192 if (value != cpu->cfg.cboz_blocksize && riscv_cpu_is_vendor(obj)) { 2193 cpu_set_prop_err(cpu, name, errp); 2194 error_append_hint(errp, "Current '%s' val: %u\n", 2195 name, cpu->cfg.cboz_blocksize); 2196 return; 2197 } 2198 2199 cpu_option_add_user_setting(name, value); 2200 cpu->cfg.cboz_blocksize = value; 2201 } 2202 2203 static void prop_cboz_blksize_get(Object *obj, Visitor *v, const char *name, 2204 void *opaque, Error **errp) 2205 { 2206 uint16_t value = RISCV_CPU(obj)->cfg.cboz_blocksize; 2207 2208 visit_type_uint16(v, name, &value, errp); 2209 } 2210 2211 static const PropertyInfo prop_cboz_blksize = { 2212 .name = "cboz_blocksize", 2213 .get = prop_cboz_blksize_get, 2214 .set = prop_cboz_blksize_set, 2215 }; 2216 2217 static void prop_mvendorid_set(Object *obj, Visitor *v, const char *name, 2218 void *opaque, Error **errp) 2219 { 2220 bool dynamic_cpu = riscv_cpu_is_dynamic(obj); 2221 RISCVCPU *cpu = RISCV_CPU(obj); 2222 uint32_t prev_val = cpu->cfg.mvendorid; 2223 uint32_t value; 2224 2225 if (!visit_type_uint32(v, name, &value, errp)) { 2226 
return; 2227 } 2228 2229 if (!dynamic_cpu && prev_val != value) { 2230 error_setg(errp, "Unable to change %s mvendorid (0x%x)", 2231 object_get_typename(obj), prev_val); 2232 return; 2233 } 2234 2235 cpu->cfg.mvendorid = value; 2236 } 2237 2238 static void prop_mvendorid_get(Object *obj, Visitor *v, const char *name, 2239 void *opaque, Error **errp) 2240 { 2241 uint32_t value = RISCV_CPU(obj)->cfg.mvendorid; 2242 2243 visit_type_uint32(v, name, &value, errp); 2244 } 2245 2246 static const PropertyInfo prop_mvendorid = { 2247 .name = "mvendorid", 2248 .get = prop_mvendorid_get, 2249 .set = prop_mvendorid_set, 2250 }; 2251 2252 static void prop_mimpid_set(Object *obj, Visitor *v, const char *name, 2253 void *opaque, Error **errp) 2254 { 2255 bool dynamic_cpu = riscv_cpu_is_dynamic(obj); 2256 RISCVCPU *cpu = RISCV_CPU(obj); 2257 uint64_t prev_val = cpu->cfg.mimpid; 2258 uint64_t value; 2259 2260 if (!visit_type_uint64(v, name, &value, errp)) { 2261 return; 2262 } 2263 2264 if (!dynamic_cpu && prev_val != value) { 2265 error_setg(errp, "Unable to change %s mimpid (0x%" PRIu64 ")", 2266 object_get_typename(obj), prev_val); 2267 return; 2268 } 2269 2270 cpu->cfg.mimpid = value; 2271 } 2272 2273 static void prop_mimpid_get(Object *obj, Visitor *v, const char *name, 2274 void *opaque, Error **errp) 2275 { 2276 uint64_t value = RISCV_CPU(obj)->cfg.mimpid; 2277 2278 visit_type_uint64(v, name, &value, errp); 2279 } 2280 2281 static const PropertyInfo prop_mimpid = { 2282 .name = "mimpid", 2283 .get = prop_mimpid_get, 2284 .set = prop_mimpid_set, 2285 }; 2286 2287 static void prop_marchid_set(Object *obj, Visitor *v, const char *name, 2288 void *opaque, Error **errp) 2289 { 2290 bool dynamic_cpu = riscv_cpu_is_dynamic(obj); 2291 RISCVCPU *cpu = RISCV_CPU(obj); 2292 uint64_t prev_val = cpu->cfg.marchid; 2293 uint64_t value, invalid_val; 2294 uint32_t mxlen = 0; 2295 2296 if (!visit_type_uint64(v, name, &value, errp)) { 2297 return; 2298 } 2299 2300 if (!dynamic_cpu && prev_val 
!= value) { 2301 error_setg(errp, "Unable to change %s marchid (0x%" PRIu64 ")", 2302 object_get_typename(obj), prev_val); 2303 return; 2304 } 2305 2306 switch (riscv_cpu_mxl(&cpu->env)) { 2307 case MXL_RV32: 2308 mxlen = 32; 2309 break; 2310 case MXL_RV64: 2311 case MXL_RV128: 2312 mxlen = 64; 2313 break; 2314 default: 2315 g_assert_not_reached(); 2316 } 2317 2318 invalid_val = 1LL << (mxlen - 1); 2319 2320 if (value == invalid_val) { 2321 error_setg(errp, "Unable to set marchid with MSB (%u) bit set " 2322 "and the remaining bits zero", mxlen); 2323 return; 2324 } 2325 2326 cpu->cfg.marchid = value; 2327 } 2328 2329 static void prop_marchid_get(Object *obj, Visitor *v, const char *name, 2330 void *opaque, Error **errp) 2331 { 2332 uint64_t value = RISCV_CPU(obj)->cfg.marchid; 2333 2334 visit_type_uint64(v, name, &value, errp); 2335 } 2336 2337 static const PropertyInfo prop_marchid = { 2338 .name = "marchid", 2339 .get = prop_marchid_get, 2340 .set = prop_marchid_set, 2341 }; 2342 2343 /* 2344 * RVA22U64 defines some 'named features' that are cache 2345 * related: Za64rs, Zic64b, Ziccif, Ziccrse, Ziccamoa 2346 * and Zicclsm. They are always implemented in TCG and 2347 * doesn't need to be manually enabled by the profile. 
 */
static RISCVCPUProfile RVA22U64 = {
    .u_parent = NULL,
    .s_parent = NULL,
    .name = "rva22u64",
    .misa_ext = RVI | RVM | RVA | RVF | RVD | RVC | RVB | RVU,
    .priv_spec = RISCV_PROFILE_ATTR_UNUSED,
    .satp_mode = RISCV_PROFILE_ATTR_UNUSED,
    .ext_offsets = {
        /* mandatory multi-letter extensions of the rva22u64 profile */
        CPU_CFG_OFFSET(ext_zicsr), CPU_CFG_OFFSET(ext_zihintpause),
        CPU_CFG_OFFSET(ext_zba), CPU_CFG_OFFSET(ext_zbb),
        CPU_CFG_OFFSET(ext_zbs), CPU_CFG_OFFSET(ext_zfhmin),
        CPU_CFG_OFFSET(ext_zkt), CPU_CFG_OFFSET(ext_zicntr),
        CPU_CFG_OFFSET(ext_zihpm), CPU_CFG_OFFSET(ext_zicbom),
        CPU_CFG_OFFSET(ext_zicbop), CPU_CFG_OFFSET(ext_zicboz),

        /* mandatory named features for this profile */
        CPU_CFG_OFFSET(ext_zic64b),

        RISCV_PROFILE_EXT_LIST_END
    }
};

/*
 * As with RVA22U64, RVA22S64 also defines 'named features'.
 *
 * Cache related features that we consider enabled since we don't
 * implement cache: Ssccptr
 *
 * Other named features that we already implement: Sstvecd, Sstvala,
 * Sscounterenw
 *
 * The remaining features/extensions come from RVA22U64.
 */
static RISCVCPUProfile RVA22S64 = {
    .u_parent = &RVA22U64,
    .s_parent = NULL,
    .name = "rva22s64",
    .misa_ext = RVS,
    .priv_spec = PRIV_VERSION_1_12_0,
    .satp_mode = VM_1_10_SV39,
    .ext_offsets = {
        /* rva22s64 exts */
        CPU_CFG_OFFSET(ext_zifencei), CPU_CFG_OFFSET(ext_svpbmt),
        CPU_CFG_OFFSET(ext_svinval), CPU_CFG_OFFSET(ext_svade),

        RISCV_PROFILE_EXT_LIST_END
    }
};

/*
 * All mandatory extensions from RVA22U64 are present
 * in RVA23U64 so set RVA22 as a parent. We need to
 * declare just the newly added mandatory extensions.
 */
static RISCVCPUProfile RVA23U64 = {
    .u_parent = &RVA22U64,
    .s_parent = NULL,
    .name = "rva23u64",
    .misa_ext = RVV,
    .priv_spec = RISCV_PROFILE_ATTR_UNUSED,
    .satp_mode = RISCV_PROFILE_ATTR_UNUSED,
    .ext_offsets = {
        /* mandatory extensions new in rva23u64 */
        CPU_CFG_OFFSET(ext_zvfhmin), CPU_CFG_OFFSET(ext_zvbb),
        CPU_CFG_OFFSET(ext_zvkt), CPU_CFG_OFFSET(ext_zihintntl),
        CPU_CFG_OFFSET(ext_zicond), CPU_CFG_OFFSET(ext_zimop),
        CPU_CFG_OFFSET(ext_zcmop), CPU_CFG_OFFSET(ext_zcb),
        CPU_CFG_OFFSET(ext_zfa), CPU_CFG_OFFSET(ext_zawrs),
        CPU_CFG_OFFSET(ext_supm),

        RISCV_PROFILE_EXT_LIST_END
    }
};

/*
 * As with RVA23U64, RVA23S64 also defines 'named features'.
 *
 * Cache related features that we consider enabled since we don't
 * implement cache: Ssccptr
 *
 * Other named features that we already implement: Sstvecd, Sstvala,
 * Sscounterenw, Ssu64xl
 *
 * The remaining features/extensions come from RVA23U64.
 */
static RISCVCPUProfile RVA23S64 = {
    .u_parent = &RVA23U64,
    .s_parent = &RVA22S64,
    .name = "rva23s64",
    .misa_ext = RVS,
    .priv_spec = PRIV_VERSION_1_13_0,
    .satp_mode = VM_1_10_SV39,
    .ext_offsets = {
        /* New in RVA23S64 */
        CPU_CFG_OFFSET(ext_svnapot), CPU_CFG_OFFSET(ext_sstc),
        CPU_CFG_OFFSET(ext_sscofpmf), CPU_CFG_OFFSET(ext_ssnpm),

        /* Named features: Sha */
        CPU_CFG_OFFSET(ext_sha),

        RISCV_PROFILE_EXT_LIST_END
    }
};

/* NULL-terminated list of all supported profiles, in release order */
RISCVCPUProfile *riscv_profiles[] = {
    &RVA22U64,
    &RVA22S64,
    &RVA23U64,
    &RVA23S64,
    NULL,
};

/*
 * Implied-extension rules. Enabling the rule's extension (a MISA bit
 * when .is_misa is set, a multi-letter extension otherwise) implies
 * enabling the listed MISA bits and/or multi-letter extensions.
 */
static RISCVCPUImpliedExtsRule RVA_IMPLIED = {
    .is_misa = true,
    .ext = RVA,
    .implied_multi_exts = {
        CPU_CFG_OFFSET(ext_zalrsc), CPU_CFG_OFFSET(ext_zaamo),

        RISCV_IMPLIED_EXTS_RULE_END
    },
};

static RISCVCPUImpliedExtsRule RVD_IMPLIED = {
    .is_misa = true,
    .ext = RVD,
    .implied_misa_exts = RVF,
    .implied_multi_exts = { RISCV_IMPLIED_EXTS_RULE_END },
};

static RISCVCPUImpliedExtsRule RVF_IMPLIED = {
    .is_misa = true,
    .ext = RVF,
    .implied_multi_exts = {
        CPU_CFG_OFFSET(ext_zicsr),

        RISCV_IMPLIED_EXTS_RULE_END
    },
};

static RISCVCPUImpliedExtsRule RVM_IMPLIED = {
    .is_misa = true,
    .ext = RVM,
    .implied_multi_exts = {
        CPU_CFG_OFFSET(ext_zmmul),

        RISCV_IMPLIED_EXTS_RULE_END
    },
};

static RISCVCPUImpliedExtsRule RVV_IMPLIED = {
    .is_misa = true,
    .ext = RVV,
    .implied_multi_exts = {
        CPU_CFG_OFFSET(ext_zve64d),

        RISCV_IMPLIED_EXTS_RULE_END
    },
};

static RISCVCPUImpliedExtsRule ZCB_IMPLIED = {
    .ext = CPU_CFG_OFFSET(ext_zcb),
    .implied_multi_exts = {
        CPU_CFG_OFFSET(ext_zca),

        RISCV_IMPLIED_EXTS_RULE_END
    },
};

static RISCVCPUImpliedExtsRule ZCD_IMPLIED = {
    .ext = CPU_CFG_OFFSET(ext_zcd),
    .implied_misa_exts = RVD,
    .implied_multi_exts = {
        CPU_CFG_OFFSET(ext_zca),

        RISCV_IMPLIED_EXTS_RULE_END
    },
};

static RISCVCPUImpliedExtsRule ZCE_IMPLIED = {
    .ext = CPU_CFG_OFFSET(ext_zce),
    .implied_multi_exts = {
        CPU_CFG_OFFSET(ext_zcb), CPU_CFG_OFFSET(ext_zcmp),
        CPU_CFG_OFFSET(ext_zcmt),

        RISCV_IMPLIED_EXTS_RULE_END
    },
};

static RISCVCPUImpliedExtsRule ZCF_IMPLIED = {
    .ext = CPU_CFG_OFFSET(ext_zcf),
    .implied_misa_exts = RVF,
    .implied_multi_exts = {
        CPU_CFG_OFFSET(ext_zca),

        RISCV_IMPLIED_EXTS_RULE_END
    },
};

static RISCVCPUImpliedExtsRule ZCMP_IMPLIED = {
    .ext = CPU_CFG_OFFSET(ext_zcmp),
    .implied_multi_exts = {
        CPU_CFG_OFFSET(ext_zca),

        RISCV_IMPLIED_EXTS_RULE_END
    },
};

static RISCVCPUImpliedExtsRule ZCMT_IMPLIED = {
    .ext = CPU_CFG_OFFSET(ext_zcmt),
    .implied_multi_exts = {
        CPU_CFG_OFFSET(ext_zca), CPU_CFG_OFFSET(ext_zicsr),

        RISCV_IMPLIED_EXTS_RULE_END
    },
};

static RISCVCPUImpliedExtsRule ZDINX_IMPLIED = {
    .ext = CPU_CFG_OFFSET(ext_zdinx),
    .implied_multi_exts = {
        CPU_CFG_OFFSET(ext_zfinx),

        RISCV_IMPLIED_EXTS_RULE_END
    },
};

static RISCVCPUImpliedExtsRule ZFA_IMPLIED = {
    .ext = CPU_CFG_OFFSET(ext_zfa),
    .implied_misa_exts = RVF,
    .implied_multi_exts = { RISCV_IMPLIED_EXTS_RULE_END },
};

static RISCVCPUImpliedExtsRule ZFBFMIN_IMPLIED = {
    .ext = CPU_CFG_OFFSET(ext_zfbfmin),
    .implied_misa_exts = RVF,
    .implied_multi_exts = { RISCV_IMPLIED_EXTS_RULE_END },
};

static RISCVCPUImpliedExtsRule ZFH_IMPLIED = {
    .ext = CPU_CFG_OFFSET(ext_zfh),
    .implied_multi_exts = {
        CPU_CFG_OFFSET(ext_zfhmin),

        RISCV_IMPLIED_EXTS_RULE_END
    },
};

static RISCVCPUImpliedExtsRule ZFHMIN_IMPLIED = {
    .ext = CPU_CFG_OFFSET(ext_zfhmin),
    .implied_misa_exts = RVF,
    .implied_multi_exts = { RISCV_IMPLIED_EXTS_RULE_END },
};

static RISCVCPUImpliedExtsRule ZFINX_IMPLIED = {
    .ext = CPU_CFG_OFFSET(ext_zfinx),
    .implied_multi_exts = {
        CPU_CFG_OFFSET(ext_zicsr),

        RISCV_IMPLIED_EXTS_RULE_END
    },
};

static RISCVCPUImpliedExtsRule ZHINX_IMPLIED = {
    .ext = CPU_CFG_OFFSET(ext_zhinx),
    .implied_multi_exts = {
        CPU_CFG_OFFSET(ext_zhinxmin),

        RISCV_IMPLIED_EXTS_RULE_END
    },
};

static RISCVCPUImpliedExtsRule ZHINXMIN_IMPLIED = {
    .ext = CPU_CFG_OFFSET(ext_zhinxmin),
    .implied_multi_exts = {
        CPU_CFG_OFFSET(ext_zfinx),

        RISCV_IMPLIED_EXTS_RULE_END
    },
};

static RISCVCPUImpliedExtsRule ZICNTR_IMPLIED = {
    .ext = CPU_CFG_OFFSET(ext_zicntr),
    .implied_multi_exts = {
        CPU_CFG_OFFSET(ext_zicsr),

        RISCV_IMPLIED_EXTS_RULE_END
    },
};

static RISCVCPUImpliedExtsRule ZIHPM_IMPLIED = {
    .ext = CPU_CFG_OFFSET(ext_zihpm),
    .implied_multi_exts = {
        CPU_CFG_OFFSET(ext_zicsr),

        RISCV_IMPLIED_EXTS_RULE_END
    },
};

static RISCVCPUImpliedExtsRule ZK_IMPLIED = {
    .ext = CPU_CFG_OFFSET(ext_zk),
    .implied_multi_exts = {
        CPU_CFG_OFFSET(ext_zkn), CPU_CFG_OFFSET(ext_zkr),
        CPU_CFG_OFFSET(ext_zkt),

        RISCV_IMPLIED_EXTS_RULE_END
    },
};

static RISCVCPUImpliedExtsRule ZKN_IMPLIED = {
    .ext = CPU_CFG_OFFSET(ext_zkn),
    .implied_multi_exts = {
        CPU_CFG_OFFSET(ext_zbkb), CPU_CFG_OFFSET(ext_zbkc),
        CPU_CFG_OFFSET(ext_zbkx), CPU_CFG_OFFSET(ext_zkne),
        CPU_CFG_OFFSET(ext_zknd), CPU_CFG_OFFSET(ext_zknh),

        RISCV_IMPLIED_EXTS_RULE_END
    },
};

static RISCVCPUImpliedExtsRule ZKS_IMPLIED = {
    .ext = CPU_CFG_OFFSET(ext_zks),
    .implied_multi_exts = {
        CPU_CFG_OFFSET(ext_zbkb), CPU_CFG_OFFSET(ext_zbkc),
        CPU_CFG_OFFSET(ext_zbkx), CPU_CFG_OFFSET(ext_zksed),
        CPU_CFG_OFFSET(ext_zksh),

        RISCV_IMPLIED_EXTS_RULE_END
    },
};

static RISCVCPUImpliedExtsRule ZVBB_IMPLIED = {
    .ext = CPU_CFG_OFFSET(ext_zvbb),
    .implied_multi_exts = {
        CPU_CFG_OFFSET(ext_zvkb),

        RISCV_IMPLIED_EXTS_RULE_END
    },
};

static RISCVCPUImpliedExtsRule ZVE32F_IMPLIED = {
    .ext = CPU_CFG_OFFSET(ext_zve32f),
    .implied_misa_exts = RVF,
    .implied_multi_exts = {
        CPU_CFG_OFFSET(ext_zve32x),

        RISCV_IMPLIED_EXTS_RULE_END
    },
};

static RISCVCPUImpliedExtsRule ZVE32X_IMPLIED = {
    .ext = CPU_CFG_OFFSET(ext_zve32x),
    .implied_multi_exts = {
        CPU_CFG_OFFSET(ext_zicsr),

        RISCV_IMPLIED_EXTS_RULE_END
    },
};

static RISCVCPUImpliedExtsRule ZVE64D_IMPLIED = {
    .ext = CPU_CFG_OFFSET(ext_zve64d),
    .implied_misa_exts = RVD,
    .implied_multi_exts = {
        CPU_CFG_OFFSET(ext_zve64f),

        RISCV_IMPLIED_EXTS_RULE_END
    },
};

static RISCVCPUImpliedExtsRule ZVE64F_IMPLIED = {
    .ext = CPU_CFG_OFFSET(ext_zve64f),
    .implied_misa_exts = RVF,
    .implied_multi_exts = {
        CPU_CFG_OFFSET(ext_zve32f), CPU_CFG_OFFSET(ext_zve64x),

        RISCV_IMPLIED_EXTS_RULE_END
    },
};

static RISCVCPUImpliedExtsRule ZVE64X_IMPLIED = {
    .ext = CPU_CFG_OFFSET(ext_zve64x),
    .implied_multi_exts = {
        CPU_CFG_OFFSET(ext_zve32x),

        RISCV_IMPLIED_EXTS_RULE_END
    },
};

static RISCVCPUImpliedExtsRule ZVFBFMIN_IMPLIED = {
    .ext = CPU_CFG_OFFSET(ext_zvfbfmin),
    .implied_multi_exts = {
        CPU_CFG_OFFSET(ext_zve32f),

        RISCV_IMPLIED_EXTS_RULE_END
    },
};

static RISCVCPUImpliedExtsRule ZVFBFWMA_IMPLIED = {
    .ext = CPU_CFG_OFFSET(ext_zvfbfwma),
    .implied_multi_exts = {
        CPU_CFG_OFFSET(ext_zvfbfmin), CPU_CFG_OFFSET(ext_zfbfmin),

        RISCV_IMPLIED_EXTS_RULE_END
    },
};

static RISCVCPUImpliedExtsRule ZVFH_IMPLIED = {
    .ext = CPU_CFG_OFFSET(ext_zvfh),
    .implied_multi_exts = {
        CPU_CFG_OFFSET(ext_zvfhmin), CPU_CFG_OFFSET(ext_zfhmin),

        RISCV_IMPLIED_EXTS_RULE_END
    },
};

static RISCVCPUImpliedExtsRule ZVFHMIN_IMPLIED = {
    .ext = CPU_CFG_OFFSET(ext_zvfhmin),
    .implied_multi_exts = {
        CPU_CFG_OFFSET(ext_zve32f),

        RISCV_IMPLIED_EXTS_RULE_END
    },
};

static RISCVCPUImpliedExtsRule ZVKN_IMPLIED = {
    .ext = CPU_CFG_OFFSET(ext_zvkn),
    .implied_multi_exts = {
        CPU_CFG_OFFSET(ext_zvkned), CPU_CFG_OFFSET(ext_zvknhb),
        CPU_CFG_OFFSET(ext_zvkb), CPU_CFG_OFFSET(ext_zvkt),

        RISCV_IMPLIED_EXTS_RULE_END
    },
};

static RISCVCPUImpliedExtsRule ZVKNC_IMPLIED = {
    .ext = CPU_CFG_OFFSET(ext_zvknc),
    .implied_multi_exts = {
        CPU_CFG_OFFSET(ext_zvkn), CPU_CFG_OFFSET(ext_zvbc),

        RISCV_IMPLIED_EXTS_RULE_END
    },
};

static RISCVCPUImpliedExtsRule ZVKNG_IMPLIED = {
    .ext = CPU_CFG_OFFSET(ext_zvkng),
    .implied_multi_exts = {
        CPU_CFG_OFFSET(ext_zvkn), CPU_CFG_OFFSET(ext_zvkg),

        RISCV_IMPLIED_EXTS_RULE_END
    },
};

static RISCVCPUImpliedExtsRule ZVKNHB_IMPLIED = {
    .ext = CPU_CFG_OFFSET(ext_zvknhb),
    .implied_multi_exts = {
        CPU_CFG_OFFSET(ext_zve64x),

        RISCV_IMPLIED_EXTS_RULE_END
    },
};

static RISCVCPUImpliedExtsRule ZVKS_IMPLIED = {
    .ext = CPU_CFG_OFFSET(ext_zvks),
    .implied_multi_exts = {
        CPU_CFG_OFFSET(ext_zvksed), CPU_CFG_OFFSET(ext_zvksh),
        CPU_CFG_OFFSET(ext_zvkb), CPU_CFG_OFFSET(ext_zvkt),

        RISCV_IMPLIED_EXTS_RULE_END
    },
};

static RISCVCPUImpliedExtsRule ZVKSC_IMPLIED = {
    .ext = CPU_CFG_OFFSET(ext_zvksc),
    .implied_multi_exts = {
        CPU_CFG_OFFSET(ext_zvks), CPU_CFG_OFFSET(ext_zvbc),

        RISCV_IMPLIED_EXTS_RULE_END
    },
};

static RISCVCPUImpliedExtsRule ZVKSG_IMPLIED = {
    .ext = CPU_CFG_OFFSET(ext_zvksg),
    .implied_multi_exts = {
        CPU_CFG_OFFSET(ext_zvks), CPU_CFG_OFFSET(ext_zvkg),

        RISCV_IMPLIED_EXTS_RULE_END
    },
};

static RISCVCPUImpliedExtsRule SSCFG_IMPLIED = {
    .ext = CPU_CFG_OFFSET(ext_ssccfg),
    .implied_multi_exts = {
        CPU_CFG_OFFSET(ext_smcsrind), CPU_CFG_OFFSET(ext_sscsrind),
        CPU_CFG_OFFSET(ext_smcdeleg),

        RISCV_IMPLIED_EXTS_RULE_END
    },
};

static RISCVCPUImpliedExtsRule SUPM_IMPLIED = {
    .ext = CPU_CFG_OFFSET(ext_supm),
    .implied_multi_exts = {
        CPU_CFG_OFFSET(ext_ssnpm), CPU_CFG_OFFSET(ext_smnpm),

        RISCV_IMPLIED_EXTS_RULE_END
    },
};

static RISCVCPUImpliedExtsRule SSPM_IMPLIED = {
    .ext = CPU_CFG_OFFSET(ext_sspm),
    .implied_multi_exts = {
        CPU_CFG_OFFSET(ext_smnpm),

        RISCV_IMPLIED_EXTS_RULE_END
    },
};

static RISCVCPUImpliedExtsRule SMCTR_IMPLIED = {
    .ext = CPU_CFG_OFFSET(ext_smctr),
    .implied_misa_exts = RVS,
    .implied_multi_exts = {
        CPU_CFG_OFFSET(ext_sscsrind),

        RISCV_IMPLIED_EXTS_RULE_END
    },
};

static RISCVCPUImpliedExtsRule SSCTR_IMPLIED = {
    .ext = CPU_CFG_OFFSET(ext_ssctr),
    .implied_misa_exts = RVS,
    .implied_multi_exts = {
        CPU_CFG_OFFSET(ext_sscsrind),

        RISCV_IMPLIED_EXTS_RULE_END
    },
};

/* NULL-terminated: rules triggered by MISA single-letter extensions */
RISCVCPUImpliedExtsRule *riscv_misa_ext_implied_rules[] = {
    &RVA_IMPLIED, &RVD_IMPLIED, &RVF_IMPLIED,
    &RVM_IMPLIED, &RVV_IMPLIED, NULL
};

/* NULL-terminated: rules triggered by multi-letter extensions */
RISCVCPUImpliedExtsRule *riscv_multi_ext_implied_rules[] = {
    &ZCB_IMPLIED, &ZCD_IMPLIED, &ZCE_IMPLIED,
    &ZCF_IMPLIED, &ZCMP_IMPLIED, &ZCMT_IMPLIED,
    &ZDINX_IMPLIED, &ZFA_IMPLIED, &ZFBFMIN_IMPLIED,
    &ZFH_IMPLIED, &ZFHMIN_IMPLIED, &ZFINX_IMPLIED,
    &ZHINX_IMPLIED, &ZHINXMIN_IMPLIED, &ZICNTR_IMPLIED,
    &ZIHPM_IMPLIED, &ZK_IMPLIED, &ZKN_IMPLIED,
    &ZKS_IMPLIED, &ZVBB_IMPLIED, &ZVE32F_IMPLIED,
    &ZVE32X_IMPLIED, &ZVE64D_IMPLIED, &ZVE64F_IMPLIED,
    &ZVE64X_IMPLIED, &ZVFBFMIN_IMPLIED, &ZVFBFWMA_IMPLIED,
    &ZVFH_IMPLIED, &ZVFHMIN_IMPLIED, &ZVKN_IMPLIED,
    &ZVKNC_IMPLIED, &ZVKNG_IMPLIED, &ZVKNHB_IMPLIED,
    &ZVKS_IMPLIED, &ZVKSC_IMPLIED, &ZVKSG_IMPLIED, &SSCFG_IMPLIED,
    &SUPM_IMPLIED, &SSPM_IMPLIED, &SMCTR_IMPLIED, &SSCTR_IMPLIED,
    NULL
};

/* qdev properties common to all RISC-V CPU models */
static const Property riscv_cpu_properties[] = {
    DEFINE_PROP_BOOL("debug", RISCVCPU, cfg.debug, true),

    {.name = "pmu-mask", .info = &prop_pmu_mask},
    {.name = "pmu-num", .info = &prop_pmu_num}, /* Deprecated */

    {.name = "mmu", .info = &prop_mmu},
    {.name = "pmp", .info = &prop_pmp},

    {.name = "priv_spec", .info = &prop_priv_spec},
    {.name = "vext_spec", .info = &prop_vext_spec},

    {.name = "vlen", .info = &prop_vlen},
    {.name = "elen", .info = &prop_elen},

    {.name = "cbom_blocksize", .info = &prop_cbom_blksize},
    {.name = "cbop_blocksize", .info = &prop_cbop_blksize},
    {.name = "cboz_blocksize", .info = &prop_cboz_blksize},

    {.name = "mvendorid", .info = &prop_mvendorid},
    {.name = "mimpid", .info = &prop_mimpid},
    {.name = "marchid", .info = &prop_marchid},

#ifndef CONFIG_USER_ONLY
    DEFINE_PROP_UINT64("resetvec", RISCVCPU, env.resetvec, DEFAULT_RSTVEC),
    DEFINE_PROP_UINT64("rnmi-interrupt-vector", RISCVCPU, env.rnmi_irqvec,
                       DEFAULT_RNMI_IRQVEC),
    DEFINE_PROP_UINT64("rnmi-exception-vector", RISCVCPU, env.rnmi_excpvec,
                       DEFAULT_RNMI_EXCPVEC),
#endif

    DEFINE_PROP_BOOL("short-isa-string", RISCVCPU, cfg.short_isa_string, false),

    DEFINE_PROP_BOOL("rvv_ta_all_1s", RISCVCPU, cfg.rvv_ta_all_1s, false),
    DEFINE_PROP_BOOL("rvv_ma_all_1s", RISCVCPU, cfg.rvv_ma_all_1s, false),
    DEFINE_PROP_BOOL("rvv_vl_half_avl", RISCVCPU, cfg.rvv_vl_half_avl,
                     false),

    /*
     * write_misa() is marked as experimental for now so mark
     * it with -x and default to 'false'.
     */
    DEFINE_PROP_BOOL("x-misa-w", RISCVCPU, cfg.misa_w, false),
};

#if defined(TARGET_RISCV64)
/* Profile CPU models: bare rv64i init plus enabling the matching profile */
static void rva22u64_profile_cpu_init(Object *obj)
{
    rv64i_bare_cpu_init(obj);

    RVA22U64.enabled = true;
}

static void rva22s64_profile_cpu_init(Object *obj)
{
    rv64i_bare_cpu_init(obj);

    RVA22S64.enabled = true;
}

static void rva23u64_profile_cpu_init(Object *obj)
{
    rv64i_bare_cpu_init(obj);

    RVA23U64.enabled = true;
}

static void rva23s64_profile_cpu_init(Object *obj)
{
    rv64i_bare_cpu_init(obj);

    RVA23S64.enabled = true;
}
#endif

/* GDB architecture name derived from the CPU's MXL (RV128 reports rv64). */
static const gchar *riscv_gdb_arch_name(CPUState *cs)
{
    RISCVCPU *cpu = RISCV_CPU(cs);
    CPURISCVState *env = &cpu->env;

    switch (riscv_cpu_mxl(env)) {
    case MXL_RV32:
        return "riscv:rv32";
    case MXL_RV64:
    case MXL_RV128:
        return "riscv:rv64";
    default:
        g_assert_not_reached();
    }
}

#ifndef CONFIG_USER_ONLY
/* The architectural CPU id is the hart id. */
static int64_t riscv_get_arch_id(CPUState *cs)
{
    RISCVCPU *cpu = RISCV_CPU(cs);

    return cpu->env.mhartid;
}

#include "hw/core/sysemu-cpu-ops.h"

static const struct SysemuCPUOps riscv_sysemu_ops = {
    .get_phys_page_debug = riscv_cpu_get_phys_page_debug,
    .write_elf64_note = riscv_cpu_write_elf64_note,
    .write_elf32_note = riscv_cpu_write_elf32_note,
    .legacy_vmsd = &vmstate_riscv_cpu,
};
#endif

/* Class init shared by every RISC-V CPU model: hooks, props, reset phases. */
static void riscv_cpu_common_class_init(ObjectClass *c, void *data)
{
    RISCVCPUClass *mcc = RISCV_CPU_CLASS(c);
    CPUClass *cc = CPU_CLASS(c);
    DeviceClass *dc = DEVICE_CLASS(c);
    ResettableClass *rc = RESETTABLE_CLASS(c);

    device_class_set_parent_realize(dc, riscv_cpu_realize,
                                    &mcc->parent_realize);

    resettable_class_set_parent_phases(rc, NULL, riscv_cpu_reset_hold, NULL,
                                       &mcc->parent_phases);

    cc->class_by_name = riscv_cpu_class_by_name;
    cc->has_work = riscv_cpu_has_work;
    cc->mmu_index = riscv_cpu_mmu_index;
    cc->dump_state = riscv_cpu_dump_state;
    cc->set_pc = riscv_cpu_set_pc;
    cc->get_pc = riscv_cpu_get_pc;
    cc->gdb_read_register = riscv_cpu_gdb_read_register;
    cc->gdb_write_register = riscv_cpu_gdb_write_register;
    cc->gdb_stop_before_watchpoint = true;
    cc->disas_set_info = riscv_cpu_disas_set_info;
#ifndef CONFIG_USER_ONLY
    cc->sysemu_ops = &riscv_sysemu_ops;
    cc->get_arch_id = riscv_get_arch_id;
#endif
    cc->gdb_arch_name = riscv_gdb_arch_name;

    device_class_set_props(dc, riscv_cpu_properties);
}

/* Per-MXL class init: 'data' carries the class's maximum MXL value. */
static void riscv_cpu_class_init(ObjectClass *c, void *data)
{
    RISCVCPUClass *mcc = RISCV_CPU_CLASS(c);

    mcc->misa_mxl_max = (uint32_t)(uintptr_t)data;
    riscv_cpu_validate_misa_mxl(mcc);
}

/*
 * Append "_<ext>" for every enabled multi-letter extension to *isa_str,
 * reallocating the string as it grows. max_str_len is currently unused.
 */
static void riscv_isa_string_ext(RISCVCPU *cpu, char **isa_str,
                                 int max_str_len)
{
    const RISCVIsaExtData *edata;
    char *old = *isa_str;
    char *new = *isa_str;

    for (edata = isa_edata_arr; edata && edata->name; edata++) {
        if (isa_ext_is_enabled(cpu, edata->ext_enable_offset)) {
            new = g_strconcat(old, "_", edata->name, NULL);
            g_free(old);
            old = new;
        }
    }

    *isa_str = new;
}

/*
 * Build the ISA string, e.g. "rv64imafdc_zicsr_...". Caller frees.
 * Multi-letter extensions are omitted when cfg.short_isa_string is set.
 */
char *riscv_isa_string(RISCVCPU *cpu)
{
    RISCVCPUClass *mcc = RISCV_CPU_GET_CLASS(cpu);
    int i;
    /* "rv128" prefix plus all single-letter extensions plus NUL */
    const size_t maxlen = sizeof("rv128") + sizeof(riscv_single_letter_exts);
    char *isa_str = g_new(char, maxlen);
    int xlen = riscv_cpu_max_xlen(mcc);
    char *p = isa_str + snprintf(isa_str, maxlen, "rv%d", xlen);

    for (i = 0; i < sizeof(riscv_single_letter_exts) - 1; i++) {
        if (cpu->env.misa_ext & RV(riscv_single_letter_exts[i])) {
            *p++ = qemu_tolower(riscv_single_letter_exts[i]);
        }
    }
    *p = '\0';
    if (!cpu->cfg.short_isa_string) {
        riscv_isa_string_ext(cpu, &isa_str, maxlen);
    }
    return isa_str;
}

#ifndef CONFIG_USER_ONLY
/*
 * Return a newly-allocated array of enabled extension names (single-letter
 * ones lowercased) and store its length in *count. Caller frees array and
 * elements. *count must be zero-initialized by the caller.
 */
static char **riscv_isa_extensions_list(RISCVCPU *cpu, int *count)
{
    int maxlen = ARRAY_SIZE(riscv_single_letter_exts) + ARRAY_SIZE(isa_edata_arr);
    char **extensions = g_new(char *, maxlen);

    for (int i = 0; i < sizeof(riscv_single_letter_exts) - 1; i++) {
        if (cpu->env.misa_ext & RV(riscv_single_letter_exts[i])) {
            extensions[*count] = g_new(char, 2);
            snprintf(extensions[*count], 2, "%c",
                     qemu_tolower(riscv_single_letter_exts[i]));
            (*count)++;
        }
    }

    for (const RISCVIsaExtData *edata = isa_edata_arr; edata->name; edata++) {
        if (isa_ext_is_enabled(cpu, edata->ext_enable_offset)) {
            extensions[*count] = g_strdup(edata->name);
            (*count)++;
        }
    }

    return extensions;
}

/*
 * Populate the device tree node with "riscv,isa", "riscv,isa-base"
 * and "riscv,isa-extensions" properties for this CPU.
 */
void riscv_isa_write_fdt(RISCVCPU *cpu, void *fdt, char *nodename)
{
    RISCVCPUClass *mcc = RISCV_CPU_GET_CLASS(cpu);
    const size_t maxlen = sizeof("rv128i");
    g_autofree char *isa_base = g_new(char, maxlen);
    g_autofree char *riscv_isa;
    char **isa_extensions;
    int count = 0;
    int xlen = riscv_cpu_max_xlen(mcc);

    riscv_isa = riscv_isa_string(cpu);
    qemu_fdt_setprop_string(fdt, nodename, "riscv,isa", riscv_isa);

    snprintf(isa_base, maxlen, "rv%di", xlen);
    qemu_fdt_setprop_string(fdt, nodename, "riscv,isa-base", isa_base);

    isa_extensions = riscv_isa_extensions_list(cpu, &count);
    qemu_fdt_setprop_string_array(fdt, nodename, "riscv,isa-extensions",
                                  isa_extensions, count);

    for (int i = 0; i < count; i++) {
        g_free(isa_extensions[i]);
    }

    g_free(isa_extensions);
}
#endif

#define
DEFINE_DYNAMIC_CPU(type_name, misa_mxl_max, initfn) \ 3146 { \ 3147 .name = (type_name), \ 3148 .parent = TYPE_RISCV_DYNAMIC_CPU, \ 3149 .instance_init = (initfn), \ 3150 .class_init = riscv_cpu_class_init, \ 3151 .class_data = (void *)(misa_mxl_max) \ 3152 } 3153 3154 #define DEFINE_VENDOR_CPU(type_name, misa_mxl_max, initfn) \ 3155 { \ 3156 .name = (type_name), \ 3157 .parent = TYPE_RISCV_VENDOR_CPU, \ 3158 .instance_init = (initfn), \ 3159 .class_init = riscv_cpu_class_init, \ 3160 .class_data = (void *)(misa_mxl_max) \ 3161 } 3162 3163 #define DEFINE_BARE_CPU(type_name, misa_mxl_max, initfn) \ 3164 { \ 3165 .name = (type_name), \ 3166 .parent = TYPE_RISCV_BARE_CPU, \ 3167 .instance_init = (initfn), \ 3168 .class_init = riscv_cpu_class_init, \ 3169 .class_data = (void *)(misa_mxl_max) \ 3170 } 3171 3172 #define DEFINE_PROFILE_CPU(type_name, misa_mxl_max, initfn) \ 3173 { \ 3174 .name = (type_name), \ 3175 .parent = TYPE_RISCV_BARE_CPU, \ 3176 .instance_init = (initfn), \ 3177 .class_init = riscv_cpu_class_init, \ 3178 .class_data = (void *)(misa_mxl_max) \ 3179 } 3180 3181 static const TypeInfo riscv_cpu_type_infos[] = { 3182 { 3183 .name = TYPE_RISCV_CPU, 3184 .parent = TYPE_CPU, 3185 .instance_size = sizeof(RISCVCPU), 3186 .instance_align = __alignof(RISCVCPU), 3187 .instance_init = riscv_cpu_init, 3188 .instance_post_init = riscv_cpu_post_init, 3189 .abstract = true, 3190 .class_size = sizeof(RISCVCPUClass), 3191 .class_init = riscv_cpu_common_class_init, 3192 }, 3193 { 3194 .name = TYPE_RISCV_DYNAMIC_CPU, 3195 .parent = TYPE_RISCV_CPU, 3196 .abstract = true, 3197 }, 3198 { 3199 .name = TYPE_RISCV_VENDOR_CPU, 3200 .parent = TYPE_RISCV_CPU, 3201 .abstract = true, 3202 }, 3203 { 3204 .name = TYPE_RISCV_BARE_CPU, 3205 .parent = TYPE_RISCV_CPU, 3206 .instance_init = riscv_bare_cpu_init, 3207 .abstract = true, 3208 }, 3209 #if defined(TARGET_RISCV32) 3210 DEFINE_DYNAMIC_CPU(TYPE_RISCV_CPU_MAX, MXL_RV32, riscv_max_cpu_init), 3211 #elif defined(TARGET_RISCV64) 3212 
DEFINE_DYNAMIC_CPU(TYPE_RISCV_CPU_MAX, MXL_RV64, riscv_max_cpu_init), 3213 #endif 3214 3215 #if defined(TARGET_RISCV32) || \ 3216 (defined(TARGET_RISCV64) && !defined(CONFIG_USER_ONLY)) 3217 DEFINE_DYNAMIC_CPU(TYPE_RISCV_CPU_BASE32, MXL_RV32, rv32_base_cpu_init), 3218 DEFINE_VENDOR_CPU(TYPE_RISCV_CPU_IBEX, MXL_RV32, rv32_ibex_cpu_init), 3219 DEFINE_VENDOR_CPU(TYPE_RISCV_CPU_SIFIVE_E31, MXL_RV32, rv32_sifive_e_cpu_init), 3220 DEFINE_VENDOR_CPU(TYPE_RISCV_CPU_SIFIVE_E34, MXL_RV32, rv32_imafcu_nommu_cpu_init), 3221 DEFINE_VENDOR_CPU(TYPE_RISCV_CPU_SIFIVE_U34, MXL_RV32, rv32_sifive_u_cpu_init), 3222 DEFINE_BARE_CPU(TYPE_RISCV_CPU_RV32I, MXL_RV32, rv32i_bare_cpu_init), 3223 DEFINE_BARE_CPU(TYPE_RISCV_CPU_RV32E, MXL_RV32, rv32e_bare_cpu_init), 3224 #endif 3225 3226 #if (defined(TARGET_RISCV64) && !defined(CONFIG_USER_ONLY)) 3227 DEFINE_DYNAMIC_CPU(TYPE_RISCV_CPU_MAX32, MXL_RV32, riscv_max_cpu_init), 3228 #endif 3229 3230 #if defined(TARGET_RISCV64) 3231 DEFINE_DYNAMIC_CPU(TYPE_RISCV_CPU_BASE64, MXL_RV64, rv64_base_cpu_init), 3232 DEFINE_VENDOR_CPU(TYPE_RISCV_CPU_SIFIVE_E51, MXL_RV64, rv64_sifive_e_cpu_init), 3233 DEFINE_VENDOR_CPU(TYPE_RISCV_CPU_SIFIVE_U54, MXL_RV64, rv64_sifive_u_cpu_init), 3234 DEFINE_VENDOR_CPU(TYPE_RISCV_CPU_SHAKTI_C, MXL_RV64, rv64_sifive_u_cpu_init), 3235 DEFINE_VENDOR_CPU(TYPE_RISCV_CPU_THEAD_C906, MXL_RV64, rv64_thead_c906_cpu_init), 3236 DEFINE_VENDOR_CPU(TYPE_RISCV_CPU_TT_ASCALON, MXL_RV64, rv64_tt_ascalon_cpu_init), 3237 DEFINE_VENDOR_CPU(TYPE_RISCV_CPU_VEYRON_V1, MXL_RV64, rv64_veyron_v1_cpu_init), 3238 DEFINE_VENDOR_CPU(TYPE_RISCV_CPU_XIANGSHAN_NANHU, 3239 MXL_RV64, rv64_xiangshan_nanhu_cpu_init), 3240 #ifdef CONFIG_TCG 3241 DEFINE_DYNAMIC_CPU(TYPE_RISCV_CPU_BASE128, MXL_RV128, rv128_base_cpu_init), 3242 #endif /* CONFIG_TCG */ 3243 DEFINE_BARE_CPU(TYPE_RISCV_CPU_RV64I, MXL_RV64, rv64i_bare_cpu_init), 3244 DEFINE_BARE_CPU(TYPE_RISCV_CPU_RV64E, MXL_RV64, rv64e_bare_cpu_init), 3245 DEFINE_PROFILE_CPU(TYPE_RISCV_CPU_RVA22U64, MXL_RV64, 
rva22u64_profile_cpu_init), 3246 DEFINE_PROFILE_CPU(TYPE_RISCV_CPU_RVA22S64, MXL_RV64, rva22s64_profile_cpu_init), 3247 DEFINE_PROFILE_CPU(TYPE_RISCV_CPU_RVA23U64, MXL_RV64, rva23u64_profile_cpu_init), 3248 DEFINE_PROFILE_CPU(TYPE_RISCV_CPU_RVA23S64, MXL_RV64, rva23s64_profile_cpu_init), 3249 #endif /* TARGET_RISCV64 */ 3250 }; 3251 3252 DEFINE_TYPES(riscv_cpu_type_infos) 3253