/*
 * QEMU RISC-V CPU
 *
 * Copyright (c) 2016-2017 Sagar Karandikar, sagark@eecs.berkeley.edu
 * Copyright (c) 2017-2018 SiFive, Inc.
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms and conditions of the GNU General Public License,
 * version 2 or later, as published by the Free Software Foundation.
 *
 * This program is distributed in the hope it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
 * more details.
 *
 * You should have received a copy of the GNU General Public License along with
 * this program. If not, see <http://www.gnu.org/licenses/>.
 */

#include "qemu/osdep.h"
#include "qemu/qemu-print.h"
#include "qemu/ctype.h"
#include "qemu/log.h"
#include "cpu.h"
#include "cpu_vendorid.h"
#include "internals.h"
#include "qapi/error.h"
#include "qapi/visitor.h"
#include "qemu/error-report.h"
#include "hw/qdev-properties.h"
#include "hw/core/qdev-prop-internal.h"
#include "migration/vmstate.h"
#include "fpu/softfloat-helpers.h"
#include "system/device_tree.h"
#include "system/kvm.h"
#include "system/tcg.h"
#include "kvm/kvm_riscv.h"
#include "tcg/tcg-cpu.h"
#include "tcg/tcg.h"

/* RISC-V CPU definitions */
static const char riscv_single_letter_exts[] = "IEMAFDQCBPVH";
const uint32_t misa_bits[] = {RVI, RVE, RVM, RVA, RVF, RVD, RVV,
                              RVC, RVS, RVU, RVH, RVG, RVB, 0};

/*
 * From vector_helper.c
 * Note that vector data is stored in host-endian 64-bit chunks,
 * so addressing bytes needs a host-endian fixup.
 */
#if HOST_BIG_ENDIAN
#define BYTE(x)   ((x) ^ 7)
#else
#define BYTE(x)   (x)
#endif

bool riscv_cpu_is_32bit(RISCVCPU *cpu)
{
    return riscv_cpu_mxl(&cpu->env) == MXL_RV32;
}

/* Hash that stores general user set numeric options */
static GHashTable *general_user_opts;

static void cpu_option_add_user_setting(const char *optname, uint32_t value)
{
    g_hash_table_insert(general_user_opts, (gpointer)optname,
                        GUINT_TO_POINTER(value));
}

bool riscv_cpu_option_set(const char *optname)
{
    return g_hash_table_contains(general_user_opts, optname);
}

#define ISA_EXT_DATA_ENTRY(_name, _min_ver, _prop) \
    {#_name, _min_ver, CPU_CFG_OFFSET(_prop)}

/*
 * Here are the ordering rules of extension naming defined by RISC-V
 * specification :
 * 1. All extensions should be separated from other multi-letter extensions
 *    by an underscore.
 * 2. The first letter following the 'Z' conventionally indicates the most
 *    closely related alphabetical extension category, IMAFDQLCBKJTPVH.
 *    If multiple 'Z' extensions are named, they should be ordered first
 *    by category, then alphabetically within a category.
 * 3. Standard supervisor-level extensions (starts with 'S') should be
 *    listed after standard unprivileged extensions. If multiple
 *    supervisor-level extensions are listed, they should be ordered
 *    alphabetically.
 * 4. Non-standard extensions (starts with 'X') must be listed after all
 *    standard extensions. They must be separated from other multi-letter
 *    extensions by an underscore.
 *
 * Single letter extensions are checked in riscv_cpu_validate_misa_priv()
 * instead.
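 *
 * As an illustration only (not a statement about any particular CPU), a
 * riscv,isa string following these rules could look like:
 *   rv64imafdc_zicsr_zifencei_zba_zbb_sstc_svinval_xtheadba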
 */
const RISCVIsaExtData isa_edata_arr[] = {
    ISA_EXT_DATA_ENTRY(zic64b, PRIV_VERSION_1_12_0, ext_zic64b),
    ISA_EXT_DATA_ENTRY(zicbom, PRIV_VERSION_1_12_0, ext_zicbom),
    ISA_EXT_DATA_ENTRY(zicbop, PRIV_VERSION_1_12_0, ext_zicbop),
    ISA_EXT_DATA_ENTRY(zicboz, PRIV_VERSION_1_12_0, ext_zicboz),
    ISA_EXT_DATA_ENTRY(ziccamoa, PRIV_VERSION_1_11_0, has_priv_1_11),
    ISA_EXT_DATA_ENTRY(ziccif, PRIV_VERSION_1_11_0, has_priv_1_11),
    ISA_EXT_DATA_ENTRY(zicclsm, PRIV_VERSION_1_11_0, has_priv_1_11),
    ISA_EXT_DATA_ENTRY(ziccrse, PRIV_VERSION_1_11_0, ext_ziccrse),
    ISA_EXT_DATA_ENTRY(zicfilp, PRIV_VERSION_1_12_0, ext_zicfilp),
    ISA_EXT_DATA_ENTRY(zicfiss, PRIV_VERSION_1_13_0, ext_zicfiss),
    ISA_EXT_DATA_ENTRY(zicond, PRIV_VERSION_1_12_0, ext_zicond),
    ISA_EXT_DATA_ENTRY(zicntr, PRIV_VERSION_1_12_0, ext_zicntr),
    ISA_EXT_DATA_ENTRY(zicsr, PRIV_VERSION_1_10_0, ext_zicsr),
    ISA_EXT_DATA_ENTRY(zifencei, PRIV_VERSION_1_10_0, ext_zifencei),
    ISA_EXT_DATA_ENTRY(zihintntl, PRIV_VERSION_1_10_0, ext_zihintntl),
    ISA_EXT_DATA_ENTRY(zihintpause, PRIV_VERSION_1_10_0, ext_zihintpause),
    ISA_EXT_DATA_ENTRY(zihpm, PRIV_VERSION_1_12_0, ext_zihpm),
    ISA_EXT_DATA_ENTRY(zimop, PRIV_VERSION_1_13_0, ext_zimop),
    ISA_EXT_DATA_ENTRY(zmmul, PRIV_VERSION_1_12_0, ext_zmmul),
    ISA_EXT_DATA_ENTRY(za64rs, PRIV_VERSION_1_12_0, has_priv_1_12),
    ISA_EXT_DATA_ENTRY(zaamo, PRIV_VERSION_1_12_0, ext_zaamo),
    ISA_EXT_DATA_ENTRY(zabha, PRIV_VERSION_1_13_0, ext_zabha),
    ISA_EXT_DATA_ENTRY(zacas, PRIV_VERSION_1_12_0, ext_zacas),
    ISA_EXT_DATA_ENTRY(zama16b, PRIV_VERSION_1_13_0, ext_zama16b),
    ISA_EXT_DATA_ENTRY(zalrsc, PRIV_VERSION_1_12_0, ext_zalrsc),
    ISA_EXT_DATA_ENTRY(zawrs, PRIV_VERSION_1_12_0, ext_zawrs),
    ISA_EXT_DATA_ENTRY(zfa, PRIV_VERSION_1_12_0, ext_zfa),
    ISA_EXT_DATA_ENTRY(zfbfmin, PRIV_VERSION_1_12_0, ext_zfbfmin),
    ISA_EXT_DATA_ENTRY(zfh, PRIV_VERSION_1_11_0, ext_zfh),
    ISA_EXT_DATA_ENTRY(zfhmin, PRIV_VERSION_1_11_0, ext_zfhmin),
    ISA_EXT_DATA_ENTRY(zfinx, PRIV_VERSION_1_12_0, ext_zfinx),
    ISA_EXT_DATA_ENTRY(zdinx, PRIV_VERSION_1_12_0, ext_zdinx),
    ISA_EXT_DATA_ENTRY(zca, PRIV_VERSION_1_12_0, ext_zca),
    ISA_EXT_DATA_ENTRY(zcb, PRIV_VERSION_1_12_0, ext_zcb),
    ISA_EXT_DATA_ENTRY(zcf, PRIV_VERSION_1_12_0, ext_zcf),
    ISA_EXT_DATA_ENTRY(zcd, PRIV_VERSION_1_12_0, ext_zcd),
    ISA_EXT_DATA_ENTRY(zce, PRIV_VERSION_1_12_0, ext_zce),
    ISA_EXT_DATA_ENTRY(zcmop, PRIV_VERSION_1_13_0, ext_zcmop),
    ISA_EXT_DATA_ENTRY(zcmp, PRIV_VERSION_1_12_0, ext_zcmp),
    ISA_EXT_DATA_ENTRY(zcmt, PRIV_VERSION_1_12_0, ext_zcmt),
    ISA_EXT_DATA_ENTRY(zba, PRIV_VERSION_1_12_0, ext_zba),
    ISA_EXT_DATA_ENTRY(zbb, PRIV_VERSION_1_12_0, ext_zbb),
    ISA_EXT_DATA_ENTRY(zbc, PRIV_VERSION_1_12_0, ext_zbc),
    ISA_EXT_DATA_ENTRY(zbkb, PRIV_VERSION_1_12_0, ext_zbkb),
    ISA_EXT_DATA_ENTRY(zbkc, PRIV_VERSION_1_12_0, ext_zbkc),
    ISA_EXT_DATA_ENTRY(zbkx, PRIV_VERSION_1_12_0, ext_zbkx),
    ISA_EXT_DATA_ENTRY(zbs, PRIV_VERSION_1_12_0, ext_zbs),
    ISA_EXT_DATA_ENTRY(zk, PRIV_VERSION_1_12_0, ext_zk),
    ISA_EXT_DATA_ENTRY(zkn, PRIV_VERSION_1_12_0, ext_zkn),
    ISA_EXT_DATA_ENTRY(zknd, PRIV_VERSION_1_12_0, ext_zknd),
    ISA_EXT_DATA_ENTRY(zkne, PRIV_VERSION_1_12_0, ext_zkne),
    ISA_EXT_DATA_ENTRY(zknh, PRIV_VERSION_1_12_0, ext_zknh),
    ISA_EXT_DATA_ENTRY(zkr, PRIV_VERSION_1_12_0, ext_zkr),
    ISA_EXT_DATA_ENTRY(zks, PRIV_VERSION_1_12_0, ext_zks),
    ISA_EXT_DATA_ENTRY(zksed, PRIV_VERSION_1_12_0, ext_zksed),
    ISA_EXT_DATA_ENTRY(zksh, PRIV_VERSION_1_12_0, ext_zksh),
    ISA_EXT_DATA_ENTRY(zkt, PRIV_VERSION_1_12_0, ext_zkt),
    ISA_EXT_DATA_ENTRY(ztso, PRIV_VERSION_1_12_0, ext_ztso),
    ISA_EXT_DATA_ENTRY(zvbb, PRIV_VERSION_1_12_0, ext_zvbb),
    ISA_EXT_DATA_ENTRY(zvbc, PRIV_VERSION_1_12_0, ext_zvbc),
    ISA_EXT_DATA_ENTRY(zve32f, PRIV_VERSION_1_10_0, ext_zve32f),
    ISA_EXT_DATA_ENTRY(zve32x, PRIV_VERSION_1_10_0, ext_zve32x),
    ISA_EXT_DATA_ENTRY(zve64f, PRIV_VERSION_1_10_0, ext_zve64f),
    ISA_EXT_DATA_ENTRY(zve64d, PRIV_VERSION_1_10_0, ext_zve64d),
    ISA_EXT_DATA_ENTRY(zve64x, PRIV_VERSION_1_10_0, ext_zve64x),
    ISA_EXT_DATA_ENTRY(zvfbfmin, PRIV_VERSION_1_12_0, ext_zvfbfmin),
    ISA_EXT_DATA_ENTRY(zvfbfwma, PRIV_VERSION_1_12_0, ext_zvfbfwma),
    ISA_EXT_DATA_ENTRY(zvfh, PRIV_VERSION_1_12_0, ext_zvfh),
    ISA_EXT_DATA_ENTRY(zvfhmin, PRIV_VERSION_1_12_0, ext_zvfhmin),
    ISA_EXT_DATA_ENTRY(zvkb, PRIV_VERSION_1_12_0, ext_zvkb),
    ISA_EXT_DATA_ENTRY(zvkg, PRIV_VERSION_1_12_0, ext_zvkg),
    ISA_EXT_DATA_ENTRY(zvkn, PRIV_VERSION_1_12_0, ext_zvkn),
    ISA_EXT_DATA_ENTRY(zvknc, PRIV_VERSION_1_12_0, ext_zvknc),
    ISA_EXT_DATA_ENTRY(zvkned, PRIV_VERSION_1_12_0, ext_zvkned),
    ISA_EXT_DATA_ENTRY(zvkng, PRIV_VERSION_1_12_0, ext_zvkng),
    ISA_EXT_DATA_ENTRY(zvknha, PRIV_VERSION_1_12_0, ext_zvknha),
    ISA_EXT_DATA_ENTRY(zvknhb, PRIV_VERSION_1_12_0, ext_zvknhb),
    ISA_EXT_DATA_ENTRY(zvks, PRIV_VERSION_1_12_0, ext_zvks),
    ISA_EXT_DATA_ENTRY(zvksc, PRIV_VERSION_1_12_0, ext_zvksc),
    ISA_EXT_DATA_ENTRY(zvksed, PRIV_VERSION_1_12_0, ext_zvksed),
    ISA_EXT_DATA_ENTRY(zvksg, PRIV_VERSION_1_12_0, ext_zvksg),
    ISA_EXT_DATA_ENTRY(zvksh, PRIV_VERSION_1_12_0, ext_zvksh),
    ISA_EXT_DATA_ENTRY(zvkt, PRIV_VERSION_1_12_0, ext_zvkt),
    ISA_EXT_DATA_ENTRY(zhinx, PRIV_VERSION_1_12_0, ext_zhinx),
    ISA_EXT_DATA_ENTRY(zhinxmin, PRIV_VERSION_1_12_0, ext_zhinxmin),
    ISA_EXT_DATA_ENTRY(shcounterenw, PRIV_VERSION_1_12_0, has_priv_1_12),
    ISA_EXT_DATA_ENTRY(sha, PRIV_VERSION_1_12_0, ext_sha),
    ISA_EXT_DATA_ENTRY(shgatpa, PRIV_VERSION_1_12_0, has_priv_1_12),
    ISA_EXT_DATA_ENTRY(shtvala, PRIV_VERSION_1_12_0, has_priv_1_12),
    ISA_EXT_DATA_ENTRY(shvsatpa, PRIV_VERSION_1_12_0, has_priv_1_12),
    ISA_EXT_DATA_ENTRY(shvstvala, PRIV_VERSION_1_12_0, has_priv_1_12),
    ISA_EXT_DATA_ENTRY(shvstvecd, PRIV_VERSION_1_12_0, has_priv_1_12),
    ISA_EXT_DATA_ENTRY(smaia, PRIV_VERSION_1_12_0, ext_smaia),
    ISA_EXT_DATA_ENTRY(smcdeleg, PRIV_VERSION_1_13_0, ext_smcdeleg),
    ISA_EXT_DATA_ENTRY(smcntrpmf, PRIV_VERSION_1_12_0, ext_smcntrpmf),
    ISA_EXT_DATA_ENTRY(smcsrind, PRIV_VERSION_1_13_0, ext_smcsrind),
    ISA_EXT_DATA_ENTRY(smdbltrp, PRIV_VERSION_1_13_0, ext_smdbltrp),
    ISA_EXT_DATA_ENTRY(smepmp, PRIV_VERSION_1_12_0, ext_smepmp),
    ISA_EXT_DATA_ENTRY(smrnmi, PRIV_VERSION_1_12_0, ext_smrnmi),
    ISA_EXT_DATA_ENTRY(smmpm, PRIV_VERSION_1_13_0, ext_smmpm),
    ISA_EXT_DATA_ENTRY(smnpm, PRIV_VERSION_1_13_0, ext_smnpm),
    ISA_EXT_DATA_ENTRY(smstateen, PRIV_VERSION_1_12_0, ext_smstateen),
    ISA_EXT_DATA_ENTRY(ssaia, PRIV_VERSION_1_12_0, ext_ssaia),
    ISA_EXT_DATA_ENTRY(ssccfg, PRIV_VERSION_1_13_0, ext_ssccfg),
    ISA_EXT_DATA_ENTRY(ssccptr, PRIV_VERSION_1_11_0, has_priv_1_11),
    ISA_EXT_DATA_ENTRY(sscofpmf, PRIV_VERSION_1_12_0, ext_sscofpmf),
    ISA_EXT_DATA_ENTRY(sscounterenw, PRIV_VERSION_1_12_0, has_priv_1_12),
    ISA_EXT_DATA_ENTRY(sscsrind, PRIV_VERSION_1_12_0, ext_sscsrind),
    ISA_EXT_DATA_ENTRY(ssdbltrp, PRIV_VERSION_1_13_0, ext_ssdbltrp),
    ISA_EXT_DATA_ENTRY(ssnpm, PRIV_VERSION_1_13_0, ext_ssnpm),
    ISA_EXT_DATA_ENTRY(sspm, PRIV_VERSION_1_13_0, ext_sspm),
    ISA_EXT_DATA_ENTRY(ssstateen, PRIV_VERSION_1_12_0, ext_ssstateen),
    ISA_EXT_DATA_ENTRY(sstc, PRIV_VERSION_1_12_0, ext_sstc),
    ISA_EXT_DATA_ENTRY(sstvala, PRIV_VERSION_1_12_0, has_priv_1_12),
    ISA_EXT_DATA_ENTRY(sstvecd, PRIV_VERSION_1_12_0, has_priv_1_12),
    ISA_EXT_DATA_ENTRY(ssu64xl, PRIV_VERSION_1_12_0, has_priv_1_12),
    ISA_EXT_DATA_ENTRY(supm, PRIV_VERSION_1_13_0, ext_supm),
    ISA_EXT_DATA_ENTRY(svade, PRIV_VERSION_1_11_0, ext_svade),
    ISA_EXT_DATA_ENTRY(smctr, PRIV_VERSION_1_12_0, ext_smctr),
    ISA_EXT_DATA_ENTRY(ssctr, PRIV_VERSION_1_12_0, ext_ssctr),
    ISA_EXT_DATA_ENTRY(svadu, PRIV_VERSION_1_12_0, ext_svadu),
    ISA_EXT_DATA_ENTRY(svinval, PRIV_VERSION_1_12_0, ext_svinval),
    ISA_EXT_DATA_ENTRY(svnapot, PRIV_VERSION_1_12_0, ext_svnapot),
    ISA_EXT_DATA_ENTRY(svpbmt, PRIV_VERSION_1_12_0, ext_svpbmt),
    ISA_EXT_DATA_ENTRY(svukte, PRIV_VERSION_1_13_0, ext_svukte),
    ISA_EXT_DATA_ENTRY(svvptc, PRIV_VERSION_1_13_0, ext_svvptc),
    ISA_EXT_DATA_ENTRY(xtheadba, PRIV_VERSION_1_11_0, ext_xtheadba),
    ISA_EXT_DATA_ENTRY(xtheadbb, PRIV_VERSION_1_11_0, ext_xtheadbb),
    ISA_EXT_DATA_ENTRY(xtheadbs, PRIV_VERSION_1_11_0, ext_xtheadbs),
    ISA_EXT_DATA_ENTRY(xtheadcmo, PRIV_VERSION_1_11_0, ext_xtheadcmo),
    ISA_EXT_DATA_ENTRY(xtheadcondmov, PRIV_VERSION_1_11_0, ext_xtheadcondmov),
    ISA_EXT_DATA_ENTRY(xtheadfmemidx, PRIV_VERSION_1_11_0, ext_xtheadfmemidx),
    ISA_EXT_DATA_ENTRY(xtheadfmv, PRIV_VERSION_1_11_0, ext_xtheadfmv),
    ISA_EXT_DATA_ENTRY(xtheadmac, PRIV_VERSION_1_11_0, ext_xtheadmac),
    ISA_EXT_DATA_ENTRY(xtheadmemidx, PRIV_VERSION_1_11_0, ext_xtheadmemidx),
    ISA_EXT_DATA_ENTRY(xtheadmempair, PRIV_VERSION_1_11_0, ext_xtheadmempair),
    ISA_EXT_DATA_ENTRY(xtheadsync, PRIV_VERSION_1_11_0, ext_xtheadsync),
    ISA_EXT_DATA_ENTRY(xventanacondops, PRIV_VERSION_1_12_0, ext_XVentanaCondOps),

    { },
};

bool isa_ext_is_enabled(RISCVCPU *cpu, uint32_t ext_offset)
{
    bool *ext_enabled = (void *)&cpu->cfg + ext_offset;

    return *ext_enabled;
}

void isa_ext_update_enabled(RISCVCPU *cpu, uint32_t ext_offset, bool en)
{
    bool *ext_enabled = (void *)&cpu->cfg + ext_offset;

    *ext_enabled = en;
}

bool riscv_cpu_is_vendor(Object *cpu_obj)
{
    return object_dynamic_cast(cpu_obj, TYPE_RISCV_VENDOR_CPU) != NULL;
}

const char * const riscv_int_regnames[] = {
    "x0/zero", "x1/ra",  "x2/sp",  "x3/gp",  "x4/tp",  "x5/t0",   "x6/t1",
    "x7/t2",   "x8/s0",  "x9/s1",  "x10/a0", "x11/a1", "x12/a2",  "x13/a3",
    "x14/a4",  "x15/a5", "x16/a6", "x17/a7", "x18/s2", "x19/s3",  "x20/s4",
    "x21/s5",  "x22/s6", "x23/s7", "x24/s8", "x25/s9", "x26/s10", "x27/s11",
    "x28/t3",  "x29/t4", "x30/t5", "x31/t6"
};

const char * const riscv_int_regnamesh[] = {
    "x0h/zeroh", "x1h/rah",  "x2h/sph",   "x3h/gph",   "x4h/tph",  "x5h/t0h",
    "x6h/t1h",   "x7h/t2h",  "x8h/s0h",   "x9h/s1h",   "x10h/a0h", "x11h/a1h",
    "x12h/a2h",  "x13h/a3h", "x14h/a4h",  "x15h/a5h",  "x16h/a6h", "x17h/a7h",
    "x18h/s2h",  "x19h/s3h", "x20h/s4h",  "x21h/s5h",  "x22h/s6h", "x23h/s7h",
    "x24h/s8h",  "x25h/s9h", "x26h/s10h", "x27h/s11h", "x28h/t3h", "x29h/t4h",
    "x30h/t5h",  "x31h/t6h"
};

const char * const riscv_fpr_regnames[] = {
    "f0/ft0",   "f1/ft1",  "f2/ft2",   "f3/ft3",   "f4/ft4",  "f5/ft5",
    "f6/ft6",   "f7/ft7",  "f8/fs0",   "f9/fs1",   "f10/fa0", "f11/fa1",
    "f12/fa2",  "f13/fa3", "f14/fa4",  "f15/fa5",  "f16/fa6", "f17/fa7",
"f17/fa7", 282 "f18/fs2", "f19/fs3", "f20/fs4", "f21/fs5", "f22/fs6", "f23/fs7", 283 "f24/fs8", "f25/fs9", "f26/fs10", "f27/fs11", "f28/ft8", "f29/ft9", 284 "f30/ft10", "f31/ft11" 285 }; 286 287 const char * const riscv_rvv_regnames[] = { 288 "v0", "v1", "v2", "v3", "v4", "v5", "v6", 289 "v7", "v8", "v9", "v10", "v11", "v12", "v13", 290 "v14", "v15", "v16", "v17", "v18", "v19", "v20", 291 "v21", "v22", "v23", "v24", "v25", "v26", "v27", 292 "v28", "v29", "v30", "v31" 293 }; 294 295 static const char * const riscv_excp_names[] = { 296 "misaligned_fetch", 297 "fault_fetch", 298 "illegal_instruction", 299 "breakpoint", 300 "misaligned_load", 301 "fault_load", 302 "misaligned_store", 303 "fault_store", 304 "user_ecall", 305 "supervisor_ecall", 306 "hypervisor_ecall", 307 "machine_ecall", 308 "exec_page_fault", 309 "load_page_fault", 310 "reserved", 311 "store_page_fault", 312 "double_trap", 313 "reserved", 314 "reserved", 315 "reserved", 316 "guest_exec_page_fault", 317 "guest_load_page_fault", 318 "reserved", 319 "guest_store_page_fault", 320 }; 321 322 static const char * const riscv_intr_names[] = { 323 "u_software", 324 "s_software", 325 "vs_software", 326 "m_software", 327 "u_timer", 328 "s_timer", 329 "vs_timer", 330 "m_timer", 331 "u_external", 332 "s_external", 333 "vs_external", 334 "m_external", 335 "reserved", 336 "reserved", 337 "reserved", 338 "reserved" 339 }; 340 341 const char *riscv_cpu_get_trap_name(target_ulong cause, bool async) 342 { 343 if (async) { 344 return (cause < ARRAY_SIZE(riscv_intr_names)) ? 345 riscv_intr_names[cause] : "(unknown)"; 346 } else { 347 return (cause < ARRAY_SIZE(riscv_excp_names)) ? 348 riscv_excp_names[cause] : "(unknown)"; 349 } 350 } 351 352 void riscv_cpu_set_misa_ext(CPURISCVState *env, uint32_t ext) 353 { 354 env->misa_ext_mask = env->misa_ext = ext; 355 } 356 357 int riscv_cpu_max_xlen(RISCVCPUClass *mcc) 358 { 359 return 16 << mcc->misa_mxl_max; 360 } 361 362 #ifndef CONFIG_USER_ONLY 363 static uint8_t satp_mode_from_str(const char *satp_mode_str) 364 { 365 if (!strncmp(satp_mode_str, "mbare", 5)) { 366 return VM_1_10_MBARE; 367 } 368 369 if (!strncmp(satp_mode_str, "sv32", 4)) { 370 return VM_1_10_SV32; 371 } 372 373 if (!strncmp(satp_mode_str, "sv39", 4)) { 374 return VM_1_10_SV39; 375 } 376 377 if (!strncmp(satp_mode_str, "sv48", 4)) { 378 return VM_1_10_SV48; 379 } 380 381 if (!strncmp(satp_mode_str, "sv57", 4)) { 382 return VM_1_10_SV57; 383 } 384 385 if (!strncmp(satp_mode_str, "sv64", 4)) { 386 return VM_1_10_SV64; 387 } 388 389 g_assert_not_reached(); 390 } 391 392 static uint8_t satp_mode_max_from_map(uint32_t map) 393 { 394 /* 395 * 'map = 0' will make us return (31 - 32), which C will 396 * happily overflow to UINT_MAX. There's no good result to 397 * return if 'map = 0' (e.g. returning 0 will be ambiguous 398 * with the result for 'map = 1'). 399 * 400 * Assert out if map = 0. Callers will have to deal with 401 * it outside of this function. 
     */
    g_assert(map > 0);

    /* map here has at least one bit set, so no problem with clz */
    return 31 - __builtin_clz(map);
}

const char *satp_mode_str(uint8_t satp_mode, bool is_32_bit)
{
    if (is_32_bit) {
        switch (satp_mode) {
        case VM_1_10_SV32:
            return "sv32";
        case VM_1_10_MBARE:
            return "none";
        }
    } else {
        switch (satp_mode) {
        case VM_1_10_SV64:
            return "sv64";
        case VM_1_10_SV57:
            return "sv57";
        case VM_1_10_SV48:
            return "sv48";
        case VM_1_10_SV39:
            return "sv39";
        case VM_1_10_MBARE:
            return "none";
        }
    }

    g_assert_not_reached();
}

static void set_satp_mode_max_supported(RISCVCPU *cpu,
                                        uint8_t satp_mode)
{
    bool rv32 = riscv_cpu_mxl(&cpu->env) == MXL_RV32;
    const bool *valid_vm = rv32 ? valid_vm_1_10_32 : valid_vm_1_10_64;

    for (int i = 0; i <= satp_mode; ++i) {
        if (valid_vm[i]) {
            cpu->cfg.satp_mode.supported |= (1 << i);
        }
    }

    assert(cpu->cfg.satp_mode.supported & (1 << satp_mode));
    cpu->cfg.max_satp_mode = satp_mode;
}

/* Set the satp mode to the max supported */
static void set_satp_mode_default_map(RISCVCPU *cpu)
{
    /*
     * Bare CPUs do not default to the max available.
     * Users must set a valid satp_mode in the command
     * line. Otherwise, leave the existing max_satp_mode
     * in place.
     */
    if (object_dynamic_cast(OBJECT(cpu), TYPE_RISCV_BARE_CPU) != NULL) {
        warn_report("No satp mode set. Defaulting to 'bare'");
        cpu->cfg.max_satp_mode = VM_1_10_MBARE;
    }
}
#endif

static void riscv_max_cpu_init(Object *obj)
{
    RISCVCPU *cpu = RISCV_CPU(obj);
    CPURISCVState *env = &cpu->env;

    cpu->cfg.mmu = true;
    cpu->cfg.pmp = true;

    env->priv_ver = PRIV_VERSION_LATEST;
#ifndef CONFIG_USER_ONLY
    set_satp_mode_max_supported(RISCV_CPU(obj),
        riscv_cpu_mxl(&RISCV_CPU(obj)->env) == MXL_RV32 ?
        VM_1_10_SV32 : VM_1_10_SV57);
#endif
}

#if defined(TARGET_RISCV64)
static void rv64_base_cpu_init(Object *obj)
{
    RISCVCPU *cpu = RISCV_CPU(obj);
    CPURISCVState *env = &cpu->env;

    cpu->cfg.mmu = true;
    cpu->cfg.pmp = true;

    /* Set latest version of privileged specification */
    env->priv_ver = PRIV_VERSION_LATEST;
#ifndef CONFIG_USER_ONLY
    set_satp_mode_max_supported(RISCV_CPU(obj), VM_1_10_SV57);
#endif
}

static void rv64_sifive_u_cpu_init(Object *obj)
{
    RISCVCPU *cpu = RISCV_CPU(obj);
    CPURISCVState *env = &cpu->env;
    riscv_cpu_set_misa_ext(env, RVI | RVM | RVA | RVF | RVD | RVC | RVS | RVU);
    env->priv_ver = PRIV_VERSION_1_10_0;
#ifndef CONFIG_USER_ONLY
    set_satp_mode_max_supported(RISCV_CPU(obj), VM_1_10_SV39);
#endif

    /* inherited from parent obj via riscv_cpu_init() */
    cpu->cfg.ext_zifencei = true;
    cpu->cfg.ext_zicsr = true;
    cpu->cfg.mmu = true;
    cpu->cfg.pmp = true;
}

static void rv64_sifive_e_cpu_init(Object *obj)
{
    CPURISCVState *env = &RISCV_CPU(obj)->env;
    RISCVCPU *cpu = RISCV_CPU(obj);

    riscv_cpu_set_misa_ext(env, RVI | RVM | RVA | RVC | RVU);
    env->priv_ver = PRIV_VERSION_1_10_0;
#ifndef CONFIG_USER_ONLY
    set_satp_mode_max_supported(cpu, VM_1_10_MBARE);
#endif

    /* inherited from parent obj via riscv_cpu_init() */
    cpu->cfg.ext_zifencei = true;
    cpu->cfg.ext_zicsr = true;
    cpu->cfg.pmp = true;
}

static void rv64_thead_c906_cpu_init(Object *obj)
{
    CPURISCVState *env = &RISCV_CPU(obj)->env;
    RISCVCPU *cpu = RISCV_CPU(obj);

    riscv_cpu_set_misa_ext(env, RVG | RVC | RVS | RVU);
    env->priv_ver = PRIV_VERSION_1_11_0;

    cpu->cfg.ext_zfa = true;
    cpu->cfg.ext_zfh = true;
    cpu->cfg.mmu = true;
    cpu->cfg.ext_xtheadba = true;
    cpu->cfg.ext_xtheadbb = true;
    cpu->cfg.ext_xtheadbs = true;
    cpu->cfg.ext_xtheadcmo = true;
    cpu->cfg.ext_xtheadcondmov = true;
    cpu->cfg.ext_xtheadfmemidx = true;
    cpu->cfg.ext_xtheadmac = true;
    cpu->cfg.ext_xtheadmemidx = true;
    cpu->cfg.ext_xtheadmempair = true;
    cpu->cfg.ext_xtheadsync = true;

    cpu->cfg.mvendorid = THEAD_VENDOR_ID;
#ifndef CONFIG_USER_ONLY
    set_satp_mode_max_supported(cpu, VM_1_10_SV39);
    th_register_custom_csrs(cpu);
#endif

    /* inherited from parent obj via riscv_cpu_init() */
    cpu->cfg.pmp = true;
}

static void rv64_veyron_v1_cpu_init(Object *obj)
{
    CPURISCVState *env = &RISCV_CPU(obj)->env;
    RISCVCPU *cpu = RISCV_CPU(obj);

    riscv_cpu_set_misa_ext(env, RVG | RVC | RVS | RVU | RVH);
    env->priv_ver = PRIV_VERSION_1_12_0;

    /* Enable ISA extensions */
    cpu->cfg.mmu = true;
    cpu->cfg.ext_zifencei = true;
    cpu->cfg.ext_zicsr = true;
    cpu->cfg.pmp = true;
    cpu->cfg.ext_zicbom = true;
    cpu->cfg.cbom_blocksize = 64;
    cpu->cfg.cboz_blocksize = 64;
    cpu->cfg.ext_zicboz = true;
    cpu->cfg.ext_smaia = true;
    cpu->cfg.ext_ssaia = true;
    cpu->cfg.ext_sscofpmf = true;
    cpu->cfg.ext_sstc = true;
    cpu->cfg.ext_svinval = true;
    cpu->cfg.ext_svnapot = true;
    cpu->cfg.ext_svpbmt = true;
    cpu->cfg.ext_smstateen = true;
    cpu->cfg.ext_zba = true;
    cpu->cfg.ext_zbb = true;
    cpu->cfg.ext_zbc = true;
    cpu->cfg.ext_zbs = true;
    cpu->cfg.ext_XVentanaCondOps = true;

    cpu->cfg.mvendorid = VEYRON_V1_MVENDORID;
    cpu->cfg.marchid = VEYRON_V1_MARCHID;
    cpu->cfg.mimpid = VEYRON_V1_MIMPID;

#ifndef CONFIG_USER_ONLY
    set_satp_mode_max_supported(cpu, VM_1_10_SV48);
#endif
}

/* Tenstorrent Ascalon */
static void rv64_tt_ascalon_cpu_init(Object *obj)
{
    CPURISCVState *env = &RISCV_CPU(obj)->env;
    RISCVCPU *cpu = RISCV_CPU(obj);

    riscv_cpu_set_misa_ext(env, RVG | RVC | RVS | RVU | RVH | RVV);
    env->priv_ver = PRIV_VERSION_1_13_0;

    /* Enable ISA extensions */
    cpu->cfg.mmu = true;
    cpu->cfg.vlenb = 256 >> 3;
    cpu->cfg.elen = 64;
    cpu->env.vext_ver = VEXT_VERSION_1_00_0;
    cpu->cfg.rvv_ma_all_1s = true;
    cpu->cfg.rvv_ta_all_1s = true;
    cpu->cfg.misa_w = true;
    cpu->cfg.pmp = true;
    cpu->cfg.cbom_blocksize = 64;
    cpu->cfg.cbop_blocksize = 64;
    cpu->cfg.cboz_blocksize = 64;
    cpu->cfg.ext_zic64b = true;
    cpu->cfg.ext_zicbom = true;
    cpu->cfg.ext_zicbop = true;
    cpu->cfg.ext_zicboz = true;
    cpu->cfg.ext_zicntr = true;
    cpu->cfg.ext_zicond = true;
    cpu->cfg.ext_zicsr = true;
    cpu->cfg.ext_zifencei = true;
    cpu->cfg.ext_zihintntl = true;
    cpu->cfg.ext_zihintpause = true;
    cpu->cfg.ext_zihpm = true;
    cpu->cfg.ext_zimop = true;
    cpu->cfg.ext_zawrs = true;
    cpu->cfg.ext_zfa = true;
    cpu->cfg.ext_zfbfmin = true;
    cpu->cfg.ext_zfh = true;
    cpu->cfg.ext_zfhmin = true;
    cpu->cfg.ext_zcb = true;
    cpu->cfg.ext_zcmop = true;
    cpu->cfg.ext_zba = true;
    cpu->cfg.ext_zbb = true;
    cpu->cfg.ext_zbs = true;
    cpu->cfg.ext_zkt = true;
    cpu->cfg.ext_zvbb = true;
    cpu->cfg.ext_zvbc = true;
    cpu->cfg.ext_zvfbfmin = true;
    cpu->cfg.ext_zvfbfwma = true;
    cpu->cfg.ext_zvfh = true;
    cpu->cfg.ext_zvfhmin = true;
    cpu->cfg.ext_zvkng = true;
    cpu->cfg.ext_smaia = true;
    cpu->cfg.ext_smstateen = true;
    cpu->cfg.ext_ssaia = true;
    cpu->cfg.ext_sscofpmf = true;
    cpu->cfg.ext_sstc = true;
    cpu->cfg.ext_svade = true;
    cpu->cfg.ext_svinval = true;
    cpu->cfg.ext_svnapot = true;
    cpu->cfg.ext_svpbmt = true;

#ifndef CONFIG_USER_ONLY
    set_satp_mode_max_supported(cpu, VM_1_10_SV57);
#endif
}

static void rv64_xiangshan_nanhu_cpu_init(Object *obj)
{
    CPURISCVState *env = &RISCV_CPU(obj)->env;
    RISCVCPU *cpu = RISCV_CPU(obj);

    riscv_cpu_set_misa_ext(env, RVG | RVC | RVB | RVS | RVU);
    env->priv_ver = PRIV_VERSION_1_12_0;

    /* Enable ISA extensions */
    cpu->cfg.ext_zbc = true;
    cpu->cfg.ext_zbkb = true;
    cpu->cfg.ext_zbkc = true;
    cpu->cfg.ext_zbkx = true;
    cpu->cfg.ext_zknd = true;
    cpu->cfg.ext_zkne = true;
    cpu->cfg.ext_zknh = true;
    cpu->cfg.ext_zksed = true;
    cpu->cfg.ext_zksh = true;
    cpu->cfg.ext_svinval = true;

    cpu->cfg.mmu = true;
    cpu->cfg.pmp = true;

#ifndef CONFIG_USER_ONLY
    set_satp_mode_max_supported(cpu, VM_1_10_SV39);
#endif
}

#if defined(CONFIG_TCG) && !defined(CONFIG_USER_ONLY)
static void rv128_base_cpu_init(Object *obj)
{
    RISCVCPU *cpu = RISCV_CPU(obj);
    CPURISCVState *env = &cpu->env;

    cpu->cfg.mmu = true;
    cpu->cfg.pmp = true;

    /* Set latest version of privileged specification */
    env->priv_ver = PRIV_VERSION_LATEST;
    set_satp_mode_max_supported(RISCV_CPU(obj), VM_1_10_SV57);
}
#endif /* CONFIG_TCG && !CONFIG_USER_ONLY */

static void rv64i_bare_cpu_init(Object *obj)
{
    CPURISCVState *env = &RISCV_CPU(obj)->env;
    riscv_cpu_set_misa_ext(env, RVI);
}

static void rv64e_bare_cpu_init(Object *obj)
{
    CPURISCVState *env = &RISCV_CPU(obj)->env;
    riscv_cpu_set_misa_ext(env, RVE);
}

#endif /* !TARGET_RISCV64 */

#if defined(TARGET_RISCV32) || \
    (defined(TARGET_RISCV64) && !defined(CONFIG_USER_ONLY))

static void rv32_base_cpu_init(Object *obj)
{
    RISCVCPU *cpu = RISCV_CPU(obj);
    CPURISCVState *env = &cpu->env;

    cpu->cfg.mmu = true;
    cpu->cfg.pmp = true;

    /* Set latest version of privileged specification */
    env->priv_ver = PRIV_VERSION_LATEST;
#ifndef CONFIG_USER_ONLY
    set_satp_mode_max_supported(RISCV_CPU(obj), VM_1_10_SV32);
#endif
}

static void rv32_sifive_u_cpu_init(Object *obj)
{
    RISCVCPU *cpu = RISCV_CPU(obj);
    CPURISCVState *env = &cpu->env;
    riscv_cpu_set_misa_ext(env, RVI | RVM | RVA | RVF | RVD | RVC | RVS | RVU);
    env->priv_ver = PRIV_VERSION_1_10_0;
#ifndef CONFIG_USER_ONLY
    set_satp_mode_max_supported(RISCV_CPU(obj), VM_1_10_SV32);
#endif

    /* inherited from parent obj via riscv_cpu_init() */
    cpu->cfg.ext_zifencei = true;
    cpu->cfg.ext_zicsr = true;
    cpu->cfg.mmu = true;
    cpu->cfg.pmp = true;
}

static void rv32_sifive_e_cpu_init(Object *obj)
{
    CPURISCVState *env = &RISCV_CPU(obj)->env;
    RISCVCPU *cpu = RISCV_CPU(obj);

    riscv_cpu_set_misa_ext(env, RVI | RVM | RVA | RVC | RVU);
    env->priv_ver = PRIV_VERSION_1_10_0;
#ifndef CONFIG_USER_ONLY
    set_satp_mode_max_supported(cpu, VM_1_10_MBARE);
#endif

    /* inherited from parent obj via riscv_cpu_init() */
    cpu->cfg.ext_zifencei = true;
    cpu->cfg.ext_zicsr = true;
    cpu->cfg.pmp = true;
}

static void rv32_ibex_cpu_init(Object *obj)
{
    CPURISCVState *env = &RISCV_CPU(obj)->env;
    RISCVCPU *cpu = RISCV_CPU(obj);

    riscv_cpu_set_misa_ext(env, RVI | RVM | RVC | RVU);
    env->priv_ver = PRIV_VERSION_1_12_0;
#ifndef CONFIG_USER_ONLY
    set_satp_mode_max_supported(cpu, VM_1_10_MBARE);
#endif
    /* inherited from parent obj via riscv_cpu_init() */
    cpu->cfg.ext_zifencei = true;
    cpu->cfg.ext_zicsr = true;
    cpu->cfg.pmp = true;
    cpu->cfg.ext_smepmp = true;

    cpu->cfg.ext_zba = true;
    cpu->cfg.ext_zbb = true;
    cpu->cfg.ext_zbc = true;
    cpu->cfg.ext_zbs = true;
}

static void rv32_imafcu_nommu_cpu_init(Object *obj)
{
    CPURISCVState *env = &RISCV_CPU(obj)->env;
    RISCVCPU *cpu = RISCV_CPU(obj);

    riscv_cpu_set_misa_ext(env, RVI | RVM | RVA | RVF | RVC | RVU);
    env->priv_ver = PRIV_VERSION_1_10_0;
#ifndef CONFIG_USER_ONLY
    set_satp_mode_max_supported(cpu, VM_1_10_MBARE);
#endif

    /* inherited from parent obj via riscv_cpu_init() */
    cpu->cfg.ext_zifencei = true;
    cpu->cfg.ext_zicsr = true;
    cpu->cfg.pmp = true;
}

static void rv32i_bare_cpu_init(Object *obj)
{
    CPURISCVState *env = &RISCV_CPU(obj)->env;
    riscv_cpu_set_misa_ext(env, RVI);
}

static void rv32e_bare_cpu_init(Object *obj)
{
    CPURISCVState *env = &RISCV_CPU(obj)->env;
    riscv_cpu_set_misa_ext(env, RVE);
}
#endif

static ObjectClass *riscv_cpu_class_by_name(const char *cpu_model)
{
    ObjectClass *oc;
    char *typename;
    char **cpuname;

    cpuname = g_strsplit(cpu_model, ",", 1);
    typename = g_strdup_printf(RISCV_CPU_TYPE_NAME("%s"), cpuname[0]);
    oc = object_class_by_name(typename);
    g_strfreev(cpuname);
    g_free(typename);

    return oc;
}

char *riscv_cpu_get_name(RISCVCPU *cpu)
{
    RISCVCPUClass *rcc = RISCV_CPU_GET_CLASS(cpu);
    const char *typename = object_class_get_name(OBJECT_CLASS(rcc));

    g_assert(g_str_has_suffix(typename, RISCV_CPU_TYPE_SUFFIX));

    return cpu_model_from_type(typename);
}

static void riscv_cpu_dump_state(CPUState *cs, FILE *f, int flags)
{
    RISCVCPU *cpu = RISCV_CPU(cs);
    CPURISCVState *env = &cpu->env;
    int i, j;
    uint8_t *p;

#if !defined(CONFIG_USER_ONLY)
    if (riscv_has_ext(env, RVH)) {
        qemu_fprintf(f, " %s %d\n", "V = ", env->virt_enabled);
    }
#endif
    qemu_fprintf(f, " %s " TARGET_FMT_lx "\n", "pc ", env->pc);
#ifndef CONFIG_USER_ONLY
    {
        static const int dump_csrs[] = {
            CSR_MHARTID,
            CSR_MSTATUS,
            CSR_MSTATUSH,
            /*
             * CSR_SSTATUS is intentionally omitted here as its value
             * can be figured out by looking at CSR_MSTATUS
             */
            CSR_HSTATUS,
            CSR_VSSTATUS,
            CSR_MIP,
            CSR_MIE,
            CSR_MIDELEG,
            CSR_HIDELEG,
            CSR_MEDELEG,
            CSR_HEDELEG,
            CSR_MTVEC,
            CSR_STVEC,
            CSR_VSTVEC,
            CSR_MEPC,
            CSR_SEPC,
            CSR_VSEPC,
            CSR_MCAUSE,
            CSR_SCAUSE,
            CSR_VSCAUSE,
            CSR_MTVAL,
            CSR_STVAL,
            CSR_HTVAL,
            CSR_MTVAL2,
            CSR_MSCRATCH,
            CSR_SSCRATCH,
            CSR_SATP,
        };

        for (i = 0; i < ARRAY_SIZE(dump_csrs); ++i) {
            int csrno = dump_csrs[i];
            target_ulong val = 0;
            RISCVException res = riscv_csrrw_debug(env, csrno, &val, 0, 0);

            /*
             * Rely on the smode, hmode, etc, predicates within csr.c
             * to do the filtering of the registers that are present.
             */
            if (res == RISCV_EXCP_NONE) {
                qemu_fprintf(f, " %-8s " TARGET_FMT_lx "\n",
                             csr_ops[csrno].name, val);
            }
        }
    }
#endif

    for (i = 0; i < 32; i++) {
        qemu_fprintf(f, " %-8s " TARGET_FMT_lx,
                     riscv_int_regnames[i], env->gpr[i]);
        if ((i & 3) == 3) {
            qemu_fprintf(f, "\n");
        }
    }
    if (flags & CPU_DUMP_FPU) {
        target_ulong val = 0;
        RISCVException res = riscv_csrrw_debug(env, CSR_FCSR, &val, 0, 0);
        if (res == RISCV_EXCP_NONE) {
            qemu_fprintf(f, " %-8s " TARGET_FMT_lx "\n",
                         csr_ops[CSR_FCSR].name, val);
        }
        for (i = 0; i < 32; i++) {
            qemu_fprintf(f, " %-8s %016" PRIx64,
                         riscv_fpr_regnames[i], env->fpr[i]);
            if ((i & 3) == 3) {
                qemu_fprintf(f, "\n");
            }
        }
    }
    if (riscv_has_ext(env, RVV) && (flags & CPU_DUMP_VPU)) {
        static const int dump_rvv_csrs[] = {
            CSR_VSTART,
            CSR_VXSAT,
            CSR_VXRM,
            CSR_VCSR,
            CSR_VL,
            CSR_VTYPE,
            CSR_VLENB,
        };
        for (i = 0; i < ARRAY_SIZE(dump_rvv_csrs); ++i) {
            int csrno = dump_rvv_csrs[i];
            target_ulong val = 0;
            RISCVException res = riscv_csrrw_debug(env, csrno, &val, 0, 0);

            /*
             * Rely on the smode, hmode, etc, predicates within csr.c
             * to do the filtering of the registers that are present.
             */
            if (res == RISCV_EXCP_NONE) {
                qemu_fprintf(f, " %-8s " TARGET_FMT_lx "\n",
                             csr_ops[csrno].name, val);
            }
        }
        uint16_t vlenb = cpu->cfg.vlenb;

        for (i = 0; i < 32; i++) {
            qemu_fprintf(f, " %-8s ", riscv_rvv_regnames[i]);
            p = (uint8_t *)env->vreg;
            for (j = vlenb - 1 ; j >= 0; j--) {
                qemu_fprintf(f, "%02x", *(p + i * vlenb + BYTE(j)));
            }
            qemu_fprintf(f, "\n");
        }
    }
}

static void riscv_cpu_set_pc(CPUState *cs, vaddr value)
{
    RISCVCPU *cpu = RISCV_CPU(cs);
    CPURISCVState *env = &cpu->env;

    if (env->xl == MXL_RV32) {
        env->pc = (int32_t)value;
    } else {
        env->pc = value;
    }
}

static vaddr riscv_cpu_get_pc(CPUState *cs)
{
    RISCVCPU *cpu = RISCV_CPU(cs);
    CPURISCVState *env = &cpu->env;

    /* Match cpu_get_tb_cpu_state. */
    if (env->xl == MXL_RV32) {
        return env->pc & UINT32_MAX;
    }
    return env->pc;
}

#ifndef CONFIG_USER_ONLY
bool riscv_cpu_has_work(CPUState *cs)
{
    RISCVCPU *cpu = RISCV_CPU(cs);
    CPURISCVState *env = &cpu->env;
    /*
     * Definition of the WFI instruction requires it to ignore the privilege
     * mode and delegation registers, but respect individual enables
     */
    return riscv_cpu_all_pending(env) != 0 ||
        riscv_cpu_sirq_pending(env) != RISCV_EXCP_NONE ||
        riscv_cpu_vsirq_pending(env) != RISCV_EXCP_NONE;
}
#endif /* !CONFIG_USER_ONLY */

static void riscv_cpu_reset_hold(Object *obj, ResetType type)
{
#ifndef CONFIG_USER_ONLY
    uint8_t iprio;
    int i, irq, rdzero;
#endif
    CPUState *cs = CPU(obj);
    RISCVCPU *cpu = RISCV_CPU(cs);
    RISCVCPUClass *mcc = RISCV_CPU_GET_CLASS(obj);
    CPURISCVState *env = &cpu->env;

    if (mcc->parent_phases.hold) {
        mcc->parent_phases.hold(obj, type);
    }
#ifndef CONFIG_USER_ONLY
    env->misa_mxl = mcc->misa_mxl_max;
    env->priv = PRV_M;
    env->mstatus &= ~(MSTATUS_MIE | MSTATUS_MPRV);
    if (env->misa_mxl > MXL_RV32) {
        /*
         * The reset status of SXL/UXL is undefined, but mstatus is WARL
         * and we must ensure that the value after init is valid for read.
         */
        env->mstatus = set_field(env->mstatus, MSTATUS64_SXL, env->misa_mxl);
        env->mstatus = set_field(env->mstatus, MSTATUS64_UXL, env->misa_mxl);
        if (riscv_has_ext(env, RVH)) {
            env->vsstatus = set_field(env->vsstatus,
                                      MSTATUS64_SXL, env->misa_mxl);
            env->vsstatus = set_field(env->vsstatus,
                                      MSTATUS64_UXL, env->misa_mxl);
            env->mstatus_hs = set_field(env->mstatus_hs,
                                        MSTATUS64_SXL, env->misa_mxl);
            env->mstatus_hs = set_field(env->mstatus_hs,
                                        MSTATUS64_UXL, env->misa_mxl);
        }
        if (riscv_cpu_cfg(env)->ext_smdbltrp) {
            env->mstatus = set_field(env->mstatus, MSTATUS_MDT, 1);
        }
    }
    env->mcause = 0;
    env->miclaim = MIP_SGEIP;
    env->pc = env->resetvec;
    env->bins = 0;
    env->two_stage_lookup = false;

    env->menvcfg = (cpu->cfg.ext_svpbmt ? MENVCFG_PBMTE : 0) |
                   (!cpu->cfg.ext_svade && cpu->cfg.ext_svadu ?
                    MENVCFG_ADUE : 0);
    env->henvcfg = 0;

    /* Initialized default priorities of local interrupts. */
    for (i = 0; i < ARRAY_SIZE(env->miprio); i++) {
        iprio = riscv_cpu_default_priority(i);
        env->miprio[i] = (i == IRQ_M_EXT) ? 0 : iprio;
        env->siprio[i] = (i == IRQ_S_EXT) ? 0 : iprio;
        env->hviprio[i] = 0;
    }
    i = 0;
    while (!riscv_cpu_hviprio_index2irq(i, &irq, &rdzero)) {
        if (!rdzero) {
            env->hviprio[irq] = env->miprio[irq];
        }
        i++;
    }

    /*
     * Bits 10, 6, 2 and 12 of mideleg are read only 1 when the Hypervisor
     * extension is enabled.
     */
    if (riscv_has_ext(env, RVH)) {
        env->mideleg |= HS_MODE_INTERRUPTS;
    }

    /*
     * Clear mseccfg and unlock all the PMP entries upon reset.
     * This is allowed as per the priv and smepmp specifications
     * and is needed to clear stale entries across reboots.
     */
    if (riscv_cpu_cfg(env)->ext_smepmp) {
        env->mseccfg = 0;
    }

    pmp_unlock_entries(env);
#else
    env->priv = PRV_U;
    env->senvcfg = 0;
    env->menvcfg = 0;
#endif

    /* on reset elp is clear */
    env->elp = false;
    /* on reset ssp is set to 0 */
    env->ssp = 0;

    env->xl = riscv_cpu_mxl(env);
    cs->exception_index = RISCV_EXCP_NONE;
    env->load_res = -1;
    set_default_nan_mode(1, &env->fp_status);
    /* Default NaN value: sign bit clear, frac msb set */
    set_float_default_nan_pattern(0b01000000, &env->fp_status);
    env->vill = true;

#ifndef CONFIG_USER_ONLY
    if (cpu->cfg.debug) {
        riscv_trigger_reset_hold(env);
    }

    if (cpu->cfg.ext_smrnmi) {
        env->rnmip = 0;
        env->mnstatus = set_field(env->mnstatus, MNSTATUS_NMIE, false);
    }

    if (kvm_enabled()) {
        kvm_riscv_reset_vcpu(cpu);
    }
#endif
}

static void riscv_cpu_disas_set_info(CPUState *s, disassemble_info *info)
{
    RISCVCPU *cpu = RISCV_CPU(s);
    CPURISCVState *env = &cpu->env;
    info->target_info = &cpu->cfg;

    /*
     * A couple of bits in MSTATUS set the endianness:
     *  - MSTATUS_UBE (User-mode),
     *  - MSTATUS_SBE (Supervisor-mode),
     *  - MSTATUS_MBE (Machine-mode)
     * but we don't implement that yet.
     */
    info->endian = BFD_ENDIAN_LITTLE;

    switch (env->xl) {
    case MXL_RV32:
        info->print_insn = print_insn_riscv32;
        break;
    case MXL_RV64:
        info->print_insn = print_insn_riscv64;
        break;
    case MXL_RV128:
        info->print_insn = print_insn_riscv128;
        break;
    default:
        g_assert_not_reached();
    }
}

#ifndef CONFIG_USER_ONLY
static void riscv_cpu_satp_mode_finalize(RISCVCPU *cpu, Error **errp)
{
    bool rv32 = riscv_cpu_is_32bit(cpu);
    uint8_t satp_mode_map_max;

    if (cpu->cfg.max_satp_mode == -1) {
        /* The CPU wants the hypervisor to decide which satp mode to allow */
        return;
    }

    if (cpu->cfg.satp_mode.map == 0) {
        if (cpu->cfg.satp_mode.init == 0) {
            /* If unset by the user, we fallback to the default satp mode. */
            set_satp_mode_default_map(cpu);
        } else {
            /*
             * Find the lowest level that was disabled and then enable the
             * first valid level below which can be found in
             * valid_vm_1_10_32/64.
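             *
             * For example, if the only user action was to disable sv48 on
             * an sv57-capable CPU, the lowest disabled-but-supported level
             * is sv48, and the highest supported level below it (sv39)
             * becomes the new max_satp_mode.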
             */
            for (int i = 1; i < 16; ++i) {
                if ((cpu->cfg.satp_mode.init & (1 << i)) &&
                    (cpu->cfg.satp_mode.supported & (1 << i))) {
                    for (int j = i - 1; j >= 0; --j) {
                        if (cpu->cfg.satp_mode.supported & (1 << j)) {
                            cpu->cfg.max_satp_mode = j;
                            return;
                        }
                    }
                }
            }
        }
        return;
    }

    satp_mode_map_max = satp_mode_max_from_map(cpu->cfg.satp_mode.map);

    /* Make sure the user asked for a supported configuration (HW and qemu) */
    if (satp_mode_map_max > cpu->cfg.max_satp_mode) {
        error_setg(errp, "satp_mode %s is higher than hw max capability %s",
                   satp_mode_str(satp_mode_map_max, rv32),
                   satp_mode_str(cpu->cfg.max_satp_mode, rv32));
        return;
    }

    /*
     * Make sure the user did not ask for an invalid configuration as per
     * the specification.
     */
    if (!rv32) {
        for (int i = satp_mode_map_max - 1; i >= 0; --i) {
            if (!(cpu->cfg.satp_mode.map & (1 << i)) &&
                (cpu->cfg.satp_mode.init & (1 << i)) &&
                (cpu->cfg.satp_mode.supported & (1 << i))) {
                error_setg(errp, "cannot disable %s satp mode if %s "
                           "is enabled", satp_mode_str(i, false),
                           satp_mode_str(satp_mode_map_max, false));
                return;
            }
        }
    }

    cpu->cfg.max_satp_mode = satp_mode_map_max;
}
#endif

void riscv_cpu_finalize_features(RISCVCPU *cpu, Error **errp)
{
    Error *local_err = NULL;

#ifndef CONFIG_USER_ONLY
    riscv_cpu_satp_mode_finalize(cpu, &local_err);
    if (local_err != NULL) {
        error_propagate(errp, local_err);
        return;
    }
#endif

    if (tcg_enabled()) {
        riscv_tcg_cpu_finalize_features(cpu, &local_err);
        if (local_err != NULL) {
            error_propagate(errp, local_err);
            return;
        }
        riscv_tcg_cpu_finalize_dynamic_decoder(cpu);
    } else if (kvm_enabled()) {
        riscv_kvm_cpu_finalize_features(cpu, &local_err);
        if (local_err != NULL) {
            error_propagate(errp, local_err);
            return;
        }
    }
}

static void riscv_cpu_realize(DeviceState *dev, Error **errp)
{
    CPUState *cs = CPU(dev);
    RISCVCPU *cpu = RISCV_CPU(dev);
    RISCVCPUClass *mcc = RISCV_CPU_GET_CLASS(dev);
    Error *local_err = NULL;

    cpu_exec_realizefn(cs, &local_err);
    if (local_err != NULL) {
        error_propagate(errp, local_err);
        return;
    }

    riscv_cpu_finalize_features(cpu, &local_err);
    if (local_err != NULL) {
        error_propagate(errp, local_err);
        return;
    }

    riscv_cpu_register_gdb_regs_for_features(cs);

#ifndef CONFIG_USER_ONLY
    if (cpu->cfg.debug) {
        riscv_trigger_realize(&cpu->env);
    }
#endif

    qemu_init_vcpu(cs);
    cpu_reset(cs);

    mcc->parent_realize(dev, errp);
}

bool riscv_cpu_accelerator_compatible(RISCVCPU *cpu)
{
    if (tcg_enabled()) {
        return riscv_cpu_tcg_compatible(cpu);
    }

    return true;
}

#ifndef CONFIG_USER_ONLY
static void cpu_riscv_get_satp(Object *obj, Visitor *v, const char *name,
                               void *opaque, Error **errp)
{
    RISCVSATPMap *satp_map = opaque;
    uint8_t satp = satp_mode_from_str(name);
    bool value;

    value = satp_map->map & (1 << satp);

    visit_type_bool(v, name, &value, errp);
}

static void cpu_riscv_set_satp(Object *obj, Visitor *v, const char *name,
                               void *opaque, Error **errp)
{
    RISCVSATPMap *satp_map = opaque;
    uint8_t satp = satp_mode_from_str(name);
    bool value;

    if (!visit_type_bool(v, name, &value, errp)) {
        return;
    }

    satp_map->map = deposit32(satp_map->map, satp, 1, value);
    satp_map->init |= 1 << satp;
}

void riscv_add_satp_mode_properties(Object *obj)
{
    RISCVCPU *cpu = RISCV_CPU(obj);

    if (cpu->env.misa_mxl == MXL_RV32) {
        object_property_add(obj, "sv32", "bool", cpu_riscv_get_satp,
                            cpu_riscv_set_satp, NULL, &cpu->cfg.satp_mode);
    } else {
        object_property_add(obj, "sv39", "bool", cpu_riscv_get_satp,
                            cpu_riscv_set_satp, NULL, &cpu->cfg.satp_mode);
        object_property_add(obj, "sv48", "bool", cpu_riscv_get_satp,
                            cpu_riscv_set_satp, NULL, &cpu->cfg.satp_mode);
        object_property_add(obj, "sv57", "bool", cpu_riscv_get_satp,
                            cpu_riscv_set_satp, NULL, &cpu->cfg.satp_mode);
        object_property_add(obj, "sv64", "bool", cpu_riscv_get_satp,
                            cpu_riscv_set_satp, NULL, &cpu->cfg.satp_mode);
    }
}

static void riscv_cpu_set_irq(void *opaque, int irq, int level)
{
    RISCVCPU *cpu = RISCV_CPU(opaque);
    CPURISCVState *env = &cpu->env;

    if (irq < IRQ_LOCAL_MAX) {
        switch (irq) {
        case IRQ_U_SOFT:
        case IRQ_S_SOFT:
        case IRQ_VS_SOFT:
        case IRQ_M_SOFT:
        case IRQ_U_TIMER:
        case IRQ_S_TIMER:
        case IRQ_VS_TIMER:
        case IRQ_M_TIMER:
        case IRQ_U_EXT:
        case IRQ_VS_EXT:
        case IRQ_M_EXT:
            if (kvm_enabled()) {
                kvm_riscv_set_irq(cpu, irq, level);
            } else {
                riscv_cpu_update_mip(env, 1 << irq, BOOL_TO_MASK(level));
            }
            break;
        case IRQ_S_EXT:
            if (kvm_enabled()) {
                kvm_riscv_set_irq(cpu, irq, level);
            } else {
                env->external_seip = level;
                riscv_cpu_update_mip(env, 1 << irq,
                                     BOOL_TO_MASK(level | env->software_seip));
            }
            break;
        default:
            g_assert_not_reached();
        }
    } else if (irq < (IRQ_LOCAL_MAX + IRQ_LOCAL_GUEST_MAX)) {
        /* Require H-extension for handling guest local interrupts */
        if (!riscv_has_ext(env, RVH)) {
            g_assert_not_reached();
        }

        /* Compute bit position in HGEIP CSR */
        irq = irq - IRQ_LOCAL_MAX + 1;
        if (env->geilen < irq) {
            g_assert_not_reached();
        }

        /* Update HGEIP CSR */
        env->hgeip &= ~((target_ulong)1 << irq);
        if (level) {
            env->hgeip |= (target_ulong)1 << irq;
        }

        /* Update mip.SGEIP bit */
        riscv_cpu_update_mip(env, MIP_SGEIP,
                             BOOL_TO_MASK(!!(env->hgeie & env->hgeip)));
    } else {
        g_assert_not_reached();
    }
}

static void riscv_cpu_set_nmi(void *opaque, int irq, int level)
{
    riscv_cpu_set_rnmi(RISCV_CPU(opaque), irq, level);
}
#endif /* CONFIG_USER_ONLY */

static bool riscv_cpu_is_dynamic(Object *cpu_obj)
{
    return object_dynamic_cast(cpu_obj, TYPE_RISCV_DYNAMIC_CPU) != NULL;
}

static void riscv_cpu_post_init(Object *obj)
{
    accel_cpu_instance_init(CPU(obj));
}

static void riscv_cpu_init(Object *obj)
{
    RISCVCPUClass *mcc = RISCV_CPU_GET_CLASS(obj);
    RISCVCPU *cpu = RISCV_CPU(obj);
    CPURISCVState *env = &cpu->env;

    env->misa_mxl = mcc->misa_mxl_max;

#ifndef CONFIG_USER_ONLY
    qdev_init_gpio_in(DEVICE(obj), riscv_cpu_set_irq,
                      IRQ_LOCAL_MAX + IRQ_LOCAL_GUEST_MAX);
    qdev_init_gpio_in_named(DEVICE(cpu), riscv_cpu_set_nmi,
                            "riscv.cpu.rnmi", RNMI_MAX);
#endif /* CONFIG_USER_ONLY */

    general_user_opts = g_hash_table_new(g_str_hash, g_str_equal);

    /*
     * The timer and performance counters extensions were supported
     * in QEMU before they were added as discrete extensions in the
     * ISA. To keep compatibility we'll always default them to 'true'
     * for all CPUs. Each accelerator will decide what to do when
     * users disable them.
     */
    RISCV_CPU(obj)->cfg.ext_zicntr = true;
    RISCV_CPU(obj)->cfg.ext_zihpm = true;

    /* Default values for non-bool cpu properties */
    cpu->cfg.pmu_mask = MAKE_64BIT_MASK(3, 16);
    cpu->cfg.vlenb = 128 >> 3;
    cpu->cfg.elen = 64;
    cpu->cfg.cbom_blocksize = 64;
    cpu->cfg.cbop_blocksize = 64;
    cpu->cfg.cboz_blocksize = 64;
    cpu->env.vext_ver = VEXT_VERSION_1_00_0;
    cpu->cfg.max_satp_mode = -1;
}

static void riscv_bare_cpu_init(Object *obj)
{
    RISCVCPU *cpu = RISCV_CPU(obj);

    /*
     * Bare CPUs do not inherit the timer and performance
     * counters from the parent class (see riscv_cpu_init()
     * for info on why the parent enables them).
     *
     * Users have to explicitly enable these counters for
     * bare CPUs.
     */
    cpu->cfg.ext_zicntr = false;
    cpu->cfg.ext_zihpm = false;

    /* Set to QEMU's first supported priv version */
    cpu->env.priv_ver = PRIV_VERSION_1_10_0;

    /*
     * Support all available satp_mode settings. The default
     * value will be set to MBARE if the user doesn't set
     * satp_mode manually (see set_satp_mode_default_map()).
     */
#ifndef CONFIG_USER_ONLY
    set_satp_mode_max_supported(RISCV_CPU(obj),
        riscv_cpu_mxl(&RISCV_CPU(obj)->env) == MXL_RV32 ?
        VM_1_10_SV32 : VM_1_10_SV57);
#endif
}

typedef struct misa_ext_info {
    const char *name;
    const char *description;
} MISAExtInfo;

#define MISA_INFO_IDX(_bit) \
    __builtin_ctz(_bit)

#define MISA_EXT_INFO(_bit, _propname, _descr) \
    [MISA_INFO_IDX(_bit)] = {.name = _propname, .description = _descr}

static const MISAExtInfo misa_ext_info_arr[] = {
    MISA_EXT_INFO(RVA, "a", "Atomic instructions"),
    MISA_EXT_INFO(RVC, "c", "Compressed instructions"),
    MISA_EXT_INFO(RVD, "d", "Double-precision float point"),
    MISA_EXT_INFO(RVF, "f", "Single-precision float point"),
    MISA_EXT_INFO(RVI, "i", "Base integer instruction set"),
    MISA_EXT_INFO(RVE, "e", "Base integer instruction set (embedded)"),
    MISA_EXT_INFO(RVM, "m", "Integer multiplication and division"),
    MISA_EXT_INFO(RVS, "s", "Supervisor-level instructions"),
    MISA_EXT_INFO(RVU, "u", "User-level instructions"),
    MISA_EXT_INFO(RVH, "h", "Hypervisor"),
    MISA_EXT_INFO(RVV, "v", "Vector operations"),
    MISA_EXT_INFO(RVG, "g", "General purpose (IMAFD_Zicsr_Zifencei)"),
    MISA_EXT_INFO(RVB, "b", "Bit manipulation (Zba_Zbb_Zbs)")
};

static void riscv_cpu_validate_misa_mxl(RISCVCPUClass *mcc)
{
    CPUClass *cc = CPU_CLASS(mcc);

    /* Validate that MISA_MXL is set properly. */
    switch (mcc->misa_mxl_max) {
#ifdef TARGET_RISCV64
    case MXL_RV64:
    case MXL_RV128:
        cc->gdb_core_xml_file = "riscv-64bit-cpu.xml";
        break;
#endif
    case MXL_RV32:
        cc->gdb_core_xml_file = "riscv-32bit-cpu.xml";
        break;
    default:
        g_assert_not_reached();
    }
}

static int riscv_validate_misa_info_idx(uint32_t bit)
{
    int idx;

    /*
     * Our lowest valid input (RVA) is 1 and
     * __builtin_ctz() is UB with zero.
     */
    g_assert(bit != 0);
    idx = MISA_INFO_IDX(bit);

    g_assert(idx < ARRAY_SIZE(misa_ext_info_arr));
    return idx;
}

const char *riscv_get_misa_ext_name(uint32_t bit)
{
    int idx = riscv_validate_misa_info_idx(bit);
    const char *val = misa_ext_info_arr[idx].name;

    g_assert(val != NULL);
    return val;
}

const char *riscv_get_misa_ext_description(uint32_t bit)
{
    int idx = riscv_validate_misa_info_idx(bit);
    const char *val = misa_ext_info_arr[idx].description;

    g_assert(val != NULL);
    return val;
}

#define MULTI_EXT_CFG_BOOL(_name, _prop, _defval) \
    {.name = _name, .offset = CPU_CFG_OFFSET(_prop), \
     .enabled = _defval}

const RISCVCPUMultiExtConfig riscv_cpu_extensions[] = {
    /* Defaults for standard extensions */
    MULTI_EXT_CFG_BOOL("sscofpmf", ext_sscofpmf, false),
    MULTI_EXT_CFG_BOOL("smcntrpmf", ext_smcntrpmf, false),
    MULTI_EXT_CFG_BOOL("smcsrind", ext_smcsrind, false),
    MULTI_EXT_CFG_BOOL("smcdeleg", ext_smcdeleg, false),
    MULTI_EXT_CFG_BOOL("sscsrind", ext_sscsrind, false),
    MULTI_EXT_CFG_BOOL("ssccfg", ext_ssccfg, false),
    MULTI_EXT_CFG_BOOL("smctr", ext_smctr, false),
    MULTI_EXT_CFG_BOOL("ssctr", ext_ssctr, false),
    MULTI_EXT_CFG_BOOL("zifencei", ext_zifencei, true),
    MULTI_EXT_CFG_BOOL("zicfilp", ext_zicfilp, false),
    MULTI_EXT_CFG_BOOL("zicfiss", ext_zicfiss, false),
    MULTI_EXT_CFG_BOOL("zicsr", ext_zicsr, true),
    MULTI_EXT_CFG_BOOL("zihintntl", ext_zihintntl, true),
    MULTI_EXT_CFG_BOOL("zihintpause", ext_zihintpause, true),
    MULTI_EXT_CFG_BOOL("zimop", ext_zimop, false),
    MULTI_EXT_CFG_BOOL("zcmop", ext_zcmop, false),
    MULTI_EXT_CFG_BOOL("zacas", ext_zacas, false),
    MULTI_EXT_CFG_BOOL("zama16b", ext_zama16b, false),
    MULTI_EXT_CFG_BOOL("zabha", ext_zabha, false),
    MULTI_EXT_CFG_BOOL("zaamo", ext_zaamo, false),
    MULTI_EXT_CFG_BOOL("zalrsc", ext_zalrsc, false),
    MULTI_EXT_CFG_BOOL("zawrs", ext_zawrs, true),
    MULTI_EXT_CFG_BOOL("zfa", ext_zfa, true),
    MULTI_EXT_CFG_BOOL("zfbfmin", ext_zfbfmin, false),
    MULTI_EXT_CFG_BOOL("zfh", ext_zfh, false),
    MULTI_EXT_CFG_BOOL("zfhmin", ext_zfhmin, false),
    MULTI_EXT_CFG_BOOL("zve32f", ext_zve32f, false),
    MULTI_EXT_CFG_BOOL("zve32x", ext_zve32x, false),
    MULTI_EXT_CFG_BOOL("zve64f", ext_zve64f, false),
    MULTI_EXT_CFG_BOOL("zve64d", ext_zve64d, false),
    MULTI_EXT_CFG_BOOL("zve64x", ext_zve64x, false),
    MULTI_EXT_CFG_BOOL("zvfbfmin", ext_zvfbfmin, false),
    MULTI_EXT_CFG_BOOL("zvfbfwma", ext_zvfbfwma, false),
    MULTI_EXT_CFG_BOOL("zvfh", ext_zvfh, false),
    MULTI_EXT_CFG_BOOL("zvfhmin", ext_zvfhmin, false),
    MULTI_EXT_CFG_BOOL("sstc", ext_sstc, true),
    MULTI_EXT_CFG_BOOL("ssnpm", ext_ssnpm, false),
    MULTI_EXT_CFG_BOOL("sspm", ext_sspm, false),
    MULTI_EXT_CFG_BOOL("supm", ext_supm, false),

    MULTI_EXT_CFG_BOOL("smaia", ext_smaia, false),
MULTI_EXT_CFG_BOOL("smdbltrp", ext_smdbltrp, false), 1628 MULTI_EXT_CFG_BOOL("smepmp", ext_smepmp, false), 1629 MULTI_EXT_CFG_BOOL("smrnmi", ext_smrnmi, false), 1630 MULTI_EXT_CFG_BOOL("smmpm", ext_smmpm, false), 1631 MULTI_EXT_CFG_BOOL("smnpm", ext_smnpm, false), 1632 MULTI_EXT_CFG_BOOL("smstateen", ext_smstateen, false), 1633 MULTI_EXT_CFG_BOOL("ssaia", ext_ssaia, false), 1634 MULTI_EXT_CFG_BOOL("ssdbltrp", ext_ssdbltrp, false), 1635 MULTI_EXT_CFG_BOOL("svade", ext_svade, false), 1636 MULTI_EXT_CFG_BOOL("svadu", ext_svadu, true), 1637 MULTI_EXT_CFG_BOOL("svinval", ext_svinval, false), 1638 MULTI_EXT_CFG_BOOL("svnapot", ext_svnapot, false), 1639 MULTI_EXT_CFG_BOOL("svpbmt", ext_svpbmt, false), 1640 MULTI_EXT_CFG_BOOL("svvptc", ext_svvptc, true), 1641 1642 MULTI_EXT_CFG_BOOL("zicntr", ext_zicntr, true), 1643 MULTI_EXT_CFG_BOOL("zihpm", ext_zihpm, true), 1644 1645 MULTI_EXT_CFG_BOOL("zba", ext_zba, true), 1646 MULTI_EXT_CFG_BOOL("zbb", ext_zbb, true), 1647 MULTI_EXT_CFG_BOOL("zbc", ext_zbc, true), 1648 MULTI_EXT_CFG_BOOL("zbkb", ext_zbkb, false), 1649 MULTI_EXT_CFG_BOOL("zbkc", ext_zbkc, false), 1650 MULTI_EXT_CFG_BOOL("zbkx", ext_zbkx, false), 1651 MULTI_EXT_CFG_BOOL("zbs", ext_zbs, true), 1652 MULTI_EXT_CFG_BOOL("zk", ext_zk, false), 1653 MULTI_EXT_CFG_BOOL("zkn", ext_zkn, false), 1654 MULTI_EXT_CFG_BOOL("zknd", ext_zknd, false), 1655 MULTI_EXT_CFG_BOOL("zkne", ext_zkne, false), 1656 MULTI_EXT_CFG_BOOL("zknh", ext_zknh, false), 1657 MULTI_EXT_CFG_BOOL("zkr", ext_zkr, false), 1658 MULTI_EXT_CFG_BOOL("zks", ext_zks, false), 1659 MULTI_EXT_CFG_BOOL("zksed", ext_zksed, false), 1660 MULTI_EXT_CFG_BOOL("zksh", ext_zksh, false), 1661 MULTI_EXT_CFG_BOOL("zkt", ext_zkt, false), 1662 MULTI_EXT_CFG_BOOL("ztso", ext_ztso, false), 1663 1664 MULTI_EXT_CFG_BOOL("zdinx", ext_zdinx, false), 1665 MULTI_EXT_CFG_BOOL("zfinx", ext_zfinx, false), 1666 MULTI_EXT_CFG_BOOL("zhinx", ext_zhinx, false), 1667 MULTI_EXT_CFG_BOOL("zhinxmin", ext_zhinxmin, false), 1668 1669 MULTI_EXT_CFG_BOOL("zicbom", ext_zicbom, true), 1670 MULTI_EXT_CFG_BOOL("zicbop", ext_zicbop, true), 1671 MULTI_EXT_CFG_BOOL("zicboz", ext_zicboz, true), 1672 1673 MULTI_EXT_CFG_BOOL("zmmul", ext_zmmul, false), 1674 1675 MULTI_EXT_CFG_BOOL("zca", ext_zca, false), 1676 MULTI_EXT_CFG_BOOL("zcb", ext_zcb, false), 1677 MULTI_EXT_CFG_BOOL("zcd", ext_zcd, false), 1678 MULTI_EXT_CFG_BOOL("zce", ext_zce, false), 1679 MULTI_EXT_CFG_BOOL("zcf", ext_zcf, false), 1680 MULTI_EXT_CFG_BOOL("zcmp", ext_zcmp, false), 1681 MULTI_EXT_CFG_BOOL("zcmt", ext_zcmt, false), 1682 MULTI_EXT_CFG_BOOL("zicond", ext_zicond, false), 1683 1684 /* Vector cryptography extensions */ 1685 MULTI_EXT_CFG_BOOL("zvbb", ext_zvbb, false), 1686 MULTI_EXT_CFG_BOOL("zvbc", ext_zvbc, false), 1687 MULTI_EXT_CFG_BOOL("zvkb", ext_zvkb, false), 1688 MULTI_EXT_CFG_BOOL("zvkg", ext_zvkg, false), 1689 MULTI_EXT_CFG_BOOL("zvkned", ext_zvkned, false), 1690 MULTI_EXT_CFG_BOOL("zvknha", ext_zvknha, false), 1691 MULTI_EXT_CFG_BOOL("zvknhb", ext_zvknhb, false), 1692 MULTI_EXT_CFG_BOOL("zvksed", ext_zvksed, false), 1693 MULTI_EXT_CFG_BOOL("zvksh", ext_zvksh, false), 1694 MULTI_EXT_CFG_BOOL("zvkt", ext_zvkt, false), 1695 MULTI_EXT_CFG_BOOL("zvkn", ext_zvkn, false), 1696 MULTI_EXT_CFG_BOOL("zvknc", ext_zvknc, false), 1697 MULTI_EXT_CFG_BOOL("zvkng", ext_zvkng, false), 1698 MULTI_EXT_CFG_BOOL("zvks", ext_zvks, false), 1699 MULTI_EXT_CFG_BOOL("zvksc", ext_zvksc, false), 1700 MULTI_EXT_CFG_BOOL("zvksg", ext_zvksg, false), 1701 1702 { }, 1703 }; 1704 1705 const RISCVCPUMultiExtConfig riscv_cpu_vendor_exts[] = { 

const RISCVCPUMultiExtConfig riscv_cpu_vendor_exts[] = {
    MULTI_EXT_CFG_BOOL("xtheadba", ext_xtheadba, false),
    MULTI_EXT_CFG_BOOL("xtheadbb", ext_xtheadbb, false),
    MULTI_EXT_CFG_BOOL("xtheadbs", ext_xtheadbs, false),
    MULTI_EXT_CFG_BOOL("xtheadcmo", ext_xtheadcmo, false),
    MULTI_EXT_CFG_BOOL("xtheadcondmov", ext_xtheadcondmov, false),
    MULTI_EXT_CFG_BOOL("xtheadfmemidx", ext_xtheadfmemidx, false),
    MULTI_EXT_CFG_BOOL("xtheadfmv", ext_xtheadfmv, false),
    MULTI_EXT_CFG_BOOL("xtheadmac", ext_xtheadmac, false),
    MULTI_EXT_CFG_BOOL("xtheadmemidx", ext_xtheadmemidx, false),
    MULTI_EXT_CFG_BOOL("xtheadmempair", ext_xtheadmempair, false),
    MULTI_EXT_CFG_BOOL("xtheadsync", ext_xtheadsync, false),
    MULTI_EXT_CFG_BOOL("xventanacondops", ext_XVentanaCondOps, false),

    { },
};

/* These are experimental so mark with 'x-' */
const RISCVCPUMultiExtConfig riscv_cpu_experimental_exts[] = {
    MULTI_EXT_CFG_BOOL("x-svukte", ext_svukte, false),

    { },
};

/*
 * 'Named features' is the name we give to extensions that we
 * don't want to expose to users. They are either immutable
 * (always enabled/disabled) or they'll vary depending on
 * the resulting CPU state. They have riscv,isa strings
 * and priv_ver like regular extensions.
 */
const RISCVCPUMultiExtConfig riscv_cpu_named_features[] = {
    MULTI_EXT_CFG_BOOL("zic64b", ext_zic64b, true),
    MULTI_EXT_CFG_BOOL("ssstateen", ext_ssstateen, true),
    MULTI_EXT_CFG_BOOL("sha", ext_sha, true),
    MULTI_EXT_CFG_BOOL("ziccrse", ext_ziccrse, true),

    { },
};

/* Deprecated entries marked for future removal */
const RISCVCPUMultiExtConfig riscv_cpu_deprecated_exts[] = {
    MULTI_EXT_CFG_BOOL("Zifencei", ext_zifencei, true),
    MULTI_EXT_CFG_BOOL("Zicsr", ext_zicsr, true),
    MULTI_EXT_CFG_BOOL("Zihintntl", ext_zihintntl, true),
    MULTI_EXT_CFG_BOOL("Zihintpause", ext_zihintpause, true),
    MULTI_EXT_CFG_BOOL("Zawrs", ext_zawrs, true),
    MULTI_EXT_CFG_BOOL("Zfa", ext_zfa, true),
    MULTI_EXT_CFG_BOOL("Zfh", ext_zfh, false),
    MULTI_EXT_CFG_BOOL("Zfhmin", ext_zfhmin, false),
    MULTI_EXT_CFG_BOOL("Zve32f", ext_zve32f, false),
    MULTI_EXT_CFG_BOOL("Zve64f", ext_zve64f, false),
    MULTI_EXT_CFG_BOOL("Zve64d", ext_zve64d, false),

    { },
};

static void cpu_set_prop_err(RISCVCPU *cpu, const char *propname,
                             Error **errp)
{
    g_autofree char *cpuname = riscv_cpu_get_name(cpu);
    error_setg(errp, "CPU '%s' does not allow changing the value of '%s'",
               cpuname, propname);
}

static void prop_pmu_num_set(Object *obj, Visitor *v, const char *name,
                             void *opaque, Error **errp)
{
    RISCVCPU *cpu = RISCV_CPU(obj);
    uint8_t pmu_num, curr_pmu_num;
    uint32_t pmu_mask;

    visit_type_uint8(v, name, &pmu_num, errp);

    curr_pmu_num = ctpop32(cpu->cfg.pmu_mask);

    if (pmu_num != curr_pmu_num && riscv_cpu_is_vendor(obj)) {
        cpu_set_prop_err(cpu, name, errp);
        error_append_hint(errp, "Current '%s' val: %u\n",
                          name, curr_pmu_num);
        return;
    }

    if (pmu_num > (RV_MAX_MHPMCOUNTERS - 3)) {
        error_setg(errp, "Number of counters exceeds maximum available");
        return;
    }

    if (pmu_num == 0) {
        pmu_mask = 0;
    } else {
        pmu_mask = MAKE_64BIT_MASK(3, pmu_num);
    }

    warn_report("\"pmu-num\" property is deprecated; use \"pmu-mask\"");
    cpu->cfg.pmu_mask = pmu_mask;
cpu_option_add_user_setting("pmu-mask", pmu_mask); 1802 } 1803 1804 static void prop_pmu_num_get(Object *obj, Visitor *v, const char *name, 1805 void *opaque, Error **errp) 1806 { 1807 RISCVCPU *cpu = RISCV_CPU(obj); 1808 uint8_t pmu_num = ctpop32(cpu->cfg.pmu_mask); 1809 1810 visit_type_uint8(v, name, &pmu_num, errp); 1811 } 1812 1813 static const PropertyInfo prop_pmu_num = { 1814 .type = "int8", 1815 .description = "pmu-num", 1816 .get = prop_pmu_num_get, 1817 .set = prop_pmu_num_set, 1818 }; 1819 1820 static void prop_pmu_mask_set(Object *obj, Visitor *v, const char *name, 1821 void *opaque, Error **errp) 1822 { 1823 RISCVCPU *cpu = RISCV_CPU(obj); 1824 uint32_t value; 1825 uint8_t pmu_num; 1826 1827 visit_type_uint32(v, name, &value, errp); 1828 1829 if (value != cpu->cfg.pmu_mask && riscv_cpu_is_vendor(obj)) { 1830 cpu_set_prop_err(cpu, name, errp); 1831 error_append_hint(errp, "Current '%s' val: %x\n", 1832 name, cpu->cfg.pmu_mask); 1833 return; 1834 } 1835 1836 pmu_num = ctpop32(value); 1837 1838 if (pmu_num > (RV_MAX_MHPMCOUNTERS - 3)) { 1839 error_setg(errp, "Number of counters exceeds maximum available"); 1840 return; 1841 } 1842 1843 cpu_option_add_user_setting(name, value); 1844 cpu->cfg.pmu_mask = value; 1845 } 1846 1847 static void prop_pmu_mask_get(Object *obj, Visitor *v, const char *name, 1848 void *opaque, Error **errp) 1849 { 1850 uint8_t pmu_mask = RISCV_CPU(obj)->cfg.pmu_mask; 1851 1852 visit_type_uint8(v, name, &pmu_mask, errp); 1853 } 1854 1855 static const PropertyInfo prop_pmu_mask = { 1856 .type = "int8", 1857 .description = "pmu-mask", 1858 .get = prop_pmu_mask_get, 1859 .set = prop_pmu_mask_set, 1860 }; 1861 1862 static void prop_mmu_set(Object *obj, Visitor *v, const char *name, 1863 void *opaque, Error **errp) 1864 { 1865 RISCVCPU *cpu = RISCV_CPU(obj); 1866 bool value; 1867 1868 visit_type_bool(v, name, &value, errp); 1869 1870 if (cpu->cfg.mmu != value && riscv_cpu_is_vendor(obj)) { 1871 cpu_set_prop_err(cpu, "mmu", errp); 1872 return; 1873 } 1874 1875 cpu_option_add_user_setting(name, value); 1876 cpu->cfg.mmu = value; 1877 } 1878 1879 static void prop_mmu_get(Object *obj, Visitor *v, const char *name, 1880 void *opaque, Error **errp) 1881 { 1882 bool value = RISCV_CPU(obj)->cfg.mmu; 1883 1884 visit_type_bool(v, name, &value, errp); 1885 } 1886 1887 static const PropertyInfo prop_mmu = { 1888 .type = "bool", 1889 .description = "mmu", 1890 .get = prop_mmu_get, 1891 .set = prop_mmu_set, 1892 }; 1893 1894 static void prop_pmp_set(Object *obj, Visitor *v, const char *name, 1895 void *opaque, Error **errp) 1896 { 1897 RISCVCPU *cpu = RISCV_CPU(obj); 1898 bool value; 1899 1900 visit_type_bool(v, name, &value, errp); 1901 1902 if (cpu->cfg.pmp != value && riscv_cpu_is_vendor(obj)) { 1903 cpu_set_prop_err(cpu, name, errp); 1904 return; 1905 } 1906 1907 cpu_option_add_user_setting(name, value); 1908 cpu->cfg.pmp = value; 1909 } 1910 1911 static void prop_pmp_get(Object *obj, Visitor *v, const char *name, 1912 void *opaque, Error **errp) 1913 { 1914 bool value = RISCV_CPU(obj)->cfg.pmp; 1915 1916 visit_type_bool(v, name, &value, errp); 1917 } 1918 1919 static const PropertyInfo prop_pmp = { 1920 .type = "bool", 1921 .description = "pmp", 1922 .get = prop_pmp_get, 1923 .set = prop_pmp_set, 1924 }; 1925 1926 static int priv_spec_from_str(const char *priv_spec_str) 1927 { 1928 int priv_version = -1; 1929 1930 if (!g_strcmp0(priv_spec_str, PRIV_VER_1_13_0_STR)) { 1931 priv_version = PRIV_VERSION_1_13_0; 1932 } else if (!g_strcmp0(priv_spec_str, PRIV_VER_1_12_0_STR)) { 
1933 priv_version = PRIV_VERSION_1_12_0; 1934 } else if (!g_strcmp0(priv_spec_str, PRIV_VER_1_11_0_STR)) { 1935 priv_version = PRIV_VERSION_1_11_0; 1936 } else if (!g_strcmp0(priv_spec_str, PRIV_VER_1_10_0_STR)) { 1937 priv_version = PRIV_VERSION_1_10_0; 1938 } 1939 1940 return priv_version; 1941 } 1942 1943 const char *priv_spec_to_str(int priv_version) 1944 { 1945 switch (priv_version) { 1946 case PRIV_VERSION_1_10_0: 1947 return PRIV_VER_1_10_0_STR; 1948 case PRIV_VERSION_1_11_0: 1949 return PRIV_VER_1_11_0_STR; 1950 case PRIV_VERSION_1_12_0: 1951 return PRIV_VER_1_12_0_STR; 1952 case PRIV_VERSION_1_13_0: 1953 return PRIV_VER_1_13_0_STR; 1954 default: 1955 return NULL; 1956 } 1957 } 1958 1959 static void prop_priv_spec_set(Object *obj, Visitor *v, const char *name, 1960 void *opaque, Error **errp) 1961 { 1962 RISCVCPU *cpu = RISCV_CPU(obj); 1963 g_autofree char *value = NULL; 1964 int priv_version = -1; 1965 1966 visit_type_str(v, name, &value, errp); 1967 1968 priv_version = priv_spec_from_str(value); 1969 if (priv_version < 0) { 1970 error_setg(errp, "Unsupported privilege spec version '%s'", value); 1971 return; 1972 } 1973 1974 if (priv_version != cpu->env.priv_ver && riscv_cpu_is_vendor(obj)) { 1975 cpu_set_prop_err(cpu, name, errp); 1976 error_append_hint(errp, "Current '%s' val: %s\n", name, 1977 object_property_get_str(obj, name, NULL)); 1978 return; 1979 } 1980 1981 cpu_option_add_user_setting(name, priv_version); 1982 cpu->env.priv_ver = priv_version; 1983 } 1984 1985 static void prop_priv_spec_get(Object *obj, Visitor *v, const char *name, 1986 void *opaque, Error **errp) 1987 { 1988 RISCVCPU *cpu = RISCV_CPU(obj); 1989 const char *value = priv_spec_to_str(cpu->env.priv_ver); 1990 1991 visit_type_str(v, name, (char **)&value, errp); 1992 } 1993 1994 static const PropertyInfo prop_priv_spec = { 1995 .type = "str", 1996 .description = "priv_spec", 1997 /* FIXME enum? */ 1998 .get = prop_priv_spec_get, 1999 .set = prop_priv_spec_set, 2000 }; 2001 2002 static void prop_vext_spec_set(Object *obj, Visitor *v, const char *name, 2003 void *opaque, Error **errp) 2004 { 2005 RISCVCPU *cpu = RISCV_CPU(obj); 2006 g_autofree char *value = NULL; 2007 2008 visit_type_str(v, name, &value, errp); 2009 2010 if (g_strcmp0(value, VEXT_VER_1_00_0_STR) != 0) { 2011 error_setg(errp, "Unsupported vector spec version '%s'", value); 2012 return; 2013 } 2014 2015 cpu_option_add_user_setting(name, VEXT_VERSION_1_00_0); 2016 cpu->env.vext_ver = VEXT_VERSION_1_00_0; 2017 } 2018 2019 static void prop_vext_spec_get(Object *obj, Visitor *v, const char *name, 2020 void *opaque, Error **errp) 2021 { 2022 const char *value = VEXT_VER_1_00_0_STR; 2023 2024 visit_type_str(v, name, (char **)&value, errp); 2025 } 2026 2027 static const PropertyInfo prop_vext_spec = { 2028 .type = "str", 2029 .description = "vext_spec", 2030 /* FIXME enum? 
*/ 2031 .get = prop_vext_spec_get, 2032 .set = prop_vext_spec_set, 2033 }; 2034 2035 static void prop_vlen_set(Object *obj, Visitor *v, const char *name, 2036 void *opaque, Error **errp) 2037 { 2038 RISCVCPU *cpu = RISCV_CPU(obj); 2039 uint16_t cpu_vlen = cpu->cfg.vlenb << 3; 2040 uint16_t value; 2041 2042 if (!visit_type_uint16(v, name, &value, errp)) { 2043 return; 2044 } 2045 2046 if (!is_power_of_2(value)) { 2047 error_setg(errp, "Vector extension VLEN must be power of 2"); 2048 return; 2049 } 2050 2051 if (value != cpu_vlen && riscv_cpu_is_vendor(obj)) { 2052 cpu_set_prop_err(cpu, name, errp); 2053 error_append_hint(errp, "Current '%s' val: %u\n", 2054 name, cpu_vlen); 2055 return; 2056 } 2057 2058 cpu_option_add_user_setting(name, value); 2059 cpu->cfg.vlenb = value >> 3; 2060 } 2061 2062 static void prop_vlen_get(Object *obj, Visitor *v, const char *name, 2063 void *opaque, Error **errp) 2064 { 2065 uint16_t value = RISCV_CPU(obj)->cfg.vlenb << 3; 2066 2067 visit_type_uint16(v, name, &value, errp); 2068 } 2069 2070 static const PropertyInfo prop_vlen = { 2071 .type = "uint16", 2072 .description = "vlen", 2073 .get = prop_vlen_get, 2074 .set = prop_vlen_set, 2075 }; 2076 2077 static void prop_elen_set(Object *obj, Visitor *v, const char *name, 2078 void *opaque, Error **errp) 2079 { 2080 RISCVCPU *cpu = RISCV_CPU(obj); 2081 uint16_t value; 2082 2083 if (!visit_type_uint16(v, name, &value, errp)) { 2084 return; 2085 } 2086 2087 if (!is_power_of_2(value)) { 2088 error_setg(errp, "Vector extension ELEN must be power of 2"); 2089 return; 2090 } 2091 2092 if (value != cpu->cfg.elen && riscv_cpu_is_vendor(obj)) { 2093 cpu_set_prop_err(cpu, name, errp); 2094 error_append_hint(errp, "Current '%s' val: %u\n", 2095 name, cpu->cfg.elen); 2096 return; 2097 } 2098 2099 cpu_option_add_user_setting(name, value); 2100 cpu->cfg.elen = value; 2101 } 2102 2103 static void prop_elen_get(Object *obj, Visitor *v, const char *name, 2104 void *opaque, Error **errp) 2105 { 2106 uint16_t value = RISCV_CPU(obj)->cfg.elen; 2107 2108 visit_type_uint16(v, name, &value, errp); 2109 } 2110 2111 static const PropertyInfo prop_elen = { 2112 .type = "uint16", 2113 .description = "elen", 2114 .get = prop_elen_get, 2115 .set = prop_elen_set, 2116 }; 2117 2118 static void prop_cbom_blksize_set(Object *obj, Visitor *v, const char *name, 2119 void *opaque, Error **errp) 2120 { 2121 RISCVCPU *cpu = RISCV_CPU(obj); 2122 uint16_t value; 2123 2124 if (!visit_type_uint16(v, name, &value, errp)) { 2125 return; 2126 } 2127 2128 if (value != cpu->cfg.cbom_blocksize && riscv_cpu_is_vendor(obj)) { 2129 cpu_set_prop_err(cpu, name, errp); 2130 error_append_hint(errp, "Current '%s' val: %u\n", 2131 name, cpu->cfg.cbom_blocksize); 2132 return; 2133 } 2134 2135 cpu_option_add_user_setting(name, value); 2136 cpu->cfg.cbom_blocksize = value; 2137 } 2138 2139 static void prop_cbom_blksize_get(Object *obj, Visitor *v, const char *name, 2140 void *opaque, Error **errp) 2141 { 2142 uint16_t value = RISCV_CPU(obj)->cfg.cbom_blocksize; 2143 2144 visit_type_uint16(v, name, &value, errp); 2145 } 2146 2147 static const PropertyInfo prop_cbom_blksize = { 2148 .type = "uint16", 2149 .description = "cbom_blocksize", 2150 .get = prop_cbom_blksize_get, 2151 .set = prop_cbom_blksize_set, 2152 }; 2153 2154 static void prop_cbop_blksize_set(Object *obj, Visitor *v, const char *name, 2155 void *opaque, Error **errp) 2156 { 2157 RISCVCPU *cpu = RISCV_CPU(obj); 2158 uint16_t value; 2159 2160 if (!visit_type_uint16(v, name, &value, errp)) { 2161 return; 2162 } 
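    /*
     * As with the other cache-block sizes, vendor CPUs expose a fixed
     * value here, so only generic/dynamic CPUs may override it.
     */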
2163 2164 if (value != cpu->cfg.cbop_blocksize && riscv_cpu_is_vendor(obj)) { 2165 cpu_set_prop_err(cpu, name, errp); 2166 error_append_hint(errp, "Current '%s' val: %u\n", 2167 name, cpu->cfg.cbop_blocksize); 2168 return; 2169 } 2170 2171 cpu_option_add_user_setting(name, value); 2172 cpu->cfg.cbop_blocksize = value; 2173 } 2174 2175 static void prop_cbop_blksize_get(Object *obj, Visitor *v, const char *name, 2176 void *opaque, Error **errp) 2177 { 2178 uint16_t value = RISCV_CPU(obj)->cfg.cbop_blocksize; 2179 2180 visit_type_uint16(v, name, &value, errp); 2181 } 2182 2183 static const PropertyInfo prop_cbop_blksize = { 2184 .type = "uint16", 2185 .description = "cbop_blocksize", 2186 .get = prop_cbop_blksize_get, 2187 .set = prop_cbop_blksize_set, 2188 }; 2189 2190 static void prop_cboz_blksize_set(Object *obj, Visitor *v, const char *name, 2191 void *opaque, Error **errp) 2192 { 2193 RISCVCPU *cpu = RISCV_CPU(obj); 2194 uint16_t value; 2195 2196 if (!visit_type_uint16(v, name, &value, errp)) { 2197 return; 2198 } 2199 2200 if (value != cpu->cfg.cboz_blocksize && riscv_cpu_is_vendor(obj)) { 2201 cpu_set_prop_err(cpu, name, errp); 2202 error_append_hint(errp, "Current '%s' val: %u\n", 2203 name, cpu->cfg.cboz_blocksize); 2204 return; 2205 } 2206 2207 cpu_option_add_user_setting(name, value); 2208 cpu->cfg.cboz_blocksize = value; 2209 } 2210 2211 static void prop_cboz_blksize_get(Object *obj, Visitor *v, const char *name, 2212 void *opaque, Error **errp) 2213 { 2214 uint16_t value = RISCV_CPU(obj)->cfg.cboz_blocksize; 2215 2216 visit_type_uint16(v, name, &value, errp); 2217 } 2218 2219 static const PropertyInfo prop_cboz_blksize = { 2220 .type = "uint16", 2221 .description = "cboz_blocksize", 2222 .get = prop_cboz_blksize_get, 2223 .set = prop_cboz_blksize_set, 2224 }; 2225 2226 static void prop_mvendorid_set(Object *obj, Visitor *v, const char *name, 2227 void *opaque, Error **errp) 2228 { 2229 bool dynamic_cpu = riscv_cpu_is_dynamic(obj); 2230 RISCVCPU *cpu = RISCV_CPU(obj); 2231 uint32_t prev_val = cpu->cfg.mvendorid; 2232 uint32_t value; 2233 2234 if (!visit_type_uint32(v, name, &value, errp)) { 2235 return; 2236 } 2237 2238 if (!dynamic_cpu && prev_val != value) { 2239 error_setg(errp, "Unable to change %s mvendorid (0x%x)", 2240 object_get_typename(obj), prev_val); 2241 return; 2242 } 2243 2244 cpu->cfg.mvendorid = value; 2245 } 2246 2247 static void prop_mvendorid_get(Object *obj, Visitor *v, const char *name, 2248 void *opaque, Error **errp) 2249 { 2250 uint32_t value = RISCV_CPU(obj)->cfg.mvendorid; 2251 2252 visit_type_uint32(v, name, &value, errp); 2253 } 2254 2255 static const PropertyInfo prop_mvendorid = { 2256 .type = "uint32", 2257 .description = "mvendorid", 2258 .get = prop_mvendorid_get, 2259 .set = prop_mvendorid_set, 2260 }; 2261 2262 static void prop_mimpid_set(Object *obj, Visitor *v, const char *name, 2263 void *opaque, Error **errp) 2264 { 2265 bool dynamic_cpu = riscv_cpu_is_dynamic(obj); 2266 RISCVCPU *cpu = RISCV_CPU(obj); 2267 uint64_t prev_val = cpu->cfg.mimpid; 2268 uint64_t value; 2269 2270 if (!visit_type_uint64(v, name, &value, errp)) { 2271 return; 2272 } 2273 2274 if (!dynamic_cpu && prev_val != value) { 2275 error_setg(errp, "Unable to change %s mimpid (0x%" PRIu64 ")", 2276 object_get_typename(obj), prev_val); 2277 return; 2278 } 2279 2280 cpu->cfg.mimpid = value; 2281 } 2282 2283 static void prop_mimpid_get(Object *obj, Visitor *v, const char *name, 2284 void *opaque, Error **errp) 2285 { 2286 uint64_t value = RISCV_CPU(obj)->cfg.mimpid; 2287 2288 
    visit_type_uint64(v, name, &value, errp);
}

static const PropertyInfo prop_mimpid = {
    .type = "uint64",
    .description = "mimpid",
    .get = prop_mimpid_get,
    .set = prop_mimpid_set,
};

static void prop_marchid_set(Object *obj, Visitor *v, const char *name,
                             void *opaque, Error **errp)
{
    bool dynamic_cpu = riscv_cpu_is_dynamic(obj);
    RISCVCPU *cpu = RISCV_CPU(obj);
    uint64_t prev_val = cpu->cfg.marchid;
    uint64_t value, invalid_val;
    uint32_t mxlen = 0;

    if (!visit_type_uint64(v, name, &value, errp)) {
        return;
    }

    if (!dynamic_cpu && prev_val != value) {
        error_setg(errp, "Unable to change %s marchid (0x%" PRIu64 ")",
                   object_get_typename(obj), prev_val);
        return;
    }

    switch (riscv_cpu_mxl(&cpu->env)) {
    case MXL_RV32:
        mxlen = 32;
        break;
    case MXL_RV64:
    case MXL_RV128:
        mxlen = 64;
        break;
    default:
        g_assert_not_reached();
    }

    invalid_val = 1LL << (mxlen - 1);

    if (value == invalid_val) {
        error_setg(errp, "Unable to set marchid with MSB (%u) bit set "
                         "and the remaining bits zero", mxlen);
        return;
    }

    cpu->cfg.marchid = value;
}

static void prop_marchid_get(Object *obj, Visitor *v, const char *name,
                             void *opaque, Error **errp)
{
    uint64_t value = RISCV_CPU(obj)->cfg.marchid;

    visit_type_uint64(v, name, &value, errp);
}

static const PropertyInfo prop_marchid = {
    .type = "uint64",
    .description = "marchid",
    .get = prop_marchid_get,
    .set = prop_marchid_set,
};

/*
 * RVA22U64 defines some 'named features' that are cache
 * related: Za64rs, Zic64b, Ziccif, Ziccrse, Ziccamoa
 * and Zicclsm. They are always implemented in TCG and
 * don't need to be manually enabled by the profile.
 */
static RISCVCPUProfile RVA22U64 = {
    .u_parent = NULL,
    .s_parent = NULL,
    .name = "rva22u64",
    .misa_ext = RVI | RVM | RVA | RVF | RVD | RVC | RVB | RVU,
    .priv_spec = RISCV_PROFILE_ATTR_UNUSED,
    .satp_mode = RISCV_PROFILE_ATTR_UNUSED,
    .ext_offsets = {
        CPU_CFG_OFFSET(ext_zicsr), CPU_CFG_OFFSET(ext_zihintpause),
        CPU_CFG_OFFSET(ext_zba), CPU_CFG_OFFSET(ext_zbb),
        CPU_CFG_OFFSET(ext_zbs), CPU_CFG_OFFSET(ext_zfhmin),
        CPU_CFG_OFFSET(ext_zkt), CPU_CFG_OFFSET(ext_zicntr),
        CPU_CFG_OFFSET(ext_zihpm), CPU_CFG_OFFSET(ext_zicbom),
        CPU_CFG_OFFSET(ext_zicbop), CPU_CFG_OFFSET(ext_zicboz),

        /* mandatory named features for this profile */
        CPU_CFG_OFFSET(ext_zic64b),

        RISCV_PROFILE_EXT_LIST_END
    }
};

/*
 * As with RVA22U64, RVA22S64 also defines 'named features'.
 *
 * Cache related features that we consider enabled since we don't
 * implement cache: Ssccptr
 *
 * Other named features that we already implement: Sstvecd, Sstvala,
 * Sscounterenw
 *
 * The remaining features/extensions come from RVA22U64.
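 *
 * Usage sketch (not from the original comment): profile CPUs are picked
 * like any other model, e.g. '-cpu rva22s64', and extra extensions can
 * normally still be enabled on top of them, e.g. '-cpu rva22s64,zicond=true'.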
2393 */ 2394 static RISCVCPUProfile RVA22S64 = { 2395 .u_parent = &RVA22U64, 2396 .s_parent = NULL, 2397 .name = "rva22s64", 2398 .misa_ext = RVS, 2399 .priv_spec = PRIV_VERSION_1_12_0, 2400 .satp_mode = VM_1_10_SV39, 2401 .ext_offsets = { 2402 /* rva22s64 exts */ 2403 CPU_CFG_OFFSET(ext_zifencei), CPU_CFG_OFFSET(ext_svpbmt), 2404 CPU_CFG_OFFSET(ext_svinval), CPU_CFG_OFFSET(ext_svade), 2405 2406 RISCV_PROFILE_EXT_LIST_END 2407 } 2408 }; 2409 2410 /* 2411 * All mandatory extensions from RVA22U64 are present 2412 * in RVA23U64 so set RVA22 as a parent. We need to 2413 * declare just the newly added mandatory extensions. 2414 */ 2415 static RISCVCPUProfile RVA23U64 = { 2416 .u_parent = &RVA22U64, 2417 .s_parent = NULL, 2418 .name = "rva23u64", 2419 .misa_ext = RVV, 2420 .priv_spec = RISCV_PROFILE_ATTR_UNUSED, 2421 .satp_mode = RISCV_PROFILE_ATTR_UNUSED, 2422 .ext_offsets = { 2423 CPU_CFG_OFFSET(ext_zvfhmin), CPU_CFG_OFFSET(ext_zvbb), 2424 CPU_CFG_OFFSET(ext_zvkt), CPU_CFG_OFFSET(ext_zihintntl), 2425 CPU_CFG_OFFSET(ext_zicond), CPU_CFG_OFFSET(ext_zimop), 2426 CPU_CFG_OFFSET(ext_zcmop), CPU_CFG_OFFSET(ext_zcb), 2427 CPU_CFG_OFFSET(ext_zfa), CPU_CFG_OFFSET(ext_zawrs), 2428 CPU_CFG_OFFSET(ext_supm), 2429 2430 RISCV_PROFILE_EXT_LIST_END 2431 } 2432 }; 2433 2434 /* 2435 * As with RVA23U64, RVA23S64 also defines 'named features'. 2436 * 2437 * Cache related features that we consider enabled since we don't 2438 * implement cache: Ssccptr 2439 * 2440 * Other named features that we already implement: Sstvecd, Sstvala, 2441 * Sscounterenw, Ssu64xl 2442 * 2443 * The remaining features/extensions comes from RVA23S64. 2444 */ 2445 static RISCVCPUProfile RVA23S64 = { 2446 .u_parent = &RVA23U64, 2447 .s_parent = &RVA22S64, 2448 .name = "rva23s64", 2449 .misa_ext = RVS, 2450 .priv_spec = PRIV_VERSION_1_13_0, 2451 .satp_mode = VM_1_10_SV39, 2452 .ext_offsets = { 2453 /* New in RVA23S64 */ 2454 CPU_CFG_OFFSET(ext_svnapot), CPU_CFG_OFFSET(ext_sstc), 2455 CPU_CFG_OFFSET(ext_sscofpmf), CPU_CFG_OFFSET(ext_ssnpm), 2456 2457 /* Named features: Sha */ 2458 CPU_CFG_OFFSET(ext_sha), 2459 2460 RISCV_PROFILE_EXT_LIST_END 2461 } 2462 }; 2463 2464 RISCVCPUProfile *riscv_profiles[] = { 2465 &RVA22U64, 2466 &RVA22S64, 2467 &RVA23U64, 2468 &RVA23S64, 2469 NULL, 2470 }; 2471 2472 static RISCVCPUImpliedExtsRule RVA_IMPLIED = { 2473 .is_misa = true, 2474 .ext = RVA, 2475 .implied_multi_exts = { 2476 CPU_CFG_OFFSET(ext_zalrsc), CPU_CFG_OFFSET(ext_zaamo), 2477 2478 RISCV_IMPLIED_EXTS_RULE_END 2479 }, 2480 }; 2481 2482 static RISCVCPUImpliedExtsRule RVD_IMPLIED = { 2483 .is_misa = true, 2484 .ext = RVD, 2485 .implied_misa_exts = RVF, 2486 .implied_multi_exts = { RISCV_IMPLIED_EXTS_RULE_END }, 2487 }; 2488 2489 static RISCVCPUImpliedExtsRule RVF_IMPLIED = { 2490 .is_misa = true, 2491 .ext = RVF, 2492 .implied_multi_exts = { 2493 CPU_CFG_OFFSET(ext_zicsr), 2494 2495 RISCV_IMPLIED_EXTS_RULE_END 2496 }, 2497 }; 2498 2499 static RISCVCPUImpliedExtsRule RVM_IMPLIED = { 2500 .is_misa = true, 2501 .ext = RVM, 2502 .implied_multi_exts = { 2503 CPU_CFG_OFFSET(ext_zmmul), 2504 2505 RISCV_IMPLIED_EXTS_RULE_END 2506 }, 2507 }; 2508 2509 static RISCVCPUImpliedExtsRule RVV_IMPLIED = { 2510 .is_misa = true, 2511 .ext = RVV, 2512 .implied_multi_exts = { 2513 CPU_CFG_OFFSET(ext_zve64d), 2514 2515 RISCV_IMPLIED_EXTS_RULE_END 2516 }, 2517 }; 2518 2519 static RISCVCPUImpliedExtsRule ZCB_IMPLIED = { 2520 .ext = CPU_CFG_OFFSET(ext_zcb), 2521 .implied_multi_exts = { 2522 CPU_CFG_OFFSET(ext_zca), 2523 2524 RISCV_IMPLIED_EXTS_RULE_END 2525 }, 2526 }; 2527 
2528 static RISCVCPUImpliedExtsRule ZCD_IMPLIED = { 2529 .ext = CPU_CFG_OFFSET(ext_zcd), 2530 .implied_misa_exts = RVD, 2531 .implied_multi_exts = { 2532 CPU_CFG_OFFSET(ext_zca), 2533 2534 RISCV_IMPLIED_EXTS_RULE_END 2535 }, 2536 }; 2537 2538 static RISCVCPUImpliedExtsRule ZCE_IMPLIED = { 2539 .ext = CPU_CFG_OFFSET(ext_zce), 2540 .implied_multi_exts = { 2541 CPU_CFG_OFFSET(ext_zcb), CPU_CFG_OFFSET(ext_zcmp), 2542 CPU_CFG_OFFSET(ext_zcmt), 2543 2544 RISCV_IMPLIED_EXTS_RULE_END 2545 }, 2546 }; 2547 2548 static RISCVCPUImpliedExtsRule ZCF_IMPLIED = { 2549 .ext = CPU_CFG_OFFSET(ext_zcf), 2550 .implied_misa_exts = RVF, 2551 .implied_multi_exts = { 2552 CPU_CFG_OFFSET(ext_zca), 2553 2554 RISCV_IMPLIED_EXTS_RULE_END 2555 }, 2556 }; 2557 2558 static RISCVCPUImpliedExtsRule ZCMP_IMPLIED = { 2559 .ext = CPU_CFG_OFFSET(ext_zcmp), 2560 .implied_multi_exts = { 2561 CPU_CFG_OFFSET(ext_zca), 2562 2563 RISCV_IMPLIED_EXTS_RULE_END 2564 }, 2565 }; 2566 2567 static RISCVCPUImpliedExtsRule ZCMT_IMPLIED = { 2568 .ext = CPU_CFG_OFFSET(ext_zcmt), 2569 .implied_multi_exts = { 2570 CPU_CFG_OFFSET(ext_zca), CPU_CFG_OFFSET(ext_zicsr), 2571 2572 RISCV_IMPLIED_EXTS_RULE_END 2573 }, 2574 }; 2575 2576 static RISCVCPUImpliedExtsRule ZDINX_IMPLIED = { 2577 .ext = CPU_CFG_OFFSET(ext_zdinx), 2578 .implied_multi_exts = { 2579 CPU_CFG_OFFSET(ext_zfinx), 2580 2581 RISCV_IMPLIED_EXTS_RULE_END 2582 }, 2583 }; 2584 2585 static RISCVCPUImpliedExtsRule ZFA_IMPLIED = { 2586 .ext = CPU_CFG_OFFSET(ext_zfa), 2587 .implied_misa_exts = RVF, 2588 .implied_multi_exts = { RISCV_IMPLIED_EXTS_RULE_END }, 2589 }; 2590 2591 static RISCVCPUImpliedExtsRule ZFBFMIN_IMPLIED = { 2592 .ext = CPU_CFG_OFFSET(ext_zfbfmin), 2593 .implied_misa_exts = RVF, 2594 .implied_multi_exts = { RISCV_IMPLIED_EXTS_RULE_END }, 2595 }; 2596 2597 static RISCVCPUImpliedExtsRule ZFH_IMPLIED = { 2598 .ext = CPU_CFG_OFFSET(ext_zfh), 2599 .implied_multi_exts = { 2600 CPU_CFG_OFFSET(ext_zfhmin), 2601 2602 RISCV_IMPLIED_EXTS_RULE_END 2603 }, 2604 }; 2605 2606 static RISCVCPUImpliedExtsRule ZFHMIN_IMPLIED = { 2607 .ext = CPU_CFG_OFFSET(ext_zfhmin), 2608 .implied_misa_exts = RVF, 2609 .implied_multi_exts = { RISCV_IMPLIED_EXTS_RULE_END }, 2610 }; 2611 2612 static RISCVCPUImpliedExtsRule ZFINX_IMPLIED = { 2613 .ext = CPU_CFG_OFFSET(ext_zfinx), 2614 .implied_multi_exts = { 2615 CPU_CFG_OFFSET(ext_zicsr), 2616 2617 RISCV_IMPLIED_EXTS_RULE_END 2618 }, 2619 }; 2620 2621 static RISCVCPUImpliedExtsRule ZHINX_IMPLIED = { 2622 .ext = CPU_CFG_OFFSET(ext_zhinx), 2623 .implied_multi_exts = { 2624 CPU_CFG_OFFSET(ext_zhinxmin), 2625 2626 RISCV_IMPLIED_EXTS_RULE_END 2627 }, 2628 }; 2629 2630 static RISCVCPUImpliedExtsRule ZHINXMIN_IMPLIED = { 2631 .ext = CPU_CFG_OFFSET(ext_zhinxmin), 2632 .implied_multi_exts = { 2633 CPU_CFG_OFFSET(ext_zfinx), 2634 2635 RISCV_IMPLIED_EXTS_RULE_END 2636 }, 2637 }; 2638 2639 static RISCVCPUImpliedExtsRule ZICNTR_IMPLIED = { 2640 .ext = CPU_CFG_OFFSET(ext_zicntr), 2641 .implied_multi_exts = { 2642 CPU_CFG_OFFSET(ext_zicsr), 2643 2644 RISCV_IMPLIED_EXTS_RULE_END 2645 }, 2646 }; 2647 2648 static RISCVCPUImpliedExtsRule ZIHPM_IMPLIED = { 2649 .ext = CPU_CFG_OFFSET(ext_zihpm), 2650 .implied_multi_exts = { 2651 CPU_CFG_OFFSET(ext_zicsr), 2652 2653 RISCV_IMPLIED_EXTS_RULE_END 2654 }, 2655 }; 2656 2657 static RISCVCPUImpliedExtsRule ZK_IMPLIED = { 2658 .ext = CPU_CFG_OFFSET(ext_zk), 2659 .implied_multi_exts = { 2660 CPU_CFG_OFFSET(ext_zkn), CPU_CFG_OFFSET(ext_zkr), 2661 CPU_CFG_OFFSET(ext_zkt), 2662 2663 RISCV_IMPLIED_EXTS_RULE_END 2664 }, 2665 }; 2666 2667 static 
RISCVCPUImpliedExtsRule ZKN_IMPLIED = { 2668 .ext = CPU_CFG_OFFSET(ext_zkn), 2669 .implied_multi_exts = { 2670 CPU_CFG_OFFSET(ext_zbkb), CPU_CFG_OFFSET(ext_zbkc), 2671 CPU_CFG_OFFSET(ext_zbkx), CPU_CFG_OFFSET(ext_zkne), 2672 CPU_CFG_OFFSET(ext_zknd), CPU_CFG_OFFSET(ext_zknh), 2673 2674 RISCV_IMPLIED_EXTS_RULE_END 2675 }, 2676 }; 2677 2678 static RISCVCPUImpliedExtsRule ZKS_IMPLIED = { 2679 .ext = CPU_CFG_OFFSET(ext_zks), 2680 .implied_multi_exts = { 2681 CPU_CFG_OFFSET(ext_zbkb), CPU_CFG_OFFSET(ext_zbkc), 2682 CPU_CFG_OFFSET(ext_zbkx), CPU_CFG_OFFSET(ext_zksed), 2683 CPU_CFG_OFFSET(ext_zksh), 2684 2685 RISCV_IMPLIED_EXTS_RULE_END 2686 }, 2687 }; 2688 2689 static RISCVCPUImpliedExtsRule ZVBB_IMPLIED = { 2690 .ext = CPU_CFG_OFFSET(ext_zvbb), 2691 .implied_multi_exts = { 2692 CPU_CFG_OFFSET(ext_zvkb), 2693 2694 RISCV_IMPLIED_EXTS_RULE_END 2695 }, 2696 }; 2697 2698 static RISCVCPUImpliedExtsRule ZVE32F_IMPLIED = { 2699 .ext = CPU_CFG_OFFSET(ext_zve32f), 2700 .implied_misa_exts = RVF, 2701 .implied_multi_exts = { 2702 CPU_CFG_OFFSET(ext_zve32x), 2703 2704 RISCV_IMPLIED_EXTS_RULE_END 2705 }, 2706 }; 2707 2708 static RISCVCPUImpliedExtsRule ZVE32X_IMPLIED = { 2709 .ext = CPU_CFG_OFFSET(ext_zve32x), 2710 .implied_multi_exts = { 2711 CPU_CFG_OFFSET(ext_zicsr), 2712 2713 RISCV_IMPLIED_EXTS_RULE_END 2714 }, 2715 }; 2716 2717 static RISCVCPUImpliedExtsRule ZVE64D_IMPLIED = { 2718 .ext = CPU_CFG_OFFSET(ext_zve64d), 2719 .implied_misa_exts = RVD, 2720 .implied_multi_exts = { 2721 CPU_CFG_OFFSET(ext_zve64f), 2722 2723 RISCV_IMPLIED_EXTS_RULE_END 2724 }, 2725 }; 2726 2727 static RISCVCPUImpliedExtsRule ZVE64F_IMPLIED = { 2728 .ext = CPU_CFG_OFFSET(ext_zve64f), 2729 .implied_misa_exts = RVF, 2730 .implied_multi_exts = { 2731 CPU_CFG_OFFSET(ext_zve32f), CPU_CFG_OFFSET(ext_zve64x), 2732 2733 RISCV_IMPLIED_EXTS_RULE_END 2734 }, 2735 }; 2736 2737 static RISCVCPUImpliedExtsRule ZVE64X_IMPLIED = { 2738 .ext = CPU_CFG_OFFSET(ext_zve64x), 2739 .implied_multi_exts = { 2740 CPU_CFG_OFFSET(ext_zve32x), 2741 2742 RISCV_IMPLIED_EXTS_RULE_END 2743 }, 2744 }; 2745 2746 static RISCVCPUImpliedExtsRule ZVFBFMIN_IMPLIED = { 2747 .ext = CPU_CFG_OFFSET(ext_zvfbfmin), 2748 .implied_multi_exts = { 2749 CPU_CFG_OFFSET(ext_zve32f), 2750 2751 RISCV_IMPLIED_EXTS_RULE_END 2752 }, 2753 }; 2754 2755 static RISCVCPUImpliedExtsRule ZVFBFWMA_IMPLIED = { 2756 .ext = CPU_CFG_OFFSET(ext_zvfbfwma), 2757 .implied_multi_exts = { 2758 CPU_CFG_OFFSET(ext_zvfbfmin), CPU_CFG_OFFSET(ext_zfbfmin), 2759 2760 RISCV_IMPLIED_EXTS_RULE_END 2761 }, 2762 }; 2763 2764 static RISCVCPUImpliedExtsRule ZVFH_IMPLIED = { 2765 .ext = CPU_CFG_OFFSET(ext_zvfh), 2766 .implied_multi_exts = { 2767 CPU_CFG_OFFSET(ext_zvfhmin), CPU_CFG_OFFSET(ext_zfhmin), 2768 2769 RISCV_IMPLIED_EXTS_RULE_END 2770 }, 2771 }; 2772 2773 static RISCVCPUImpliedExtsRule ZVFHMIN_IMPLIED = { 2774 .ext = CPU_CFG_OFFSET(ext_zvfhmin), 2775 .implied_multi_exts = { 2776 CPU_CFG_OFFSET(ext_zve32f), 2777 2778 RISCV_IMPLIED_EXTS_RULE_END 2779 }, 2780 }; 2781 2782 static RISCVCPUImpliedExtsRule ZVKN_IMPLIED = { 2783 .ext = CPU_CFG_OFFSET(ext_zvkn), 2784 .implied_multi_exts = { 2785 CPU_CFG_OFFSET(ext_zvkned), CPU_CFG_OFFSET(ext_zvknhb), 2786 CPU_CFG_OFFSET(ext_zvkb), CPU_CFG_OFFSET(ext_zvkt), 2787 2788 RISCV_IMPLIED_EXTS_RULE_END 2789 }, 2790 }; 2791 2792 static RISCVCPUImpliedExtsRule ZVKNC_IMPLIED = { 2793 .ext = CPU_CFG_OFFSET(ext_zvknc), 2794 .implied_multi_exts = { 2795 CPU_CFG_OFFSET(ext_zvkn), CPU_CFG_OFFSET(ext_zvbc), 2796 2797 RISCV_IMPLIED_EXTS_RULE_END 2798 }, 2799 }; 2800 2801 static 
RISCVCPUImpliedExtsRule ZVKNG_IMPLIED = { 2802 .ext = CPU_CFG_OFFSET(ext_zvkng), 2803 .implied_multi_exts = { 2804 CPU_CFG_OFFSET(ext_zvkn), CPU_CFG_OFFSET(ext_zvkg), 2805 2806 RISCV_IMPLIED_EXTS_RULE_END 2807 }, 2808 }; 2809 2810 static RISCVCPUImpliedExtsRule ZVKNHB_IMPLIED = { 2811 .ext = CPU_CFG_OFFSET(ext_zvknhb), 2812 .implied_multi_exts = { 2813 CPU_CFG_OFFSET(ext_zve64x), 2814 2815 RISCV_IMPLIED_EXTS_RULE_END 2816 }, 2817 }; 2818 2819 static RISCVCPUImpliedExtsRule ZVKS_IMPLIED = { 2820 .ext = CPU_CFG_OFFSET(ext_zvks), 2821 .implied_multi_exts = { 2822 CPU_CFG_OFFSET(ext_zvksed), CPU_CFG_OFFSET(ext_zvksh), 2823 CPU_CFG_OFFSET(ext_zvkb), CPU_CFG_OFFSET(ext_zvkt), 2824 2825 RISCV_IMPLIED_EXTS_RULE_END 2826 }, 2827 }; 2828 2829 static RISCVCPUImpliedExtsRule ZVKSC_IMPLIED = { 2830 .ext = CPU_CFG_OFFSET(ext_zvksc), 2831 .implied_multi_exts = { 2832 CPU_CFG_OFFSET(ext_zvks), CPU_CFG_OFFSET(ext_zvbc), 2833 2834 RISCV_IMPLIED_EXTS_RULE_END 2835 }, 2836 }; 2837 2838 static RISCVCPUImpliedExtsRule ZVKSG_IMPLIED = { 2839 .ext = CPU_CFG_OFFSET(ext_zvksg), 2840 .implied_multi_exts = { 2841 CPU_CFG_OFFSET(ext_zvks), CPU_CFG_OFFSET(ext_zvkg), 2842 2843 RISCV_IMPLIED_EXTS_RULE_END 2844 }, 2845 }; 2846 2847 static RISCVCPUImpliedExtsRule SSCFG_IMPLIED = { 2848 .ext = CPU_CFG_OFFSET(ext_ssccfg), 2849 .implied_multi_exts = { 2850 CPU_CFG_OFFSET(ext_smcsrind), CPU_CFG_OFFSET(ext_sscsrind), 2851 CPU_CFG_OFFSET(ext_smcdeleg), 2852 2853 RISCV_IMPLIED_EXTS_RULE_END 2854 }, 2855 }; 2856 2857 static RISCVCPUImpliedExtsRule SUPM_IMPLIED = { 2858 .ext = CPU_CFG_OFFSET(ext_supm), 2859 .implied_multi_exts = { 2860 CPU_CFG_OFFSET(ext_ssnpm), CPU_CFG_OFFSET(ext_smnpm), 2861 2862 RISCV_IMPLIED_EXTS_RULE_END 2863 }, 2864 }; 2865 2866 static RISCVCPUImpliedExtsRule SSPM_IMPLIED = { 2867 .ext = CPU_CFG_OFFSET(ext_sspm), 2868 .implied_multi_exts = { 2869 CPU_CFG_OFFSET(ext_smnpm), 2870 2871 RISCV_IMPLIED_EXTS_RULE_END 2872 }, 2873 }; 2874 2875 static RISCVCPUImpliedExtsRule SMCTR_IMPLIED = { 2876 .ext = CPU_CFG_OFFSET(ext_smctr), 2877 .implied_misa_exts = RVS, 2878 .implied_multi_exts = { 2879 CPU_CFG_OFFSET(ext_sscsrind), 2880 2881 RISCV_IMPLIED_EXTS_RULE_END 2882 }, 2883 }; 2884 2885 static RISCVCPUImpliedExtsRule SSCTR_IMPLIED = { 2886 .ext = CPU_CFG_OFFSET(ext_ssctr), 2887 .implied_misa_exts = RVS, 2888 .implied_multi_exts = { 2889 CPU_CFG_OFFSET(ext_sscsrind), 2890 2891 RISCV_IMPLIED_EXTS_RULE_END 2892 }, 2893 }; 2894 2895 RISCVCPUImpliedExtsRule *riscv_misa_ext_implied_rules[] = { 2896 &RVA_IMPLIED, &RVD_IMPLIED, &RVF_IMPLIED, 2897 &RVM_IMPLIED, &RVV_IMPLIED, NULL 2898 }; 2899 2900 RISCVCPUImpliedExtsRule *riscv_multi_ext_implied_rules[] = { 2901 &ZCB_IMPLIED, &ZCD_IMPLIED, &ZCE_IMPLIED, 2902 &ZCF_IMPLIED, &ZCMP_IMPLIED, &ZCMT_IMPLIED, 2903 &ZDINX_IMPLIED, &ZFA_IMPLIED, &ZFBFMIN_IMPLIED, 2904 &ZFH_IMPLIED, &ZFHMIN_IMPLIED, &ZFINX_IMPLIED, 2905 &ZHINX_IMPLIED, &ZHINXMIN_IMPLIED, &ZICNTR_IMPLIED, 2906 &ZIHPM_IMPLIED, &ZK_IMPLIED, &ZKN_IMPLIED, 2907 &ZKS_IMPLIED, &ZVBB_IMPLIED, &ZVE32F_IMPLIED, 2908 &ZVE32X_IMPLIED, &ZVE64D_IMPLIED, &ZVE64F_IMPLIED, 2909 &ZVE64X_IMPLIED, &ZVFBFMIN_IMPLIED, &ZVFBFWMA_IMPLIED, 2910 &ZVFH_IMPLIED, &ZVFHMIN_IMPLIED, &ZVKN_IMPLIED, 2911 &ZVKNC_IMPLIED, &ZVKNG_IMPLIED, &ZVKNHB_IMPLIED, 2912 &ZVKS_IMPLIED, &ZVKSC_IMPLIED, &ZVKSG_IMPLIED, &SSCFG_IMPLIED, 2913 &SUPM_IMPLIED, &SSPM_IMPLIED, &SMCTR_IMPLIED, &SSCTR_IMPLIED, 2914 NULL 2915 }; 2916 2917 static const Property riscv_cpu_properties[] = { 2918 DEFINE_PROP_BOOL("debug", RISCVCPU, cfg.debug, true), 2919 2920 {.name = "pmu-mask", 
.info = &prop_pmu_mask}, 2921 {.name = "pmu-num", .info = &prop_pmu_num}, /* Deprecated */ 2922 2923 {.name = "mmu", .info = &prop_mmu}, 2924 {.name = "pmp", .info = &prop_pmp}, 2925 2926 {.name = "priv_spec", .info = &prop_priv_spec}, 2927 {.name = "vext_spec", .info = &prop_vext_spec}, 2928 2929 {.name = "vlen", .info = &prop_vlen}, 2930 {.name = "elen", .info = &prop_elen}, 2931 2932 {.name = "cbom_blocksize", .info = &prop_cbom_blksize}, 2933 {.name = "cbop_blocksize", .info = &prop_cbop_blksize}, 2934 {.name = "cboz_blocksize", .info = &prop_cboz_blksize}, 2935 2936 {.name = "mvendorid", .info = &prop_mvendorid}, 2937 {.name = "mimpid", .info = &prop_mimpid}, 2938 {.name = "marchid", .info = &prop_marchid}, 2939 2940 #ifndef CONFIG_USER_ONLY 2941 DEFINE_PROP_UINT64("resetvec", RISCVCPU, env.resetvec, DEFAULT_RSTVEC), 2942 DEFINE_PROP_UINT64("rnmi-interrupt-vector", RISCVCPU, env.rnmi_irqvec, 2943 DEFAULT_RNMI_IRQVEC), 2944 DEFINE_PROP_UINT64("rnmi-exception-vector", RISCVCPU, env.rnmi_excpvec, 2945 DEFAULT_RNMI_EXCPVEC), 2946 #endif 2947 2948 DEFINE_PROP_BOOL("short-isa-string", RISCVCPU, cfg.short_isa_string, false), 2949 2950 DEFINE_PROP_BOOL("rvv_ta_all_1s", RISCVCPU, cfg.rvv_ta_all_1s, false), 2951 DEFINE_PROP_BOOL("rvv_ma_all_1s", RISCVCPU, cfg.rvv_ma_all_1s, false), 2952 DEFINE_PROP_BOOL("rvv_vl_half_avl", RISCVCPU, cfg.rvv_vl_half_avl, false), 2953 2954 /* 2955 * write_misa() is marked as experimental for now so mark 2956 * it with -x and default to 'false'. 2957 */ 2958 DEFINE_PROP_BOOL("x-misa-w", RISCVCPU, cfg.misa_w, false), 2959 }; 2960 2961 #if defined(TARGET_RISCV64) 2962 static void rva22u64_profile_cpu_init(Object *obj) 2963 { 2964 rv64i_bare_cpu_init(obj); 2965 2966 RVA22U64.enabled = true; 2967 } 2968 2969 static void rva22s64_profile_cpu_init(Object *obj) 2970 { 2971 rv64i_bare_cpu_init(obj); 2972 2973 RVA22S64.enabled = true; 2974 } 2975 2976 static void rva23u64_profile_cpu_init(Object *obj) 2977 { 2978 rv64i_bare_cpu_init(obj); 2979 2980 RVA23U64.enabled = true; 2981 } 2982 2983 static void rva23s64_profile_cpu_init(Object *obj) 2984 { 2985 rv64i_bare_cpu_init(obj); 2986 2987 RVA23S64.enabled = true; 2988 } 2989 #endif 2990 2991 static const gchar *riscv_gdb_arch_name(CPUState *cs) 2992 { 2993 RISCVCPU *cpu = RISCV_CPU(cs); 2994 CPURISCVState *env = &cpu->env; 2995 2996 switch (riscv_cpu_mxl(env)) { 2997 case MXL_RV32: 2998 return "riscv:rv32"; 2999 case MXL_RV64: 3000 case MXL_RV128: 3001 return "riscv:rv64"; 3002 default: 3003 g_assert_not_reached(); 3004 } 3005 } 3006 3007 #ifndef CONFIG_USER_ONLY 3008 static int64_t riscv_get_arch_id(CPUState *cs) 3009 { 3010 RISCVCPU *cpu = RISCV_CPU(cs); 3011 3012 return cpu->env.mhartid; 3013 } 3014 3015 #include "hw/core/sysemu-cpu-ops.h" 3016 3017 static const struct SysemuCPUOps riscv_sysemu_ops = { 3018 .has_work = riscv_cpu_has_work, 3019 .get_phys_page_debug = riscv_cpu_get_phys_page_debug, 3020 .write_elf64_note = riscv_cpu_write_elf64_note, 3021 .write_elf32_note = riscv_cpu_write_elf32_note, 3022 .legacy_vmsd = &vmstate_riscv_cpu, 3023 }; 3024 #endif 3025 3026 static void riscv_cpu_common_class_init(ObjectClass *c, const void *data) 3027 { 3028 RISCVCPUClass *mcc = RISCV_CPU_CLASS(c); 3029 CPUClass *cc = CPU_CLASS(c); 3030 DeviceClass *dc = DEVICE_CLASS(c); 3031 ResettableClass *rc = RESETTABLE_CLASS(c); 3032 3033 device_class_set_parent_realize(dc, riscv_cpu_realize, 3034 &mcc->parent_realize); 3035 3036 resettable_class_set_parent_phases(rc, NULL, riscv_cpu_reset_hold, NULL, 3037 &mcc->parent_phases); 3038 3039 
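    /* Generic CPUClass hooks shared by every RISC-V CPU model. */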
cc->class_by_name = riscv_cpu_class_by_name; 3040 cc->dump_state = riscv_cpu_dump_state; 3041 cc->set_pc = riscv_cpu_set_pc; 3042 cc->get_pc = riscv_cpu_get_pc; 3043 cc->gdb_read_register = riscv_cpu_gdb_read_register; 3044 cc->gdb_write_register = riscv_cpu_gdb_write_register; 3045 cc->gdb_stop_before_watchpoint = true; 3046 cc->disas_set_info = riscv_cpu_disas_set_info; 3047 #ifndef CONFIG_USER_ONLY 3048 cc->sysemu_ops = &riscv_sysemu_ops; 3049 cc->get_arch_id = riscv_get_arch_id; 3050 #endif 3051 cc->gdb_arch_name = riscv_gdb_arch_name; 3052 #ifdef CONFIG_TCG 3053 cc->tcg_ops = &riscv_tcg_ops; 3054 #endif /* CONFIG_TCG */ 3055 3056 device_class_set_props(dc, riscv_cpu_properties); 3057 } 3058 3059 static void riscv_cpu_class_init(ObjectClass *c, const void *data) 3060 { 3061 RISCVCPUClass *mcc = RISCV_CPU_CLASS(c); 3062 3063 mcc->misa_mxl_max = (RISCVMXL)GPOINTER_TO_UINT(data); 3064 riscv_cpu_validate_misa_mxl(mcc); 3065 } 3066 3067 static void riscv_isa_string_ext(RISCVCPU *cpu, char **isa_str, 3068 int max_str_len) 3069 { 3070 const RISCVIsaExtData *edata; 3071 char *old = *isa_str; 3072 char *new = *isa_str; 3073 3074 for (edata = isa_edata_arr; edata && edata->name; edata++) { 3075 if (isa_ext_is_enabled(cpu, edata->ext_enable_offset)) { 3076 new = g_strconcat(old, "_", edata->name, NULL); 3077 g_free(old); 3078 old = new; 3079 } 3080 } 3081 3082 *isa_str = new; 3083 } 3084 3085 char *riscv_isa_string(RISCVCPU *cpu) 3086 { 3087 RISCVCPUClass *mcc = RISCV_CPU_GET_CLASS(cpu); 3088 int i; 3089 const size_t maxlen = sizeof("rv128") + sizeof(riscv_single_letter_exts); 3090 char *isa_str = g_new(char, maxlen); 3091 int xlen = riscv_cpu_max_xlen(mcc); 3092 char *p = isa_str + snprintf(isa_str, maxlen, "rv%d", xlen); 3093 3094 for (i = 0; i < sizeof(riscv_single_letter_exts) - 1; i++) { 3095 if (cpu->env.misa_ext & RV(riscv_single_letter_exts[i])) { 3096 *p++ = qemu_tolower(riscv_single_letter_exts[i]); 3097 } 3098 } 3099 *p = '\0'; 3100 if (!cpu->cfg.short_isa_string) { 3101 riscv_isa_string_ext(cpu, &isa_str, maxlen); 3102 } 3103 return isa_str; 3104 } 3105 3106 #ifndef CONFIG_USER_ONLY 3107 static char **riscv_isa_extensions_list(RISCVCPU *cpu, int *count) 3108 { 3109 int maxlen = ARRAY_SIZE(riscv_single_letter_exts) + ARRAY_SIZE(isa_edata_arr); 3110 char **extensions = g_new(char *, maxlen); 3111 3112 for (int i = 0; i < sizeof(riscv_single_letter_exts) - 1; i++) { 3113 if (cpu->env.misa_ext & RV(riscv_single_letter_exts[i])) { 3114 extensions[*count] = g_new(char, 2); 3115 snprintf(extensions[*count], 2, "%c", 3116 qemu_tolower(riscv_single_letter_exts[i])); 3117 (*count)++; 3118 } 3119 } 3120 3121 for (const RISCVIsaExtData *edata = isa_edata_arr; edata->name; edata++) { 3122 if (isa_ext_is_enabled(cpu, edata->ext_enable_offset)) { 3123 extensions[*count] = g_strdup(edata->name); 3124 (*count)++; 3125 } 3126 } 3127 3128 return extensions; 3129 } 3130 3131 void riscv_isa_write_fdt(RISCVCPU *cpu, void *fdt, char *nodename) 3132 { 3133 RISCVCPUClass *mcc = RISCV_CPU_GET_CLASS(cpu); 3134 const size_t maxlen = sizeof("rv128i"); 3135 g_autofree char *isa_base = g_new(char, maxlen); 3136 g_autofree char *riscv_isa; 3137 char **isa_extensions; 3138 int count = 0; 3139 int xlen = riscv_cpu_max_xlen(mcc); 3140 3141 riscv_isa = riscv_isa_string(cpu); 3142 qemu_fdt_setprop_string(fdt, nodename, "riscv,isa", riscv_isa); 3143 3144 snprintf(isa_base, maxlen, "rv%di", xlen); 3145 qemu_fdt_setprop_string(fdt, nodename, "riscv,isa-base", isa_base); 3146 3147 isa_extensions = 
riscv_isa_extensions_list(cpu, &count); 3148 qemu_fdt_setprop_string_array(fdt, nodename, "riscv,isa-extensions", 3149 isa_extensions, count); 3150 3151 for (int i = 0; i < count; i++) { 3152 g_free(isa_extensions[i]); 3153 } 3154 3155 g_free(isa_extensions); 3156 } 3157 #endif 3158 3159 #define DEFINE_DYNAMIC_CPU(type_name, misa_mxl_max, initfn) \ 3160 { \ 3161 .name = (type_name), \ 3162 .parent = TYPE_RISCV_DYNAMIC_CPU, \ 3163 .instance_init = (initfn), \ 3164 .class_init = riscv_cpu_class_init, \ 3165 .class_data = GUINT_TO_POINTER(misa_mxl_max) \ 3166 } 3167 3168 #define DEFINE_VENDOR_CPU(type_name, misa_mxl_max, initfn) \ 3169 { \ 3170 .name = (type_name), \ 3171 .parent = TYPE_RISCV_VENDOR_CPU, \ 3172 .instance_init = (initfn), \ 3173 .class_init = riscv_cpu_class_init, \ 3174 .class_data = GUINT_TO_POINTER(misa_mxl_max) \ 3175 } 3176 3177 #define DEFINE_BARE_CPU(type_name, misa_mxl_max, initfn) \ 3178 { \ 3179 .name = (type_name), \ 3180 .parent = TYPE_RISCV_BARE_CPU, \ 3181 .instance_init = (initfn), \ 3182 .class_init = riscv_cpu_class_init, \ 3183 .class_data = GUINT_TO_POINTER(misa_mxl_max) \ 3184 } 3185 3186 #define DEFINE_PROFILE_CPU(type_name, misa_mxl_max, initfn) \ 3187 { \ 3188 .name = (type_name), \ 3189 .parent = TYPE_RISCV_BARE_CPU, \ 3190 .instance_init = (initfn), \ 3191 .class_init = riscv_cpu_class_init, \ 3192 .class_data = GUINT_TO_POINTER(misa_mxl_max) \ 3193 } 3194 3195 static const TypeInfo riscv_cpu_type_infos[] = { 3196 { 3197 .name = TYPE_RISCV_CPU, 3198 .parent = TYPE_CPU, 3199 .instance_size = sizeof(RISCVCPU), 3200 .instance_align = __alignof(RISCVCPU), 3201 .instance_init = riscv_cpu_init, 3202 .instance_post_init = riscv_cpu_post_init, 3203 .abstract = true, 3204 .class_size = sizeof(RISCVCPUClass), 3205 .class_init = riscv_cpu_common_class_init, 3206 }, 3207 { 3208 .name = TYPE_RISCV_DYNAMIC_CPU, 3209 .parent = TYPE_RISCV_CPU, 3210 .abstract = true, 3211 }, 3212 { 3213 .name = TYPE_RISCV_VENDOR_CPU, 3214 .parent = TYPE_RISCV_CPU, 3215 .abstract = true, 3216 }, 3217 { 3218 .name = TYPE_RISCV_BARE_CPU, 3219 .parent = TYPE_RISCV_CPU, 3220 .instance_init = riscv_bare_cpu_init, 3221 .abstract = true, 3222 }, 3223 #if defined(TARGET_RISCV32) 3224 DEFINE_DYNAMIC_CPU(TYPE_RISCV_CPU_MAX, MXL_RV32, riscv_max_cpu_init), 3225 #elif defined(TARGET_RISCV64) 3226 DEFINE_DYNAMIC_CPU(TYPE_RISCV_CPU_MAX, MXL_RV64, riscv_max_cpu_init), 3227 #endif 3228 3229 #if defined(TARGET_RISCV32) || \ 3230 (defined(TARGET_RISCV64) && !defined(CONFIG_USER_ONLY)) 3231 DEFINE_DYNAMIC_CPU(TYPE_RISCV_CPU_BASE32, MXL_RV32, rv32_base_cpu_init), 3232 DEFINE_VENDOR_CPU(TYPE_RISCV_CPU_IBEX, MXL_RV32, rv32_ibex_cpu_init), 3233 DEFINE_VENDOR_CPU(TYPE_RISCV_CPU_SIFIVE_E31, MXL_RV32, rv32_sifive_e_cpu_init), 3234 DEFINE_VENDOR_CPU(TYPE_RISCV_CPU_SIFIVE_E34, MXL_RV32, rv32_imafcu_nommu_cpu_init), 3235 DEFINE_VENDOR_CPU(TYPE_RISCV_CPU_SIFIVE_U34, MXL_RV32, rv32_sifive_u_cpu_init), 3236 DEFINE_BARE_CPU(TYPE_RISCV_CPU_RV32I, MXL_RV32, rv32i_bare_cpu_init), 3237 DEFINE_BARE_CPU(TYPE_RISCV_CPU_RV32E, MXL_RV32, rv32e_bare_cpu_init), 3238 #endif 3239 3240 #if (defined(TARGET_RISCV64) && !defined(CONFIG_USER_ONLY)) 3241 DEFINE_DYNAMIC_CPU(TYPE_RISCV_CPU_MAX32, MXL_RV32, riscv_max_cpu_init), 3242 #endif 3243 3244 #if defined(TARGET_RISCV64) 3245 DEFINE_DYNAMIC_CPU(TYPE_RISCV_CPU_BASE64, MXL_RV64, rv64_base_cpu_init), 3246 DEFINE_VENDOR_CPU(TYPE_RISCV_CPU_SIFIVE_E51, MXL_RV64, rv64_sifive_e_cpu_init), 3247 DEFINE_VENDOR_CPU(TYPE_RISCV_CPU_SIFIVE_U54, MXL_RV64, rv64_sifive_u_cpu_init), 3248 
DEFINE_VENDOR_CPU(TYPE_RISCV_CPU_SHAKTI_C, MXL_RV64, rv64_sifive_u_cpu_init), 3249 DEFINE_VENDOR_CPU(TYPE_RISCV_CPU_THEAD_C906, MXL_RV64, rv64_thead_c906_cpu_init), 3250 DEFINE_VENDOR_CPU(TYPE_RISCV_CPU_TT_ASCALON, MXL_RV64, rv64_tt_ascalon_cpu_init), 3251 DEFINE_VENDOR_CPU(TYPE_RISCV_CPU_VEYRON_V1, MXL_RV64, rv64_veyron_v1_cpu_init), 3252 DEFINE_VENDOR_CPU(TYPE_RISCV_CPU_XIANGSHAN_NANHU, 3253 MXL_RV64, rv64_xiangshan_nanhu_cpu_init), 3254 #if defined(CONFIG_TCG) && !defined(CONFIG_USER_ONLY) 3255 DEFINE_DYNAMIC_CPU(TYPE_RISCV_CPU_BASE128, MXL_RV128, rv128_base_cpu_init), 3256 #endif /* CONFIG_TCG && !CONFIG_USER_ONLY */ 3257 DEFINE_BARE_CPU(TYPE_RISCV_CPU_RV64I, MXL_RV64, rv64i_bare_cpu_init), 3258 DEFINE_BARE_CPU(TYPE_RISCV_CPU_RV64E, MXL_RV64, rv64e_bare_cpu_init), 3259 DEFINE_PROFILE_CPU(TYPE_RISCV_CPU_RVA22U64, MXL_RV64, rva22u64_profile_cpu_init), 3260 DEFINE_PROFILE_CPU(TYPE_RISCV_CPU_RVA22S64, MXL_RV64, rva22s64_profile_cpu_init), 3261 DEFINE_PROFILE_CPU(TYPE_RISCV_CPU_RVA23U64, MXL_RV64, rva23u64_profile_cpu_init), 3262 DEFINE_PROFILE_CPU(TYPE_RISCV_CPU_RVA23S64, MXL_RV64, rva23s64_profile_cpu_init), 3263 #endif /* TARGET_RISCV64 */ 3264 }; 3265 3266 DEFINE_TYPES(riscv_cpu_type_infos) 3267
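/*
 * Note (not part of the original file): DEFINE_TYPES() registers every
 * entry of riscv_cpu_type_infos[] with QOM at startup. A board or the
 * '-cpu' command line option then resolves a model name, e.g.
 * "sifive-u54" or "rva23s64", through riscv_cpu_class_by_name() before
 * the CPU object is created.
 */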