/*
 * QEMU RISC-V CPU
 *
 * Copyright (c) 2016-2017 Sagar Karandikar, sagark@eecs.berkeley.edu
 * Copyright (c) 2017-2018 SiFive, Inc.
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms and conditions of the GNU General Public License,
 * version 2 or later, as published by the Free Software Foundation.
 *
 * This program is distributed in the hope it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
 * more details.
 *
 * You should have received a copy of the GNU General Public License along with
 * this program. If not, see <http://www.gnu.org/licenses/>.
 */

#include "qemu/osdep.h"
#include "qemu/qemu-print.h"
#include "qemu/ctype.h"
#include "qemu/log.h"
#include "cpu.h"
#include "cpu_vendorid.h"
#include "internals.h"
#include "qapi/error.h"
#include "qapi/visitor.h"
#include "qemu/error-report.h"
#include "hw/qdev-properties.h"
#include "hw/core/qdev-prop-internal.h"
#include "migration/vmstate.h"
#include "fpu/softfloat-helpers.h"
#include "system/device_tree.h"
#include "system/kvm.h"
#include "system/tcg.h"
#include "kvm/kvm_riscv.h"
#include "tcg/tcg-cpu.h"
#include "tcg/tcg.h"

/* RISC-V CPU definitions */
static const char riscv_single_letter_exts[] = "IEMAFDQCBPVH";
const uint32_t misa_bits[] = {RVI, RVE, RVM, RVA, RVF, RVD, RVV,
                              RVC, RVS, RVU, RVH, RVG, RVB, 0};

/*
 * From vector_helper.c
 * Note that vector data is stored in host-endian 64-bit chunks,
 * so addressing bytes needs a host-endian fixup.
 */
#if HOST_BIG_ENDIAN
#define BYTE(x)   ((x) ^ 7)
#else
#define BYTE(x)   (x)
#endif

bool riscv_cpu_is_32bit(RISCVCPU *cpu)
{
    return riscv_cpu_mxl(&cpu->env) == MXL_RV32;
}

/* Hash that stores general user set numeric options */
static GHashTable *general_user_opts;

static void cpu_option_add_user_setting(const char *optname, uint32_t value)
{
    g_hash_table_insert(general_user_opts, (gpointer)optname,
                        GUINT_TO_POINTER(value));
}

bool riscv_cpu_option_set(const char *optname)
{
    return g_hash_table_contains(general_user_opts, optname);
}

static void riscv_cpu_cfg_merge(RISCVCPUConfig *dest, const RISCVCPUConfig *src)
{
#define BOOL_FIELD(x) dest->x |= src->x;
#define TYPED_FIELD(type, x, default_) if (src->x != default_) dest->x = src->x;
#include "cpu_cfg_fields.h.inc"
}

#define ISA_EXT_DATA_ENTRY(_name, _min_ver, _prop) \
    {#_name, _min_ver, CPU_CFG_OFFSET(_prop)}

/*
 * Here are the ordering rules of extension naming defined by the RISC-V
 * specification:
 * 1. All extensions should be separated from other multi-letter extensions
 *    by an underscore.
 * 2. The first letter following the 'Z' conventionally indicates the most
 *    closely related alphabetical extension category, IMAFDQLCBKJTPVH.
 *    If multiple 'Z' extensions are named, they should be ordered first
 *    by category, then alphabetically within a category.
 * 3. Standard supervisor-level extensions (starting with 'S') should be
 *    listed after standard unprivileged extensions. If multiple
 *    supervisor-level extensions are listed, they should be ordered
 *    alphabetically.
 * 4. Non-standard extensions (starting with 'X') must be listed after all
 *    standard extensions.
 *    They must be separated from other multi-letter
 *    extensions by an underscore.
 *
 * Single letter extensions are checked in riscv_cpu_validate_misa_priv()
 * instead.
 */
const RISCVIsaExtData isa_edata_arr[] = {
    ISA_EXT_DATA_ENTRY(zic64b, PRIV_VERSION_1_12_0, ext_zic64b),
    ISA_EXT_DATA_ENTRY(zicbom, PRIV_VERSION_1_12_0, ext_zicbom),
    ISA_EXT_DATA_ENTRY(zicbop, PRIV_VERSION_1_12_0, ext_zicbop),
    ISA_EXT_DATA_ENTRY(zicboz, PRIV_VERSION_1_12_0, ext_zicboz),
    ISA_EXT_DATA_ENTRY(ziccamoa, PRIV_VERSION_1_11_0, has_priv_1_11),
    ISA_EXT_DATA_ENTRY(ziccif, PRIV_VERSION_1_11_0, has_priv_1_11),
    ISA_EXT_DATA_ENTRY(zicclsm, PRIV_VERSION_1_11_0, has_priv_1_11),
    ISA_EXT_DATA_ENTRY(ziccrse, PRIV_VERSION_1_11_0, ext_ziccrse),
    ISA_EXT_DATA_ENTRY(zicfilp, PRIV_VERSION_1_12_0, ext_zicfilp),
    ISA_EXT_DATA_ENTRY(zicfiss, PRIV_VERSION_1_13_0, ext_zicfiss),
    ISA_EXT_DATA_ENTRY(zicond, PRIV_VERSION_1_12_0, ext_zicond),
    ISA_EXT_DATA_ENTRY(zicntr, PRIV_VERSION_1_12_0, ext_zicntr),
    ISA_EXT_DATA_ENTRY(zicsr, PRIV_VERSION_1_10_0, ext_zicsr),
    ISA_EXT_DATA_ENTRY(zifencei, PRIV_VERSION_1_10_0, ext_zifencei),
    ISA_EXT_DATA_ENTRY(zihintntl, PRIV_VERSION_1_10_0, ext_zihintntl),
    ISA_EXT_DATA_ENTRY(zihintpause, PRIV_VERSION_1_10_0, ext_zihintpause),
    ISA_EXT_DATA_ENTRY(zihpm, PRIV_VERSION_1_12_0, ext_zihpm),
    ISA_EXT_DATA_ENTRY(zimop, PRIV_VERSION_1_13_0, ext_zimop),
    ISA_EXT_DATA_ENTRY(zmmul, PRIV_VERSION_1_12_0, ext_zmmul),
    ISA_EXT_DATA_ENTRY(za64rs, PRIV_VERSION_1_12_0, has_priv_1_12),
    ISA_EXT_DATA_ENTRY(zaamo, PRIV_VERSION_1_12_0, ext_zaamo),
    ISA_EXT_DATA_ENTRY(zabha, PRIV_VERSION_1_13_0, ext_zabha),
    ISA_EXT_DATA_ENTRY(zacas, PRIV_VERSION_1_12_0, ext_zacas),
    ISA_EXT_DATA_ENTRY(zalrsc, PRIV_VERSION_1_12_0, ext_zalrsc),
    ISA_EXT_DATA_ENTRY(zama16b, PRIV_VERSION_1_13_0, ext_zama16b),
    ISA_EXT_DATA_ENTRY(zawrs, PRIV_VERSION_1_12_0, ext_zawrs),
    ISA_EXT_DATA_ENTRY(zfa, PRIV_VERSION_1_12_0, ext_zfa),
    ISA_EXT_DATA_ENTRY(zfbfmin, PRIV_VERSION_1_12_0, ext_zfbfmin),
    ISA_EXT_DATA_ENTRY(zfh, PRIV_VERSION_1_11_0, ext_zfh),
    ISA_EXT_DATA_ENTRY(zfhmin, PRIV_VERSION_1_11_0, ext_zfhmin),
    ISA_EXT_DATA_ENTRY(zfinx, PRIV_VERSION_1_12_0, ext_zfinx),
    ISA_EXT_DATA_ENTRY(zdinx, PRIV_VERSION_1_12_0, ext_zdinx),
    ISA_EXT_DATA_ENTRY(zca, PRIV_VERSION_1_12_0, ext_zca),
    ISA_EXT_DATA_ENTRY(zcb, PRIV_VERSION_1_12_0, ext_zcb),
    ISA_EXT_DATA_ENTRY(zcf, PRIV_VERSION_1_12_0, ext_zcf),
    ISA_EXT_DATA_ENTRY(zcd, PRIV_VERSION_1_12_0, ext_zcd),
    ISA_EXT_DATA_ENTRY(zce, PRIV_VERSION_1_12_0, ext_zce),
    ISA_EXT_DATA_ENTRY(zcmop, PRIV_VERSION_1_13_0, ext_zcmop),
    ISA_EXT_DATA_ENTRY(zcmp, PRIV_VERSION_1_12_0, ext_zcmp),
    ISA_EXT_DATA_ENTRY(zcmt, PRIV_VERSION_1_12_0, ext_zcmt),
    ISA_EXT_DATA_ENTRY(zba, PRIV_VERSION_1_12_0, ext_zba),
    ISA_EXT_DATA_ENTRY(zbb, PRIV_VERSION_1_12_0, ext_zbb),
    ISA_EXT_DATA_ENTRY(zbc, PRIV_VERSION_1_12_0, ext_zbc),
    ISA_EXT_DATA_ENTRY(zbkb, PRIV_VERSION_1_12_0, ext_zbkb),
    ISA_EXT_DATA_ENTRY(zbkc, PRIV_VERSION_1_12_0, ext_zbkc),
    ISA_EXT_DATA_ENTRY(zbkx, PRIV_VERSION_1_12_0, ext_zbkx),
    ISA_EXT_DATA_ENTRY(zbs, PRIV_VERSION_1_12_0, ext_zbs),
    ISA_EXT_DATA_ENTRY(zk, PRIV_VERSION_1_12_0, ext_zk),
    ISA_EXT_DATA_ENTRY(zkn, PRIV_VERSION_1_12_0, ext_zkn),
    ISA_EXT_DATA_ENTRY(zknd, PRIV_VERSION_1_12_0, ext_zknd),
    ISA_EXT_DATA_ENTRY(zkne, PRIV_VERSION_1_12_0, ext_zkne),
    ISA_EXT_DATA_ENTRY(zknh, PRIV_VERSION_1_12_0, ext_zknh),
    ISA_EXT_DATA_ENTRY(zkr, PRIV_VERSION_1_12_0, ext_zkr),
    ISA_EXT_DATA_ENTRY(zks, PRIV_VERSION_1_12_0, ext_zks),
    ISA_EXT_DATA_ENTRY(zksed, PRIV_VERSION_1_12_0, ext_zksed),
    ISA_EXT_DATA_ENTRY(zksh, PRIV_VERSION_1_12_0, ext_zksh),
    ISA_EXT_DATA_ENTRY(zkt, PRIV_VERSION_1_12_0, ext_zkt),
    ISA_EXT_DATA_ENTRY(ztso, PRIV_VERSION_1_12_0, ext_ztso),
    ISA_EXT_DATA_ENTRY(zvbb, PRIV_VERSION_1_12_0, ext_zvbb),
    ISA_EXT_DATA_ENTRY(zvbc, PRIV_VERSION_1_12_0, ext_zvbc),
    ISA_EXT_DATA_ENTRY(zve32f, PRIV_VERSION_1_10_0, ext_zve32f),
    ISA_EXT_DATA_ENTRY(zve32x, PRIV_VERSION_1_10_0, ext_zve32x),
    ISA_EXT_DATA_ENTRY(zve64f, PRIV_VERSION_1_10_0, ext_zve64f),
    ISA_EXT_DATA_ENTRY(zve64d, PRIV_VERSION_1_10_0, ext_zve64d),
    ISA_EXT_DATA_ENTRY(zve64x, PRIV_VERSION_1_10_0, ext_zve64x),
    ISA_EXT_DATA_ENTRY(zvfbfmin, PRIV_VERSION_1_12_0, ext_zvfbfmin),
    ISA_EXT_DATA_ENTRY(zvfbfwma, PRIV_VERSION_1_12_0, ext_zvfbfwma),
    ISA_EXT_DATA_ENTRY(zvfh, PRIV_VERSION_1_12_0, ext_zvfh),
    ISA_EXT_DATA_ENTRY(zvfhmin, PRIV_VERSION_1_12_0, ext_zvfhmin),
    ISA_EXT_DATA_ENTRY(zvkb, PRIV_VERSION_1_12_0, ext_zvkb),
    ISA_EXT_DATA_ENTRY(zvkg, PRIV_VERSION_1_12_0, ext_zvkg),
    ISA_EXT_DATA_ENTRY(zvkn, PRIV_VERSION_1_12_0, ext_zvkn),
    ISA_EXT_DATA_ENTRY(zvknc, PRIV_VERSION_1_12_0, ext_zvknc),
    ISA_EXT_DATA_ENTRY(zvkned, PRIV_VERSION_1_12_0, ext_zvkned),
    ISA_EXT_DATA_ENTRY(zvkng, PRIV_VERSION_1_12_0, ext_zvkng),
    ISA_EXT_DATA_ENTRY(zvknha, PRIV_VERSION_1_12_0, ext_zvknha),
    ISA_EXT_DATA_ENTRY(zvknhb, PRIV_VERSION_1_12_0, ext_zvknhb),
    ISA_EXT_DATA_ENTRY(zvks, PRIV_VERSION_1_12_0, ext_zvks),
    ISA_EXT_DATA_ENTRY(zvksc, PRIV_VERSION_1_12_0, ext_zvksc),
    ISA_EXT_DATA_ENTRY(zvksed, PRIV_VERSION_1_12_0, ext_zvksed),
    ISA_EXT_DATA_ENTRY(zvksg, PRIV_VERSION_1_12_0, ext_zvksg),
    ISA_EXT_DATA_ENTRY(zvksh, PRIV_VERSION_1_12_0, ext_zvksh),
    ISA_EXT_DATA_ENTRY(zvkt, PRIV_VERSION_1_12_0, ext_zvkt),
    ISA_EXT_DATA_ENTRY(zhinx, PRIV_VERSION_1_12_0, ext_zhinx),
    ISA_EXT_DATA_ENTRY(zhinxmin, PRIV_VERSION_1_12_0, ext_zhinxmin),
    ISA_EXT_DATA_ENTRY(sdtrig, PRIV_VERSION_1_12_0, debug),
    ISA_EXT_DATA_ENTRY(shcounterenw, PRIV_VERSION_1_12_0, has_priv_1_12),
    ISA_EXT_DATA_ENTRY(sha, PRIV_VERSION_1_12_0, ext_sha),
    ISA_EXT_DATA_ENTRY(shgatpa, PRIV_VERSION_1_12_0, has_priv_1_12),
    ISA_EXT_DATA_ENTRY(shtvala, PRIV_VERSION_1_12_0, has_priv_1_12),
    ISA_EXT_DATA_ENTRY(shvsatpa, PRIV_VERSION_1_12_0, has_priv_1_12),
    ISA_EXT_DATA_ENTRY(shvstvala, PRIV_VERSION_1_12_0, has_priv_1_12),
    ISA_EXT_DATA_ENTRY(shvstvecd, PRIV_VERSION_1_12_0, has_priv_1_12),
    ISA_EXT_DATA_ENTRY(smaia, PRIV_VERSION_1_12_0, ext_smaia),
    ISA_EXT_DATA_ENTRY(smcdeleg, PRIV_VERSION_1_13_0, ext_smcdeleg),
    ISA_EXT_DATA_ENTRY(smcntrpmf, PRIV_VERSION_1_12_0, ext_smcntrpmf),
    ISA_EXT_DATA_ENTRY(smcsrind, PRIV_VERSION_1_13_0, ext_smcsrind),
    ISA_EXT_DATA_ENTRY(smdbltrp, PRIV_VERSION_1_13_0, ext_smdbltrp),
    ISA_EXT_DATA_ENTRY(smepmp, PRIV_VERSION_1_12_0, ext_smepmp),
    ISA_EXT_DATA_ENTRY(smrnmi, PRIV_VERSION_1_12_0, ext_smrnmi),
    ISA_EXT_DATA_ENTRY(smmpm, PRIV_VERSION_1_13_0, ext_smmpm),
    ISA_EXT_DATA_ENTRY(smnpm, PRIV_VERSION_1_13_0, ext_smnpm),
    ISA_EXT_DATA_ENTRY(smstateen, PRIV_VERSION_1_12_0, ext_smstateen),
    ISA_EXT_DATA_ENTRY(ssaia, PRIV_VERSION_1_12_0, ext_ssaia),
    ISA_EXT_DATA_ENTRY(ssccfg, PRIV_VERSION_1_13_0, ext_ssccfg),
    ISA_EXT_DATA_ENTRY(ssccptr, PRIV_VERSION_1_11_0, has_priv_1_11),
    ISA_EXT_DATA_ENTRY(sscofpmf, PRIV_VERSION_1_12_0, ext_sscofpmf),
    ISA_EXT_DATA_ENTRY(sscounterenw, PRIV_VERSION_1_12_0, has_priv_1_12),
    ISA_EXT_DATA_ENTRY(sscsrind, PRIV_VERSION_1_12_0, ext_sscsrind),
    ISA_EXT_DATA_ENTRY(ssdbltrp, PRIV_VERSION_1_13_0, ext_ssdbltrp),
    ISA_EXT_DATA_ENTRY(ssnpm, PRIV_VERSION_1_13_0, ext_ssnpm),
    ISA_EXT_DATA_ENTRY(sspm, PRIV_VERSION_1_13_0, ext_sspm),
    ISA_EXT_DATA_ENTRY(ssstateen, PRIV_VERSION_1_12_0, ext_ssstateen),
    ISA_EXT_DATA_ENTRY(ssstrict, PRIV_VERSION_1_12_0, has_priv_1_12),
    ISA_EXT_DATA_ENTRY(sstc, PRIV_VERSION_1_12_0, ext_sstc),
    ISA_EXT_DATA_ENTRY(sstvala, PRIV_VERSION_1_12_0, has_priv_1_12),
    ISA_EXT_DATA_ENTRY(sstvecd, PRIV_VERSION_1_12_0, has_priv_1_12),
    ISA_EXT_DATA_ENTRY(ssu64xl, PRIV_VERSION_1_12_0, has_priv_1_12),
    ISA_EXT_DATA_ENTRY(supm, PRIV_VERSION_1_13_0, ext_supm),
    ISA_EXT_DATA_ENTRY(svade, PRIV_VERSION_1_11_0, ext_svade),
    ISA_EXT_DATA_ENTRY(smctr, PRIV_VERSION_1_12_0, ext_smctr),
    ISA_EXT_DATA_ENTRY(ssctr, PRIV_VERSION_1_12_0, ext_ssctr),
    ISA_EXT_DATA_ENTRY(svadu, PRIV_VERSION_1_12_0, ext_svadu),
    ISA_EXT_DATA_ENTRY(svinval, PRIV_VERSION_1_12_0, ext_svinval),
    ISA_EXT_DATA_ENTRY(svnapot, PRIV_VERSION_1_12_0, ext_svnapot),
    ISA_EXT_DATA_ENTRY(svpbmt, PRIV_VERSION_1_12_0, ext_svpbmt),
    ISA_EXT_DATA_ENTRY(svukte, PRIV_VERSION_1_13_0, ext_svukte),
    ISA_EXT_DATA_ENTRY(svvptc, PRIV_VERSION_1_13_0, ext_svvptc),
    ISA_EXT_DATA_ENTRY(xtheadba, PRIV_VERSION_1_11_0, ext_xtheadba),
    ISA_EXT_DATA_ENTRY(xtheadbb, PRIV_VERSION_1_11_0, ext_xtheadbb),
    ISA_EXT_DATA_ENTRY(xtheadbs, PRIV_VERSION_1_11_0, ext_xtheadbs),
    ISA_EXT_DATA_ENTRY(xtheadcmo, PRIV_VERSION_1_11_0, ext_xtheadcmo),
    ISA_EXT_DATA_ENTRY(xtheadcondmov, PRIV_VERSION_1_11_0, ext_xtheadcondmov),
    ISA_EXT_DATA_ENTRY(xtheadfmemidx, PRIV_VERSION_1_11_0, ext_xtheadfmemidx),
    ISA_EXT_DATA_ENTRY(xtheadfmv, PRIV_VERSION_1_11_0, ext_xtheadfmv),
    ISA_EXT_DATA_ENTRY(xtheadmac, PRIV_VERSION_1_11_0, ext_xtheadmac),
    ISA_EXT_DATA_ENTRY(xtheadmemidx, PRIV_VERSION_1_11_0, ext_xtheadmemidx),
    ISA_EXT_DATA_ENTRY(xtheadmempair, PRIV_VERSION_1_11_0, ext_xtheadmempair),
    ISA_EXT_DATA_ENTRY(xtheadsync, PRIV_VERSION_1_11_0, ext_xtheadsync),
    ISA_EXT_DATA_ENTRY(xventanacondops, PRIV_VERSION_1_12_0, ext_XVentanaCondOps),

    { },
};

bool isa_ext_is_enabled(RISCVCPU *cpu, uint32_t ext_offset)
{
    bool *ext_enabled = (void *)&cpu->cfg + ext_offset;

    return *ext_enabled;
}

void isa_ext_update_enabled(RISCVCPU *cpu, uint32_t ext_offset, bool en)
{
    bool *ext_enabled = (void *)&cpu->cfg + ext_offset;

    *ext_enabled = en;
}

bool riscv_cpu_is_vendor(Object *cpu_obj)
{
    return object_dynamic_cast(cpu_obj, TYPE_RISCV_VENDOR_CPU) != NULL;
}

const char * const riscv_int_regnames[] = {
    "x0/zero", "x1/ra", "x2/sp", "x3/gp", "x4/tp", "x5/t0", "x6/t1",
    "x7/t2", "x8/s0", "x9/s1", "x10/a0", "x11/a1", "x12/a2", "x13/a3",
    "x14/a4", "x15/a5", "x16/a6", "x17/a7", "x18/s2", "x19/s3", "x20/s4",
    "x21/s5", "x22/s6", "x23/s7", "x24/s8", "x25/s9", "x26/s10", "x27/s11",
    "x28/t3", "x29/t4", "x30/t5", "x31/t6"
};

const char * const riscv_int_regnamesh[] = {
    "x0h/zeroh", "x1h/rah", "x2h/sph", "x3h/gph", "x4h/tph", "x5h/t0h",
    "x6h/t1h", "x7h/t2h", "x8h/s0h", "x9h/s1h", "x10h/a0h", "x11h/a1h",
    "x12h/a2h", "x13h/a3h", "x14h/a4h", "x15h/a5h", "x16h/a6h", "x17h/a7h",
    "x18h/s2h", "x19h/s3h", "x20h/s4h", "x21h/s5h", "x22h/s6h", "x23h/s7h",
    "x24h/s8h", "x25h/s9h", "x26h/s10h", "x27h/s11h", "x28h/t3h", "x29h/t4h",
    "x30h/t5h", "x31h/t6h"
};

const char * const riscv_fpr_regnames[] = {
    "f0/ft0", "f1/ft1", "f2/ft2", "f3/ft3", "f4/ft4", "f5/ft5",
    "f6/ft6", "f7/ft7", "f8/fs0", "f9/fs1", "f10/fa0", "f11/fa1",
    "f12/fa2", "f13/fa3", "f14/fa4", "f15/fa5", "f16/fa6", "f17/fa7",
    "f18/fs2", "f19/fs3", "f20/fs4", "f21/fs5", "f22/fs6", "f23/fs7",
    "f24/fs8", "f25/fs9", "f26/fs10", "f27/fs11", "f28/ft8", "f29/ft9",
    "f30/ft10", "f31/ft11"
};

const char * const riscv_rvv_regnames[] = {
    "v0", "v1", "v2", "v3", "v4", "v5", "v6",
    "v7", "v8", "v9", "v10", "v11", "v12", "v13",
    "v14", "v15", "v16", "v17", "v18", "v19", "v20",
    "v21", "v22", "v23", "v24", "v25", "v26", "v27",
    "v28", "v29", "v30", "v31"
};

static const char * const riscv_excp_names[] = {
    "misaligned_fetch",
    "fault_fetch",
    "illegal_instruction",
    "breakpoint",
    "misaligned_load",
    "fault_load",
    "misaligned_store",
    "fault_store",
    "user_ecall",
    "supervisor_ecall",
    "hypervisor_ecall",
    "machine_ecall",
    "exec_page_fault",
    "load_page_fault",
    "reserved",
    "store_page_fault",
    "double_trap",
    "reserved",
    "reserved",
    "reserved",
    "guest_exec_page_fault",
    "guest_load_page_fault",
    "reserved",
    "guest_store_page_fault",
};

static const char * const riscv_intr_names[] = {
    "u_software",
    "s_software",
    "vs_software",
    "m_software",
    "u_timer",
    "s_timer",
    "vs_timer",
    "m_timer",
    "u_external",
    "s_external",
    "vs_external",
    "m_external",
    "reserved",
    "reserved",
    "reserved",
    "reserved"
};

const char *riscv_cpu_get_trap_name(target_ulong cause, bool async)
{
    if (async) {
        return (cause < ARRAY_SIZE(riscv_intr_names)) ?
               riscv_intr_names[cause] : "(unknown)";
    } else {
        return (cause < ARRAY_SIZE(riscv_excp_names)) ?
               riscv_excp_names[cause] : "(unknown)";
    }
}

void riscv_cpu_set_misa_ext(CPURISCVState *env, uint32_t ext)
{
    env->misa_ext_mask = env->misa_ext = ext;
}

int riscv_cpu_max_xlen(RISCVCPUClass *mcc)
{
    return 16 << mcc->def->misa_mxl_max;
}

#ifndef CONFIG_USER_ONLY
static uint8_t satp_mode_from_str(const char *satp_mode_str)
{
    if (!strncmp(satp_mode_str, "mbare", 5)) {
        return VM_1_10_MBARE;
    }

    if (!strncmp(satp_mode_str, "sv32", 4)) {
        return VM_1_10_SV32;
    }

    if (!strncmp(satp_mode_str, "sv39", 4)) {
        return VM_1_10_SV39;
    }

    if (!strncmp(satp_mode_str, "sv48", 4)) {
        return VM_1_10_SV48;
    }

    if (!strncmp(satp_mode_str, "sv57", 4)) {
        return VM_1_10_SV57;
    }

    if (!strncmp(satp_mode_str, "sv64", 4)) {
        return VM_1_10_SV64;
    }

    g_assert_not_reached();
}

static uint8_t satp_mode_max_from_map(uint32_t map)
{
    /*
     * 'map = 0' will make us return (31 - 32), which C will
     * happily overflow to UINT_MAX. There's no good result to
     * return if 'map = 0' (e.g. returning 0 will be ambiguous
     * with the result for 'map = 1').
     *
     * Assert out if map = 0. Callers will have to deal with
     * it outside of this function.
     */
    g_assert(map > 0);

    /* map here has at least one bit set, so no problem with clz */
    return 31 - __builtin_clz(map);
}

const char *satp_mode_str(uint8_t satp_mode, bool is_32_bit)
{
    if (is_32_bit) {
        switch (satp_mode) {
        case VM_1_10_SV32:
            return "sv32";
        case VM_1_10_MBARE:
            return "none";
        }
    } else {
        switch (satp_mode) {
        case VM_1_10_SV64:
            return "sv64";
        case VM_1_10_SV57:
            return "sv57";
        case VM_1_10_SV48:
            return "sv48";
        case VM_1_10_SV39:
            return "sv39";
        case VM_1_10_MBARE:
            return "none";
        }
    }

    g_assert_not_reached();
}

static bool get_satp_mode_supported(RISCVCPU *cpu, uint16_t *supported)
{
    bool rv32 = riscv_cpu_is_32bit(cpu);
    const bool *valid_vm = rv32 ? valid_vm_1_10_32 : valid_vm_1_10_64;
    int satp_mode = cpu->cfg.max_satp_mode;

    if (satp_mode == -1) {
        return false;
    }

    *supported = 0;
    for (int i = 0; i <= satp_mode; ++i) {
        if (valid_vm[i]) {
            *supported |= (1 << i);
        }
    }
    return true;
}

/* Set the satp mode to the max supported */
static void set_satp_mode_default_map(RISCVCPU *cpu)
{
    /*
     * Bare CPUs do not default to the max available.
     * Users must set a valid satp_mode in the command
     * line. Otherwise, leave the existing max_satp_mode
     * in place.
     */
    if (object_dynamic_cast(OBJECT(cpu), TYPE_RISCV_BARE_CPU) != NULL) {
        warn_report("No satp mode set. Defaulting to 'bare'");
        cpu->cfg.max_satp_mode = VM_1_10_MBARE;
    }
}
#endif

#ifndef CONFIG_USER_ONLY
static void riscv_register_custom_csrs(RISCVCPU *cpu, const RISCVCSR *csr_list)
{
    for (size_t i = 0; csr_list[i].csr_ops.name; i++) {
        int csrno = csr_list[i].csrno;
        const riscv_csr_operations *csr_ops = &csr_list[i].csr_ops;
        if (!csr_list[i].insertion_test || csr_list[i].insertion_test(cpu)) {
            riscv_set_csr_ops(csrno, csr_ops);
        }
    }
}
#endif

static ObjectClass *riscv_cpu_class_by_name(const char *cpu_model)
{
    ObjectClass *oc;
    char *typename;
    char **cpuname;

    cpuname = g_strsplit(cpu_model, ",", 1);
    typename = g_strdup_printf(RISCV_CPU_TYPE_NAME("%s"), cpuname[0]);
    oc = object_class_by_name(typename);
    g_strfreev(cpuname);
    g_free(typename);

    return oc;
}

char *riscv_cpu_get_name(RISCVCPU *cpu)
{
    RISCVCPUClass *rcc = RISCV_CPU_GET_CLASS(cpu);
    const char *typename = object_class_get_name(OBJECT_CLASS(rcc));

    g_assert(g_str_has_suffix(typename, RISCV_CPU_TYPE_SUFFIX));

    return cpu_model_from_type(typename);
}

static void riscv_cpu_dump_state(CPUState *cs, FILE *f, int flags)
{
    RISCVCPU *cpu = RISCV_CPU(cs);
    CPURISCVState *env = &cpu->env;
    int i, j;
    uint8_t *p;

#if !defined(CONFIG_USER_ONLY)
    if (riscv_has_ext(env, RVH)) {
        qemu_fprintf(f, " %s %d\n", "V = ", env->virt_enabled);
    }
#endif
    qemu_fprintf(f, " %s " TARGET_FMT_lx "\n", "pc ", env->pc);
#ifndef CONFIG_USER_ONLY
    {
        static const int dump_csrs[] = {
            CSR_MHARTID,
            CSR_MSTATUS,
            CSR_MSTATUSH,
            /*
             * CSR_SSTATUS is intentionally omitted here as its value
             * can be figured out by looking at CSR_MSTATUS
             */
            CSR_HSTATUS,
            CSR_VSSTATUS,
            CSR_MIP,
            CSR_MIE,
            CSR_MIDELEG,
            CSR_HIDELEG,
            CSR_MEDELEG,
            CSR_HEDELEG,
            CSR_MTVEC,
            CSR_STVEC,
            CSR_VSTVEC,
            CSR_MEPC,
            CSR_SEPC,
            CSR_VSEPC,
            CSR_MCAUSE,
            CSR_SCAUSE,
            CSR_VSCAUSE,
            CSR_MTVAL,
            CSR_STVAL,
            CSR_HTVAL,
            CSR_MTVAL2,
            CSR_MSCRATCH,
            CSR_SSCRATCH,
            CSR_SATP,
        };

        for (i = 0; i < ARRAY_SIZE(dump_csrs); ++i) {
            int csrno = dump_csrs[i];
            target_ulong val = 0;
            RISCVException res = riscv_csrrw_debug(env, csrno, &val, 0, 0);

            /*
             * Rely on the smode, hmode, etc, predicates within csr.c
             * to do the filtering of the registers that are present.
             */
            if (res == RISCV_EXCP_NONE) {
                qemu_fprintf(f, " %-8s " TARGET_FMT_lx "\n",
                             csr_ops[csrno].name, val);
            }
        }
    }
#endif

    for (i = 0; i < 32; i++) {
        qemu_fprintf(f, " %-8s " TARGET_FMT_lx,
                     riscv_int_regnames[i], env->gpr[i]);
        if ((i & 3) == 3) {
            qemu_fprintf(f, "\n");
        }
    }
    if (flags & CPU_DUMP_FPU) {
        target_ulong val = 0;
        RISCVException res = riscv_csrrw_debug(env, CSR_FCSR, &val, 0, 0);
        if (res == RISCV_EXCP_NONE) {
            qemu_fprintf(f, " %-8s " TARGET_FMT_lx "\n",
                         csr_ops[CSR_FCSR].name, val);
        }
        for (i = 0; i < 32; i++) {
            qemu_fprintf(f, " %-8s %016" PRIx64,
                         riscv_fpr_regnames[i], env->fpr[i]);
            if ((i & 3) == 3) {
                qemu_fprintf(f, "\n");
            }
        }
    }
    if (riscv_has_ext(env, RVV) && (flags & CPU_DUMP_VPU)) {
        static const int dump_rvv_csrs[] = {
            CSR_VSTART,
            CSR_VXSAT,
            CSR_VXRM,
            CSR_VCSR,
            CSR_VL,
            CSR_VTYPE,
            CSR_VLENB,
        };
        for (i = 0; i < ARRAY_SIZE(dump_rvv_csrs); ++i) {
            int csrno = dump_rvv_csrs[i];
            target_ulong val = 0;
            RISCVException res = riscv_csrrw_debug(env, csrno, &val, 0, 0);

            /*
             * Rely on the smode, hmode, etc, predicates within csr.c
             * to do the filtering of the registers that are present.
             */
            if (res == RISCV_EXCP_NONE) {
                qemu_fprintf(f, " %-8s " TARGET_FMT_lx "\n",
                             csr_ops[csrno].name, val);
            }
        }
        uint16_t vlenb = cpu->cfg.vlenb;

        for (i = 0; i < 32; i++) {
            qemu_fprintf(f, " %-8s ", riscv_rvv_regnames[i]);
            p = (uint8_t *)env->vreg;
            for (j = vlenb - 1 ; j >= 0; j--) {
                qemu_fprintf(f, "%02x", *(p + i * vlenb + BYTE(j)));
            }
            qemu_fprintf(f, "\n");
        }
    }
}

static void riscv_cpu_set_pc(CPUState *cs, vaddr value)
{
    RISCVCPU *cpu = RISCV_CPU(cs);
    CPURISCVState *env = &cpu->env;

    if (env->xl == MXL_RV32) {
        env->pc = (int32_t)value;
    } else {
        env->pc = value;
    }
}

static vaddr riscv_cpu_get_pc(CPUState *cs)
{
    RISCVCPU *cpu = RISCV_CPU(cs);
    CPURISCVState *env = &cpu->env;

    /* Match cpu_get_tb_cpu_state. */
    if (env->xl == MXL_RV32) {
        return env->pc & UINT32_MAX;
    }
    return env->pc;
}

#ifndef CONFIG_USER_ONLY
bool riscv_cpu_has_work(CPUState *cs)
{
    RISCVCPU *cpu = RISCV_CPU(cs);
    CPURISCVState *env = &cpu->env;
    /*
     * Definition of the WFI instruction requires it to ignore the privilege
     * mode and delegation registers, but respect individual enables
     */
    return riscv_cpu_all_pending(env) != 0 ||
        riscv_cpu_sirq_pending(env) != RISCV_EXCP_NONE ||
        riscv_cpu_vsirq_pending(env) != RISCV_EXCP_NONE;
}
#endif /* !CONFIG_USER_ONLY */

static void riscv_cpu_reset_hold(Object *obj, ResetType type)
{
#ifndef CONFIG_USER_ONLY
    uint8_t iprio;
    int i, irq, rdzero;
#endif
    CPUState *cs = CPU(obj);
    RISCVCPU *cpu = RISCV_CPU(cs);
    RISCVCPUClass *mcc = RISCV_CPU_GET_CLASS(obj);
    CPURISCVState *env = &cpu->env;

    if (mcc->parent_phases.hold) {
        mcc->parent_phases.hold(obj, type);
    }
#ifndef CONFIG_USER_ONLY
    env->misa_mxl = mcc->def->misa_mxl_max;
    env->priv = PRV_M;
    env->mstatus &= ~(MSTATUS_MIE | MSTATUS_MPRV);
    if (env->misa_mxl > MXL_RV32) {
        /*
         * The reset status of SXL/UXL is undefined, but mstatus is WARL
         * and we must ensure that the value after init is valid for read.
         */
        env->mstatus = set_field(env->mstatus, MSTATUS64_SXL, env->misa_mxl);
        env->mstatus = set_field(env->mstatus, MSTATUS64_UXL, env->misa_mxl);
        if (riscv_has_ext(env, RVH)) {
            env->vsstatus = set_field(env->vsstatus,
                                      MSTATUS64_SXL, env->misa_mxl);
            env->vsstatus = set_field(env->vsstatus,
                                      MSTATUS64_UXL, env->misa_mxl);
            env->mstatus_hs = set_field(env->mstatus_hs,
                                        MSTATUS64_SXL, env->misa_mxl);
            env->mstatus_hs = set_field(env->mstatus_hs,
                                        MSTATUS64_UXL, env->misa_mxl);
        }
        if (riscv_cpu_cfg(env)->ext_smdbltrp) {
            env->mstatus = set_field(env->mstatus, MSTATUS_MDT, 1);
        }
    }
    env->mcause = 0;
    env->miclaim = MIP_SGEIP;
    env->pc = env->resetvec;
    env->bins = 0;
    env->two_stage_lookup = false;

    env->menvcfg = (cpu->cfg.ext_svpbmt ? MENVCFG_PBMTE : 0) |
                   (!cpu->cfg.ext_svade && cpu->cfg.ext_svadu ?
                    MENVCFG_ADUE : 0);
    env->henvcfg = 0;

    /* Initialize default priorities of local interrupts. */
    for (i = 0; i < ARRAY_SIZE(env->miprio); i++) {
        iprio = riscv_cpu_default_priority(i);
        env->miprio[i] = (i == IRQ_M_EXT) ? 0 : iprio;
        env->siprio[i] = (i == IRQ_S_EXT) ? 0 : iprio;
        env->hviprio[i] = 0;
    }
    i = 0;
    while (!riscv_cpu_hviprio_index2irq(i, &irq, &rdzero)) {
        if (!rdzero) {
            env->hviprio[irq] = env->miprio[irq];
        }
        i++;
    }

    /*
     * Bits 10, 6, 2 and 12 of mideleg are read only 1 when the Hypervisor
     * extension is enabled.
     */
    if (riscv_has_ext(env, RVH)) {
        env->mideleg |= HS_MODE_INTERRUPTS;
    }

    /*
     * Clear mseccfg and unlock all the PMP entries upon reset.
     * This is allowed as per the priv and smepmp specifications
     * and is needed to clear stale entries across reboots.
     */
    if (riscv_cpu_cfg(env)->ext_smepmp) {
        env->mseccfg = 0;
    }

    pmp_unlock_entries(env);
#else
    env->priv = PRV_U;
    env->senvcfg = 0;
    env->menvcfg = 0;
#endif

    /* on reset elp is clear */
    env->elp = false;
    /* on reset ssp is set to 0 */
    env->ssp = 0;

    env->xl = riscv_cpu_mxl(env);
    cs->exception_index = RISCV_EXCP_NONE;
    env->load_res = -1;
    set_default_nan_mode(1, &env->fp_status);
    /* Default NaN value: sign bit clear, frac msb set */
    set_float_default_nan_pattern(0b01000000, &env->fp_status);
    env->vill = true;

#ifndef CONFIG_USER_ONLY
    if (cpu->cfg.debug) {
        riscv_trigger_reset_hold(env);
    }

    if (cpu->cfg.ext_smrnmi) {
        env->rnmip = 0;
        env->mnstatus = set_field(env->mnstatus, MNSTATUS_NMIE, false);
    }

    if (kvm_enabled()) {
        kvm_riscv_reset_vcpu(cpu);
    }
#endif
}

static void riscv_cpu_disas_set_info(CPUState *s, disassemble_info *info)
{
    RISCVCPU *cpu = RISCV_CPU(s);
    CPURISCVState *env = &cpu->env;
    info->target_info = &cpu->cfg;

    /*
     * A couple of bits in MSTATUS set the endianness:
     * - MSTATUS_UBE (User-mode),
     * - MSTATUS_SBE (Supervisor-mode),
     * - MSTATUS_MBE (Machine-mode)
     * but we don't implement that yet.
     */
    info->endian = BFD_ENDIAN_LITTLE;

    switch (env->xl) {
    case MXL_RV32:
        info->print_insn = print_insn_riscv32;
        break;
    case MXL_RV64:
        info->print_insn = print_insn_riscv64;
        break;
    case MXL_RV128:
        info->print_insn = print_insn_riscv128;
        break;
    default:
        g_assert_not_reached();
    }
}

#ifndef CONFIG_USER_ONLY
static void riscv_cpu_satp_mode_finalize(RISCVCPU *cpu, Error **errp)
{
    bool rv32 = riscv_cpu_is_32bit(cpu);
    uint16_t supported;
    uint8_t satp_mode_map_max;

    if (!get_satp_mode_supported(cpu, &supported)) {
        /* The CPU wants the hypervisor to decide which satp mode to allow */
        return;
    }

    if (cpu->satp_modes.map == 0) {
        if (cpu->satp_modes.init == 0) {
            /* If unset by the user, we fall back to the default satp mode. */
            set_satp_mode_default_map(cpu);
        } else {
            /*
             * Find the lowest level that was disabled and then enable the
             * first valid level below which can be found in
             * valid_vm_1_10_32/64.
             */
            for (int i = 1; i < 16; ++i) {
                if ((cpu->satp_modes.init & (1 << i)) &&
                    supported & (1 << i)) {
                    for (int j = i - 1; j >= 0; --j) {
                        if (supported & (1 << j)) {
                            cpu->cfg.max_satp_mode = j;
                            return;
                        }
                    }
                }
            }
        }
        return;
    }

    satp_mode_map_max = satp_mode_max_from_map(cpu->satp_modes.map);

    /* Make sure the user asked for a supported configuration (HW and qemu) */
    if (satp_mode_map_max > cpu->cfg.max_satp_mode) {
        error_setg(errp, "satp_mode %s is higher than hw max capability %s",
                   satp_mode_str(satp_mode_map_max, rv32),
                   satp_mode_str(cpu->cfg.max_satp_mode, rv32));
        return;
    }

    /*
     * Make sure the user did not ask for an invalid configuration as per
     * the specification.
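     * For example, it is not valid to explicitly disable sv39 while sv48
     * stays enabled, since a supported mode implies all lower modes.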
     */
    if (!rv32) {
        for (int i = satp_mode_map_max - 1; i >= 0; --i) {
            if (!(cpu->satp_modes.map & (1 << i)) &&
                (cpu->satp_modes.init & (1 << i)) &&
                (supported & (1 << i))) {
                error_setg(errp, "cannot disable %s satp mode if %s "
                           "is enabled", satp_mode_str(i, false),
                           satp_mode_str(satp_mode_map_max, false));
                return;
            }
        }
    }

    cpu->cfg.max_satp_mode = satp_mode_map_max;
}
#endif

void riscv_cpu_finalize_features(RISCVCPU *cpu, Error **errp)
{
    Error *local_err = NULL;

#ifndef CONFIG_USER_ONLY
    riscv_cpu_satp_mode_finalize(cpu, &local_err);
    if (local_err != NULL) {
        error_propagate(errp, local_err);
        return;
    }
#endif

    if (tcg_enabled()) {
        riscv_tcg_cpu_finalize_features(cpu, &local_err);
        if (local_err != NULL) {
            error_propagate(errp, local_err);
            return;
        }
        riscv_tcg_cpu_finalize_dynamic_decoder(cpu);
    } else if (kvm_enabled()) {
        riscv_kvm_cpu_finalize_features(cpu, &local_err);
        if (local_err != NULL) {
            error_propagate(errp, local_err);
            return;
        }
    }
}

static void riscv_cpu_realize(DeviceState *dev, Error **errp)
{
    CPUState *cs = CPU(dev);
    RISCVCPU *cpu = RISCV_CPU(dev);
    RISCVCPUClass *mcc = RISCV_CPU_GET_CLASS(dev);
    Error *local_err = NULL;

    cpu_exec_realizefn(cs, &local_err);
    if (local_err != NULL) {
        error_propagate(errp, local_err);
        return;
    }

    riscv_cpu_finalize_features(cpu, &local_err);
    if (local_err != NULL) {
        error_propagate(errp, local_err);
        return;
    }

    riscv_cpu_register_gdb_regs_for_features(cs);

#ifndef CONFIG_USER_ONLY
    if (cpu->cfg.debug) {
        riscv_trigger_realize(&cpu->env);
    }
#endif

    qemu_init_vcpu(cs);
    cpu_reset(cs);

    mcc->parent_realize(dev, errp);
}

bool riscv_cpu_accelerator_compatible(RISCVCPU *cpu)
{
    if (tcg_enabled()) {
        return riscv_cpu_tcg_compatible(cpu);
    }

    return true;
}

#ifndef CONFIG_USER_ONLY
static void cpu_riscv_get_satp(Object *obj, Visitor *v, const char *name,
                               void *opaque, Error **errp)
{
    RISCVSATPModes *satp_modes = opaque;
    uint8_t satp = satp_mode_from_str(name);
    bool value;

    value = satp_modes->map & (1 << satp);

    visit_type_bool(v, name, &value, errp);
}

static void cpu_riscv_set_satp(Object *obj, Visitor *v, const char *name,
                               void *opaque, Error **errp)
{
    RISCVSATPModes *satp_modes = opaque;
    uint8_t satp = satp_mode_from_str(name);
    bool value;

    if (!visit_type_bool(v, name, &value, errp)) {
        return;
    }

    satp_modes->map = deposit32(satp_modes->map, satp, 1, value);
    satp_modes->init |= 1 << satp;
}

void riscv_add_satp_mode_properties(Object *obj)
{
    RISCVCPU *cpu = RISCV_CPU(obj);

    if (cpu->env.misa_mxl == MXL_RV32) {
        object_property_add(obj, "sv32", "bool", cpu_riscv_get_satp,
                            cpu_riscv_set_satp, NULL, &cpu->satp_modes);
    } else {
        object_property_add(obj, "sv39", "bool", cpu_riscv_get_satp,
                            cpu_riscv_set_satp, NULL, &cpu->satp_modes);
        object_property_add(obj, "sv48", "bool", cpu_riscv_get_satp,
                            cpu_riscv_set_satp, NULL, &cpu->satp_modes);
        object_property_add(obj, "sv57", "bool", cpu_riscv_get_satp,
                            cpu_riscv_set_satp, NULL, &cpu->satp_modes);
        object_property_add(obj, "sv64", "bool", cpu_riscv_get_satp,
                            cpu_riscv_set_satp, NULL, &cpu->satp_modes);
    }
}
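
/*
 * Usage sketch (illustrative note, not from the original file): each
 * property registered above flips one bit in satp_modes.map and records
 * the user's choice in satp_modes.init, e.g. something like
 * '-cpu rv64,sv39=on,sv48=off' on the command line. The resulting
 * combination is validated later in riscv_cpu_satp_mode_finalize().
 */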

static void riscv_cpu_set_irq(void *opaque, int irq, int level)
{
    RISCVCPU *cpu = RISCV_CPU(opaque);
    CPURISCVState *env = &cpu->env;

    if (irq < IRQ_LOCAL_MAX) {
        switch (irq) {
        case IRQ_U_SOFT:
        case IRQ_S_SOFT:
        case IRQ_VS_SOFT:
        case IRQ_M_SOFT:
        case IRQ_U_TIMER:
        case IRQ_S_TIMER:
        case IRQ_VS_TIMER:
        case IRQ_M_TIMER:
        case IRQ_U_EXT:
        case IRQ_VS_EXT:
        case IRQ_M_EXT:
            if (kvm_enabled()) {
                kvm_riscv_set_irq(cpu, irq, level);
            } else {
                riscv_cpu_update_mip(env, 1 << irq, BOOL_TO_MASK(level));
            }
            break;
        case IRQ_S_EXT:
            if (kvm_enabled()) {
                kvm_riscv_set_irq(cpu, irq, level);
            } else {
                env->external_seip = level;
                riscv_cpu_update_mip(env, 1 << irq,
                                     BOOL_TO_MASK(level | env->software_seip));
            }
            break;
        default:
            g_assert_not_reached();
        }
    } else if (irq < (IRQ_LOCAL_MAX + IRQ_LOCAL_GUEST_MAX)) {
        /* Require H-extension for handling guest local interrupts */
        if (!riscv_has_ext(env, RVH)) {
            g_assert_not_reached();
        }

        /* Compute bit position in HGEIP CSR */
        irq = irq - IRQ_LOCAL_MAX + 1;
        if (env->geilen < irq) {
            g_assert_not_reached();
        }

        /* Update HGEIP CSR */
        env->hgeip &= ~((target_ulong)1 << irq);
        if (level) {
            env->hgeip |= (target_ulong)1 << irq;
        }

        /* Update mip.SGEIP bit */
        riscv_cpu_update_mip(env, MIP_SGEIP,
                             BOOL_TO_MASK(!!(env->hgeie & env->hgeip)));
    } else {
        g_assert_not_reached();
    }
}

static void riscv_cpu_set_nmi(void *opaque, int irq, int level)
{
    riscv_cpu_set_rnmi(RISCV_CPU(opaque), irq, level);
}
#endif /* CONFIG_USER_ONLY */

static bool riscv_cpu_is_dynamic(Object *cpu_obj)
{
    return object_dynamic_cast(cpu_obj, TYPE_RISCV_DYNAMIC_CPU) != NULL;
}

static void riscv_cpu_init(Object *obj)
{
    RISCVCPUClass *mcc = RISCV_CPU_GET_CLASS(obj);
    RISCVCPU *cpu = RISCV_CPU(obj);
    CPURISCVState *env = &cpu->env;

    env->misa_mxl = mcc->def->misa_mxl_max;

#ifndef CONFIG_USER_ONLY
    qdev_init_gpio_in(DEVICE(obj), riscv_cpu_set_irq,
                      IRQ_LOCAL_MAX + IRQ_LOCAL_GUEST_MAX);
    qdev_init_gpio_in_named(DEVICE(cpu), riscv_cpu_set_nmi,
                            "riscv.cpu.rnmi", RNMI_MAX);
#endif /* CONFIG_USER_ONLY */

    general_user_opts = g_hash_table_new(g_str_hash, g_str_equal);

    /*
     * The timer and performance counters extensions were supported
     * in QEMU before they were added as discrete extensions in the
     * ISA. To keep compatibility we'll always default them to 'true'
     * for all CPUs. Each accelerator will decide what to do when
     * users disable them.
     */
    RISCV_CPU(obj)->cfg.ext_zicntr = !mcc->def->bare;
    RISCV_CPU(obj)->cfg.ext_zihpm = !mcc->def->bare;

    /* Default values for non-bool cpu properties */
    cpu->cfg.pmu_mask = MAKE_64BIT_MASK(3, 16);
    cpu->cfg.vlenb = 128 >> 3;
    cpu->cfg.elen = 64;
    cpu->cfg.cbom_blocksize = 64;
    cpu->cfg.cbop_blocksize = 64;
    cpu->cfg.cboz_blocksize = 64;
    cpu->cfg.pmp_regions = 16;
    cpu->env.vext_ver = VEXT_VERSION_1_00_0;
    cpu->cfg.max_satp_mode = -1;

    if (mcc->def->profile) {
        mcc->def->profile->enabled = true;
    }

    env->misa_ext_mask = env->misa_ext = mcc->def->misa_ext;
    riscv_cpu_cfg_merge(&cpu->cfg, &mcc->def->cfg);

    if (mcc->def->priv_spec != RISCV_PROFILE_ATTR_UNUSED) {
        cpu->env.priv_ver = mcc->def->priv_spec;
    }
    if (mcc->def->vext_spec != RISCV_PROFILE_ATTR_UNUSED) {
        cpu->env.vext_ver = mcc->def->vext_spec;
    }
#ifndef CONFIG_USER_ONLY
    if (mcc->def->custom_csrs) {
        riscv_register_custom_csrs(cpu, mcc->def->custom_csrs);
    }
#endif

    accel_cpu_instance_init(CPU(obj));
}

typedef struct misa_ext_info {
    const char *name;
    const char *description;
} MISAExtInfo;

#define MISA_INFO_IDX(_bit) \
    __builtin_ctz(_bit)

#define MISA_EXT_INFO(_bit, _propname, _descr) \
    [MISA_INFO_IDX(_bit)] = {.name = _propname, .description = _descr}

static const MISAExtInfo misa_ext_info_arr[] = {
    MISA_EXT_INFO(RVA, "a", "Atomic instructions"),
    MISA_EXT_INFO(RVC, "c", "Compressed instructions"),
    MISA_EXT_INFO(RVD, "d", "Double-precision float point"),
    MISA_EXT_INFO(RVF, "f", "Single-precision float point"),
    MISA_EXT_INFO(RVI, "i", "Base integer instruction set"),
    MISA_EXT_INFO(RVE, "e", "Base integer instruction set (embedded)"),
    MISA_EXT_INFO(RVM, "m", "Integer multiplication and division"),
    MISA_EXT_INFO(RVS, "s", "Supervisor-level instructions"),
    MISA_EXT_INFO(RVU, "u", "User-level instructions"),
    MISA_EXT_INFO(RVH, "h", "Hypervisor"),
    MISA_EXT_INFO(RVV, "v", "Vector operations"),
    MISA_EXT_INFO(RVG, "g", "General purpose (IMAFD_Zicsr_Zifencei)"),
    MISA_EXT_INFO(RVB, "b", "Bit manipulation (Zba_Zbb_Zbs)")
};

static void riscv_cpu_validate_misa_mxl(RISCVCPUClass *mcc)
{
    CPUClass *cc = CPU_CLASS(mcc);

    /* Validate that MISA_MXL is set properly. */
    switch (mcc->def->misa_mxl_max) {
#ifdef TARGET_RISCV64
    case MXL_RV64:
    case MXL_RV128:
        cc->gdb_core_xml_file = "riscv-64bit-cpu.xml";
        break;
#endif
    case MXL_RV32:
        cc->gdb_core_xml_file = "riscv-32bit-cpu.xml";
        break;
    default:
        g_assert_not_reached();
    }
}

static int riscv_validate_misa_info_idx(uint32_t bit)
{
    int idx;

    /*
     * Our lowest valid input (RVA) is 1 and
     * __builtin_ctz() is UB with zero.
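     * As an example, RVA == 1 maps to index 0 in misa_ext_info_arr[].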
     */
    g_assert(bit != 0);
    idx = MISA_INFO_IDX(bit);

    g_assert(idx < ARRAY_SIZE(misa_ext_info_arr));
    return idx;
}

const char *riscv_get_misa_ext_name(uint32_t bit)
{
    int idx = riscv_validate_misa_info_idx(bit);
    const char *val = misa_ext_info_arr[idx].name;

    g_assert(val != NULL);
    return val;
}

const char *riscv_get_misa_ext_description(uint32_t bit)
{
    int idx = riscv_validate_misa_info_idx(bit);
    const char *val = misa_ext_info_arr[idx].description;

    g_assert(val != NULL);
    return val;
}

#define MULTI_EXT_CFG_BOOL(_name, _prop, _defval) \
    {.name = _name, .offset = CPU_CFG_OFFSET(_prop), \
     .enabled = _defval}

const RISCVCPUMultiExtConfig riscv_cpu_extensions[] = {
    /* Defaults for standard extensions */
    MULTI_EXT_CFG_BOOL("sscofpmf", ext_sscofpmf, false),
    MULTI_EXT_CFG_BOOL("smcntrpmf", ext_smcntrpmf, false),
    MULTI_EXT_CFG_BOOL("smcsrind", ext_smcsrind, false),
    MULTI_EXT_CFG_BOOL("smcdeleg", ext_smcdeleg, false),
    MULTI_EXT_CFG_BOOL("sscsrind", ext_sscsrind, false),
    MULTI_EXT_CFG_BOOL("ssccfg", ext_ssccfg, false),
    MULTI_EXT_CFG_BOOL("smctr", ext_smctr, false),
    MULTI_EXT_CFG_BOOL("ssctr", ext_ssctr, false),
    MULTI_EXT_CFG_BOOL("zifencei", ext_zifencei, true),
    MULTI_EXT_CFG_BOOL("zicfilp", ext_zicfilp, false),
    MULTI_EXT_CFG_BOOL("zicfiss", ext_zicfiss, false),
    MULTI_EXT_CFG_BOOL("zicsr", ext_zicsr, true),
    MULTI_EXT_CFG_BOOL("zihintntl", ext_zihintntl, true),
    MULTI_EXT_CFG_BOOL("zihintpause", ext_zihintpause, true),
    MULTI_EXT_CFG_BOOL("zimop", ext_zimop, false),
    MULTI_EXT_CFG_BOOL("zcmop", ext_zcmop, false),
    MULTI_EXT_CFG_BOOL("zacas", ext_zacas, false),
    MULTI_EXT_CFG_BOOL("zama16b", ext_zama16b, false),
    MULTI_EXT_CFG_BOOL("zabha", ext_zabha, false),
    MULTI_EXT_CFG_BOOL("zaamo", ext_zaamo, false),
    MULTI_EXT_CFG_BOOL("zalrsc", ext_zalrsc, false),
    MULTI_EXT_CFG_BOOL("zawrs", ext_zawrs, true),
    MULTI_EXT_CFG_BOOL("zfa", ext_zfa, true),
    MULTI_EXT_CFG_BOOL("zfbfmin", ext_zfbfmin, false),
    MULTI_EXT_CFG_BOOL("zfh", ext_zfh, false),
    MULTI_EXT_CFG_BOOL("zfhmin", ext_zfhmin, false),
    MULTI_EXT_CFG_BOOL("zve32f", ext_zve32f, false),
    MULTI_EXT_CFG_BOOL("zve32x", ext_zve32x, false),
    MULTI_EXT_CFG_BOOL("zve64f", ext_zve64f, false),
    MULTI_EXT_CFG_BOOL("zve64d", ext_zve64d, false),
    MULTI_EXT_CFG_BOOL("zve64x", ext_zve64x, false),
    MULTI_EXT_CFG_BOOL("zvfbfmin", ext_zvfbfmin, false),
    MULTI_EXT_CFG_BOOL("zvfbfwma", ext_zvfbfwma, false),
    MULTI_EXT_CFG_BOOL("zvfh", ext_zvfh, false),
    MULTI_EXT_CFG_BOOL("zvfhmin", ext_zvfhmin, false),
    MULTI_EXT_CFG_BOOL("sstc", ext_sstc, true),
    MULTI_EXT_CFG_BOOL("ssnpm", ext_ssnpm, false),
    MULTI_EXT_CFG_BOOL("sspm", ext_sspm, false),
    MULTI_EXT_CFG_BOOL("supm", ext_supm, false),

    MULTI_EXT_CFG_BOOL("smaia", ext_smaia, false),
    MULTI_EXT_CFG_BOOL("smdbltrp", ext_smdbltrp, false),
    MULTI_EXT_CFG_BOOL("smepmp", ext_smepmp, false),
    MULTI_EXT_CFG_BOOL("smrnmi", ext_smrnmi, false),
    MULTI_EXT_CFG_BOOL("smmpm", ext_smmpm, false),
    MULTI_EXT_CFG_BOOL("smnpm", ext_smnpm, false),
    MULTI_EXT_CFG_BOOL("smstateen", ext_smstateen, false),
    MULTI_EXT_CFG_BOOL("ssaia", ext_ssaia, false),
    MULTI_EXT_CFG_BOOL("ssdbltrp", ext_ssdbltrp, false),
    MULTI_EXT_CFG_BOOL("svade", ext_svade, false),
    MULTI_EXT_CFG_BOOL("svadu", ext_svadu, true),
    MULTI_EXT_CFG_BOOL("svinval", ext_svinval, false),
    MULTI_EXT_CFG_BOOL("svnapot", ext_svnapot, false),
    MULTI_EXT_CFG_BOOL("svpbmt", ext_svpbmt, false),
    MULTI_EXT_CFG_BOOL("svvptc", ext_svvptc, true),

    MULTI_EXT_CFG_BOOL("zicntr", ext_zicntr, true),
    MULTI_EXT_CFG_BOOL("zihpm", ext_zihpm, true),

    MULTI_EXT_CFG_BOOL("zba", ext_zba, true),
    MULTI_EXT_CFG_BOOL("zbb", ext_zbb, true),
    MULTI_EXT_CFG_BOOL("zbc", ext_zbc, true),
    MULTI_EXT_CFG_BOOL("zbkb", ext_zbkb, false),
    MULTI_EXT_CFG_BOOL("zbkc", ext_zbkc, false),
    MULTI_EXT_CFG_BOOL("zbkx", ext_zbkx, false),
    MULTI_EXT_CFG_BOOL("zbs", ext_zbs, true),
    MULTI_EXT_CFG_BOOL("zk", ext_zk, false),
    MULTI_EXT_CFG_BOOL("zkn", ext_zkn, false),
    MULTI_EXT_CFG_BOOL("zknd", ext_zknd, false),
    MULTI_EXT_CFG_BOOL("zkne", ext_zkne, false),
    MULTI_EXT_CFG_BOOL("zknh", ext_zknh, false),
    MULTI_EXT_CFG_BOOL("zkr", ext_zkr, false),
    MULTI_EXT_CFG_BOOL("zks", ext_zks, false),
    MULTI_EXT_CFG_BOOL("zksed", ext_zksed, false),
    MULTI_EXT_CFG_BOOL("zksh", ext_zksh, false),
    MULTI_EXT_CFG_BOOL("zkt", ext_zkt, false),
    MULTI_EXT_CFG_BOOL("ztso", ext_ztso, false),

    MULTI_EXT_CFG_BOOL("zdinx", ext_zdinx, false),
    MULTI_EXT_CFG_BOOL("zfinx", ext_zfinx, false),
    MULTI_EXT_CFG_BOOL("zhinx", ext_zhinx, false),
    MULTI_EXT_CFG_BOOL("zhinxmin", ext_zhinxmin, false),

    MULTI_EXT_CFG_BOOL("zicbom", ext_zicbom, true),
    MULTI_EXT_CFG_BOOL("zicbop", ext_zicbop, true),
    MULTI_EXT_CFG_BOOL("zicboz", ext_zicboz, true),

    MULTI_EXT_CFG_BOOL("zmmul", ext_zmmul, false),

    MULTI_EXT_CFG_BOOL("zca", ext_zca, false),
    MULTI_EXT_CFG_BOOL("zcb", ext_zcb, false),
    MULTI_EXT_CFG_BOOL("zcd", ext_zcd, false),
    MULTI_EXT_CFG_BOOL("zce", ext_zce, false),
    MULTI_EXT_CFG_BOOL("zcf", ext_zcf, false),
    MULTI_EXT_CFG_BOOL("zcmp", ext_zcmp, false),
    MULTI_EXT_CFG_BOOL("zcmt", ext_zcmt, false),
    MULTI_EXT_CFG_BOOL("zicond", ext_zicond, false),

    /* Vector cryptography extensions */
    MULTI_EXT_CFG_BOOL("zvbb", ext_zvbb, false),
    MULTI_EXT_CFG_BOOL("zvbc", ext_zvbc, false),
    MULTI_EXT_CFG_BOOL("zvkb", ext_zvkb, false),
    MULTI_EXT_CFG_BOOL("zvkg", ext_zvkg, false),
    MULTI_EXT_CFG_BOOL("zvkned", ext_zvkned, false),
    MULTI_EXT_CFG_BOOL("zvknha", ext_zvknha, false),
    MULTI_EXT_CFG_BOOL("zvknhb", ext_zvknhb, false),
    MULTI_EXT_CFG_BOOL("zvksed", ext_zvksed, false),
    MULTI_EXT_CFG_BOOL("zvksh", ext_zvksh, false),
    MULTI_EXT_CFG_BOOL("zvkt", ext_zvkt, false),
    MULTI_EXT_CFG_BOOL("zvkn", ext_zvkn, false),
    MULTI_EXT_CFG_BOOL("zvknc", ext_zvknc, false),
    MULTI_EXT_CFG_BOOL("zvkng", ext_zvkng, false),
    MULTI_EXT_CFG_BOOL("zvks", ext_zvks, false),
    MULTI_EXT_CFG_BOOL("zvksc", ext_zvksc, false),
    MULTI_EXT_CFG_BOOL("zvksg", ext_zvksg, false),

    { },
};

const RISCVCPUMultiExtConfig riscv_cpu_vendor_exts[] = {
    MULTI_EXT_CFG_BOOL("xtheadba", ext_xtheadba, false),
    MULTI_EXT_CFG_BOOL("xtheadbb", ext_xtheadbb, false),
    MULTI_EXT_CFG_BOOL("xtheadbs", ext_xtheadbs, false),
    MULTI_EXT_CFG_BOOL("xtheadcmo", ext_xtheadcmo, false),
    MULTI_EXT_CFG_BOOL("xtheadcondmov", ext_xtheadcondmov, false),
    MULTI_EXT_CFG_BOOL("xtheadfmemidx", ext_xtheadfmemidx, false),
    MULTI_EXT_CFG_BOOL("xtheadfmv", ext_xtheadfmv, false),
    MULTI_EXT_CFG_BOOL("xtheadmac", ext_xtheadmac, false),
    MULTI_EXT_CFG_BOOL("xtheadmemidx", ext_xtheadmemidx, false),
    MULTI_EXT_CFG_BOOL("xtheadmempair", ext_xtheadmempair, false),
    MULTI_EXT_CFG_BOOL("xtheadsync", ext_xtheadsync, false),
    MULTI_EXT_CFG_BOOL("xventanacondops", ext_XVentanaCondOps, false),

    { },
};

/* These are experimental so mark with 'x-' */
const RISCVCPUMultiExtConfig riscv_cpu_experimental_exts[] = {
    MULTI_EXT_CFG_BOOL("x-svukte", ext_svukte, false),

    { },
};

/*
 * 'Named features' is the name we give to extensions that we
 * don't want to expose to users. They are either immutable
 * (always enabled/disabled) or they'll vary depending on
 * the resulting CPU state.
 *
 * Some of them are always enabled depending on priv version
 * of the CPU and are declared directly in isa_edata_arr[].
 * The ones listed here have special checks during finalize()
 * time and require their own flags like regular extensions.
 * See riscv_cpu_update_named_features() for more info.
 */
const RISCVCPUMultiExtConfig riscv_cpu_named_features[] = {
    MULTI_EXT_CFG_BOOL("zic64b", ext_zic64b, true),
    MULTI_EXT_CFG_BOOL("ssstateen", ext_ssstateen, true),
    MULTI_EXT_CFG_BOOL("sha", ext_sha, true),

    /*
     * 'ziccrse' has its own flag because the KVM driver
     * wants to enable/disable it on its own accord.
     */
    MULTI_EXT_CFG_BOOL("ziccrse", ext_ziccrse, true),

    { },
};

static void cpu_set_prop_err(RISCVCPU *cpu, const char *propname,
                             Error **errp)
{
    g_autofree char *cpuname = riscv_cpu_get_name(cpu);
    error_setg(errp, "CPU '%s' does not allow changing the value of '%s'",
               cpuname, propname);
}

static void prop_pmu_num_set(Object *obj, Visitor *v, const char *name,
                             void *opaque, Error **errp)
{
    RISCVCPU *cpu = RISCV_CPU(obj);
    uint8_t pmu_num, curr_pmu_num;
    uint32_t pmu_mask;

    visit_type_uint8(v, name, &pmu_num, errp);

    curr_pmu_num = ctpop32(cpu->cfg.pmu_mask);

    if (pmu_num != curr_pmu_num && riscv_cpu_is_vendor(obj)) {
        cpu_set_prop_err(cpu, name, errp);
        error_append_hint(errp, "Current '%s' val: %u\n",
                          name, curr_pmu_num);
        return;
    }

    if (pmu_num > (RV_MAX_MHPMCOUNTERS - 3)) {
        error_setg(errp, "Number of counters exceeds maximum available");
        return;
    }

    if (pmu_num == 0) {
        pmu_mask = 0;
    } else {
        pmu_mask = MAKE_64BIT_MASK(3, pmu_num);
    }

    warn_report("\"pmu-num\" property is deprecated; use \"pmu-mask\"");
    cpu->cfg.pmu_mask = pmu_mask;
    cpu_option_add_user_setting("pmu-mask", pmu_mask);
}

static void prop_pmu_num_get(Object *obj, Visitor *v, const char *name,
                             void *opaque, Error **errp)
{
    RISCVCPU *cpu = RISCV_CPU(obj);
    uint8_t pmu_num = ctpop32(cpu->cfg.pmu_mask);

    visit_type_uint8(v, name, &pmu_num, errp);
}

static const PropertyInfo prop_pmu_num = {
    .type = "int8",
    .description = "pmu-num",
    .get = prop_pmu_num_get,
    .set = prop_pmu_num_set,
};

static void prop_pmu_mask_set(Object *obj, Visitor *v, const char *name,
                              void *opaque, Error **errp)
{
    RISCVCPU *cpu = RISCV_CPU(obj);
    uint32_t value;
    uint8_t pmu_num;

    visit_type_uint32(v, name, &value, errp);

    if (value != cpu->cfg.pmu_mask && riscv_cpu_is_vendor(obj)) {
        cpu_set_prop_err(cpu, name, errp);
        error_append_hint(errp, "Current '%s' val: %x\n",
                          name, cpu->cfg.pmu_mask);
        return;
    }

    pmu_num = ctpop32(value);

    if (pmu_num > (RV_MAX_MHPMCOUNTERS - 3)) {
        error_setg(errp, "Number of counters exceeds maximum available");
        return;
    }

    cpu_option_add_user_setting(name, value);
    cpu->cfg.pmu_mask = value;
}

static void prop_pmu_mask_get(Object *obj, Visitor *v, const char *name,
                              void *opaque, Error **errp)
{
    uint8_t pmu_mask = RISCV_CPU(obj)->cfg.pmu_mask;

    visit_type_uint8(v, name, &pmu_mask, errp);
}

static const PropertyInfo prop_pmu_mask = {
    .type = "int8",
    .description = "pmu-mask",
    .get = prop_pmu_mask_get,
    .set = prop_pmu_mask_set,
};

static void prop_mmu_set(Object *obj, Visitor *v, const char *name,
                         void *opaque, Error **errp)
{
    RISCVCPU *cpu = RISCV_CPU(obj);
    bool value;

    visit_type_bool(v, name, &value, errp);

    if (cpu->cfg.mmu != value && riscv_cpu_is_vendor(obj)) {
        cpu_set_prop_err(cpu, "mmu", errp);
        return;
    }

    cpu_option_add_user_setting(name, value);
    cpu->cfg.mmu = value;
}

static void prop_mmu_get(Object *obj, Visitor *v, const char *name,
                         void *opaque, Error **errp)
{
    bool value = RISCV_CPU(obj)->cfg.mmu;

    visit_type_bool(v, name, &value, errp);
}

static const PropertyInfo prop_mmu = {
    .type = "bool",
    .description = "mmu",
    .get = prop_mmu_get,
    .set = prop_mmu_set,
};

static void prop_pmp_set(Object *obj, Visitor *v, const char *name,
                         void *opaque, Error **errp)
{
    RISCVCPU *cpu = RISCV_CPU(obj);
    bool value;

    visit_type_bool(v, name, &value, errp);

    if (cpu->cfg.pmp != value && riscv_cpu_is_vendor(obj)) {
        cpu_set_prop_err(cpu, name, errp);
        return;
    }

    cpu_option_add_user_setting(name, value);
    cpu->cfg.pmp = value;
}

static void prop_pmp_get(Object *obj, Visitor *v, const char *name,
                         void *opaque, Error **errp)
{
    bool value = RISCV_CPU(obj)->cfg.pmp;

    visit_type_bool(v, name, &value, errp);
}

static const PropertyInfo prop_pmp = {
    .type = "bool",
    .description = "pmp",
    .get = prop_pmp_get,
    .set = prop_pmp_set,
};

static void prop_num_pmp_regions_set(Object *obj, Visitor *v, const char *name,
                                     void *opaque, Error **errp)
{
    RISCVCPU *cpu = RISCV_CPU(obj);
    uint8_t value;

    visit_type_uint8(v, name, &value, errp);

    if (cpu->cfg.pmp_regions != value && riscv_cpu_is_vendor(obj)) {
        cpu_set_prop_err(cpu, name, errp);
        return;
    }

    if (cpu->env.priv_ver < PRIV_VERSION_1_12_0 && value > OLD_MAX_RISCV_PMPS) {
        error_setg(errp, "Number of PMP regions exceeds maximum available");
        return;
    } else if (value > MAX_RISCV_PMPS) {
        error_setg(errp, "Number of PMP regions exceeds maximum available");
        return;
    }

    cpu_option_add_user_setting(name, value);
    cpu->cfg.pmp_regions = value;
}

static void prop_num_pmp_regions_get(Object *obj, Visitor *v, const char *name,
                                     void *opaque, Error **errp)
{
    uint8_t value = RISCV_CPU(obj)->cfg.pmp_regions;

    visit_type_uint8(v, name, &value, errp);
}

static const PropertyInfo prop_num_pmp_regions = {
    .type = "uint8",
    .description = "num-pmp-regions",
    .get = prop_num_pmp_regions_get,
    .set = prop_num_pmp_regions_set,
};

static int priv_spec_from_str(const char *priv_spec_str)
{
    int priv_version = -1;

    if (!g_strcmp0(priv_spec_str, PRIV_VER_1_13_0_STR)) {
        priv_version = PRIV_VERSION_1_13_0;
    } else if (!g_strcmp0(priv_spec_str, PRIV_VER_1_12_0_STR)) {
        priv_version = PRIV_VERSION_1_12_0;
    } else if (!g_strcmp0(priv_spec_str, PRIV_VER_1_11_0_STR)) {
        priv_version = PRIV_VERSION_1_11_0;
    } else if (!g_strcmp0(priv_spec_str, PRIV_VER_1_10_0_STR)) {
        priv_version = PRIV_VERSION_1_10_0;
    }

    return priv_version;
}

const char *priv_spec_to_str(int priv_version)
{
    switch (priv_version) {
    case PRIV_VERSION_1_10_0:
        return PRIV_VER_1_10_0_STR;
    case PRIV_VERSION_1_11_0:
        return PRIV_VER_1_11_0_STR;
    case PRIV_VERSION_1_12_0:
        return PRIV_VER_1_12_0_STR;
    case PRIV_VERSION_1_13_0:
        return PRIV_VER_1_13_0_STR;
    default:
        return NULL;
    }
}

static void prop_priv_spec_set(Object *obj, Visitor *v, const char *name,
                               void *opaque, Error **errp)
{
    RISCVCPU *cpu = RISCV_CPU(obj);
    g_autofree char *value = NULL;
    int priv_version = -1;

    visit_type_str(v, name, &value, errp);

    priv_version = priv_spec_from_str(value);
    if (priv_version < 0) {
        error_setg(errp, "Unsupported privilege spec version '%s'", value);
        return;
    }

    if (priv_version != cpu->env.priv_ver && riscv_cpu_is_vendor(obj)) {
        cpu_set_prop_err(cpu, name, errp);
        error_append_hint(errp, "Current '%s' val: %s\n", name,
                          object_property_get_str(obj, name, NULL));
        return;
    }

    cpu_option_add_user_setting(name, priv_version);
    cpu->env.priv_ver = priv_version;
}

static void prop_priv_spec_get(Object *obj, Visitor *v, const char *name,
                               void *opaque, Error **errp)
{
    RISCVCPU *cpu = RISCV_CPU(obj);
    const char *value = priv_spec_to_str(cpu->env.priv_ver);

    visit_type_str(v, name, (char **)&value, errp);
}

static const PropertyInfo prop_priv_spec = {
    .type = "str",
    .description = "priv_spec",
    /* FIXME enum? */
    .get = prop_priv_spec_get,
    .set = prop_priv_spec_set,
};

static void prop_vext_spec_set(Object *obj, Visitor *v, const char *name,
                               void *opaque, Error **errp)
{
    RISCVCPU *cpu = RISCV_CPU(obj);
    g_autofree char *value = NULL;

    visit_type_str(v, name, &value, errp);

    if (g_strcmp0(value, VEXT_VER_1_00_0_STR) != 0) {
        error_setg(errp, "Unsupported vector spec version '%s'", value);
        return;
    }

    cpu_option_add_user_setting(name, VEXT_VERSION_1_00_0);
    cpu->env.vext_ver = VEXT_VERSION_1_00_0;
}

static void prop_vext_spec_get(Object *obj, Visitor *v, const char *name,
                               void *opaque, Error **errp)
{
    const char *value = VEXT_VER_1_00_0_STR;

    visit_type_str(v, name, (char **)&value, errp);
}

static const PropertyInfo prop_vext_spec = {
    .type = "str",
    .description = "vext_spec",
    /* FIXME enum? */
    .get = prop_vext_spec_get,
    .set = prop_vext_spec_set,
};

static void prop_vlen_set(Object *obj, Visitor *v, const char *name,
                          void *opaque, Error **errp)
{
    RISCVCPU *cpu = RISCV_CPU(obj);
    uint16_t cpu_vlen = cpu->cfg.vlenb << 3;
    uint16_t value;

    if (!visit_type_uint16(v, name, &value, errp)) {
        return;
    }

    if (!is_power_of_2(value)) {
        error_setg(errp, "Vector extension VLEN must be power of 2");
        return;
    }

    if (value != cpu_vlen && riscv_cpu_is_vendor(obj)) {
        cpu_set_prop_err(cpu, name, errp);
        error_append_hint(errp, "Current '%s' val: %u\n",
                          name, cpu_vlen);
        return;
    }

    cpu_option_add_user_setting(name, value);
    cpu->cfg.vlenb = value >> 3;
}

static void prop_vlen_get(Object *obj, Visitor *v, const char *name,
                          void *opaque, Error **errp)
{
    uint16_t value = RISCV_CPU(obj)->cfg.vlenb << 3;

    visit_type_uint16(v, name, &value, errp);
}

static const PropertyInfo prop_vlen = {
    .type = "uint16",
    .description = "vlen",
    .get = prop_vlen_get,
    .set = prop_vlen_set,
};

static void prop_elen_set(Object *obj, Visitor *v, const char *name,
                          void *opaque, Error **errp)
{
    RISCVCPU *cpu = RISCV_CPU(obj);
    uint16_t value;

    if (!visit_type_uint16(v, name, &value, errp)) {
        return;
    }

    if (!is_power_of_2(value)) {
        error_setg(errp, "Vector extension ELEN must be power of 2");
        return;
    }

    if (value != cpu->cfg.elen && riscv_cpu_is_vendor(obj)) {
        cpu_set_prop_err(cpu, name, errp);
        error_append_hint(errp, "Current '%s' val: %u\n",
                          name, cpu->cfg.elen);
        return;
    }

    cpu_option_add_user_setting(name, value);
    cpu->cfg.elen = value;
}

static void prop_elen_get(Object *obj, Visitor *v, const char *name,
                          void *opaque, Error **errp)
{
    uint16_t value = RISCV_CPU(obj)->cfg.elen;

    visit_type_uint16(v, name, &value, errp);
}

static const PropertyInfo prop_elen = {
    .type = "uint16",
    .description = "elen",
    .get = prop_elen_get,
    .set = prop_elen_set,
};

static void prop_cbom_blksize_set(Object *obj, Visitor *v, const char *name,
                                  void *opaque, Error **errp)
{
    RISCVCPU *cpu = RISCV_CPU(obj);
    uint16_t value;

    if (!visit_type_uint16(v, name, &value, errp)) {
        return;
    }

    if (value != cpu->cfg.cbom_blocksize && riscv_cpu_is_vendor(obj)) {
        cpu_set_prop_err(cpu, name, errp);
        error_append_hint(errp, "Current '%s' val: %u\n",
                          name, cpu->cfg.cbom_blocksize);
        return;
    }

    cpu_option_add_user_setting(name, value);
    cpu->cfg.cbom_blocksize = value;
}

static void prop_cbom_blksize_get(Object *obj, Visitor *v, const char *name,
                                  void *opaque, Error **errp)
{
    uint16_t value = RISCV_CPU(obj)->cfg.cbom_blocksize;

    visit_type_uint16(v, name, &value, errp);
}

static const PropertyInfo prop_cbom_blksize = {
    .type = "uint16",
    .description = "cbom_blocksize",
    .get = prop_cbom_blksize_get,
    .set = prop_cbom_blksize_set,
};

static void prop_cbop_blksize_set(Object *obj, Visitor *v, const char *name,
                                  void *opaque, Error **errp)
{
    RISCVCPU *cpu = RISCV_CPU(obj);
    uint16_t value;

    if (!visit_type_uint16(v, name, &value, errp)) {
        return;
    }
1844 1845 if (value != cpu->cfg.cbop_blocksize && riscv_cpu_is_vendor(obj)) { 1846 cpu_set_prop_err(cpu, name, errp); 1847 error_append_hint(errp, "Current '%s' val: %u\n", 1848 name, cpu->cfg.cbop_blocksize); 1849 return; 1850 } 1851 1852 cpu_option_add_user_setting(name, value); 1853 cpu->cfg.cbop_blocksize = value; 1854 } 1855 1856 static void prop_cbop_blksize_get(Object *obj, Visitor *v, const char *name, 1857 void *opaque, Error **errp) 1858 { 1859 uint16_t value = RISCV_CPU(obj)->cfg.cbop_blocksize; 1860 1861 visit_type_uint16(v, name, &value, errp); 1862 } 1863 1864 static const PropertyInfo prop_cbop_blksize = { 1865 .type = "uint16", 1866 .description = "cbop_blocksize", 1867 .get = prop_cbop_blksize_get, 1868 .set = prop_cbop_blksize_set, 1869 }; 1870 1871 static void prop_cboz_blksize_set(Object *obj, Visitor *v, const char *name, 1872 void *opaque, Error **errp) 1873 { 1874 RISCVCPU *cpu = RISCV_CPU(obj); 1875 uint16_t value; 1876 1877 if (!visit_type_uint16(v, name, &value, errp)) { 1878 return; 1879 } 1880 1881 if (value != cpu->cfg.cboz_blocksize && riscv_cpu_is_vendor(obj)) { 1882 cpu_set_prop_err(cpu, name, errp); 1883 error_append_hint(errp, "Current '%s' val: %u\n", 1884 name, cpu->cfg.cboz_blocksize); 1885 return; 1886 } 1887 1888 cpu_option_add_user_setting(name, value); 1889 cpu->cfg.cboz_blocksize = value; 1890 } 1891 1892 static void prop_cboz_blksize_get(Object *obj, Visitor *v, const char *name, 1893 void *opaque, Error **errp) 1894 { 1895 uint16_t value = RISCV_CPU(obj)->cfg.cboz_blocksize; 1896 1897 visit_type_uint16(v, name, &value, errp); 1898 } 1899 1900 static const PropertyInfo prop_cboz_blksize = { 1901 .type = "uint16", 1902 .description = "cboz_blocksize", 1903 .get = prop_cboz_blksize_get, 1904 .set = prop_cboz_blksize_set, 1905 }; 1906 1907 static void prop_mvendorid_set(Object *obj, Visitor *v, const char *name, 1908 void *opaque, Error **errp) 1909 { 1910 bool dynamic_cpu = riscv_cpu_is_dynamic(obj); 1911 RISCVCPU *cpu = RISCV_CPU(obj); 1912 uint32_t prev_val = cpu->cfg.mvendorid; 1913 uint32_t value; 1914 1915 if (!visit_type_uint32(v, name, &value, errp)) { 1916 return; 1917 } 1918 1919 if (!dynamic_cpu && prev_val != value) { 1920 error_setg(errp, "Unable to change %s mvendorid (0x%x)", 1921 object_get_typename(obj), prev_val); 1922 return; 1923 } 1924 1925 cpu->cfg.mvendorid = value; 1926 } 1927 1928 static void prop_mvendorid_get(Object *obj, Visitor *v, const char *name, 1929 void *opaque, Error **errp) 1930 { 1931 uint32_t value = RISCV_CPU(obj)->cfg.mvendorid; 1932 1933 visit_type_uint32(v, name, &value, errp); 1934 } 1935 1936 static const PropertyInfo prop_mvendorid = { 1937 .type = "uint32", 1938 .description = "mvendorid", 1939 .get = prop_mvendorid_get, 1940 .set = prop_mvendorid_set, 1941 }; 1942 1943 static void prop_mimpid_set(Object *obj, Visitor *v, const char *name, 1944 void *opaque, Error **errp) 1945 { 1946 bool dynamic_cpu = riscv_cpu_is_dynamic(obj); 1947 RISCVCPU *cpu = RISCV_CPU(obj); 1948 uint64_t prev_val = cpu->cfg.mimpid; 1949 uint64_t value; 1950 1951 if (!visit_type_uint64(v, name, &value, errp)) { 1952 return; 1953 } 1954 1955 if (!dynamic_cpu && prev_val != value) { 1956 error_setg(errp, "Unable to change %s mimpid (0x%" PRIu64 ")", 1957 object_get_typename(obj), prev_val); 1958 return; 1959 } 1960 1961 cpu->cfg.mimpid = value; 1962 } 1963 1964 static void prop_mimpid_get(Object *obj, Visitor *v, const char *name, 1965 void *opaque, Error **errp) 1966 { 1967 uint64_t value = RISCV_CPU(obj)->cfg.mimpid; 1968 1969 
    visit_type_uint64(v, name, &value, errp);
}

static const PropertyInfo prop_mimpid = {
    .type = "uint64",
    .description = "mimpid",
    .get = prop_mimpid_get,
    .set = prop_mimpid_set,
};

static void prop_marchid_set(Object *obj, Visitor *v, const char *name,
                             void *opaque, Error **errp)
{
    bool dynamic_cpu = riscv_cpu_is_dynamic(obj);
    RISCVCPU *cpu = RISCV_CPU(obj);
    uint64_t prev_val = cpu->cfg.marchid;
    uint64_t value, invalid_val;
    uint32_t mxlen = 0;

    if (!visit_type_uint64(v, name, &value, errp)) {
        return;
    }

    if (!dynamic_cpu && prev_val != value) {
        error_setg(errp, "Unable to change %s marchid (0x%" PRIu64 ")",
                   object_get_typename(obj), prev_val);
        return;
    }

    switch (riscv_cpu_mxl(&cpu->env)) {
    case MXL_RV32:
        mxlen = 32;
        break;
    case MXL_RV64:
    case MXL_RV128:
        mxlen = 64;
        break;
    default:
        g_assert_not_reached();
    }

    invalid_val = 1LL << (mxlen - 1);

    if (value == invalid_val) {
        error_setg(errp, "Unable to set marchid with MSB (%u) bit set "
                         "and the remaining bits zero", mxlen);
        return;
    }

    cpu->cfg.marchid = value;
}

static void prop_marchid_get(Object *obj, Visitor *v, const char *name,
                             void *opaque, Error **errp)
{
    uint64_t value = RISCV_CPU(obj)->cfg.marchid;

    visit_type_uint64(v, name, &value, errp);
}

static const PropertyInfo prop_marchid = {
    .type = "uint64",
    .description = "marchid",
    .get = prop_marchid_get,
    .set = prop_marchid_set,
};

/*
 * RVA22U64 defines some cache-related 'named features': Za64rs,
 * Zic64b, Ziccif, Ziccrse, Ziccamoa and Zicclsm. They are always
 * implemented in TCG and don't need to be manually enabled by the
 * profile.
 */
static RISCVCPUProfile RVA22U64 = {
    .u_parent = NULL,
    .s_parent = NULL,
    .name = "rva22u64",
    .misa_ext = RVI | RVM | RVA | RVF | RVD | RVC | RVB | RVU,
    .priv_spec = RISCV_PROFILE_ATTR_UNUSED,
    .satp_mode = RISCV_PROFILE_ATTR_UNUSED,
    .ext_offsets = {
        CPU_CFG_OFFSET(ext_zicsr), CPU_CFG_OFFSET(ext_zihintpause),
        CPU_CFG_OFFSET(ext_zba), CPU_CFG_OFFSET(ext_zbb),
        CPU_CFG_OFFSET(ext_zbs), CPU_CFG_OFFSET(ext_zfhmin),
        CPU_CFG_OFFSET(ext_zkt), CPU_CFG_OFFSET(ext_zicntr),
        CPU_CFG_OFFSET(ext_zihpm), CPU_CFG_OFFSET(ext_zicbom),
        CPU_CFG_OFFSET(ext_zicbop), CPU_CFG_OFFSET(ext_zicboz),

        /* mandatory named features for this profile */
        CPU_CFG_OFFSET(ext_zic64b),

        RISCV_PROFILE_EXT_LIST_END
    }
};

/*
 * As with RVA22U64, RVA22S64 also defines 'named features'.
 *
 * Cache-related features that we consider enabled since we don't
 * implement cache: Ssccptr
 *
 * Other named features that we already implement: Sstvecd, Sstvala,
 * Sscounterenw
 *
 * The remaining features/extensions come from RVA22U64.
 */
static RISCVCPUProfile RVA22S64 = {
    .u_parent = &RVA22U64,
    .s_parent = NULL,
    .name = "rva22s64",
    .misa_ext = RVS,
    .priv_spec = PRIV_VERSION_1_12_0,
    .satp_mode = VM_1_10_SV39,
    .ext_offsets = {
        /* rva22s64 exts */
        CPU_CFG_OFFSET(ext_zifencei), CPU_CFG_OFFSET(ext_svpbmt),
        CPU_CFG_OFFSET(ext_svinval), CPU_CFG_OFFSET(ext_svade),

        RISCV_PROFILE_EXT_LIST_END
    }
};

/*
 * All mandatory extensions from RVA22U64 are present in RVA23U64,
 * so RVA22U64 is set as a parent. We need to declare just the newly
 * added mandatory extensions.
 */
static RISCVCPUProfile RVA23U64 = {
    .u_parent = &RVA22U64,
    .s_parent = NULL,
    .name = "rva23u64",
    .misa_ext = RVV,
    .priv_spec = RISCV_PROFILE_ATTR_UNUSED,
    .satp_mode = RISCV_PROFILE_ATTR_UNUSED,
    .ext_offsets = {
        CPU_CFG_OFFSET(ext_zvfhmin), CPU_CFG_OFFSET(ext_zvbb),
        CPU_CFG_OFFSET(ext_zvkt), CPU_CFG_OFFSET(ext_zihintntl),
        CPU_CFG_OFFSET(ext_zicond), CPU_CFG_OFFSET(ext_zimop),
        CPU_CFG_OFFSET(ext_zcmop), CPU_CFG_OFFSET(ext_zcb),
        CPU_CFG_OFFSET(ext_zfa), CPU_CFG_OFFSET(ext_zawrs),
        CPU_CFG_OFFSET(ext_supm),

        RISCV_PROFILE_EXT_LIST_END
    }
};

/*
 * As with RVA23U64, RVA23S64 also defines 'named features'.
 *
 * Cache-related features that we consider enabled since we don't
 * implement cache: Ssccptr
 *
 * Other named features that we already implement: Sstvecd, Sstvala,
 * Sscounterenw, Ssu64xl
 *
 * The remaining features/extensions come from RVA23U64.
 */
static RISCVCPUProfile RVA23S64 = {
    .u_parent = &RVA23U64,
    .s_parent = &RVA22S64,
    .name = "rva23s64",
    .misa_ext = RVS,
    .priv_spec = PRIV_VERSION_1_13_0,
    .satp_mode = VM_1_10_SV39,
    .ext_offsets = {
        /* New in RVA23S64 */
        CPU_CFG_OFFSET(ext_svnapot), CPU_CFG_OFFSET(ext_sstc),
        CPU_CFG_OFFSET(ext_sscofpmf), CPU_CFG_OFFSET(ext_ssnpm),

        /* Named features: Sha */
        CPU_CFG_OFFSET(ext_sha),

        RISCV_PROFILE_EXT_LIST_END
    }
};

RISCVCPUProfile *riscv_profiles[] = {
    &RVA22U64,
    &RVA22S64,
    &RVA23U64,
    &RVA23S64,
    NULL,
};

static RISCVCPUImpliedExtsRule RVA_IMPLIED = {
    .is_misa = true,
    .ext = RVA,
    .implied_multi_exts = {
        CPU_CFG_OFFSET(ext_zalrsc), CPU_CFG_OFFSET(ext_zaamo),

        RISCV_IMPLIED_EXTS_RULE_END
    },
};

static RISCVCPUImpliedExtsRule RVD_IMPLIED = {
    .is_misa = true,
    .ext = RVD,
    .implied_misa_exts = RVF,
    .implied_multi_exts = { RISCV_IMPLIED_EXTS_RULE_END },
};

static RISCVCPUImpliedExtsRule RVF_IMPLIED = {
    .is_misa = true,
    .ext = RVF,
    .implied_multi_exts = {
        CPU_CFG_OFFSET(ext_zicsr),

        RISCV_IMPLIED_EXTS_RULE_END
    },
};

static RISCVCPUImpliedExtsRule RVM_IMPLIED = {
    .is_misa = true,
    .ext = RVM,
    .implied_multi_exts = {
        CPU_CFG_OFFSET(ext_zmmul),

        RISCV_IMPLIED_EXTS_RULE_END
    },
};

static RISCVCPUImpliedExtsRule RVV_IMPLIED = {
    .is_misa = true,
    .ext = RVV,
    .implied_multi_exts = {
        CPU_CFG_OFFSET(ext_zve64d),

        RISCV_IMPLIED_EXTS_RULE_END
    },
};

static RISCVCPUImpliedExtsRule ZCB_IMPLIED = {
    .ext = CPU_CFG_OFFSET(ext_zcb),
    .implied_multi_exts = {
        CPU_CFG_OFFSET(ext_zca),

        RISCV_IMPLIED_EXTS_RULE_END
    },
};
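
/*
 * A note on how these rules compose (an illustrative summary, not a new
 * mechanism): each rule lists only its direct implications, and the code
 * that applies them follows the chain, so enabling one extension can pull
 * in several others. For example, turning on Zce (see the ZCE rule below)
 * brings in Zcb, Zcmp and Zcmt, and Zcb in turn brings in Zca via the ZCB
 * rule above; likewise misa D implies F, which implies Zicsr.
 */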
2209 static RISCVCPUImpliedExtsRule ZCD_IMPLIED = { 2210 .ext = CPU_CFG_OFFSET(ext_zcd), 2211 .implied_misa_exts = RVD, 2212 .implied_multi_exts = { 2213 CPU_CFG_OFFSET(ext_zca), 2214 2215 RISCV_IMPLIED_EXTS_RULE_END 2216 }, 2217 }; 2218 2219 static RISCVCPUImpliedExtsRule ZCE_IMPLIED = { 2220 .ext = CPU_CFG_OFFSET(ext_zce), 2221 .implied_multi_exts = { 2222 CPU_CFG_OFFSET(ext_zcb), CPU_CFG_OFFSET(ext_zcmp), 2223 CPU_CFG_OFFSET(ext_zcmt), 2224 2225 RISCV_IMPLIED_EXTS_RULE_END 2226 }, 2227 }; 2228 2229 static RISCVCPUImpliedExtsRule ZCF_IMPLIED = { 2230 .ext = CPU_CFG_OFFSET(ext_zcf), 2231 .implied_misa_exts = RVF, 2232 .implied_multi_exts = { 2233 CPU_CFG_OFFSET(ext_zca), 2234 2235 RISCV_IMPLIED_EXTS_RULE_END 2236 }, 2237 }; 2238 2239 static RISCVCPUImpliedExtsRule ZCMP_IMPLIED = { 2240 .ext = CPU_CFG_OFFSET(ext_zcmp), 2241 .implied_multi_exts = { 2242 CPU_CFG_OFFSET(ext_zca), 2243 2244 RISCV_IMPLIED_EXTS_RULE_END 2245 }, 2246 }; 2247 2248 static RISCVCPUImpliedExtsRule ZCMT_IMPLIED = { 2249 .ext = CPU_CFG_OFFSET(ext_zcmt), 2250 .implied_multi_exts = { 2251 CPU_CFG_OFFSET(ext_zca), CPU_CFG_OFFSET(ext_zicsr), 2252 2253 RISCV_IMPLIED_EXTS_RULE_END 2254 }, 2255 }; 2256 2257 static RISCVCPUImpliedExtsRule ZDINX_IMPLIED = { 2258 .ext = CPU_CFG_OFFSET(ext_zdinx), 2259 .implied_multi_exts = { 2260 CPU_CFG_OFFSET(ext_zfinx), 2261 2262 RISCV_IMPLIED_EXTS_RULE_END 2263 }, 2264 }; 2265 2266 static RISCVCPUImpliedExtsRule ZFA_IMPLIED = { 2267 .ext = CPU_CFG_OFFSET(ext_zfa), 2268 .implied_misa_exts = RVF, 2269 .implied_multi_exts = { RISCV_IMPLIED_EXTS_RULE_END }, 2270 }; 2271 2272 static RISCVCPUImpliedExtsRule ZFBFMIN_IMPLIED = { 2273 .ext = CPU_CFG_OFFSET(ext_zfbfmin), 2274 .implied_misa_exts = RVF, 2275 .implied_multi_exts = { RISCV_IMPLIED_EXTS_RULE_END }, 2276 }; 2277 2278 static RISCVCPUImpliedExtsRule ZFH_IMPLIED = { 2279 .ext = CPU_CFG_OFFSET(ext_zfh), 2280 .implied_multi_exts = { 2281 CPU_CFG_OFFSET(ext_zfhmin), 2282 2283 RISCV_IMPLIED_EXTS_RULE_END 2284 }, 2285 }; 2286 2287 static RISCVCPUImpliedExtsRule ZFHMIN_IMPLIED = { 2288 .ext = CPU_CFG_OFFSET(ext_zfhmin), 2289 .implied_misa_exts = RVF, 2290 .implied_multi_exts = { RISCV_IMPLIED_EXTS_RULE_END }, 2291 }; 2292 2293 static RISCVCPUImpliedExtsRule ZFINX_IMPLIED = { 2294 .ext = CPU_CFG_OFFSET(ext_zfinx), 2295 .implied_multi_exts = { 2296 CPU_CFG_OFFSET(ext_zicsr), 2297 2298 RISCV_IMPLIED_EXTS_RULE_END 2299 }, 2300 }; 2301 2302 static RISCVCPUImpliedExtsRule ZHINX_IMPLIED = { 2303 .ext = CPU_CFG_OFFSET(ext_zhinx), 2304 .implied_multi_exts = { 2305 CPU_CFG_OFFSET(ext_zhinxmin), 2306 2307 RISCV_IMPLIED_EXTS_RULE_END 2308 }, 2309 }; 2310 2311 static RISCVCPUImpliedExtsRule ZHINXMIN_IMPLIED = { 2312 .ext = CPU_CFG_OFFSET(ext_zhinxmin), 2313 .implied_multi_exts = { 2314 CPU_CFG_OFFSET(ext_zfinx), 2315 2316 RISCV_IMPLIED_EXTS_RULE_END 2317 }, 2318 }; 2319 2320 static RISCVCPUImpliedExtsRule ZICNTR_IMPLIED = { 2321 .ext = CPU_CFG_OFFSET(ext_zicntr), 2322 .implied_multi_exts = { 2323 CPU_CFG_OFFSET(ext_zicsr), 2324 2325 RISCV_IMPLIED_EXTS_RULE_END 2326 }, 2327 }; 2328 2329 static RISCVCPUImpliedExtsRule ZIHPM_IMPLIED = { 2330 .ext = CPU_CFG_OFFSET(ext_zihpm), 2331 .implied_multi_exts = { 2332 CPU_CFG_OFFSET(ext_zicsr), 2333 2334 RISCV_IMPLIED_EXTS_RULE_END 2335 }, 2336 }; 2337 2338 static RISCVCPUImpliedExtsRule ZK_IMPLIED = { 2339 .ext = CPU_CFG_OFFSET(ext_zk), 2340 .implied_multi_exts = { 2341 CPU_CFG_OFFSET(ext_zkn), CPU_CFG_OFFSET(ext_zkr), 2342 CPU_CFG_OFFSET(ext_zkt), 2343 2344 RISCV_IMPLIED_EXTS_RULE_END 2345 }, 2346 }; 2347 2348 static 
RISCVCPUImpliedExtsRule ZKN_IMPLIED = { 2349 .ext = CPU_CFG_OFFSET(ext_zkn), 2350 .implied_multi_exts = { 2351 CPU_CFG_OFFSET(ext_zbkb), CPU_CFG_OFFSET(ext_zbkc), 2352 CPU_CFG_OFFSET(ext_zbkx), CPU_CFG_OFFSET(ext_zkne), 2353 CPU_CFG_OFFSET(ext_zknd), CPU_CFG_OFFSET(ext_zknh), 2354 2355 RISCV_IMPLIED_EXTS_RULE_END 2356 }, 2357 }; 2358 2359 static RISCVCPUImpliedExtsRule ZKS_IMPLIED = { 2360 .ext = CPU_CFG_OFFSET(ext_zks), 2361 .implied_multi_exts = { 2362 CPU_CFG_OFFSET(ext_zbkb), CPU_CFG_OFFSET(ext_zbkc), 2363 CPU_CFG_OFFSET(ext_zbkx), CPU_CFG_OFFSET(ext_zksed), 2364 CPU_CFG_OFFSET(ext_zksh), 2365 2366 RISCV_IMPLIED_EXTS_RULE_END 2367 }, 2368 }; 2369 2370 static RISCVCPUImpliedExtsRule ZVBB_IMPLIED = { 2371 .ext = CPU_CFG_OFFSET(ext_zvbb), 2372 .implied_multi_exts = { 2373 CPU_CFG_OFFSET(ext_zvkb), 2374 2375 RISCV_IMPLIED_EXTS_RULE_END 2376 }, 2377 }; 2378 2379 static RISCVCPUImpliedExtsRule ZVE32F_IMPLIED = { 2380 .ext = CPU_CFG_OFFSET(ext_zve32f), 2381 .implied_misa_exts = RVF, 2382 .implied_multi_exts = { 2383 CPU_CFG_OFFSET(ext_zve32x), 2384 2385 RISCV_IMPLIED_EXTS_RULE_END 2386 }, 2387 }; 2388 2389 static RISCVCPUImpliedExtsRule ZVE32X_IMPLIED = { 2390 .ext = CPU_CFG_OFFSET(ext_zve32x), 2391 .implied_multi_exts = { 2392 CPU_CFG_OFFSET(ext_zicsr), 2393 2394 RISCV_IMPLIED_EXTS_RULE_END 2395 }, 2396 }; 2397 2398 static RISCVCPUImpliedExtsRule ZVE64D_IMPLIED = { 2399 .ext = CPU_CFG_OFFSET(ext_zve64d), 2400 .implied_misa_exts = RVD, 2401 .implied_multi_exts = { 2402 CPU_CFG_OFFSET(ext_zve64f), 2403 2404 RISCV_IMPLIED_EXTS_RULE_END 2405 }, 2406 }; 2407 2408 static RISCVCPUImpliedExtsRule ZVE64F_IMPLIED = { 2409 .ext = CPU_CFG_OFFSET(ext_zve64f), 2410 .implied_misa_exts = RVF, 2411 .implied_multi_exts = { 2412 CPU_CFG_OFFSET(ext_zve32f), CPU_CFG_OFFSET(ext_zve64x), 2413 2414 RISCV_IMPLIED_EXTS_RULE_END 2415 }, 2416 }; 2417 2418 static RISCVCPUImpliedExtsRule ZVE64X_IMPLIED = { 2419 .ext = CPU_CFG_OFFSET(ext_zve64x), 2420 .implied_multi_exts = { 2421 CPU_CFG_OFFSET(ext_zve32x), 2422 2423 RISCV_IMPLIED_EXTS_RULE_END 2424 }, 2425 }; 2426 2427 static RISCVCPUImpliedExtsRule ZVFBFMIN_IMPLIED = { 2428 .ext = CPU_CFG_OFFSET(ext_zvfbfmin), 2429 .implied_multi_exts = { 2430 CPU_CFG_OFFSET(ext_zve32f), 2431 2432 RISCV_IMPLIED_EXTS_RULE_END 2433 }, 2434 }; 2435 2436 static RISCVCPUImpliedExtsRule ZVFBFWMA_IMPLIED = { 2437 .ext = CPU_CFG_OFFSET(ext_zvfbfwma), 2438 .implied_multi_exts = { 2439 CPU_CFG_OFFSET(ext_zvfbfmin), CPU_CFG_OFFSET(ext_zfbfmin), 2440 2441 RISCV_IMPLIED_EXTS_RULE_END 2442 }, 2443 }; 2444 2445 static RISCVCPUImpliedExtsRule ZVFH_IMPLIED = { 2446 .ext = CPU_CFG_OFFSET(ext_zvfh), 2447 .implied_multi_exts = { 2448 CPU_CFG_OFFSET(ext_zvfhmin), CPU_CFG_OFFSET(ext_zfhmin), 2449 2450 RISCV_IMPLIED_EXTS_RULE_END 2451 }, 2452 }; 2453 2454 static RISCVCPUImpliedExtsRule ZVFHMIN_IMPLIED = { 2455 .ext = CPU_CFG_OFFSET(ext_zvfhmin), 2456 .implied_multi_exts = { 2457 CPU_CFG_OFFSET(ext_zve32f), 2458 2459 RISCV_IMPLIED_EXTS_RULE_END 2460 }, 2461 }; 2462 2463 static RISCVCPUImpliedExtsRule ZVKN_IMPLIED = { 2464 .ext = CPU_CFG_OFFSET(ext_zvkn), 2465 .implied_multi_exts = { 2466 CPU_CFG_OFFSET(ext_zvkned), CPU_CFG_OFFSET(ext_zvknhb), 2467 CPU_CFG_OFFSET(ext_zvkb), CPU_CFG_OFFSET(ext_zvkt), 2468 2469 RISCV_IMPLIED_EXTS_RULE_END 2470 }, 2471 }; 2472 2473 static RISCVCPUImpliedExtsRule ZVKNC_IMPLIED = { 2474 .ext = CPU_CFG_OFFSET(ext_zvknc), 2475 .implied_multi_exts = { 2476 CPU_CFG_OFFSET(ext_zvkn), CPU_CFG_OFFSET(ext_zvbc), 2477 2478 RISCV_IMPLIED_EXTS_RULE_END 2479 }, 2480 }; 2481 2482 static 
RISCVCPUImpliedExtsRule ZVKNG_IMPLIED = { 2483 .ext = CPU_CFG_OFFSET(ext_zvkng), 2484 .implied_multi_exts = { 2485 CPU_CFG_OFFSET(ext_zvkn), CPU_CFG_OFFSET(ext_zvkg), 2486 2487 RISCV_IMPLIED_EXTS_RULE_END 2488 }, 2489 }; 2490 2491 static RISCVCPUImpliedExtsRule ZVKNHB_IMPLIED = { 2492 .ext = CPU_CFG_OFFSET(ext_zvknhb), 2493 .implied_multi_exts = { 2494 CPU_CFG_OFFSET(ext_zve64x), 2495 2496 RISCV_IMPLIED_EXTS_RULE_END 2497 }, 2498 }; 2499 2500 static RISCVCPUImpliedExtsRule ZVKS_IMPLIED = { 2501 .ext = CPU_CFG_OFFSET(ext_zvks), 2502 .implied_multi_exts = { 2503 CPU_CFG_OFFSET(ext_zvksed), CPU_CFG_OFFSET(ext_zvksh), 2504 CPU_CFG_OFFSET(ext_zvkb), CPU_CFG_OFFSET(ext_zvkt), 2505 2506 RISCV_IMPLIED_EXTS_RULE_END 2507 }, 2508 }; 2509 2510 static RISCVCPUImpliedExtsRule ZVKSC_IMPLIED = { 2511 .ext = CPU_CFG_OFFSET(ext_zvksc), 2512 .implied_multi_exts = { 2513 CPU_CFG_OFFSET(ext_zvks), CPU_CFG_OFFSET(ext_zvbc), 2514 2515 RISCV_IMPLIED_EXTS_RULE_END 2516 }, 2517 }; 2518 2519 static RISCVCPUImpliedExtsRule ZVKSG_IMPLIED = { 2520 .ext = CPU_CFG_OFFSET(ext_zvksg), 2521 .implied_multi_exts = { 2522 CPU_CFG_OFFSET(ext_zvks), CPU_CFG_OFFSET(ext_zvkg), 2523 2524 RISCV_IMPLIED_EXTS_RULE_END 2525 }, 2526 }; 2527 2528 static RISCVCPUImpliedExtsRule SSCFG_IMPLIED = { 2529 .ext = CPU_CFG_OFFSET(ext_ssccfg), 2530 .implied_multi_exts = { 2531 CPU_CFG_OFFSET(ext_smcsrind), CPU_CFG_OFFSET(ext_sscsrind), 2532 CPU_CFG_OFFSET(ext_smcdeleg), 2533 2534 RISCV_IMPLIED_EXTS_RULE_END 2535 }, 2536 }; 2537 2538 static RISCVCPUImpliedExtsRule SUPM_IMPLIED = { 2539 .ext = CPU_CFG_OFFSET(ext_supm), 2540 .implied_multi_exts = { 2541 CPU_CFG_OFFSET(ext_ssnpm), CPU_CFG_OFFSET(ext_smnpm), 2542 2543 RISCV_IMPLIED_EXTS_RULE_END 2544 }, 2545 }; 2546 2547 static RISCVCPUImpliedExtsRule SSPM_IMPLIED = { 2548 .ext = CPU_CFG_OFFSET(ext_sspm), 2549 .implied_multi_exts = { 2550 CPU_CFG_OFFSET(ext_smnpm), 2551 2552 RISCV_IMPLIED_EXTS_RULE_END 2553 }, 2554 }; 2555 2556 static RISCVCPUImpliedExtsRule SMCTR_IMPLIED = { 2557 .ext = CPU_CFG_OFFSET(ext_smctr), 2558 .implied_misa_exts = RVS, 2559 .implied_multi_exts = { 2560 CPU_CFG_OFFSET(ext_sscsrind), 2561 2562 RISCV_IMPLIED_EXTS_RULE_END 2563 }, 2564 }; 2565 2566 static RISCVCPUImpliedExtsRule SSCTR_IMPLIED = { 2567 .ext = CPU_CFG_OFFSET(ext_ssctr), 2568 .implied_misa_exts = RVS, 2569 .implied_multi_exts = { 2570 CPU_CFG_OFFSET(ext_sscsrind), 2571 2572 RISCV_IMPLIED_EXTS_RULE_END 2573 }, 2574 }; 2575 2576 RISCVCPUImpliedExtsRule *riscv_misa_ext_implied_rules[] = { 2577 &RVA_IMPLIED, &RVD_IMPLIED, &RVF_IMPLIED, 2578 &RVM_IMPLIED, &RVV_IMPLIED, NULL 2579 }; 2580 2581 RISCVCPUImpliedExtsRule *riscv_multi_ext_implied_rules[] = { 2582 &ZCB_IMPLIED, &ZCD_IMPLIED, &ZCE_IMPLIED, 2583 &ZCF_IMPLIED, &ZCMP_IMPLIED, &ZCMT_IMPLIED, 2584 &ZDINX_IMPLIED, &ZFA_IMPLIED, &ZFBFMIN_IMPLIED, 2585 &ZFH_IMPLIED, &ZFHMIN_IMPLIED, &ZFINX_IMPLIED, 2586 &ZHINX_IMPLIED, &ZHINXMIN_IMPLIED, &ZICNTR_IMPLIED, 2587 &ZIHPM_IMPLIED, &ZK_IMPLIED, &ZKN_IMPLIED, 2588 &ZKS_IMPLIED, &ZVBB_IMPLIED, &ZVE32F_IMPLIED, 2589 &ZVE32X_IMPLIED, &ZVE64D_IMPLIED, &ZVE64F_IMPLIED, 2590 &ZVE64X_IMPLIED, &ZVFBFMIN_IMPLIED, &ZVFBFWMA_IMPLIED, 2591 &ZVFH_IMPLIED, &ZVFHMIN_IMPLIED, &ZVKN_IMPLIED, 2592 &ZVKNC_IMPLIED, &ZVKNG_IMPLIED, &ZVKNHB_IMPLIED, 2593 &ZVKS_IMPLIED, &ZVKSC_IMPLIED, &ZVKSG_IMPLIED, &SSCFG_IMPLIED, 2594 &SUPM_IMPLIED, &SSPM_IMPLIED, &SMCTR_IMPLIED, &SSCTR_IMPLIED, 2595 NULL 2596 }; 2597 2598 static const Property riscv_cpu_properties[] = { 2599 DEFINE_PROP_BOOL("debug", RISCVCPU, cfg.debug, true), 2600 2601 {.name = "pmu-mask", 
.info = &prop_pmu_mask}, 2602 {.name = "pmu-num", .info = &prop_pmu_num}, /* Deprecated */ 2603 2604 {.name = "mmu", .info = &prop_mmu}, 2605 {.name = "pmp", .info = &prop_pmp}, 2606 {.name = "num-pmp-regions", .info = &prop_num_pmp_regions}, 2607 2608 {.name = "priv_spec", .info = &prop_priv_spec}, 2609 {.name = "vext_spec", .info = &prop_vext_spec}, 2610 2611 {.name = "vlen", .info = &prop_vlen}, 2612 {.name = "elen", .info = &prop_elen}, 2613 2614 {.name = "cbom_blocksize", .info = &prop_cbom_blksize}, 2615 {.name = "cbop_blocksize", .info = &prop_cbop_blksize}, 2616 {.name = "cboz_blocksize", .info = &prop_cboz_blksize}, 2617 2618 {.name = "mvendorid", .info = &prop_mvendorid}, 2619 {.name = "mimpid", .info = &prop_mimpid}, 2620 {.name = "marchid", .info = &prop_marchid}, 2621 2622 #ifndef CONFIG_USER_ONLY 2623 DEFINE_PROP_UINT64("resetvec", RISCVCPU, env.resetvec, DEFAULT_RSTVEC), 2624 DEFINE_PROP_UINT64("rnmi-interrupt-vector", RISCVCPU, env.rnmi_irqvec, 2625 DEFAULT_RNMI_IRQVEC), 2626 DEFINE_PROP_UINT64("rnmi-exception-vector", RISCVCPU, env.rnmi_excpvec, 2627 DEFAULT_RNMI_EXCPVEC), 2628 #endif 2629 2630 DEFINE_PROP_BOOL("short-isa-string", RISCVCPU, cfg.short_isa_string, false), 2631 2632 DEFINE_PROP_BOOL("rvv_ta_all_1s", RISCVCPU, cfg.rvv_ta_all_1s, false), 2633 DEFINE_PROP_BOOL("rvv_ma_all_1s", RISCVCPU, cfg.rvv_ma_all_1s, false), 2634 DEFINE_PROP_BOOL("rvv_vl_half_avl", RISCVCPU, cfg.rvv_vl_half_avl, false), 2635 2636 /* 2637 * write_misa() is marked as experimental for now so mark 2638 * it with -x and default to 'false'. 2639 */ 2640 DEFINE_PROP_BOOL("x-misa-w", RISCVCPU, cfg.misa_w, false), 2641 }; 2642 2643 static const gchar *riscv_gdb_arch_name(CPUState *cs) 2644 { 2645 RISCVCPU *cpu = RISCV_CPU(cs); 2646 CPURISCVState *env = &cpu->env; 2647 2648 switch (riscv_cpu_mxl(env)) { 2649 case MXL_RV32: 2650 return "riscv:rv32"; 2651 case MXL_RV64: 2652 case MXL_RV128: 2653 return "riscv:rv64"; 2654 default: 2655 g_assert_not_reached(); 2656 } 2657 } 2658 2659 #ifndef CONFIG_USER_ONLY 2660 static int64_t riscv_get_arch_id(CPUState *cs) 2661 { 2662 RISCVCPU *cpu = RISCV_CPU(cs); 2663 2664 return cpu->env.mhartid; 2665 } 2666 2667 #include "hw/core/sysemu-cpu-ops.h" 2668 2669 static const struct SysemuCPUOps riscv_sysemu_ops = { 2670 .has_work = riscv_cpu_has_work, 2671 .get_phys_page_debug = riscv_cpu_get_phys_page_debug, 2672 .write_elf64_note = riscv_cpu_write_elf64_note, 2673 .write_elf32_note = riscv_cpu_write_elf32_note, 2674 .legacy_vmsd = &vmstate_riscv_cpu, 2675 }; 2676 #endif 2677 2678 static void riscv_cpu_common_class_init(ObjectClass *c, const void *data) 2679 { 2680 RISCVCPUClass *mcc = RISCV_CPU_CLASS(c); 2681 CPUClass *cc = CPU_CLASS(c); 2682 DeviceClass *dc = DEVICE_CLASS(c); 2683 ResettableClass *rc = RESETTABLE_CLASS(c); 2684 2685 device_class_set_parent_realize(dc, riscv_cpu_realize, 2686 &mcc->parent_realize); 2687 2688 resettable_class_set_parent_phases(rc, NULL, riscv_cpu_reset_hold, NULL, 2689 &mcc->parent_phases); 2690 2691 cc->class_by_name = riscv_cpu_class_by_name; 2692 cc->dump_state = riscv_cpu_dump_state; 2693 cc->set_pc = riscv_cpu_set_pc; 2694 cc->get_pc = riscv_cpu_get_pc; 2695 cc->gdb_read_register = riscv_cpu_gdb_read_register; 2696 cc->gdb_write_register = riscv_cpu_gdb_write_register; 2697 cc->gdb_stop_before_watchpoint = true; 2698 cc->disas_set_info = riscv_cpu_disas_set_info; 2699 #ifndef CONFIG_USER_ONLY 2700 cc->sysemu_ops = &riscv_sysemu_ops; 2701 cc->get_arch_id = riscv_get_arch_id; 2702 #endif 2703 cc->gdb_arch_name = 
riscv_gdb_arch_name; 2704 #ifdef CONFIG_TCG 2705 cc->tcg_ops = &riscv_tcg_ops; 2706 #endif /* CONFIG_TCG */ 2707 2708 device_class_set_props(dc, riscv_cpu_properties); 2709 } 2710 2711 static bool profile_extends(RISCVCPUProfile *trial, RISCVCPUProfile *parent) 2712 { 2713 RISCVCPUProfile *curr; 2714 if (!parent) { 2715 return true; 2716 } 2717 2718 curr = trial; 2719 while (curr) { 2720 if (curr == parent) { 2721 return true; 2722 } 2723 curr = curr->u_parent; 2724 } 2725 2726 curr = trial; 2727 while (curr) { 2728 if (curr == parent) { 2729 return true; 2730 } 2731 curr = curr->s_parent; 2732 } 2733 2734 return false; 2735 } 2736 2737 static void riscv_cpu_class_base_init(ObjectClass *c, const void *data) 2738 { 2739 RISCVCPUClass *mcc = RISCV_CPU_CLASS(c); 2740 RISCVCPUClass *pcc = RISCV_CPU_CLASS(object_class_get_parent(c)); 2741 2742 if (pcc->def) { 2743 mcc->def = g_memdup2(pcc->def, sizeof(*pcc->def)); 2744 } else { 2745 mcc->def = g_new0(RISCVCPUDef, 1); 2746 } 2747 2748 if (data) { 2749 const RISCVCPUDef *def = data; 2750 mcc->def->bare |= def->bare; 2751 if (def->profile) { 2752 assert(profile_extends(def->profile, mcc->def->profile)); 2753 assert(mcc->def->bare); 2754 mcc->def->profile = def->profile; 2755 } 2756 if (def->misa_mxl_max) { 2757 assert(def->misa_mxl_max <= MXL_RV128); 2758 mcc->def->misa_mxl_max = def->misa_mxl_max; 2759 2760 #ifndef CONFIG_USER_ONLY 2761 /* 2762 * Hack to simplify CPU class hierarchies that include both 32- and 2763 * 64-bit models: reduce SV39/48/57/64 to SV32 for 32-bit models. 2764 */ 2765 if (mcc->def->misa_mxl_max == MXL_RV32 && 2766 !valid_vm_1_10_32[mcc->def->cfg.max_satp_mode]) { 2767 mcc->def->cfg.max_satp_mode = VM_1_10_SV32; 2768 } 2769 #endif 2770 } 2771 if (def->priv_spec != RISCV_PROFILE_ATTR_UNUSED) { 2772 assert(def->priv_spec <= PRIV_VERSION_LATEST); 2773 mcc->def->priv_spec = def->priv_spec; 2774 } 2775 if (def->vext_spec != RISCV_PROFILE_ATTR_UNUSED) { 2776 assert(def->vext_spec != 0); 2777 mcc->def->vext_spec = def->vext_spec; 2778 } 2779 mcc->def->misa_ext |= def->misa_ext; 2780 2781 riscv_cpu_cfg_merge(&mcc->def->cfg, &def->cfg); 2782 2783 if (def->custom_csrs) { 2784 assert(!mcc->def->custom_csrs); 2785 mcc->def->custom_csrs = def->custom_csrs; 2786 } 2787 } 2788 2789 if (!object_class_is_abstract(c)) { 2790 riscv_cpu_validate_misa_mxl(mcc); 2791 } 2792 } 2793 2794 static void riscv_isa_string_ext(RISCVCPU *cpu, char **isa_str, 2795 int max_str_len) 2796 { 2797 const RISCVIsaExtData *edata; 2798 char *old = *isa_str; 2799 char *new = *isa_str; 2800 2801 for (edata = isa_edata_arr; edata && edata->name; edata++) { 2802 if (isa_ext_is_enabled(cpu, edata->ext_enable_offset)) { 2803 new = g_strconcat(old, "_", edata->name, NULL); 2804 g_free(old); 2805 old = new; 2806 } 2807 } 2808 2809 *isa_str = new; 2810 } 2811 2812 char *riscv_isa_string(RISCVCPU *cpu) 2813 { 2814 RISCVCPUClass *mcc = RISCV_CPU_GET_CLASS(cpu); 2815 int i; 2816 const size_t maxlen = sizeof("rv128") + sizeof(riscv_single_letter_exts); 2817 char *isa_str = g_new(char, maxlen); 2818 int xlen = riscv_cpu_max_xlen(mcc); 2819 char *p = isa_str + snprintf(isa_str, maxlen, "rv%d", xlen); 2820 2821 for (i = 0; i < sizeof(riscv_single_letter_exts) - 1; i++) { 2822 if (cpu->env.misa_ext & RV(riscv_single_letter_exts[i])) { 2823 *p++ = qemu_tolower(riscv_single_letter_exts[i]); 2824 } 2825 } 2826 *p = '\0'; 2827 if (!cpu->cfg.short_isa_string) { 2828 riscv_isa_string_ext(cpu, &isa_str, maxlen); 2829 } 2830 return isa_str; 2831 } 2832 2833 #ifndef CONFIG_USER_ONLY 2834 
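
/*
 * A short note on the device tree output below: in addition to the legacy
 * "riscv,isa" string built by riscv_isa_string() above (for a CPU with
 * IMAFDC plus Zicsr/Zifencei enabled this comes out as something like
 * "rv64imafdc_zicsr_zifencei"), riscv_isa_write_fdt() also emits the newer
 * "riscv,isa-base" string and the "riscv,isa-extensions" string array,
 * which lists every enabled extension as its own entry ("i", "m", ...,
 * "zicsr", "zifencei", ...).
 */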
static char **riscv_isa_extensions_list(RISCVCPU *cpu, int *count) 2835 { 2836 int maxlen = ARRAY_SIZE(riscv_single_letter_exts) + ARRAY_SIZE(isa_edata_arr); 2837 char **extensions = g_new(char *, maxlen); 2838 2839 for (int i = 0; i < sizeof(riscv_single_letter_exts) - 1; i++) { 2840 if (cpu->env.misa_ext & RV(riscv_single_letter_exts[i])) { 2841 extensions[*count] = g_new(char, 2); 2842 snprintf(extensions[*count], 2, "%c", 2843 qemu_tolower(riscv_single_letter_exts[i])); 2844 (*count)++; 2845 } 2846 } 2847 2848 for (const RISCVIsaExtData *edata = isa_edata_arr; edata->name; edata++) { 2849 if (isa_ext_is_enabled(cpu, edata->ext_enable_offset)) { 2850 extensions[*count] = g_strdup(edata->name); 2851 (*count)++; 2852 } 2853 } 2854 2855 return extensions; 2856 } 2857 2858 void riscv_isa_write_fdt(RISCVCPU *cpu, void *fdt, char *nodename) 2859 { 2860 RISCVCPUClass *mcc = RISCV_CPU_GET_CLASS(cpu); 2861 const size_t maxlen = sizeof("rv128i"); 2862 g_autofree char *isa_base = g_new(char, maxlen); 2863 g_autofree char *riscv_isa; 2864 char **isa_extensions; 2865 int count = 0; 2866 int xlen = riscv_cpu_max_xlen(mcc); 2867 2868 riscv_isa = riscv_isa_string(cpu); 2869 qemu_fdt_setprop_string(fdt, nodename, "riscv,isa", riscv_isa); 2870 2871 snprintf(isa_base, maxlen, "rv%di", xlen); 2872 qemu_fdt_setprop_string(fdt, nodename, "riscv,isa-base", isa_base); 2873 2874 isa_extensions = riscv_isa_extensions_list(cpu, &count); 2875 qemu_fdt_setprop_string_array(fdt, nodename, "riscv,isa-extensions", 2876 isa_extensions, count); 2877 2878 for (int i = 0; i < count; i++) { 2879 g_free(isa_extensions[i]); 2880 } 2881 2882 g_free(isa_extensions); 2883 } 2884 #endif 2885 2886 #define DEFINE_ABSTRACT_RISCV_CPU(type_name, parent_type_name, ...) \ 2887 { \ 2888 .name = (type_name), \ 2889 .parent = (parent_type_name), \ 2890 .abstract = true, \ 2891 .class_data = &(const RISCVCPUDef) { \ 2892 .priv_spec = RISCV_PROFILE_ATTR_UNUSED, \ 2893 .vext_spec = RISCV_PROFILE_ATTR_UNUSED, \ 2894 .cfg.max_satp_mode = -1, \ 2895 __VA_ARGS__ \ 2896 }, \ 2897 } 2898 2899 #define DEFINE_RISCV_CPU(type_name, parent_type_name, ...) \ 2900 { \ 2901 .name = (type_name), \ 2902 .parent = (parent_type_name), \ 2903 .class_data = &(const RISCVCPUDef) { \ 2904 .priv_spec = RISCV_PROFILE_ATTR_UNUSED, \ 2905 .vext_spec = RISCV_PROFILE_ATTR_UNUSED, \ 2906 .cfg.max_satp_mode = -1, \ 2907 __VA_ARGS__ \ 2908 }, \ 2909 } 2910 2911 #define DEFINE_PROFILE_CPU(type_name, parent_type_name, profile_) \ 2912 DEFINE_RISCV_CPU(type_name, parent_type_name, \ 2913 .profile = &(profile_)) 2914 2915 static const TypeInfo riscv_cpu_type_infos[] = { 2916 { 2917 .name = TYPE_RISCV_CPU, 2918 .parent = TYPE_CPU, 2919 .instance_size = sizeof(RISCVCPU), 2920 .instance_align = __alignof(RISCVCPU), 2921 .instance_init = riscv_cpu_init, 2922 .abstract = true, 2923 .class_size = sizeof(RISCVCPUClass), 2924 .class_init = riscv_cpu_common_class_init, 2925 .class_base_init = riscv_cpu_class_base_init, 2926 }, 2927 2928 DEFINE_ABSTRACT_RISCV_CPU(TYPE_RISCV_DYNAMIC_CPU, TYPE_RISCV_CPU, 2929 .cfg.mmu = true, 2930 .cfg.pmp = true, 2931 .priv_spec = PRIV_VERSION_LATEST, 2932 ), 2933 2934 DEFINE_ABSTRACT_RISCV_CPU(TYPE_RISCV_VENDOR_CPU, TYPE_RISCV_CPU), 2935 DEFINE_ABSTRACT_RISCV_CPU(TYPE_RISCV_BARE_CPU, TYPE_RISCV_CPU, 2936 /* 2937 * Bare CPUs do not inherit the timer and performance 2938 * counters from the parent class (see riscv_cpu_init() 2939 * for info on why the parent enables them). 2940 * 2941 * Users have to explicitly enable these counters for 2942 * bare CPUs. 
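     *
     * (For instance, something like "-cpu rv64i,zicntr=true,zihpm=true"
     * would be needed to get the timer/cycle/instret counters and the
     * hpmcounters on a bare CPU.)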
2943 */ 2944 .bare = true, 2945 2946 /* Set to QEMU's first supported priv version */ 2947 .priv_spec = PRIV_VERSION_1_10_0, 2948 2949 /* 2950 * Support all available satp_mode settings. By default 2951 * only MBARE will be available if the user doesn't enable 2952 * a mode manually (see riscv_cpu_satp_mode_finalize()). 2953 */ 2954 #ifdef TARGET_RISCV32 2955 .cfg.max_satp_mode = VM_1_10_SV32, 2956 #else 2957 .cfg.max_satp_mode = VM_1_10_SV57, 2958 #endif 2959 ), 2960 2961 DEFINE_RISCV_CPU(TYPE_RISCV_CPU_MAX, TYPE_RISCV_DYNAMIC_CPU, 2962 #if defined(TARGET_RISCV32) 2963 .misa_mxl_max = MXL_RV32, 2964 .cfg.max_satp_mode = VM_1_10_SV32, 2965 #elif defined(TARGET_RISCV64) 2966 .misa_mxl_max = MXL_RV64, 2967 .cfg.max_satp_mode = VM_1_10_SV57, 2968 #endif 2969 ), 2970 2971 DEFINE_ABSTRACT_RISCV_CPU(TYPE_RISCV_CPU_SIFIVE_E, TYPE_RISCV_VENDOR_CPU, 2972 .misa_ext = RVI | RVM | RVA | RVC | RVU, 2973 .priv_spec = PRIV_VERSION_1_10_0, 2974 .cfg.max_satp_mode = VM_1_10_MBARE, 2975 .cfg.ext_zifencei = true, 2976 .cfg.ext_zicsr = true, 2977 .cfg.pmp = true, 2978 .cfg.pmp_regions = 8 2979 ), 2980 2981 DEFINE_ABSTRACT_RISCV_CPU(TYPE_RISCV_CPU_SIFIVE_U, TYPE_RISCV_VENDOR_CPU, 2982 .misa_ext = RVI | RVM | RVA | RVF | RVD | RVC | RVS | RVU, 2983 .priv_spec = PRIV_VERSION_1_10_0, 2984 2985 .cfg.max_satp_mode = VM_1_10_SV39, 2986 .cfg.ext_zifencei = true, 2987 .cfg.ext_zicsr = true, 2988 .cfg.mmu = true, 2989 .cfg.pmp = true, 2990 .cfg.pmp_regions = 8 2991 ), 2992 2993 #if defined(TARGET_RISCV32) || \ 2994 (defined(TARGET_RISCV64) && !defined(CONFIG_USER_ONLY)) 2995 DEFINE_RISCV_CPU(TYPE_RISCV_CPU_BASE32, TYPE_RISCV_DYNAMIC_CPU, 2996 .cfg.max_satp_mode = VM_1_10_SV32, 2997 .misa_mxl_max = MXL_RV32, 2998 ), 2999 3000 DEFINE_RISCV_CPU(TYPE_RISCV_CPU_IBEX, TYPE_RISCV_VENDOR_CPU, 3001 .misa_mxl_max = MXL_RV32, 3002 .misa_ext = RVI | RVM | RVC | RVU, 3003 .priv_spec = PRIV_VERSION_1_12_0, 3004 .cfg.max_satp_mode = VM_1_10_MBARE, 3005 .cfg.ext_zifencei = true, 3006 .cfg.ext_zicsr = true, 3007 .cfg.pmp = true, 3008 .cfg.ext_smepmp = true, 3009 3010 .cfg.ext_zba = true, 3011 .cfg.ext_zbb = true, 3012 .cfg.ext_zbc = true, 3013 .cfg.ext_zbs = true 3014 ), 3015 3016 DEFINE_RISCV_CPU(TYPE_RISCV_CPU_SIFIVE_E31, TYPE_RISCV_CPU_SIFIVE_E, 3017 .misa_mxl_max = MXL_RV32 3018 ), 3019 DEFINE_RISCV_CPU(TYPE_RISCV_CPU_SIFIVE_E34, TYPE_RISCV_CPU_SIFIVE_E, 3020 .misa_mxl_max = MXL_RV32, 3021 .misa_ext = RVF, /* IMAFCU */ 3022 ), 3023 3024 DEFINE_RISCV_CPU(TYPE_RISCV_CPU_SIFIVE_U34, TYPE_RISCV_CPU_SIFIVE_U, 3025 .misa_mxl_max = MXL_RV32, 3026 ), 3027 3028 DEFINE_RISCV_CPU(TYPE_RISCV_CPU_RV32I, TYPE_RISCV_BARE_CPU, 3029 .misa_mxl_max = MXL_RV32, 3030 .misa_ext = RVI 3031 ), 3032 DEFINE_RISCV_CPU(TYPE_RISCV_CPU_RV32E, TYPE_RISCV_BARE_CPU, 3033 .misa_mxl_max = MXL_RV32, 3034 .misa_ext = RVE 3035 ), 3036 #endif 3037 3038 #if (defined(TARGET_RISCV64) && !defined(CONFIG_USER_ONLY)) 3039 DEFINE_RISCV_CPU(TYPE_RISCV_CPU_MAX32, TYPE_RISCV_DYNAMIC_CPU, 3040 .cfg.max_satp_mode = VM_1_10_SV32, 3041 .misa_mxl_max = MXL_RV32, 3042 ), 3043 #endif 3044 3045 #if defined(TARGET_RISCV64) 3046 DEFINE_RISCV_CPU(TYPE_RISCV_CPU_BASE64, TYPE_RISCV_DYNAMIC_CPU, 3047 .cfg.max_satp_mode = VM_1_10_SV57, 3048 .misa_mxl_max = MXL_RV64, 3049 ), 3050 3051 DEFINE_RISCV_CPU(TYPE_RISCV_CPU_SIFIVE_E51, TYPE_RISCV_CPU_SIFIVE_E, 3052 .misa_mxl_max = MXL_RV64 3053 ), 3054 3055 DEFINE_RISCV_CPU(TYPE_RISCV_CPU_SIFIVE_U54, TYPE_RISCV_CPU_SIFIVE_U, 3056 .misa_mxl_max = MXL_RV64, 3057 ), 3058 3059 DEFINE_RISCV_CPU(TYPE_RISCV_CPU_SHAKTI_C, TYPE_RISCV_CPU_SIFIVE_U, 3060 
.misa_mxl_max = MXL_RV64, 3061 ), 3062 3063 DEFINE_RISCV_CPU(TYPE_RISCV_CPU_THEAD_C906, TYPE_RISCV_VENDOR_CPU, 3064 .misa_mxl_max = MXL_RV64, 3065 .misa_ext = RVG | RVC | RVS | RVU, 3066 .priv_spec = PRIV_VERSION_1_11_0, 3067 3068 .cfg.ext_zfa = true, 3069 .cfg.ext_zfh = true, 3070 .cfg.mmu = true, 3071 .cfg.ext_xtheadba = true, 3072 .cfg.ext_xtheadbb = true, 3073 .cfg.ext_xtheadbs = true, 3074 .cfg.ext_xtheadcmo = true, 3075 .cfg.ext_xtheadcondmov = true, 3076 .cfg.ext_xtheadfmemidx = true, 3077 .cfg.ext_xtheadmac = true, 3078 .cfg.ext_xtheadmemidx = true, 3079 .cfg.ext_xtheadmempair = true, 3080 .cfg.ext_xtheadsync = true, 3081 .cfg.pmp = true, 3082 3083 .cfg.mvendorid = THEAD_VENDOR_ID, 3084 3085 .cfg.max_satp_mode = VM_1_10_SV39, 3086 #ifndef CONFIG_USER_ONLY 3087 .custom_csrs = th_csr_list, 3088 #endif 3089 ), 3090 3091 DEFINE_RISCV_CPU(TYPE_RISCV_CPU_TT_ASCALON, TYPE_RISCV_VENDOR_CPU, 3092 .misa_mxl_max = MXL_RV64, 3093 .misa_ext = RVG | RVC | RVS | RVU | RVH | RVV, 3094 .priv_spec = PRIV_VERSION_1_13_0, 3095 .vext_spec = VEXT_VERSION_1_00_0, 3096 3097 /* ISA extensions */ 3098 .cfg.mmu = true, 3099 .cfg.vlenb = 256 >> 3, 3100 .cfg.elen = 64, 3101 .cfg.rvv_ma_all_1s = true, 3102 .cfg.rvv_ta_all_1s = true, 3103 .cfg.misa_w = true, 3104 .cfg.pmp = true, 3105 .cfg.cbom_blocksize = 64, 3106 .cfg.cbop_blocksize = 64, 3107 .cfg.cboz_blocksize = 64, 3108 .cfg.ext_zic64b = true, 3109 .cfg.ext_zicbom = true, 3110 .cfg.ext_zicbop = true, 3111 .cfg.ext_zicboz = true, 3112 .cfg.ext_zicntr = true, 3113 .cfg.ext_zicond = true, 3114 .cfg.ext_zicsr = true, 3115 .cfg.ext_zifencei = true, 3116 .cfg.ext_zihintntl = true, 3117 .cfg.ext_zihintpause = true, 3118 .cfg.ext_zihpm = true, 3119 .cfg.ext_zimop = true, 3120 .cfg.ext_zawrs = true, 3121 .cfg.ext_zfa = true, 3122 .cfg.ext_zfbfmin = true, 3123 .cfg.ext_zfh = true, 3124 .cfg.ext_zfhmin = true, 3125 .cfg.ext_zcb = true, 3126 .cfg.ext_zcmop = true, 3127 .cfg.ext_zba = true, 3128 .cfg.ext_zbb = true, 3129 .cfg.ext_zbs = true, 3130 .cfg.ext_zkt = true, 3131 .cfg.ext_zvbb = true, 3132 .cfg.ext_zvbc = true, 3133 .cfg.ext_zvfbfmin = true, 3134 .cfg.ext_zvfbfwma = true, 3135 .cfg.ext_zvfh = true, 3136 .cfg.ext_zvfhmin = true, 3137 .cfg.ext_zvkng = true, 3138 .cfg.ext_smaia = true, 3139 .cfg.ext_smstateen = true, 3140 .cfg.ext_ssaia = true, 3141 .cfg.ext_sscofpmf = true, 3142 .cfg.ext_sstc = true, 3143 .cfg.ext_svade = true, 3144 .cfg.ext_svinval = true, 3145 .cfg.ext_svnapot = true, 3146 .cfg.ext_svpbmt = true, 3147 3148 .cfg.max_satp_mode = VM_1_10_SV57, 3149 ), 3150 3151 DEFINE_RISCV_CPU(TYPE_RISCV_CPU_VEYRON_V1, TYPE_RISCV_VENDOR_CPU, 3152 .misa_mxl_max = MXL_RV64, 3153 .misa_ext = RVG | RVC | RVS | RVU | RVH, 3154 .priv_spec = PRIV_VERSION_1_12_0, 3155 3156 /* ISA extensions */ 3157 .cfg.mmu = true, 3158 .cfg.ext_zifencei = true, 3159 .cfg.ext_zicsr = true, 3160 .cfg.pmp = true, 3161 .cfg.ext_zicbom = true, 3162 .cfg.cbom_blocksize = 64, 3163 .cfg.cboz_blocksize = 64, 3164 .cfg.ext_zicboz = true, 3165 .cfg.ext_smaia = true, 3166 .cfg.ext_ssaia = true, 3167 .cfg.ext_sscofpmf = true, 3168 .cfg.ext_sstc = true, 3169 .cfg.ext_svinval = true, 3170 .cfg.ext_svnapot = true, 3171 .cfg.ext_svpbmt = true, 3172 .cfg.ext_smstateen = true, 3173 .cfg.ext_zba = true, 3174 .cfg.ext_zbb = true, 3175 .cfg.ext_zbc = true, 3176 .cfg.ext_zbs = true, 3177 .cfg.ext_XVentanaCondOps = true, 3178 3179 .cfg.mvendorid = VEYRON_V1_MVENDORID, 3180 .cfg.marchid = VEYRON_V1_MARCHID, 3181 .cfg.mimpid = VEYRON_V1_MIMPID, 3182 3183 .cfg.max_satp_mode = VM_1_10_SV48, 3184 ), 3185 3186 
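
    /*
     * Like the other vendor models in this table, the entries above are
     * selected by name (e.g. something like "-cpu veyron-v1") and keep a
     * fixed identity: the mvendorid/marchid/mimpid and vlen/elen/blocksize
     * setters earlier in this file refuse to change them for non-dynamic
     * (vendor) CPUs.
     */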
DEFINE_RISCV_CPU(TYPE_RISCV_CPU_XIANGSHAN_NANHU, TYPE_RISCV_VENDOR_CPU, 3187 .misa_mxl_max = MXL_RV64, 3188 .misa_ext = RVG | RVC | RVB | RVS | RVU, 3189 .priv_spec = PRIV_VERSION_1_12_0, 3190 3191 /* ISA extensions */ 3192 .cfg.ext_zbc = true, 3193 .cfg.ext_zbkb = true, 3194 .cfg.ext_zbkc = true, 3195 .cfg.ext_zbkx = true, 3196 .cfg.ext_zknd = true, 3197 .cfg.ext_zkne = true, 3198 .cfg.ext_zknh = true, 3199 .cfg.ext_zksed = true, 3200 .cfg.ext_zksh = true, 3201 .cfg.ext_svinval = true, 3202 3203 .cfg.mmu = true, 3204 .cfg.pmp = true, 3205 3206 .cfg.max_satp_mode = VM_1_10_SV39, 3207 ), 3208 3209 DEFINE_RISCV_CPU(TYPE_RISCV_CPU_XIANGSHAN_KMH, TYPE_RISCV_VENDOR_CPU, 3210 .misa_mxl_max = MXL_RV64, 3211 .misa_ext = RVG | RVC | RVB | RVS | RVU | RVH | RVV, 3212 .priv_spec = PRIV_VERSION_1_13_0, 3213 /* 3214 * The RISC-V Instruction Set Manual: Volume I 3215 * Unprivileged Architecture 3216 */ 3217 .cfg.ext_zicntr = true, 3218 .cfg.ext_zihpm = true, 3219 .cfg.ext_zihintntl = true, 3220 .cfg.ext_zihintpause = true, 3221 .cfg.ext_zimop = true, 3222 .cfg.ext_zcmop = true, 3223 .cfg.ext_zicond = true, 3224 .cfg.ext_zawrs = true, 3225 .cfg.ext_zacas = true, 3226 .cfg.ext_zfh = true, 3227 .cfg.ext_zfa = true, 3228 .cfg.ext_zcb = true, 3229 .cfg.ext_zbc = true, 3230 .cfg.ext_zvfh = true, 3231 .cfg.ext_zkn = true, 3232 .cfg.ext_zks = true, 3233 .cfg.ext_zkt = true, 3234 .cfg.ext_zvbb = true, 3235 .cfg.ext_zvkt = true, 3236 /* 3237 * The RISC-V Instruction Set Manual: Volume II 3238 * Privileged Architecture 3239 */ 3240 .cfg.ext_smstateen = true, 3241 .cfg.ext_smcsrind = true, 3242 .cfg.ext_sscsrind = true, 3243 .cfg.ext_svnapot = true, 3244 .cfg.ext_svpbmt = true, 3245 .cfg.ext_svinval = true, 3246 .cfg.ext_sstc = true, 3247 .cfg.ext_sscofpmf = true, 3248 .cfg.ext_ssdbltrp = true, 3249 .cfg.ext_ssnpm = true, 3250 .cfg.ext_smnpm = true, 3251 .cfg.ext_smmpm = true, 3252 .cfg.ext_sspm = true, 3253 .cfg.ext_supm = true, 3254 /* The RISC-V Advanced Interrupt Architecture */ 3255 .cfg.ext_smaia = true, 3256 .cfg.ext_ssaia = true, 3257 /* RVA23 Profiles */ 3258 .cfg.ext_zicbom = true, 3259 .cfg.ext_zicbop = true, 3260 .cfg.ext_zicboz = true, 3261 .cfg.ext_svade = true, 3262 .cfg.mmu = true, 3263 .cfg.pmp = true, 3264 .cfg.max_satp_mode = VM_1_10_SV48, 3265 ), 3266 3267 #if defined(CONFIG_TCG) && !defined(CONFIG_USER_ONLY) 3268 DEFINE_RISCV_CPU(TYPE_RISCV_CPU_BASE128, TYPE_RISCV_DYNAMIC_CPU, 3269 .cfg.max_satp_mode = VM_1_10_SV57, 3270 .misa_mxl_max = MXL_RV128, 3271 ), 3272 #endif /* CONFIG_TCG */ 3273 DEFINE_RISCV_CPU(TYPE_RISCV_CPU_RV64I, TYPE_RISCV_BARE_CPU, 3274 .misa_mxl_max = MXL_RV64, 3275 .misa_ext = RVI 3276 ), 3277 DEFINE_RISCV_CPU(TYPE_RISCV_CPU_RV64E, TYPE_RISCV_BARE_CPU, 3278 .misa_mxl_max = MXL_RV64, 3279 .misa_ext = RVE 3280 ), 3281 3282 DEFINE_PROFILE_CPU(TYPE_RISCV_CPU_RVA22U64, TYPE_RISCV_CPU_RV64I, RVA22U64), 3283 DEFINE_PROFILE_CPU(TYPE_RISCV_CPU_RVA22S64, TYPE_RISCV_CPU_RV64I, RVA22S64), 3284 DEFINE_PROFILE_CPU(TYPE_RISCV_CPU_RVA23U64, TYPE_RISCV_CPU_RV64I, RVA23U64), 3285 DEFINE_PROFILE_CPU(TYPE_RISCV_CPU_RVA23S64, TYPE_RISCV_CPU_RV64I, RVA23S64), 3286 #endif /* TARGET_RISCV64 */ 3287 }; 3288 3289 DEFINE_TYPES(riscv_cpu_type_infos) 3290
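
/*
 * A minimal usage sketch, assuming a system-mode board built on the types
 * registered above: board code instantiates a CPU object, tweaks the
 * properties declared in riscv_cpu_properties, then realizes it, e.g.:
 *
 *     Object *cpuobj = object_new(TYPE_RISCV_CPU_BASE64);
 *
 *     object_property_set_bool(cpuobj, "v", true, &error_fatal);
 *     object_property_set_uint(cpuobj, "vlen", 256, &error_fatal);
 *     qdev_realize(DEVICE(cpuobj), NULL, &error_fatal);
 *
 * Profile CPUs such as "rva22u64" or "rva23s64" instead start with their
 * mandatory extensions already enabled.
 */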