xref: /openbmc/qemu/target/riscv/cpu.c (revision ae4bdcef)
1 /*
2  * QEMU RISC-V CPU
3  *
4  * Copyright (c) 2016-2017 Sagar Karandikar, sagark@eecs.berkeley.edu
5  * Copyright (c) 2017-2018 SiFive, Inc.
6  *
7  * This program is free software; you can redistribute it and/or modify it
8  * under the terms and conditions of the GNU General Public License,
9  * version 2 or later, as published by the Free Software Foundation.
10  *
11  * This program is distributed in the hope it will be useful, but WITHOUT
12  * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
13  * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
14  * more details.
15  *
16  * You should have received a copy of the GNU General Public License along with
17  * this program.  If not, see <http://www.gnu.org/licenses/>.
18  */
19 
20 #include "qemu/osdep.h"
21 #include "qemu/qemu-print.h"
22 #include "qemu/ctype.h"
23 #include "qemu/log.h"
24 #include "cpu.h"
25 #include "cpu_vendorid.h"
26 #include "internals.h"
27 #include "exec/exec-all.h"
28 #include "qapi/error.h"
29 #include "qapi/visitor.h"
30 #include "qemu/error-report.h"
31 #include "hw/qdev-properties.h"
32 #include "hw/core/qdev-prop-internal.h"
33 #include "migration/vmstate.h"
34 #include "fpu/softfloat-helpers.h"
35 #include "sysemu/device_tree.h"
36 #include "sysemu/kvm.h"
37 #include "sysemu/tcg.h"
38 #include "kvm/kvm_riscv.h"
39 #include "tcg/tcg-cpu.h"
40 #include "tcg/tcg.h"
41 
42 /* RISC-V CPU definitions */
43 static const char riscv_single_letter_exts[] = "IEMAFDQCBPVH";
44 const uint32_t misa_bits[] = {RVI, RVE, RVM, RVA, RVF, RVD, RVV,
45                               RVC, RVS, RVU, RVH, RVJ, RVG, RVB, 0};
46 
47 /*
48  * From vector_helper.c
49  * Note that vector data is stored in host-endian 64-bit chunks,
50  * so addressing bytes needs a host-endian fixup.
51  */
52 #if HOST_BIG_ENDIAN
53 #define BYTE(x)   ((x) ^ 7)
54 #else
55 #define BYTE(x)   (x)
56 #endif
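/*
 * For illustration: on a big-endian host, element byte 0 of a 64-bit
 * chunk lives at host offset 7, byte 1 at offset 6 and so on, hence
 * BYTE(0) == 7 and BYTE(7) == 0; on little-endian hosts the identity
 * mapping is already correct.
 */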
57 
58 bool riscv_cpu_is_32bit(RISCVCPU *cpu)
59 {
60     return riscv_cpu_mxl(&cpu->env) == MXL_RV32;
61 }
62 
63 /* Hash that stores general user-set numeric options */
64 static GHashTable *general_user_opts;
65 
66 static void cpu_option_add_user_setting(const char *optname, uint32_t value)
67 {
68     g_hash_table_insert(general_user_opts, (gpointer)optname,
69                         GUINT_TO_POINTER(value));
70 }
71 
72 bool riscv_cpu_option_set(const char *optname)
73 {
74     return g_hash_table_contains(general_user_opts, optname);
75 }
76 
77 #define ISA_EXT_DATA_ENTRY(_name, _min_ver, _prop) \
78     {#_name, _min_ver, CPU_CFG_OFFSET(_prop)}
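/*
 * For example, ISA_EXT_DATA_ENTRY(zicsr, PRIV_VERSION_1_10_0, ext_zicsr)
 * expands to {"zicsr", PRIV_VERSION_1_10_0, CPU_CFG_OFFSET(ext_zicsr)}:
 * the extension name, the minimum privileged spec version that defines
 * it, and the offset of its enable flag within cpu->cfg.
 */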
79 
80 /*
81  * Here are the ordering rules of extension naming defined by RISC-V
82  * specification :
83  * 1. All extensions should be separated from other multi-letter extensions
84  *    by an underscore.
85  * 2. The first letter following the 'Z' conventionally indicates the most
86  *    closely related alphabetical extension category, IMAFDQLCBKJTPVH.
87  *    If multiple 'Z' extensions are named, they should be ordered first
88  *    by category, then alphabetically within a category.
89  * 3. Standard supervisor-level extensions (starting with 'S') should be
90  *    listed after standard unprivileged extensions.  If multiple
91  *    supervisor-level extensions are listed, they should be ordered
92  *    alphabetically.
93  * 4. Non-standard extensions (starting with 'X') must be listed after all
94  *    standard extensions. They must be separated from other multi-letter
95  *    extensions by an underscore.
96  *
97  * Single letter extensions are checked in riscv_cpu_validate_misa_priv()
98  * instead.
99  */
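/*
 * As a non-normative example, an ISA string honouring these rules could
 * look like:
 *     rv64imafdc_zicsr_zifencei_zba_zbb_svinval_xtheadba
 * i.e. single-letter extensions first, then 'Z' extensions ordered by
 * category ('i' before 'b') and alphabetically within a category, then
 * 'S' extensions, then 'X' vendor extensions, with the multi-letter
 * extensions separated by underscores.
 */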
100 const RISCVIsaExtData isa_edata_arr[] = {
101     ISA_EXT_DATA_ENTRY(zic64b, PRIV_VERSION_1_12_0, ext_zic64b),
102     ISA_EXT_DATA_ENTRY(zicbom, PRIV_VERSION_1_12_0, ext_zicbom),
103     ISA_EXT_DATA_ENTRY(zicbop, PRIV_VERSION_1_12_0, ext_zicbop),
104     ISA_EXT_DATA_ENTRY(zicboz, PRIV_VERSION_1_12_0, ext_zicboz),
105     ISA_EXT_DATA_ENTRY(ziccamoa, PRIV_VERSION_1_11_0, has_priv_1_11),
106     ISA_EXT_DATA_ENTRY(ziccif, PRIV_VERSION_1_11_0, has_priv_1_11),
107     ISA_EXT_DATA_ENTRY(zicclsm, PRIV_VERSION_1_11_0, has_priv_1_11),
108     ISA_EXT_DATA_ENTRY(ziccrse, PRIV_VERSION_1_11_0, has_priv_1_11),
109     ISA_EXT_DATA_ENTRY(zicond, PRIV_VERSION_1_12_0, ext_zicond),
110     ISA_EXT_DATA_ENTRY(zicntr, PRIV_VERSION_1_12_0, ext_zicntr),
111     ISA_EXT_DATA_ENTRY(zicsr, PRIV_VERSION_1_10_0, ext_zicsr),
112     ISA_EXT_DATA_ENTRY(zifencei, PRIV_VERSION_1_10_0, ext_zifencei),
113     ISA_EXT_DATA_ENTRY(zihintntl, PRIV_VERSION_1_10_0, ext_zihintntl),
114     ISA_EXT_DATA_ENTRY(zihintpause, PRIV_VERSION_1_10_0, ext_zihintpause),
115     ISA_EXT_DATA_ENTRY(zihpm, PRIV_VERSION_1_12_0, ext_zihpm),
116     ISA_EXT_DATA_ENTRY(zimop, PRIV_VERSION_1_13_0, ext_zimop),
117     ISA_EXT_DATA_ENTRY(zmmul, PRIV_VERSION_1_12_0, ext_zmmul),
118     ISA_EXT_DATA_ENTRY(za64rs, PRIV_VERSION_1_12_0, has_priv_1_11),
119     ISA_EXT_DATA_ENTRY(zaamo, PRIV_VERSION_1_12_0, ext_zaamo),
120     ISA_EXT_DATA_ENTRY(zabha, PRIV_VERSION_1_13_0, ext_zabha),
121     ISA_EXT_DATA_ENTRY(zacas, PRIV_VERSION_1_12_0, ext_zacas),
122     ISA_EXT_DATA_ENTRY(zama16b, PRIV_VERSION_1_13_0, ext_zama16b),
123     ISA_EXT_DATA_ENTRY(zalrsc, PRIV_VERSION_1_12_0, ext_zalrsc),
124     ISA_EXT_DATA_ENTRY(zawrs, PRIV_VERSION_1_12_0, ext_zawrs),
125     ISA_EXT_DATA_ENTRY(zfa, PRIV_VERSION_1_12_0, ext_zfa),
126     ISA_EXT_DATA_ENTRY(zfbfmin, PRIV_VERSION_1_12_0, ext_zfbfmin),
127     ISA_EXT_DATA_ENTRY(zfh, PRIV_VERSION_1_11_0, ext_zfh),
128     ISA_EXT_DATA_ENTRY(zfhmin, PRIV_VERSION_1_11_0, ext_zfhmin),
129     ISA_EXT_DATA_ENTRY(zfinx, PRIV_VERSION_1_12_0, ext_zfinx),
130     ISA_EXT_DATA_ENTRY(zdinx, PRIV_VERSION_1_12_0, ext_zdinx),
131     ISA_EXT_DATA_ENTRY(zca, PRIV_VERSION_1_12_0, ext_zca),
132     ISA_EXT_DATA_ENTRY(zcb, PRIV_VERSION_1_12_0, ext_zcb),
133     ISA_EXT_DATA_ENTRY(zcf, PRIV_VERSION_1_12_0, ext_zcf),
134     ISA_EXT_DATA_ENTRY(zcd, PRIV_VERSION_1_12_0, ext_zcd),
135     ISA_EXT_DATA_ENTRY(zce, PRIV_VERSION_1_12_0, ext_zce),
136     ISA_EXT_DATA_ENTRY(zcmop, PRIV_VERSION_1_13_0, ext_zcmop),
137     ISA_EXT_DATA_ENTRY(zcmp, PRIV_VERSION_1_12_0, ext_zcmp),
138     ISA_EXT_DATA_ENTRY(zcmt, PRIV_VERSION_1_12_0, ext_zcmt),
139     ISA_EXT_DATA_ENTRY(zba, PRIV_VERSION_1_12_0, ext_zba),
140     ISA_EXT_DATA_ENTRY(zbb, PRIV_VERSION_1_12_0, ext_zbb),
141     ISA_EXT_DATA_ENTRY(zbc, PRIV_VERSION_1_12_0, ext_zbc),
142     ISA_EXT_DATA_ENTRY(zbkb, PRIV_VERSION_1_12_0, ext_zbkb),
143     ISA_EXT_DATA_ENTRY(zbkc, PRIV_VERSION_1_12_0, ext_zbkc),
144     ISA_EXT_DATA_ENTRY(zbkx, PRIV_VERSION_1_12_0, ext_zbkx),
145     ISA_EXT_DATA_ENTRY(zbs, PRIV_VERSION_1_12_0, ext_zbs),
146     ISA_EXT_DATA_ENTRY(zk, PRIV_VERSION_1_12_0, ext_zk),
147     ISA_EXT_DATA_ENTRY(zkn, PRIV_VERSION_1_12_0, ext_zkn),
148     ISA_EXT_DATA_ENTRY(zknd, PRIV_VERSION_1_12_0, ext_zknd),
149     ISA_EXT_DATA_ENTRY(zkne, PRIV_VERSION_1_12_0, ext_zkne),
150     ISA_EXT_DATA_ENTRY(zknh, PRIV_VERSION_1_12_0, ext_zknh),
151     ISA_EXT_DATA_ENTRY(zkr, PRIV_VERSION_1_12_0, ext_zkr),
152     ISA_EXT_DATA_ENTRY(zks, PRIV_VERSION_1_12_0, ext_zks),
153     ISA_EXT_DATA_ENTRY(zksed, PRIV_VERSION_1_12_0, ext_zksed),
154     ISA_EXT_DATA_ENTRY(zksh, PRIV_VERSION_1_12_0, ext_zksh),
155     ISA_EXT_DATA_ENTRY(zkt, PRIV_VERSION_1_12_0, ext_zkt),
156     ISA_EXT_DATA_ENTRY(ztso, PRIV_VERSION_1_12_0, ext_ztso),
157     ISA_EXT_DATA_ENTRY(zvbb, PRIV_VERSION_1_12_0, ext_zvbb),
158     ISA_EXT_DATA_ENTRY(zvbc, PRIV_VERSION_1_12_0, ext_zvbc),
159     ISA_EXT_DATA_ENTRY(zve32f, PRIV_VERSION_1_10_0, ext_zve32f),
160     ISA_EXT_DATA_ENTRY(zve32x, PRIV_VERSION_1_10_0, ext_zve32x),
161     ISA_EXT_DATA_ENTRY(zve64f, PRIV_VERSION_1_10_0, ext_zve64f),
162     ISA_EXT_DATA_ENTRY(zve64d, PRIV_VERSION_1_10_0, ext_zve64d),
163     ISA_EXT_DATA_ENTRY(zve64x, PRIV_VERSION_1_10_0, ext_zve64x),
164     ISA_EXT_DATA_ENTRY(zvfbfmin, PRIV_VERSION_1_12_0, ext_zvfbfmin),
165     ISA_EXT_DATA_ENTRY(zvfbfwma, PRIV_VERSION_1_12_0, ext_zvfbfwma),
166     ISA_EXT_DATA_ENTRY(zvfh, PRIV_VERSION_1_12_0, ext_zvfh),
167     ISA_EXT_DATA_ENTRY(zvfhmin, PRIV_VERSION_1_12_0, ext_zvfhmin),
168     ISA_EXT_DATA_ENTRY(zvkb, PRIV_VERSION_1_12_0, ext_zvkb),
169     ISA_EXT_DATA_ENTRY(zvkg, PRIV_VERSION_1_12_0, ext_zvkg),
170     ISA_EXT_DATA_ENTRY(zvkn, PRIV_VERSION_1_12_0, ext_zvkn),
171     ISA_EXT_DATA_ENTRY(zvknc, PRIV_VERSION_1_12_0, ext_zvknc),
172     ISA_EXT_DATA_ENTRY(zvkned, PRIV_VERSION_1_12_0, ext_zvkned),
173     ISA_EXT_DATA_ENTRY(zvkng, PRIV_VERSION_1_12_0, ext_zvkng),
174     ISA_EXT_DATA_ENTRY(zvknha, PRIV_VERSION_1_12_0, ext_zvknha),
175     ISA_EXT_DATA_ENTRY(zvknhb, PRIV_VERSION_1_12_0, ext_zvknhb),
176     ISA_EXT_DATA_ENTRY(zvks, PRIV_VERSION_1_12_0, ext_zvks),
177     ISA_EXT_DATA_ENTRY(zvksc, PRIV_VERSION_1_12_0, ext_zvksc),
178     ISA_EXT_DATA_ENTRY(zvksed, PRIV_VERSION_1_12_0, ext_zvksed),
179     ISA_EXT_DATA_ENTRY(zvksg, PRIV_VERSION_1_12_0, ext_zvksg),
180     ISA_EXT_DATA_ENTRY(zvksh, PRIV_VERSION_1_12_0, ext_zvksh),
181     ISA_EXT_DATA_ENTRY(zvkt, PRIV_VERSION_1_12_0, ext_zvkt),
182     ISA_EXT_DATA_ENTRY(zhinx, PRIV_VERSION_1_12_0, ext_zhinx),
183     ISA_EXT_DATA_ENTRY(zhinxmin, PRIV_VERSION_1_12_0, ext_zhinxmin),
184     ISA_EXT_DATA_ENTRY(smaia, PRIV_VERSION_1_12_0, ext_smaia),
185     ISA_EXT_DATA_ENTRY(smepmp, PRIV_VERSION_1_12_0, ext_smepmp),
186     ISA_EXT_DATA_ENTRY(smstateen, PRIV_VERSION_1_12_0, ext_smstateen),
187     ISA_EXT_DATA_ENTRY(ssaia, PRIV_VERSION_1_12_0, ext_ssaia),
188     ISA_EXT_DATA_ENTRY(ssccptr, PRIV_VERSION_1_11_0, has_priv_1_11),
189     ISA_EXT_DATA_ENTRY(sscofpmf, PRIV_VERSION_1_12_0, ext_sscofpmf),
190     ISA_EXT_DATA_ENTRY(sscounterenw, PRIV_VERSION_1_12_0, has_priv_1_12),
191     ISA_EXT_DATA_ENTRY(sstc, PRIV_VERSION_1_12_0, ext_sstc),
192     ISA_EXT_DATA_ENTRY(sstvala, PRIV_VERSION_1_12_0, has_priv_1_12),
193     ISA_EXT_DATA_ENTRY(sstvecd, PRIV_VERSION_1_12_0, has_priv_1_12),
194     ISA_EXT_DATA_ENTRY(svade, PRIV_VERSION_1_11_0, ext_svade),
195     ISA_EXT_DATA_ENTRY(svadu, PRIV_VERSION_1_12_0, ext_svadu),
196     ISA_EXT_DATA_ENTRY(svinval, PRIV_VERSION_1_12_0, ext_svinval),
197     ISA_EXT_DATA_ENTRY(svnapot, PRIV_VERSION_1_12_0, ext_svnapot),
198     ISA_EXT_DATA_ENTRY(svpbmt, PRIV_VERSION_1_12_0, ext_svpbmt),
199     ISA_EXT_DATA_ENTRY(xtheadba, PRIV_VERSION_1_11_0, ext_xtheadba),
200     ISA_EXT_DATA_ENTRY(xtheadbb, PRIV_VERSION_1_11_0, ext_xtheadbb),
201     ISA_EXT_DATA_ENTRY(xtheadbs, PRIV_VERSION_1_11_0, ext_xtheadbs),
202     ISA_EXT_DATA_ENTRY(xtheadcmo, PRIV_VERSION_1_11_0, ext_xtheadcmo),
203     ISA_EXT_DATA_ENTRY(xtheadcondmov, PRIV_VERSION_1_11_0, ext_xtheadcondmov),
204     ISA_EXT_DATA_ENTRY(xtheadfmemidx, PRIV_VERSION_1_11_0, ext_xtheadfmemidx),
205     ISA_EXT_DATA_ENTRY(xtheadfmv, PRIV_VERSION_1_11_0, ext_xtheadfmv),
206     ISA_EXT_DATA_ENTRY(xtheadmac, PRIV_VERSION_1_11_0, ext_xtheadmac),
207     ISA_EXT_DATA_ENTRY(xtheadmemidx, PRIV_VERSION_1_11_0, ext_xtheadmemidx),
208     ISA_EXT_DATA_ENTRY(xtheadmempair, PRIV_VERSION_1_11_0, ext_xtheadmempair),
209     ISA_EXT_DATA_ENTRY(xtheadsync, PRIV_VERSION_1_11_0, ext_xtheadsync),
210     ISA_EXT_DATA_ENTRY(xventanacondops, PRIV_VERSION_1_12_0, ext_XVentanaCondOps),
211 
212     DEFINE_PROP_END_OF_LIST(),
213 };
214 
215 bool isa_ext_is_enabled(RISCVCPU *cpu, uint32_t ext_offset)
216 {
217     bool *ext_enabled = (void *)&cpu->cfg + ext_offset;
218 
219     return *ext_enabled;
220 }
221 
222 void isa_ext_update_enabled(RISCVCPU *cpu, uint32_t ext_offset, bool en)
223 {
224     bool *ext_enabled = (void *)&cpu->cfg + ext_offset;
225 
226     *ext_enabled = en;
227 }
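/*
 * Minimal usage sketch for the two helpers above: the offsets recorded
 * in isa_edata_arr[] via CPU_CFG_OFFSET() are what they expect, e.g.
 *
 *     if (isa_ext_is_enabled(cpu, CPU_CFG_OFFSET(ext_zicsr))) {
 *         ...
 *     }
 *
 * which lets callers walk the extension table generically instead of
 * naming each cpu->cfg field.
 */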
228 
229 bool riscv_cpu_is_vendor(Object *cpu_obj)
230 {
231     return object_dynamic_cast(cpu_obj, TYPE_RISCV_VENDOR_CPU) != NULL;
232 }
233 
234 const char * const riscv_int_regnames[] = {
235     "x0/zero", "x1/ra",  "x2/sp",  "x3/gp",  "x4/tp",  "x5/t0",   "x6/t1",
236     "x7/t2",   "x8/s0",  "x9/s1",  "x10/a0", "x11/a1", "x12/a2",  "x13/a3",
237     "x14/a4",  "x15/a5", "x16/a6", "x17/a7", "x18/s2", "x19/s3",  "x20/s4",
238     "x21/s5",  "x22/s6", "x23/s7", "x24/s8", "x25/s9", "x26/s10", "x27/s11",
239     "x28/t3",  "x29/t4", "x30/t5", "x31/t6"
240 };
241 
242 const char * const riscv_int_regnamesh[] = {
243     "x0h/zeroh", "x1h/rah",  "x2h/sph",   "x3h/gph",   "x4h/tph",  "x5h/t0h",
244     "x6h/t1h",   "x7h/t2h",  "x8h/s0h",   "x9h/s1h",   "x10h/a0h", "x11h/a1h",
245     "x12h/a2h",  "x13h/a3h", "x14h/a4h",  "x15h/a5h",  "x16h/a6h", "x17h/a7h",
246     "x18h/s2h",  "x19h/s3h", "x20h/s4h",  "x21h/s5h",  "x22h/s6h", "x23h/s7h",
247     "x24h/s8h",  "x25h/s9h", "x26h/s10h", "x27h/s11h", "x28h/t3h", "x29h/t4h",
248     "x30h/t5h",  "x31h/t6h"
249 };
250 
251 const char * const riscv_fpr_regnames[] = {
252     "f0/ft0",   "f1/ft1",  "f2/ft2",   "f3/ft3",   "f4/ft4",  "f5/ft5",
253     "f6/ft6",   "f7/ft7",  "f8/fs0",   "f9/fs1",   "f10/fa0", "f11/fa1",
254     "f12/fa2",  "f13/fa3", "f14/fa4",  "f15/fa5",  "f16/fa6", "f17/fa7",
255     "f18/fs2",  "f19/fs3", "f20/fs4",  "f21/fs5",  "f22/fs6", "f23/fs7",
256     "f24/fs8",  "f25/fs9", "f26/fs10", "f27/fs11", "f28/ft8", "f29/ft9",
257     "f30/ft10", "f31/ft11"
258 };
259 
260 const char * const riscv_rvv_regnames[] = {
261   "v0",  "v1",  "v2",  "v3",  "v4",  "v5",  "v6",
262   "v7",  "v8",  "v9",  "v10", "v11", "v12", "v13",
263   "v14", "v15", "v16", "v17", "v18", "v19", "v20",
264   "v21", "v22", "v23", "v24", "v25", "v26", "v27",
265   "v28", "v29", "v30", "v31"
266 };
267 
268 static const char * const riscv_excp_names[] = {
269     "misaligned_fetch",
270     "fault_fetch",
271     "illegal_instruction",
272     "breakpoint",
273     "misaligned_load",
274     "fault_load",
275     "misaligned_store",
276     "fault_store",
277     "user_ecall",
278     "supervisor_ecall",
279     "hypervisor_ecall",
280     "machine_ecall",
281     "exec_page_fault",
282     "load_page_fault",
283     "reserved",
284     "store_page_fault",
285     "reserved",
286     "reserved",
287     "reserved",
288     "reserved",
289     "guest_exec_page_fault",
290     "guest_load_page_fault",
291     "reserved",
292     "guest_store_page_fault",
293 };
294 
295 static const char * const riscv_intr_names[] = {
296     "u_software",
297     "s_software",
298     "vs_software",
299     "m_software",
300     "u_timer",
301     "s_timer",
302     "vs_timer",
303     "m_timer",
304     "u_external",
305     "s_external",
306     "vs_external",
307     "m_external",
308     "reserved",
309     "reserved",
310     "reserved",
311     "reserved"
312 };
313 
314 const char *riscv_cpu_get_trap_name(target_ulong cause, bool async)
315 {
316     if (async) {
317         return (cause < ARRAY_SIZE(riscv_intr_names)) ?
318                riscv_intr_names[cause] : "(unknown)";
319     } else {
320         return (cause < ARRAY_SIZE(riscv_excp_names)) ?
321                riscv_excp_names[cause] : "(unknown)";
322     }
323 }
324 
325 void riscv_cpu_set_misa_ext(CPURISCVState *env, uint32_t ext)
326 {
327     env->misa_ext_mask = env->misa_ext = ext;
328 }
329 
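/*
 * With the standard misa.MXL encoding (1 = 32-bit, 2 = 64-bit,
 * 3 = 128-bit), this evaluates to 16 << 1 = 32, 16 << 2 = 64 and
 * 16 << 3 = 128 respectively.
 */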
330 int riscv_cpu_max_xlen(RISCVCPUClass *mcc)
331 {
332     return 16 << mcc->misa_mxl_max;
333 }
334 
335 #ifndef CONFIG_USER_ONLY
336 static uint8_t satp_mode_from_str(const char *satp_mode_str)
337 {
338     if (!strncmp(satp_mode_str, "mbare", 5)) {
339         return VM_1_10_MBARE;
340     }
341 
342     if (!strncmp(satp_mode_str, "sv32", 4)) {
343         return VM_1_10_SV32;
344     }
345 
346     if (!strncmp(satp_mode_str, "sv39", 4)) {
347         return VM_1_10_SV39;
348     }
349 
350     if (!strncmp(satp_mode_str, "sv48", 4)) {
351         return VM_1_10_SV48;
352     }
353 
354     if (!strncmp(satp_mode_str, "sv57", 4)) {
355         return VM_1_10_SV57;
356     }
357 
358     if (!strncmp(satp_mode_str, "sv64", 4)) {
359         return VM_1_10_SV64;
360     }
361 
362     g_assert_not_reached();
363 }
364 
365 uint8_t satp_mode_max_from_map(uint32_t map)
366 {
367     /*
368      * 'map = 0' would make __builtin_clz() undefined, and there's
369      * no good result to return for 'map = 0' in any case (e.g.
370      * returning 0 will be ambiguous
371      * with the result for 'map = 1').
372      *
373      * Assert out if map = 0. Callers will have to deal with
374      * it outside of this function.
375      */
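    /*
     * Worked example, assuming the VM_1_10_* values follow the satp
     * MODE encoding (SV39 = 8, SV48 = 9): a map with only the SV39 and
     * SV48 bits set is 0x300, __builtin_clz(0x300) is 22, and
     * 31 - 22 = 9, i.e. VM_1_10_SV48, the largest mode in the map.
     */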
376     g_assert(map > 0);
377 
378     /* map here has at least one bit set, so no problem with clz */
379     return 31 - __builtin_clz(map);
380 }
381 
382 const char *satp_mode_str(uint8_t satp_mode, bool is_32_bit)
383 {
384     if (is_32_bit) {
385         switch (satp_mode) {
386         case VM_1_10_SV32:
387             return "sv32";
388         case VM_1_10_MBARE:
389             return "none";
390         }
391     } else {
392         switch (satp_mode) {
393         case VM_1_10_SV64:
394             return "sv64";
395         case VM_1_10_SV57:
396             return "sv57";
397         case VM_1_10_SV48:
398             return "sv48";
399         case VM_1_10_SV39:
400             return "sv39";
401         case VM_1_10_MBARE:
402             return "none";
403         }
404     }
405 
406     g_assert_not_reached();
407 }
408 
409 static void set_satp_mode_max_supported(RISCVCPU *cpu,
410                                         uint8_t satp_mode)
411 {
412     bool rv32 = riscv_cpu_mxl(&cpu->env) == MXL_RV32;
413     const bool *valid_vm = rv32 ? valid_vm_1_10_32 : valid_vm_1_10_64;
414 
415     for (int i = 0; i <= satp_mode; ++i) {
416         if (valid_vm[i]) {
417             cpu->cfg.satp_mode.supported |= (1 << i);
418         }
419     }
420 }
421 
422 /* Set the satp mode map to the max supported (bare CPUs default to 'bare') */
423 static void set_satp_mode_default_map(RISCVCPU *cpu)
424 {
425     /*
426      * Bare CPUs do not default to the max available.
427      * Users must set a valid satp_mode on the command
428      * line.
429      */
430     if (object_dynamic_cast(OBJECT(cpu), TYPE_RISCV_BARE_CPU) != NULL) {
431         warn_report("No satp mode set. Defaulting to 'bare'");
432         cpu->cfg.satp_mode.map = (1 << VM_1_10_MBARE);
433         return;
434     }
435 
436     cpu->cfg.satp_mode.map = cpu->cfg.satp_mode.supported;
437 }
438 #endif
439 
440 static void riscv_any_cpu_init(Object *obj)
441 {
442     RISCVCPU *cpu = RISCV_CPU(obj);
443     CPURISCVState *env = &cpu->env;
444     riscv_cpu_set_misa_ext(env, RVI | RVM | RVA | RVF | RVD | RVC | RVU);
445 
446 #ifndef CONFIG_USER_ONLY
447     set_satp_mode_max_supported(RISCV_CPU(obj),
448         riscv_cpu_mxl(&RISCV_CPU(obj)->env) == MXL_RV32 ?
449         VM_1_10_SV32 : VM_1_10_SV57);
450 #endif
451 
452     env->priv_ver = PRIV_VERSION_LATEST;
453 
454     /* inherited from parent obj via riscv_cpu_init() */
455     cpu->cfg.ext_zifencei = true;
456     cpu->cfg.ext_zicsr = true;
457     cpu->cfg.mmu = true;
458     cpu->cfg.pmp = true;
459 }
460 
461 static void riscv_max_cpu_init(Object *obj)
462 {
463     RISCVCPU *cpu = RISCV_CPU(obj);
464     CPURISCVState *env = &cpu->env;
465 
466     cpu->cfg.mmu = true;
467     cpu->cfg.pmp = true;
468 
469     env->priv_ver = PRIV_VERSION_LATEST;
470 #ifndef CONFIG_USER_ONLY
471 #ifdef TARGET_RISCV32
472     set_satp_mode_max_supported(cpu, VM_1_10_SV32);
473 #else
474     set_satp_mode_max_supported(cpu, VM_1_10_SV57);
475 #endif
476 #endif
477 }
478 
479 #if defined(TARGET_RISCV64)
480 static void rv64_base_cpu_init(Object *obj)
481 {
482     RISCVCPU *cpu = RISCV_CPU(obj);
483     CPURISCVState *env = &cpu->env;
484 
485     cpu->cfg.mmu = true;
486     cpu->cfg.pmp = true;
487 
488     /* Set latest version of privileged specification */
489     env->priv_ver = PRIV_VERSION_LATEST;
490 #ifndef CONFIG_USER_ONLY
491     set_satp_mode_max_supported(RISCV_CPU(obj), VM_1_10_SV57);
492 #endif
493 }
494 
495 static void rv64_sifive_u_cpu_init(Object *obj)
496 {
497     RISCVCPU *cpu = RISCV_CPU(obj);
498     CPURISCVState *env = &cpu->env;
499     riscv_cpu_set_misa_ext(env, RVI | RVM | RVA | RVF | RVD | RVC | RVS | RVU);
500     env->priv_ver = PRIV_VERSION_1_10_0;
501 #ifndef CONFIG_USER_ONLY
502     set_satp_mode_max_supported(RISCV_CPU(obj), VM_1_10_SV39);
503 #endif
504 
505     /* inherited from parent obj via riscv_cpu_init() */
506     cpu->cfg.ext_zifencei = true;
507     cpu->cfg.ext_zicsr = true;
508     cpu->cfg.mmu = true;
509     cpu->cfg.pmp = true;
510 }
511 
512 static void rv64_sifive_e_cpu_init(Object *obj)
513 {
514     CPURISCVState *env = &RISCV_CPU(obj)->env;
515     RISCVCPU *cpu = RISCV_CPU(obj);
516 
517     riscv_cpu_set_misa_ext(env, RVI | RVM | RVA | RVC | RVU);
518     env->priv_ver = PRIV_VERSION_1_10_0;
519 #ifndef CONFIG_USER_ONLY
520     set_satp_mode_max_supported(cpu, VM_1_10_MBARE);
521 #endif
522 
523     /* inherited from parent obj via riscv_cpu_init() */
524     cpu->cfg.ext_zifencei = true;
525     cpu->cfg.ext_zicsr = true;
526     cpu->cfg.pmp = true;
527 }
528 
529 static void rv64_thead_c906_cpu_init(Object *obj)
530 {
531     CPURISCVState *env = &RISCV_CPU(obj)->env;
532     RISCVCPU *cpu = RISCV_CPU(obj);
533 
534     riscv_cpu_set_misa_ext(env, RVG | RVC | RVS | RVU);
535     env->priv_ver = PRIV_VERSION_1_11_0;
536 
537     cpu->cfg.ext_zfa = true;
538     cpu->cfg.ext_zfh = true;
539     cpu->cfg.mmu = true;
540     cpu->cfg.ext_xtheadba = true;
541     cpu->cfg.ext_xtheadbb = true;
542     cpu->cfg.ext_xtheadbs = true;
543     cpu->cfg.ext_xtheadcmo = true;
544     cpu->cfg.ext_xtheadcondmov = true;
545     cpu->cfg.ext_xtheadfmemidx = true;
546     cpu->cfg.ext_xtheadmac = true;
547     cpu->cfg.ext_xtheadmemidx = true;
548     cpu->cfg.ext_xtheadmempair = true;
549     cpu->cfg.ext_xtheadsync = true;
550 
551     cpu->cfg.mvendorid = THEAD_VENDOR_ID;
552 #ifndef CONFIG_USER_ONLY
553     set_satp_mode_max_supported(cpu, VM_1_10_SV39);
554     th_register_custom_csrs(cpu);
555 #endif
556 
557     /* inherited from parent obj via riscv_cpu_init() */
558     cpu->cfg.pmp = true;
559 }
560 
561 static void rv64_veyron_v1_cpu_init(Object *obj)
562 {
563     CPURISCVState *env = &RISCV_CPU(obj)->env;
564     RISCVCPU *cpu = RISCV_CPU(obj);
565 
566     riscv_cpu_set_misa_ext(env, RVG | RVC | RVS | RVU | RVH);
567     env->priv_ver = PRIV_VERSION_1_12_0;
568 
569     /* Enable ISA extensions */
570     cpu->cfg.mmu = true;
571     cpu->cfg.ext_zifencei = true;
572     cpu->cfg.ext_zicsr = true;
573     cpu->cfg.pmp = true;
574     cpu->cfg.ext_zicbom = true;
575     cpu->cfg.cbom_blocksize = 64;
576     cpu->cfg.cboz_blocksize = 64;
577     cpu->cfg.ext_zicboz = true;
578     cpu->cfg.ext_smaia = true;
579     cpu->cfg.ext_ssaia = true;
580     cpu->cfg.ext_sscofpmf = true;
581     cpu->cfg.ext_sstc = true;
582     cpu->cfg.ext_svinval = true;
583     cpu->cfg.ext_svnapot = true;
584     cpu->cfg.ext_svpbmt = true;
585     cpu->cfg.ext_smstateen = true;
586     cpu->cfg.ext_zba = true;
587     cpu->cfg.ext_zbb = true;
588     cpu->cfg.ext_zbc = true;
589     cpu->cfg.ext_zbs = true;
590     cpu->cfg.ext_XVentanaCondOps = true;
591 
592     cpu->cfg.mvendorid = VEYRON_V1_MVENDORID;
593     cpu->cfg.marchid = VEYRON_V1_MARCHID;
594     cpu->cfg.mimpid = VEYRON_V1_MIMPID;
595 
596 #ifndef CONFIG_USER_ONLY
597     set_satp_mode_max_supported(cpu, VM_1_10_SV48);
598 #endif
599 }
600 
601 #ifdef CONFIG_TCG
602 static void rv128_base_cpu_init(Object *obj)
603 {
604     RISCVCPU *cpu = RISCV_CPU(obj);
605     CPURISCVState *env = &cpu->env;
606 
607     if (qemu_tcg_mttcg_enabled()) {
608         /* Missing 128-bit aligned atomics */
609         error_report("128-bit RISC-V currently does not work with Multi "
610                      "Threaded TCG. Please use: -accel tcg,thread=single");
611         exit(EXIT_FAILURE);
612     }
613 
614     cpu->cfg.mmu = true;
615     cpu->cfg.pmp = true;
616 
617     /* Set latest version of privileged specification */
618     env->priv_ver = PRIV_VERSION_LATEST;
619 #ifndef CONFIG_USER_ONLY
620     set_satp_mode_max_supported(RISCV_CPU(obj), VM_1_10_SV57);
621 #endif
622 }
623 #endif /* CONFIG_TCG */
624 
625 static void rv64i_bare_cpu_init(Object *obj)
626 {
627     CPURISCVState *env = &RISCV_CPU(obj)->env;
628     riscv_cpu_set_misa_ext(env, RVI);
629 }
630 
631 static void rv64e_bare_cpu_init(Object *obj)
632 {
633     CPURISCVState *env = &RISCV_CPU(obj)->env;
634     riscv_cpu_set_misa_ext(env, RVE);
635 }
636 
637 #else /* !TARGET_RISCV64 */
638 
639 static void rv32_base_cpu_init(Object *obj)
640 {
641     RISCVCPU *cpu = RISCV_CPU(obj);
642     CPURISCVState *env = &cpu->env;
643 
644     cpu->cfg.mmu = true;
645     cpu->cfg.pmp = true;
646 
647     /* Set latest version of privileged specification */
648     env->priv_ver = PRIV_VERSION_LATEST;
649 #ifndef CONFIG_USER_ONLY
650     set_satp_mode_max_supported(RISCV_CPU(obj), VM_1_10_SV32);
651 #endif
652 }
653 
654 static void rv32_sifive_u_cpu_init(Object *obj)
655 {
656     RISCVCPU *cpu = RISCV_CPU(obj);
657     CPURISCVState *env = &cpu->env;
658     riscv_cpu_set_misa_ext(env, RVI | RVM | RVA | RVF | RVD | RVC | RVS | RVU);
659     env->priv_ver = PRIV_VERSION_1_10_0;
660 #ifndef CONFIG_USER_ONLY
661     set_satp_mode_max_supported(RISCV_CPU(obj), VM_1_10_SV32);
662 #endif
663 
664     /* inherited from parent obj via riscv_cpu_init() */
665     cpu->cfg.ext_zifencei = true;
666     cpu->cfg.ext_zicsr = true;
667     cpu->cfg.mmu = true;
668     cpu->cfg.pmp = true;
669 }
670 
671 static void rv32_sifive_e_cpu_init(Object *obj)
672 {
673     CPURISCVState *env = &RISCV_CPU(obj)->env;
674     RISCVCPU *cpu = RISCV_CPU(obj);
675 
676     riscv_cpu_set_misa_ext(env, RVI | RVM | RVA | RVC | RVU);
677     env->priv_ver = PRIV_VERSION_1_10_0;
678 #ifndef CONFIG_USER_ONLY
679     set_satp_mode_max_supported(cpu, VM_1_10_MBARE);
680 #endif
681 
682     /* inherited from parent obj via riscv_cpu_init() */
683     cpu->cfg.ext_zifencei = true;
684     cpu->cfg.ext_zicsr = true;
685     cpu->cfg.pmp = true;
686 }
687 
688 static void rv32_ibex_cpu_init(Object *obj)
689 {
690     CPURISCVState *env = &RISCV_CPU(obj)->env;
691     RISCVCPU *cpu = RISCV_CPU(obj);
692 
693     riscv_cpu_set_misa_ext(env, RVI | RVM | RVC | RVU);
694     env->priv_ver = PRIV_VERSION_1_12_0;
695 #ifndef CONFIG_USER_ONLY
696     set_satp_mode_max_supported(cpu, VM_1_10_MBARE);
697 #endif
698     /* inherited from parent obj via riscv_cpu_init() */
699     cpu->cfg.ext_zifencei = true;
700     cpu->cfg.ext_zicsr = true;
701     cpu->cfg.pmp = true;
702     cpu->cfg.ext_smepmp = true;
703 }
704 
705 static void rv32_imafcu_nommu_cpu_init(Object *obj)
706 {
707     CPURISCVState *env = &RISCV_CPU(obj)->env;
708     RISCVCPU *cpu = RISCV_CPU(obj);
709 
710     riscv_cpu_set_misa_ext(env, RVI | RVM | RVA | RVF | RVC | RVU);
711     env->priv_ver = PRIV_VERSION_1_10_0;
712 #ifndef CONFIG_USER_ONLY
713     set_satp_mode_max_supported(cpu, VM_1_10_MBARE);
714 #endif
715 
716     /* inherited from parent obj via riscv_cpu_init() */
717     cpu->cfg.ext_zifencei = true;
718     cpu->cfg.ext_zicsr = true;
719     cpu->cfg.pmp = true;
720 }
721 
722 static void rv32i_bare_cpu_init(Object *obj)
723 {
724     CPURISCVState *env = &RISCV_CPU(obj)->env;
725     riscv_cpu_set_misa_ext(env, RVI);
726 }
727 
728 static void rv32e_bare_cpu_init(Object *obj)
729 {
730     CPURISCVState *env = &RISCV_CPU(obj)->env;
731     riscv_cpu_set_misa_ext(env, RVE);
732 }
733 #endif
734 
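/*
 * Resolve a CPU model name to its QOM class.  For example, assuming
 * RISCV_CPU_TYPE_NAME() appends the usual "-riscv-cpu" suffix, a
 * cpu_model of "rv64" is looked up as the type "rv64-riscv-cpu".
 */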
735 static ObjectClass *riscv_cpu_class_by_name(const char *cpu_model)
736 {
737     ObjectClass *oc;
738     char *typename;
739     char **cpuname;
740 
741     cpuname = g_strsplit(cpu_model, ",", 1);
742     typename = g_strdup_printf(RISCV_CPU_TYPE_NAME("%s"), cpuname[0]);
743     oc = object_class_by_name(typename);
744     g_strfreev(cpuname);
745     g_free(typename);
746 
747     return oc;
748 }
749 
750 char *riscv_cpu_get_name(RISCVCPU *cpu)
751 {
752     RISCVCPUClass *rcc = RISCV_CPU_GET_CLASS(cpu);
753     const char *typename = object_class_get_name(OBJECT_CLASS(rcc));
754 
755     g_assert(g_str_has_suffix(typename, RISCV_CPU_TYPE_SUFFIX));
756 
757     return cpu_model_from_type(typename);
758 }
759 
760 static void riscv_cpu_dump_state(CPUState *cs, FILE *f, int flags)
761 {
762     RISCVCPU *cpu = RISCV_CPU(cs);
763     CPURISCVState *env = &cpu->env;
764     int i, j;
765     uint8_t *p;
766 
767 #if !defined(CONFIG_USER_ONLY)
768     if (riscv_has_ext(env, RVH)) {
769         qemu_fprintf(f, " %s %d\n", "V      =  ", env->virt_enabled);
770     }
771 #endif
772     qemu_fprintf(f, " %s " TARGET_FMT_lx "\n", "pc      ", env->pc);
773 #ifndef CONFIG_USER_ONLY
774     {
775         static const int dump_csrs[] = {
776             CSR_MHARTID,
777             CSR_MSTATUS,
778             CSR_MSTATUSH,
779             /*
780              * CSR_SSTATUS is intentionally omitted here as its value
781              * can be figured out by looking at CSR_MSTATUS
782              */
783             CSR_HSTATUS,
784             CSR_VSSTATUS,
785             CSR_MIP,
786             CSR_MIE,
787             CSR_MIDELEG,
788             CSR_HIDELEG,
789             CSR_MEDELEG,
790             CSR_HEDELEG,
791             CSR_MTVEC,
792             CSR_STVEC,
793             CSR_VSTVEC,
794             CSR_MEPC,
795             CSR_SEPC,
796             CSR_VSEPC,
797             CSR_MCAUSE,
798             CSR_SCAUSE,
799             CSR_VSCAUSE,
800             CSR_MTVAL,
801             CSR_STVAL,
802             CSR_HTVAL,
803             CSR_MTVAL2,
804             CSR_MSCRATCH,
805             CSR_SSCRATCH,
806             CSR_SATP,
807             CSR_MMTE,
808             CSR_UPMBASE,
809             CSR_UPMMASK,
810             CSR_SPMBASE,
811             CSR_SPMMASK,
812             CSR_MPMBASE,
813             CSR_MPMMASK,
814         };
815 
816         for (i = 0; i < ARRAY_SIZE(dump_csrs); ++i) {
817             int csrno = dump_csrs[i];
818             target_ulong val = 0;
819             RISCVException res = riscv_csrrw_debug(env, csrno, &val, 0, 0);
820 
821             /*
822              * Rely on the smode, hmode, etc, predicates within csr.c
823              * to do the filtering of the registers that are present.
824              */
825             if (res == RISCV_EXCP_NONE) {
826                 qemu_fprintf(f, " %-8s " TARGET_FMT_lx "\n",
827                              csr_ops[csrno].name, val);
828             }
829         }
830     }
831 #endif
832 
833     for (i = 0; i < 32; i++) {
834         qemu_fprintf(f, " %-8s " TARGET_FMT_lx,
835                      riscv_int_regnames[i], env->gpr[i]);
836         if ((i & 3) == 3) {
837             qemu_fprintf(f, "\n");
838         }
839     }
840     if (flags & CPU_DUMP_FPU) {
841         for (i = 0; i < 32; i++) {
842             qemu_fprintf(f, " %-8s %016" PRIx64,
843                          riscv_fpr_regnames[i], env->fpr[i]);
844             if ((i & 3) == 3) {
845                 qemu_fprintf(f, "\n");
846             }
847         }
848     }
849     if (riscv_has_ext(env, RVV) && (flags & CPU_DUMP_VPU)) {
850         static const int dump_rvv_csrs[] = {
851                     CSR_VSTART,
852                     CSR_VXSAT,
853                     CSR_VXRM,
854                     CSR_VCSR,
855                     CSR_VL,
856                     CSR_VTYPE,
857                     CSR_VLENB,
858                 };
859         for (i = 0; i < ARRAY_SIZE(dump_rvv_csrs); ++i) {
860             int csrno = dump_rvv_csrs[i];
861             target_ulong val = 0;
862             RISCVException res = riscv_csrrw_debug(env, csrno, &val, 0, 0);
863 
864             /*
865              * Rely on the smode, hmode, etc, predicates within csr.c
866              * to do the filtering of the registers that are present.
867              */
868             if (res == RISCV_EXCP_NONE) {
869                 qemu_fprintf(f, " %-8s " TARGET_FMT_lx "\n",
870                              csr_ops[csrno].name, val);
871             }
872         }
873         uint16_t vlenb = cpu->cfg.vlenb;
874 
875         for (i = 0; i < 32; i++) {
876             qemu_fprintf(f, " %-8s ", riscv_rvv_regnames[i]);
877             p = (uint8_t *)env->vreg;
878             for (j = vlenb - 1 ; j >= 0; j--) {
879                 qemu_fprintf(f, "%02x", *(p + i * vlenb + BYTE(j)));
880             }
881             qemu_fprintf(f, "\n");
882         }
883     }
884 }
885 
886 static void riscv_cpu_set_pc(CPUState *cs, vaddr value)
887 {
888     RISCVCPU *cpu = RISCV_CPU(cs);
889     CPURISCVState *env = &cpu->env;
890 
891     if (env->xl == MXL_RV32) {
892         env->pc = (int32_t)value;
893     } else {
894         env->pc = value;
895     }
896 }
897 
898 static vaddr riscv_cpu_get_pc(CPUState *cs)
899 {
900     RISCVCPU *cpu = RISCV_CPU(cs);
901     CPURISCVState *env = &cpu->env;
902 
903     /* Match cpu_get_tb_cpu_state. */
904     if (env->xl == MXL_RV32) {
905         return env->pc & UINT32_MAX;
906     }
907     return env->pc;
908 }
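/*
 * Note that for MXL_RV32 on a 64-bit target_ulong the pc is stored
 * sign-extended by riscv_cpu_set_pc() (the (int32_t) cast) while
 * riscv_cpu_get_pc() reports only the low 32 bits, in line with
 * cpu_get_tb_cpu_state().
 */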
909 
910 bool riscv_cpu_has_work(CPUState *cs)
911 {
912 #ifndef CONFIG_USER_ONLY
913     RISCVCPU *cpu = RISCV_CPU(cs);
914     CPURISCVState *env = &cpu->env;
915     /*
916      * Definition of the WFI instruction requires it to ignore the privilege
917      * mode and delegation registers, but respect individual enables
918      */
919     return riscv_cpu_all_pending(env) != 0 ||
920         riscv_cpu_sirq_pending(env) != RISCV_EXCP_NONE ||
921         riscv_cpu_vsirq_pending(env) != RISCV_EXCP_NONE;
922 #else
923     return true;
924 #endif
925 }
926 
927 static int riscv_cpu_mmu_index(CPUState *cs, bool ifetch)
928 {
929     return riscv_env_mmu_index(cpu_env(cs), ifetch);
930 }
931 
932 static void riscv_cpu_reset_hold(Object *obj, ResetType type)
933 {
934 #ifndef CONFIG_USER_ONLY
935     uint8_t iprio;
936     int i, irq, rdzero;
937 #endif
938     CPUState *cs = CPU(obj);
939     RISCVCPU *cpu = RISCV_CPU(cs);
940     RISCVCPUClass *mcc = RISCV_CPU_GET_CLASS(obj);
941     CPURISCVState *env = &cpu->env;
942 
943     if (mcc->parent_phases.hold) {
944         mcc->parent_phases.hold(obj, type);
945     }
946 #ifndef CONFIG_USER_ONLY
947     env->misa_mxl = mcc->misa_mxl_max;
948     env->priv = PRV_M;
949     env->mstatus &= ~(MSTATUS_MIE | MSTATUS_MPRV);
950     if (env->misa_mxl > MXL_RV32) {
951         /*
952          * The reset status of SXL/UXL is undefined, but mstatus is WARL
953          * and we must ensure that the value after init is valid for read.
954          */
955         env->mstatus = set_field(env->mstatus, MSTATUS64_SXL, env->misa_mxl);
956         env->mstatus = set_field(env->mstatus, MSTATUS64_UXL, env->misa_mxl);
957         if (riscv_has_ext(env, RVH)) {
958             env->vsstatus = set_field(env->vsstatus,
959                                       MSTATUS64_SXL, env->misa_mxl);
960             env->vsstatus = set_field(env->vsstatus,
961                                       MSTATUS64_UXL, env->misa_mxl);
962             env->mstatus_hs = set_field(env->mstatus_hs,
963                                         MSTATUS64_SXL, env->misa_mxl);
964             env->mstatus_hs = set_field(env->mstatus_hs,
965                                         MSTATUS64_UXL, env->misa_mxl);
966         }
967     }
968     env->mcause = 0;
969     env->miclaim = MIP_SGEIP;
970     env->pc = env->resetvec;
971     env->bins = 0;
972     env->two_stage_lookup = false;
973 
974     env->menvcfg = (cpu->cfg.ext_svpbmt ? MENVCFG_PBMTE : 0) |
975                    (!cpu->cfg.ext_svade && cpu->cfg.ext_svadu ?
976                     MENVCFG_ADUE : 0);
977     env->henvcfg = 0;
978 
979     /* Initialize default priorities of local interrupts. */
980     for (i = 0; i < ARRAY_SIZE(env->miprio); i++) {
981         iprio = riscv_cpu_default_priority(i);
982         env->miprio[i] = (i == IRQ_M_EXT) ? 0 : iprio;
983         env->siprio[i] = (i == IRQ_S_EXT) ? 0 : iprio;
984         env->hviprio[i] = 0;
985     }
986     i = 0;
987     while (!riscv_cpu_hviprio_index2irq(i, &irq, &rdzero)) {
988         if (!rdzero) {
989             env->hviprio[irq] = env->miprio[irq];
990         }
991         i++;
992     }
993     /* mmte is supposed to have pm.current hardwired to 1 */
994     env->mmte |= (EXT_STATUS_INITIAL | MMTE_M_PM_CURRENT);
995 
996     /*
997      * Bits 10, 6, 2 and 12 of mideleg are read only 1 when the Hypervisor
998      * extension is enabled.
999      */
1000     if (riscv_has_ext(env, RVH)) {
1001         env->mideleg |= HS_MODE_INTERRUPTS;
1002     }
1003 
1004     /*
1005      * Clear mseccfg and unlock all the PMP entries upon reset.
1006      * This is allowed as per the priv and smepmp specifications
1007      * and is needed to clear stale entries across reboots.
1008      */
1009     if (riscv_cpu_cfg(env)->ext_smepmp) {
1010         env->mseccfg = 0;
1011     }
1012 
1013     pmp_unlock_entries(env);
1014 #endif
1015     env->xl = riscv_cpu_mxl(env);
1016     riscv_cpu_update_mask(env);
1017     cs->exception_index = RISCV_EXCP_NONE;
1018     env->load_res = -1;
1019     set_default_nan_mode(1, &env->fp_status);
1020 
1021 #ifndef CONFIG_USER_ONLY
1022     if (cpu->cfg.debug) {
1023         riscv_trigger_reset_hold(env);
1024     }
1025 
1026     if (kvm_enabled()) {
1027         kvm_riscv_reset_vcpu(cpu);
1028     }
1029 #endif
1030 }
1031 
1032 static void riscv_cpu_disas_set_info(CPUState *s, disassemble_info *info)
1033 {
1034     RISCVCPU *cpu = RISCV_CPU(s);
1035     CPURISCVState *env = &cpu->env;
1036     info->target_info = &cpu->cfg;
1037 
1038     switch (env->xl) {
1039     case MXL_RV32:
1040         info->print_insn = print_insn_riscv32;
1041         break;
1042     case MXL_RV64:
1043         info->print_insn = print_insn_riscv64;
1044         break;
1045     case MXL_RV128:
1046         info->print_insn = print_insn_riscv128;
1047         break;
1048     default:
1049         g_assert_not_reached();
1050     }
1051 }
1052 
1053 #ifndef CONFIG_USER_ONLY
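/*
 * Illustrative walk-through of the finalization below: on an SV57
 * capable CPU, "-cpu rv64,sv48=off" leaves cfg.satp_mode.map clear but
 * records SV48 in cfg.satp_mode.init, so the search picks the next
 * supported mode below it (SV39) as the new maximum, and the final
 * expansion then also sets every supported mode below that.
 */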
1054 static void riscv_cpu_satp_mode_finalize(RISCVCPU *cpu, Error **errp)
1055 {
1056     bool rv32 = riscv_cpu_is_32bit(cpu);
1057     uint8_t satp_mode_map_max, satp_mode_supported_max;
1058 
1059     /* The CPU wants the OS to decide which satp mode to use */
1060     if (cpu->cfg.satp_mode.supported == 0) {
1061         return;
1062     }
1063 
1064     satp_mode_supported_max =
1065                     satp_mode_max_from_map(cpu->cfg.satp_mode.supported);
1066 
1067     if (cpu->cfg.satp_mode.map == 0) {
1068         if (cpu->cfg.satp_mode.init == 0) {
1069             /* If unset by the user, we fall back to the default satp mode. */
1070             set_satp_mode_default_map(cpu);
1071         } else {
1072             /*
1073              * Find the lowest level that was disabled and then enable the
1074              * first valid level below it that can be found in
1075              * valid_vm_1_10_32/64.
1076              */
1077             for (int i = 1; i < 16; ++i) {
1078                 if ((cpu->cfg.satp_mode.init & (1 << i)) &&
1079                     (cpu->cfg.satp_mode.supported & (1 << i))) {
1080                     for (int j = i - 1; j >= 0; --j) {
1081                         if (cpu->cfg.satp_mode.supported & (1 << j)) {
1082                             cpu->cfg.satp_mode.map |= (1 << j);
1083                             break;
1084                         }
1085                     }
1086                     break;
1087                 }
1088             }
1089         }
1090     }
1091 
1092     satp_mode_map_max = satp_mode_max_from_map(cpu->cfg.satp_mode.map);
1093 
1094     /* Make sure the user asked for a supported configuration (HW and QEMU) */
1095     if (satp_mode_map_max > satp_mode_supported_max) {
1096         error_setg(errp, "satp_mode %s is higher than hw max capability %s",
1097                    satp_mode_str(satp_mode_map_max, rv32),
1098                    satp_mode_str(satp_mode_supported_max, rv32));
1099         return;
1100     }
1101 
1102     /*
1103      * Make sure the user did not ask for an invalid configuration as per
1104      * the specification.
1105      */
1106     if (!rv32) {
1107         for (int i = satp_mode_map_max - 1; i >= 0; --i) {
1108             if (!(cpu->cfg.satp_mode.map & (1 << i)) &&
1109                 (cpu->cfg.satp_mode.init & (1 << i)) &&
1110                 (cpu->cfg.satp_mode.supported & (1 << i))) {
1111                 error_setg(errp, "cannot disable %s satp mode if %s "
1112                            "is enabled", satp_mode_str(i, false),
1113                            satp_mode_str(satp_mode_map_max, false));
1114                 return;
1115             }
1116         }
1117     }
1118 
1119     /* Finally expand the map so that all valid modes are set */
1120     for (int i = satp_mode_map_max - 1; i >= 0; --i) {
1121         if (cpu->cfg.satp_mode.supported & (1 << i)) {
1122             cpu->cfg.satp_mode.map |= (1 << i);
1123         }
1124     }
1125 }
1126 #endif
1127 
1128 void riscv_cpu_finalize_features(RISCVCPU *cpu, Error **errp)
1129 {
1130     Error *local_err = NULL;
1131 
1132 #ifndef CONFIG_USER_ONLY
1133     riscv_cpu_satp_mode_finalize(cpu, &local_err);
1134     if (local_err != NULL) {
1135         error_propagate(errp, local_err);
1136         return;
1137     }
1138 #endif
1139 
1140     if (tcg_enabled()) {
1141         riscv_tcg_cpu_finalize_features(cpu, &local_err);
1142         if (local_err != NULL) {
1143             error_propagate(errp, local_err);
1144             return;
1145         }
1146         riscv_tcg_cpu_finalize_dynamic_decoder(cpu);
1147     } else if (kvm_enabled()) {
1148         riscv_kvm_cpu_finalize_features(cpu, &local_err);
1149         if (local_err != NULL) {
1150             error_propagate(errp, local_err);
1151             return;
1152         }
1153     }
1154 }
1155 
1156 static void riscv_cpu_realize(DeviceState *dev, Error **errp)
1157 {
1158     CPUState *cs = CPU(dev);
1159     RISCVCPU *cpu = RISCV_CPU(dev);
1160     RISCVCPUClass *mcc = RISCV_CPU_GET_CLASS(dev);
1161     Error *local_err = NULL;
1162 
1163     if (object_dynamic_cast(OBJECT(dev), TYPE_RISCV_CPU_ANY) != NULL) {
1164         warn_report("The 'any' CPU is deprecated and will be "
1165                     "removed in the future.");
1166     }
1167 
1168     cpu_exec_realizefn(cs, &local_err);
1169     if (local_err != NULL) {
1170         error_propagate(errp, local_err);
1171         return;
1172     }
1173 
1174     riscv_cpu_finalize_features(cpu, &local_err);
1175     if (local_err != NULL) {
1176         error_propagate(errp, local_err);
1177         return;
1178     }
1179 
1180     riscv_cpu_register_gdb_regs_for_features(cs);
1181 
1182 #ifndef CONFIG_USER_ONLY
1183     if (cpu->cfg.debug) {
1184         riscv_trigger_realize(&cpu->env);
1185     }
1186 #endif
1187 
1188     qemu_init_vcpu(cs);
1189     cpu_reset(cs);
1190 
1191     mcc->parent_realize(dev, errp);
1192 }
1193 
1194 bool riscv_cpu_accelerator_compatible(RISCVCPU *cpu)
1195 {
1196     if (tcg_enabled()) {
1197         return riscv_cpu_tcg_compatible(cpu);
1198     }
1199 
1200     return true;
1201 }
1202 
1203 #ifndef CONFIG_USER_ONLY
1204 static void cpu_riscv_get_satp(Object *obj, Visitor *v, const char *name,
1205                                void *opaque, Error **errp)
1206 {
1207     RISCVSATPMap *satp_map = opaque;
1208     uint8_t satp = satp_mode_from_str(name);
1209     bool value;
1210 
1211     value = satp_map->map & (1 << satp);
1212 
1213     visit_type_bool(v, name, &value, errp);
1214 }
1215 
1216 static void cpu_riscv_set_satp(Object *obj, Visitor *v, const char *name,
1217                                void *opaque, Error **errp)
1218 {
1219     RISCVSATPMap *satp_map = opaque;
1220     uint8_t satp = satp_mode_from_str(name);
1221     bool value;
1222 
1223     if (!visit_type_bool(v, name, &value, errp)) {
1224         return;
1225     }
1226 
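    /*
     * deposit32(map, satp, 1, value) writes 'value' into the single bit
     * at position 'satp'; with the usual satp encoding, "sv39=on" sets
     * bit 8 (VM_1_10_SV39) of the map.  'init' additionally records that
     * the user touched this mode explicitly, which the finalization step
     * uses to tell user choices apart from defaults.
     */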
1227     satp_map->map = deposit32(satp_map->map, satp, 1, value);
1228     satp_map->init |= 1 << satp;
1229 }
1230 
1231 void riscv_add_satp_mode_properties(Object *obj)
1232 {
1233     RISCVCPU *cpu = RISCV_CPU(obj);
1234 
1235     if (cpu->env.misa_mxl == MXL_RV32) {
1236         object_property_add(obj, "sv32", "bool", cpu_riscv_get_satp,
1237                             cpu_riscv_set_satp, NULL, &cpu->cfg.satp_mode);
1238     } else {
1239         object_property_add(obj, "sv39", "bool", cpu_riscv_get_satp,
1240                             cpu_riscv_set_satp, NULL, &cpu->cfg.satp_mode);
1241         object_property_add(obj, "sv48", "bool", cpu_riscv_get_satp,
1242                             cpu_riscv_set_satp, NULL, &cpu->cfg.satp_mode);
1243         object_property_add(obj, "sv57", "bool", cpu_riscv_get_satp,
1244                             cpu_riscv_set_satp, NULL, &cpu->cfg.satp_mode);
1245         object_property_add(obj, "sv64", "bool", cpu_riscv_get_satp,
1246                             cpu_riscv_set_satp, NULL, &cpu->cfg.satp_mode);
1247     }
1248 }
1249 
1250 static void riscv_cpu_set_irq(void *opaque, int irq, int level)
1251 {
1252     RISCVCPU *cpu = RISCV_CPU(opaque);
1253     CPURISCVState *env = &cpu->env;
1254 
1255     if (irq < IRQ_LOCAL_MAX) {
1256         switch (irq) {
1257         case IRQ_U_SOFT:
1258         case IRQ_S_SOFT:
1259         case IRQ_VS_SOFT:
1260         case IRQ_M_SOFT:
1261         case IRQ_U_TIMER:
1262         case IRQ_S_TIMER:
1263         case IRQ_VS_TIMER:
1264         case IRQ_M_TIMER:
1265         case IRQ_U_EXT:
1266         case IRQ_VS_EXT:
1267         case IRQ_M_EXT:
1268             if (kvm_enabled()) {
1269                 kvm_riscv_set_irq(cpu, irq, level);
1270             } else {
1271                 riscv_cpu_update_mip(env, 1 << irq, BOOL_TO_MASK(level));
1272             }
1273             break;
1274         case IRQ_S_EXT:
1275             if (kvm_enabled()) {
1276                 kvm_riscv_set_irq(cpu, irq, level);
1277             } else {
1278                 env->external_seip = level;
1279                 riscv_cpu_update_mip(env, 1 << irq,
1280                                      BOOL_TO_MASK(level | env->software_seip));
1281             }
1282             break;
1283         default:
1284             g_assert_not_reached();
1285         }
1286     } else if (irq < (IRQ_LOCAL_MAX + IRQ_LOCAL_GUEST_MAX)) {
1287         /* Require H-extension for handling guest local interrupts */
1288         if (!riscv_has_ext(env, RVH)) {
1289             g_assert_not_reached();
1290         }
1291 
1292         /* Compute bit position in HGEIP CSR */
1293         irq = irq - IRQ_LOCAL_MAX + 1;
1294         if (env->geilen < irq) {
1295             g_assert_not_reached();
1296         }
1297 
1298         /* Update HGEIP CSR */
1299         env->hgeip &= ~((target_ulong)1 << irq);
1300         if (level) {
1301             env->hgeip |= (target_ulong)1 << irq;
1302         }
1303 
1304         /* Update mip.SGEIP bit */
1305         riscv_cpu_update_mip(env, MIP_SGEIP,
1306                              BOOL_TO_MASK(!!(env->hgeie & env->hgeip)));
1307     } else {
1308         g_assert_not_reached();
1309     }
1310 }
1311 #endif /* CONFIG_USER_ONLY */
1312 
1313 static bool riscv_cpu_is_dynamic(Object *cpu_obj)
1314 {
1315     return object_dynamic_cast(cpu_obj, TYPE_RISCV_DYNAMIC_CPU) != NULL;
1316 }
1317 
1318 static void riscv_cpu_post_init(Object *obj)
1319 {
1320     accel_cpu_instance_init(CPU(obj));
1321 }
1322 
1323 static void riscv_cpu_init(Object *obj)
1324 {
1325     RISCVCPUClass *mcc = RISCV_CPU_GET_CLASS(obj);
1326     RISCVCPU *cpu = RISCV_CPU(obj);
1327     CPURISCVState *env = &cpu->env;
1328 
1329     env->misa_mxl = mcc->misa_mxl_max;
1330 
1331 #ifndef CONFIG_USER_ONLY
1332     qdev_init_gpio_in(DEVICE(obj), riscv_cpu_set_irq,
1333                       IRQ_LOCAL_MAX + IRQ_LOCAL_GUEST_MAX);
1334 #endif /* CONFIG_USER_ONLY */
1335 
1336     general_user_opts = g_hash_table_new(g_str_hash, g_str_equal);
1337 
1338     /*
1339      * The timer and performance counter extensions were supported
1340      * in QEMU before they were added as discrete extensions in the
1341      * ISA. To keep compatibility, we'll always default them to 'true'
1342      * for all CPUs. Each accelerator will decide what to do when
1343      * users disable them.
1344      */
1345     RISCV_CPU(obj)->cfg.ext_zicntr = true;
1346     RISCV_CPU(obj)->cfg.ext_zihpm = true;
1347 
1348     /* Default values for non-bool cpu properties */
1349     cpu->cfg.pmu_mask = MAKE_64BIT_MASK(3, 16);
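    /* vlenb is in bytes: 128 >> 3 == 16, i.e. a default VLEN of 128 bits */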
1350     cpu->cfg.vlenb = 128 >> 3;
1351     cpu->cfg.elen = 64;
1352     cpu->cfg.cbom_blocksize = 64;
1353     cpu->cfg.cbop_blocksize = 64;
1354     cpu->cfg.cboz_blocksize = 64;
1355     cpu->env.vext_ver = VEXT_VERSION_1_00_0;
1356 }
1357 
1358 static void riscv_bare_cpu_init(Object *obj)
1359 {
1360     RISCVCPU *cpu = RISCV_CPU(obj);
1361 
1362     /*
1363      * Bare CPUs do not inherit the timer and performance
1364      * counters from the parent class (see riscv_cpu_init()
1365      * for info on why the parent enables them).
1366      *
1367      * Users have to explicitly enable these counters for
1368      * bare CPUs.
1369      */
1370     cpu->cfg.ext_zicntr = false;
1371     cpu->cfg.ext_zihpm = false;
1372 
1373     /* Set to QEMU's first supported priv version */
1374     cpu->env.priv_ver = PRIV_VERSION_1_10_0;
1375 
1376     /*
1377      * Support all available satp_mode settings. The default
1378      * value will be set to MBARE if the user doesn't set
1379      * satp_mode manually (see set_satp_mode_default_map()).
1380      */
1381 #ifndef CONFIG_USER_ONLY
1382     set_satp_mode_max_supported(cpu, VM_1_10_SV64);
1383 #endif
1384 }
1385 
1386 typedef struct misa_ext_info {
1387     const char *name;
1388     const char *description;
1389 } MISAExtInfo;
1390 
1391 #define MISA_INFO_IDX(_bit) \
1392     __builtin_ctz(_bit)
1393 
1394 #define MISA_EXT_INFO(_bit, _propname, _descr) \
1395     [MISA_INFO_IDX(_bit)] = {.name = _propname, .description = _descr}
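/*
 * For instance, with the usual RV('C') == 1 << 2 encoding,
 * MISA_INFO_IDX(RVC) is __builtin_ctz(1 << 2) == 2, so the RVC entry
 * below lands at index 2 of misa_ext_info_arr[].
 */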
1396 
1397 static const MISAExtInfo misa_ext_info_arr[] = {
1398     MISA_EXT_INFO(RVA, "a", "Atomic instructions"),
1399     MISA_EXT_INFO(RVC, "c", "Compressed instructions"),
1400     MISA_EXT_INFO(RVD, "d", "Double-precision floating point"),
1401     MISA_EXT_INFO(RVF, "f", "Single-precision floating point"),
1402     MISA_EXT_INFO(RVI, "i", "Base integer instruction set"),
1403     MISA_EXT_INFO(RVE, "e", "Base integer instruction set (embedded)"),
1404     MISA_EXT_INFO(RVM, "m", "Integer multiplication and division"),
1405     MISA_EXT_INFO(RVS, "s", "Supervisor-level instructions"),
1406     MISA_EXT_INFO(RVU, "u", "User-level instructions"),
1407     MISA_EXT_INFO(RVH, "h", "Hypervisor"),
1408     MISA_EXT_INFO(RVJ, "x-j", "Dynamically translated languages"),
1409     MISA_EXT_INFO(RVV, "v", "Vector operations"),
1410     MISA_EXT_INFO(RVG, "g", "General purpose (IMAFD_Zicsr_Zifencei)"),
1411     MISA_EXT_INFO(RVB, "b", "Bit manipulation (Zba_Zbb_Zbs)")
1412 };
1413 
1414 static void riscv_cpu_validate_misa_mxl(RISCVCPUClass *mcc)
1415 {
1416     CPUClass *cc = CPU_CLASS(mcc);
1417 
1418     /* Validate that MISA_MXL is set properly. */
1419     switch (mcc->misa_mxl_max) {
1420 #ifdef TARGET_RISCV64
1421     case MXL_RV64:
1422     case MXL_RV128:
1423         cc->gdb_core_xml_file = "riscv-64bit-cpu.xml";
1424         break;
1425 #endif
1426     case MXL_RV32:
1427         cc->gdb_core_xml_file = "riscv-32bit-cpu.xml";
1428         break;
1429     default:
1430         g_assert_not_reached();
1431     }
1432 }
1433 
1434 static int riscv_validate_misa_info_idx(uint32_t bit)
1435 {
1436     int idx;
1437 
1438     /*
1439      * Our lowest valid input (RVA) is 1 and
1440      * __builtin_ctz() is UB with zero.
1441      */
1442     g_assert(bit != 0);
1443     idx = MISA_INFO_IDX(bit);
1444 
1445     g_assert(idx < ARRAY_SIZE(misa_ext_info_arr));
1446     return idx;
1447 }
1448 
1449 const char *riscv_get_misa_ext_name(uint32_t bit)
1450 {
1451     int idx = riscv_validate_misa_info_idx(bit);
1452     const char *val = misa_ext_info_arr[idx].name;
1453 
1454     g_assert(val != NULL);
1455     return val;
1456 }
1457 
1458 const char *riscv_get_misa_ext_description(uint32_t bit)
1459 {
1460     int idx = riscv_validate_misa_info_idx(bit);
1461     const char *val = misa_ext_info_arr[idx].description;
1462 
1463     g_assert(val != NULL);
1464     return val;
1465 }
1466 
1467 #define MULTI_EXT_CFG_BOOL(_name, _prop, _defval) \
1468     {.name = _name, .offset = CPU_CFG_OFFSET(_prop), \
1469      .enabled = _defval}
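/*
 * For example, MULTI_EXT_CFG_BOOL("zicsr", ext_zicsr, true) expands to
 * {.name = "zicsr", .offset = CPU_CFG_OFFSET(ext_zicsr), .enabled = true}:
 * the user-visible property name, the location of the flag in cpu->cfg,
 * and its default value.
 */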
1470 
1471 const RISCVCPUMultiExtConfig riscv_cpu_extensions[] = {
1472     /* Defaults for standard extensions */
1473     MULTI_EXT_CFG_BOOL("sscofpmf", ext_sscofpmf, false),
1474     MULTI_EXT_CFG_BOOL("zifencei", ext_zifencei, true),
1475     MULTI_EXT_CFG_BOOL("zicsr", ext_zicsr, true),
1476     MULTI_EXT_CFG_BOOL("zihintntl", ext_zihintntl, true),
1477     MULTI_EXT_CFG_BOOL("zihintpause", ext_zihintpause, true),
1478     MULTI_EXT_CFG_BOOL("zimop", ext_zimop, false),
1479     MULTI_EXT_CFG_BOOL("zcmop", ext_zcmop, false),
1480     MULTI_EXT_CFG_BOOL("zacas", ext_zacas, false),
1481     MULTI_EXT_CFG_BOOL("zama16b", ext_zama16b, false),
1482     MULTI_EXT_CFG_BOOL("zabha", ext_zabha, false),
1483     MULTI_EXT_CFG_BOOL("zaamo", ext_zaamo, false),
1484     MULTI_EXT_CFG_BOOL("zalrsc", ext_zalrsc, false),
1485     MULTI_EXT_CFG_BOOL("zawrs", ext_zawrs, true),
1486     MULTI_EXT_CFG_BOOL("zfa", ext_zfa, true),
1487     MULTI_EXT_CFG_BOOL("zfbfmin", ext_zfbfmin, false),
1488     MULTI_EXT_CFG_BOOL("zfh", ext_zfh, false),
1489     MULTI_EXT_CFG_BOOL("zfhmin", ext_zfhmin, false),
1490     MULTI_EXT_CFG_BOOL("zve32f", ext_zve32f, false),
1491     MULTI_EXT_CFG_BOOL("zve32x", ext_zve32x, false),
1492     MULTI_EXT_CFG_BOOL("zve64f", ext_zve64f, false),
1493     MULTI_EXT_CFG_BOOL("zve64d", ext_zve64d, false),
1494     MULTI_EXT_CFG_BOOL("zve64x", ext_zve64x, false),
1495     MULTI_EXT_CFG_BOOL("zvfbfmin", ext_zvfbfmin, false),
1496     MULTI_EXT_CFG_BOOL("zvfbfwma", ext_zvfbfwma, false),
1497     MULTI_EXT_CFG_BOOL("zvfh", ext_zvfh, false),
1498     MULTI_EXT_CFG_BOOL("zvfhmin", ext_zvfhmin, false),
1499     MULTI_EXT_CFG_BOOL("sstc", ext_sstc, true),
1500 
1501     MULTI_EXT_CFG_BOOL("smaia", ext_smaia, false),
1502     MULTI_EXT_CFG_BOOL("smepmp", ext_smepmp, false),
1503     MULTI_EXT_CFG_BOOL("smstateen", ext_smstateen, false),
1504     MULTI_EXT_CFG_BOOL("ssaia", ext_ssaia, false),
1505     MULTI_EXT_CFG_BOOL("svade", ext_svade, false),
1506     MULTI_EXT_CFG_BOOL("svadu", ext_svadu, true),
1507     MULTI_EXT_CFG_BOOL("svinval", ext_svinval, false),
1508     MULTI_EXT_CFG_BOOL("svnapot", ext_svnapot, false),
1509     MULTI_EXT_CFG_BOOL("svpbmt", ext_svpbmt, false),
1510 
1511     MULTI_EXT_CFG_BOOL("zicntr", ext_zicntr, true),
1512     MULTI_EXT_CFG_BOOL("zihpm", ext_zihpm, true),
1513 
1514     MULTI_EXT_CFG_BOOL("zba", ext_zba, true),
1515     MULTI_EXT_CFG_BOOL("zbb", ext_zbb, true),
1516     MULTI_EXT_CFG_BOOL("zbc", ext_zbc, true),
1517     MULTI_EXT_CFG_BOOL("zbkb", ext_zbkb, false),
1518     MULTI_EXT_CFG_BOOL("zbkc", ext_zbkc, false),
1519     MULTI_EXT_CFG_BOOL("zbkx", ext_zbkx, false),
1520     MULTI_EXT_CFG_BOOL("zbs", ext_zbs, true),
1521     MULTI_EXT_CFG_BOOL("zk", ext_zk, false),
1522     MULTI_EXT_CFG_BOOL("zkn", ext_zkn, false),
1523     MULTI_EXT_CFG_BOOL("zknd", ext_zknd, false),
1524     MULTI_EXT_CFG_BOOL("zkne", ext_zkne, false),
1525     MULTI_EXT_CFG_BOOL("zknh", ext_zknh, false),
1526     MULTI_EXT_CFG_BOOL("zkr", ext_zkr, false),
1527     MULTI_EXT_CFG_BOOL("zks", ext_zks, false),
1528     MULTI_EXT_CFG_BOOL("zksed", ext_zksed, false),
1529     MULTI_EXT_CFG_BOOL("zksh", ext_zksh, false),
1530     MULTI_EXT_CFG_BOOL("zkt", ext_zkt, false),
1531     MULTI_EXT_CFG_BOOL("ztso", ext_ztso, false),
1532 
1533     MULTI_EXT_CFG_BOOL("zdinx", ext_zdinx, false),
1534     MULTI_EXT_CFG_BOOL("zfinx", ext_zfinx, false),
1535     MULTI_EXT_CFG_BOOL("zhinx", ext_zhinx, false),
1536     MULTI_EXT_CFG_BOOL("zhinxmin", ext_zhinxmin, false),
1537 
1538     MULTI_EXT_CFG_BOOL("zicbom", ext_zicbom, true),
1539     MULTI_EXT_CFG_BOOL("zicbop", ext_zicbop, true),
1540     MULTI_EXT_CFG_BOOL("zicboz", ext_zicboz, true),
1541 
1542     MULTI_EXT_CFG_BOOL("zmmul", ext_zmmul, false),
1543 
1544     MULTI_EXT_CFG_BOOL("zca", ext_zca, false),
1545     MULTI_EXT_CFG_BOOL("zcb", ext_zcb, false),
1546     MULTI_EXT_CFG_BOOL("zcd", ext_zcd, false),
1547     MULTI_EXT_CFG_BOOL("zce", ext_zce, false),
1548     MULTI_EXT_CFG_BOOL("zcf", ext_zcf, false),
1549     MULTI_EXT_CFG_BOOL("zcmp", ext_zcmp, false),
1550     MULTI_EXT_CFG_BOOL("zcmt", ext_zcmt, false),
1551     MULTI_EXT_CFG_BOOL("zicond", ext_zicond, false),
1552 
1553     /* Vector cryptography extensions */
1554     MULTI_EXT_CFG_BOOL("zvbb", ext_zvbb, false),
1555     MULTI_EXT_CFG_BOOL("zvbc", ext_zvbc, false),
1556     MULTI_EXT_CFG_BOOL("zvkb", ext_zvkb, false),
1557     MULTI_EXT_CFG_BOOL("zvkg", ext_zvkg, false),
1558     MULTI_EXT_CFG_BOOL("zvkned", ext_zvkned, false),
1559     MULTI_EXT_CFG_BOOL("zvknha", ext_zvknha, false),
1560     MULTI_EXT_CFG_BOOL("zvknhb", ext_zvknhb, false),
1561     MULTI_EXT_CFG_BOOL("zvksed", ext_zvksed, false),
1562     MULTI_EXT_CFG_BOOL("zvksh", ext_zvksh, false),
1563     MULTI_EXT_CFG_BOOL("zvkt", ext_zvkt, false),
1564     MULTI_EXT_CFG_BOOL("zvkn", ext_zvkn, false),
1565     MULTI_EXT_CFG_BOOL("zvknc", ext_zvknc, false),
1566     MULTI_EXT_CFG_BOOL("zvkng", ext_zvkng, false),
1567     MULTI_EXT_CFG_BOOL("zvks", ext_zvks, false),
1568     MULTI_EXT_CFG_BOOL("zvksc", ext_zvksc, false),
1569     MULTI_EXT_CFG_BOOL("zvksg", ext_zvksg, false),
1570 
1571     DEFINE_PROP_END_OF_LIST(),
1572 };
1573 
1574 const RISCVCPUMultiExtConfig riscv_cpu_vendor_exts[] = {
1575     MULTI_EXT_CFG_BOOL("xtheadba", ext_xtheadba, false),
1576     MULTI_EXT_CFG_BOOL("xtheadbb", ext_xtheadbb, false),
1577     MULTI_EXT_CFG_BOOL("xtheadbs", ext_xtheadbs, false),
1578     MULTI_EXT_CFG_BOOL("xtheadcmo", ext_xtheadcmo, false),
1579     MULTI_EXT_CFG_BOOL("xtheadcondmov", ext_xtheadcondmov, false),
1580     MULTI_EXT_CFG_BOOL("xtheadfmemidx", ext_xtheadfmemidx, false),
1581     MULTI_EXT_CFG_BOOL("xtheadfmv", ext_xtheadfmv, false),
1582     MULTI_EXT_CFG_BOOL("xtheadmac", ext_xtheadmac, false),
1583     MULTI_EXT_CFG_BOOL("xtheadmemidx", ext_xtheadmemidx, false),
1584     MULTI_EXT_CFG_BOOL("xtheadmempair", ext_xtheadmempair, false),
1585     MULTI_EXT_CFG_BOOL("xtheadsync", ext_xtheadsync, false),
1586     MULTI_EXT_CFG_BOOL("xventanacondops", ext_XVentanaCondOps, false),
1587 
1588     DEFINE_PROP_END_OF_LIST(),
1589 };
1590 
1591 /* These are experimental so mark with 'x-' */
1592 const RISCVCPUMultiExtConfig riscv_cpu_experimental_exts[] = {
1593     DEFINE_PROP_END_OF_LIST(),
1594 };
1595 
1596 /*
1597  * 'Named features' is the name we give to extensions that we
1598  * don't want to expose to users. They are either immutable
1599  * (always enabled/disabled) or their value varies depending on
1600  * the resulting CPU state. They have riscv,isa strings
1601  * and priv_ver like regular extensions.
1602  */
1603 const RISCVCPUMultiExtConfig riscv_cpu_named_features[] = {
1604     MULTI_EXT_CFG_BOOL("zic64b", ext_zic64b, true),
1605 
1606     DEFINE_PROP_END_OF_LIST(),
1607 };
1608 
1609 /* Deprecated entries marked for future removal */
1610 const RISCVCPUMultiExtConfig riscv_cpu_deprecated_exts[] = {
1611     MULTI_EXT_CFG_BOOL("Zifencei", ext_zifencei, true),
1612     MULTI_EXT_CFG_BOOL("Zicsr", ext_zicsr, true),
1613     MULTI_EXT_CFG_BOOL("Zihintntl", ext_zihintntl, true),
1614     MULTI_EXT_CFG_BOOL("Zihintpause", ext_zihintpause, true),
1615     MULTI_EXT_CFG_BOOL("Zawrs", ext_zawrs, true),
1616     MULTI_EXT_CFG_BOOL("Zfa", ext_zfa, true),
1617     MULTI_EXT_CFG_BOOL("Zfh", ext_zfh, false),
1618     MULTI_EXT_CFG_BOOL("Zfhmin", ext_zfhmin, false),
1619     MULTI_EXT_CFG_BOOL("Zve32f", ext_zve32f, false),
1620     MULTI_EXT_CFG_BOOL("Zve64f", ext_zve64f, false),
1621     MULTI_EXT_CFG_BOOL("Zve64d", ext_zve64d, false),
1622 
1623     DEFINE_PROP_END_OF_LIST(),
1624 };
1625 
1626 static void cpu_set_prop_err(RISCVCPU *cpu, const char *propname,
1627                              Error **errp)
1628 {
1629     g_autofree char *cpuname = riscv_cpu_get_name(cpu);
1630     error_setg(errp, "CPU '%s' does not allow changing the value of '%s'",
1631                cpuname, propname);
1632 }
1633 
1634 static void prop_pmu_num_set(Object *obj, Visitor *v, const char *name,
1635                              void *opaque, Error **errp)
1636 {
1637     RISCVCPU *cpu = RISCV_CPU(obj);
1638     uint8_t pmu_num, curr_pmu_num;
1639     uint32_t pmu_mask;
1640 
1641     if (!visit_type_uint8(v, name, &pmu_num, errp)) {
             return;
         }
1642 
1643     curr_pmu_num = ctpop32(cpu->cfg.pmu_mask);
1644 
1645     if (pmu_num != curr_pmu_num && riscv_cpu_is_vendor(obj)) {
1646         cpu_set_prop_err(cpu, name, errp);
1647         error_append_hint(errp, "Current '%s' val: %u\n",
1648                           name, curr_pmu_num);
1649         return;
1650     }
1651 
1652     if (pmu_num > (RV_MAX_MHPMCOUNTERS - 3)) {
1653         error_setg(errp, "Number of counters exceeds maximum available");
1654         return;
1655     }
1656 
1657     if (pmu_num == 0) {
1658         pmu_mask = 0;
1659     } else {
1660         pmu_mask = MAKE_64BIT_MASK(3, pmu_num);
1661     }
1662 
1663     warn_report("\"pmu-num\" property is deprecated; use \"pmu-mask\"");
1664     cpu->cfg.pmu_mask = pmu_mask;
1665     cpu_option_add_user_setting("pmu-mask", pmu_mask);
1666 }
1667 
1668 static void prop_pmu_num_get(Object *obj, Visitor *v, const char *name,
1669                              void *opaque, Error **errp)
1670 {
1671     RISCVCPU *cpu = RISCV_CPU(obj);
1672     uint8_t pmu_num = ctpop32(cpu->cfg.pmu_mask);
1673 
1674     visit_type_uint8(v, name, &pmu_num, errp);
1675 }
1676 
1677 static const PropertyInfo prop_pmu_num = {
1678     .name = "pmu-num",
1679     .get = prop_pmu_num_get,
1680     .set = prop_pmu_num_set,
1681 };
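/*
 * Illustrative sketch (not authoritative): the deprecated "pmu-num" setter
 * above converts a counter count into the equivalent "pmu-mask". For
 * example, pmu-num=6 becomes MAKE_64BIT_MASK(3, 6) == 0x1f8, i.e. bits
 * 3..8 set, selecting mhpmcounter3..mhpmcounter8. Assuming the
 * MAKE_64BIT_MASK(shift, length) semantics from "qemu/bitops.h", that is:
 *
 *     pmu_mask = ((1ULL << 6) - 1) << 3;    // 0x3f << 3 == 0x1f8
 */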
1682 
1683 static void prop_pmu_mask_set(Object *obj, Visitor *v, const char *name,
1684                              void *opaque, Error **errp)
1685 {
1686     RISCVCPU *cpu = RISCV_CPU(obj);
1687     uint32_t value;
1688     uint8_t pmu_num;
1689 
1690     if (!visit_type_uint32(v, name, &value, errp)) {
             return;
         }
1691 
1692     if (value != cpu->cfg.pmu_mask && riscv_cpu_is_vendor(obj)) {
1693         cpu_set_prop_err(cpu, name, errp);
1694         error_append_hint(errp, "Current '%s' val: %x\n",
1695                           name, cpu->cfg.pmu_mask);
1696         return;
1697     }
1698 
1699     pmu_num = ctpop32(value);
1700 
1701     if (pmu_num > (RV_MAX_MHPMCOUNTERS - 3)) {
1702         error_setg(errp, "Number of counters exceeds maximum available");
1703         return;
1704     }
1705 
1706     cpu_option_add_user_setting(name, value);
1707     cpu->cfg.pmu_mask = value;
1708 }
1709 
1710 static void prop_pmu_mask_get(Object *obj, Visitor *v, const char *name,
1711                              void *opaque, Error **errp)
1712 {
1713     uint32_t pmu_mask = RISCV_CPU(obj)->cfg.pmu_mask;
1714 
1715     visit_type_uint32(v, name, &pmu_mask, errp);
1716 }
1717 
1718 static const PropertyInfo prop_pmu_mask = {
1719     .name = "pmu-mask",
1720     .get = prop_pmu_mask_get,
1721     .set = prop_pmu_mask_set,
1722 };
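/*
 * Illustrative usage, assuming the usual -cpu property syntax: "pmu-mask"
 * selects which mhpmcounters are implemented, one bit per counter index.
 * A hypothetical invocation would be:
 *
 *     qemu-system-riscv64 -cpu rv64,pmu-mask=0x1f8
 *
 * which enables mhpmcounter3..mhpmcounter8, the same result as the
 * deprecated pmu-num=6.
 */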
1723 
1724 static void prop_mmu_set(Object *obj, Visitor *v, const char *name,
1725                          void *opaque, Error **errp)
1726 {
1727     RISCVCPU *cpu = RISCV_CPU(obj);
1728     bool value;
1729 
1730     if (!visit_type_bool(v, name, &value, errp)) {
             return;
         }
1731 
1732     if (cpu->cfg.mmu != value && riscv_cpu_is_vendor(obj)) {
1733         cpu_set_prop_err(cpu, "mmu", errp);
1734         return;
1735     }
1736 
1737     cpu_option_add_user_setting(name, value);
1738     cpu->cfg.mmu = value;
1739 }
1740 
1741 static void prop_mmu_get(Object *obj, Visitor *v, const char *name,
1742                          void *opaque, Error **errp)
1743 {
1744     bool value = RISCV_CPU(obj)->cfg.mmu;
1745 
1746     visit_type_bool(v, name, &value, errp);
1747 }
1748 
1749 static const PropertyInfo prop_mmu = {
1750     .name = "mmu",
1751     .get = prop_mmu_get,
1752     .set = prop_mmu_set,
1753 };
1754 
1755 static void prop_pmp_set(Object *obj, Visitor *v, const char *name,
1756                          void *opaque, Error **errp)
1757 {
1758     RISCVCPU *cpu = RISCV_CPU(obj);
1759     bool value;
1760 
1761     if (!visit_type_bool(v, name, &value, errp)) {
             return;
         }
1762 
1763     if (cpu->cfg.pmp != value && riscv_cpu_is_vendor(obj)) {
1764         cpu_set_prop_err(cpu, name, errp);
1765         return;
1766     }
1767 
1768     cpu_option_add_user_setting(name, value);
1769     cpu->cfg.pmp = value;
1770 }
1771 
1772 static void prop_pmp_get(Object *obj, Visitor *v, const char *name,
1773                          void *opaque, Error **errp)
1774 {
1775     bool value = RISCV_CPU(obj)->cfg.pmp;
1776 
1777     visit_type_bool(v, name, &value, errp);
1778 }
1779 
1780 static const PropertyInfo prop_pmp = {
1781     .name = "pmp",
1782     .get = prop_pmp_get,
1783     .set = prop_pmp_set,
1784 };
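/*
 * Illustrative sketch: "mmu" and "pmp" follow the same pattern, where
 * dynamic CPUs accept any value while vendor CPUs only accept the value
 * they already have. Assuming the "rv64" (dynamic) and "sifive-u54"
 * (vendor) CPU names:
 *
 *     -cpu rv64,mmu=off,pmp=off          accepted
 *     -cpu sifive-u54,mmu=off            rejected via cpu_set_prop_err()
 */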
1785 
1786 static int priv_spec_from_str(const char *priv_spec_str)
1787 {
1788     int priv_version = -1;
1789 
1790     if (!g_strcmp0(priv_spec_str, PRIV_VER_1_13_0_STR)) {
1791         priv_version = PRIV_VERSION_1_13_0;
1792     } else if (!g_strcmp0(priv_spec_str, PRIV_VER_1_12_0_STR)) {
1793         priv_version = PRIV_VERSION_1_12_0;
1794     } else if (!g_strcmp0(priv_spec_str, PRIV_VER_1_11_0_STR)) {
1795         priv_version = PRIV_VERSION_1_11_0;
1796     } else if (!g_strcmp0(priv_spec_str, PRIV_VER_1_10_0_STR)) {
1797         priv_version = PRIV_VERSION_1_10_0;
1798     }
1799 
1800     return priv_version;
1801 }
1802 
1803 const char *priv_spec_to_str(int priv_version)
1804 {
1805     switch (priv_version) {
1806     case PRIV_VERSION_1_10_0:
1807         return PRIV_VER_1_10_0_STR;
1808     case PRIV_VERSION_1_11_0:
1809         return PRIV_VER_1_11_0_STR;
1810     case PRIV_VERSION_1_12_0:
1811         return PRIV_VER_1_12_0_STR;
1812     case PRIV_VERSION_1_13_0:
1813         return PRIV_VER_1_13_0_STR;
1814     default:
1815         return NULL;
1816     }
1817 }
1818 
1819 static void prop_priv_spec_set(Object *obj, Visitor *v, const char *name,
1820                                void *opaque, Error **errp)
1821 {
1822     RISCVCPU *cpu = RISCV_CPU(obj);
1823     g_autofree char *value = NULL;
1824     int priv_version = -1;
1825 
1826     if (!visit_type_str(v, name, &value, errp)) {
             return;
         }
1827 
1828     priv_version = priv_spec_from_str(value);
1829     if (priv_version < 0) {
1830         error_setg(errp, "Unsupported privilege spec version '%s'", value);
1831         return;
1832     }
1833 
1834     if (priv_version != cpu->env.priv_ver && riscv_cpu_is_vendor(obj)) {
1835         cpu_set_prop_err(cpu, name, errp);
1836         error_append_hint(errp, "Current '%s' val: %s\n", name,
1837                           object_property_get_str(obj, name, NULL));
1838         return;
1839     }
1840 
1841     cpu_option_add_user_setting(name, priv_version);
1842     cpu->env.priv_ver = priv_version;
1843 }
1844 
1845 static void prop_priv_spec_get(Object *obj, Visitor *v, const char *name,
1846                                void *opaque, Error **errp)
1847 {
1848     RISCVCPU *cpu = RISCV_CPU(obj);
1849     const char *value = priv_spec_to_str(cpu->env.priv_ver);
1850 
1851     visit_type_str(v, name, (char **)&value, errp);
1852 }
1853 
1854 static const PropertyInfo prop_priv_spec = {
1855     .name = "priv_spec",
1856     .get = prop_priv_spec_get,
1857     .set = prop_priv_spec_set,
1858 };
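/*
 * Illustrative usage: "priv_spec" accepts one of the version strings
 * handled by priv_spec_from_str() above. Assuming the PRIV_VER_*_STR
 * macros expand to strings of the form "v1.12.0", a hypothetical
 * invocation would be:
 *
 *     qemu-system-riscv64 -cpu rv64,priv_spec=v1.12.0
 */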
1859 
1860 static void prop_vext_spec_set(Object *obj, Visitor *v, const char *name,
1861                                void *opaque, Error **errp)
1862 {
1863     RISCVCPU *cpu = RISCV_CPU(obj);
1864     g_autofree char *value = NULL;
1865 
1866     if (!visit_type_str(v, name, &value, errp)) {
             return;
         }
1867 
1868     if (g_strcmp0(value, VEXT_VER_1_00_0_STR) != 0) {
1869         error_setg(errp, "Unsupported vector spec version '%s'", value);
1870         return;
1871     }
1872 
1873     cpu_option_add_user_setting(name, VEXT_VERSION_1_00_0);
1874     cpu->env.vext_ver = VEXT_VERSION_1_00_0;
1875 }
1876 
1877 static void prop_vext_spec_get(Object *obj, Visitor *v, const char *name,
1878                                void *opaque, Error **errp)
1879 {
1880     const char *value = VEXT_VER_1_00_0_STR;
1881 
1882     visit_type_str(v, name, (char **)&value, errp);
1883 }
1884 
1885 static const PropertyInfo prop_vext_spec = {
1886     .name = "vext_spec",
1887     .get = prop_vext_spec_get,
1888     .set = prop_vext_spec_set,
1889 };
1890 
1891 static void prop_vlen_set(Object *obj, Visitor *v, const char *name,
1892                          void *opaque, Error **errp)
1893 {
1894     RISCVCPU *cpu = RISCV_CPU(obj);
1895     uint16_t value;
1896 
1897     if (!visit_type_uint16(v, name, &value, errp)) {
1898         return;
1899     }
1900 
1901     if (!is_power_of_2(value)) {
1902         error_setg(errp, "Vector extension VLEN must be power of 2");
1903         return;
1904     }
1905 
1906     if (value != (cpu->cfg.vlenb << 3) && riscv_cpu_is_vendor(obj)) {
1907         cpu_set_prop_err(cpu, name, errp);
1908         error_append_hint(errp, "Current '%s' val: %u\n",
1909                           name, cpu->cfg.vlenb << 3);
1910         return;
1911     }
1912 
1913     cpu_option_add_user_setting(name, value);
1914     cpu->cfg.vlenb = value >> 3;
1915 }
1916 
1917 static void prop_vlen_get(Object *obj, Visitor *v, const char *name,
1918                          void *opaque, Error **errp)
1919 {
1920     uint16_t value = RISCV_CPU(obj)->cfg.vlenb << 3;
1921 
1922     visit_type_uint16(v, name, &value, errp);
1923 }
1924 
1925 static const PropertyInfo prop_vlen = {
1926     .name = "vlen",
1927     .get = prop_vlen_get,
1928     .set = prop_vlen_set,
1929 };
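/*
 * Illustrative sketch: "vlen" is exposed in bits but stored as cfg.vlenb
 * in bytes, so vlen=256 stores vlenb = 256 >> 3 = 32 and the getter
 * reports vlenb << 3 = 256 again. A hypothetical invocation, assuming the
 * 'v' MISA bit is exposed as a boolean property:
 *
 *     qemu-system-riscv64 -cpu rv64,v=on,vlen=256,elen=64
 */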
1930 
1931 static void prop_elen_set(Object *obj, Visitor *v, const char *name,
1932                          void *opaque, Error **errp)
1933 {
1934     RISCVCPU *cpu = RISCV_CPU(obj);
1935     uint16_t value;
1936 
1937     if (!visit_type_uint16(v, name, &value, errp)) {
1938         return;
1939     }
1940 
1941     if (!is_power_of_2(value)) {
1942         error_setg(errp, "Vector extension ELEN must be power of 2");
1943         return;
1944     }
1945 
1946     if (value != cpu->cfg.elen && riscv_cpu_is_vendor(obj)) {
1947         cpu_set_prop_err(cpu, name, errp);
1948         error_append_hint(errp, "Current '%s' val: %u\n",
1949                           name, cpu->cfg.elen);
1950         return;
1951     }
1952 
1953     cpu_option_add_user_setting(name, value);
1954     cpu->cfg.elen = value;
1955 }
1956 
1957 static void prop_elen_get(Object *obj, Visitor *v, const char *name,
1958                          void *opaque, Error **errp)
1959 {
1960     uint16_t value = RISCV_CPU(obj)->cfg.elen;
1961 
1962     visit_type_uint16(v, name, &value, errp);
1963 }
1964 
1965 static const PropertyInfo prop_elen = {
1966     .name = "elen",
1967     .get = prop_elen_get,
1968     .set = prop_elen_set,
1969 };
1970 
1971 static void prop_cbom_blksize_set(Object *obj, Visitor *v, const char *name,
1972                                   void *opaque, Error **errp)
1973 {
1974     RISCVCPU *cpu = RISCV_CPU(obj);
1975     uint16_t value;
1976 
1977     if (!visit_type_uint16(v, name, &value, errp)) {
1978         return;
1979     }
1980 
1981     if (value != cpu->cfg.cbom_blocksize && riscv_cpu_is_vendor(obj)) {
1982         cpu_set_prop_err(cpu, name, errp);
1983         error_append_hint(errp, "Current '%s' val: %u\n",
1984                           name, cpu->cfg.cbom_blocksize);
1985         return;
1986     }
1987 
1988     cpu_option_add_user_setting(name, value);
1989     cpu->cfg.cbom_blocksize = value;
1990 }
1991 
1992 static void prop_cbom_blksize_get(Object *obj, Visitor *v, const char *name,
1993                          void *opaque, Error **errp)
1994 {
1995     uint16_t value = RISCV_CPU(obj)->cfg.cbom_blocksize;
1996 
1997     visit_type_uint16(v, name, &value, errp);
1998 }
1999 
2000 static const PropertyInfo prop_cbom_blksize = {
2001     .name = "cbom_blocksize",
2002     .get = prop_cbom_blksize_get,
2003     .set = prop_cbom_blksize_set,
2004 };
2005 
2006 static void prop_cbop_blksize_set(Object *obj, Visitor *v, const char *name,
2007                                   void *opaque, Error **errp)
2008 {
2009     RISCVCPU *cpu = RISCV_CPU(obj);
2010     uint16_t value;
2011 
2012     if (!visit_type_uint16(v, name, &value, errp)) {
2013         return;
2014     }
2015 
2016     if (value != cpu->cfg.cbop_blocksize && riscv_cpu_is_vendor(obj)) {
2017         cpu_set_prop_err(cpu, name, errp);
2018         error_append_hint(errp, "Current '%s' val: %u\n",
2019                           name, cpu->cfg.cbop_blocksize);
2020         return;
2021     }
2022 
2023     cpu_option_add_user_setting(name, value);
2024     cpu->cfg.cbop_blocksize = value;
2025 }
2026 
2027 static void prop_cbop_blksize_get(Object *obj, Visitor *v, const char *name,
2028                          void *opaque, Error **errp)
2029 {
2030     uint16_t value = RISCV_CPU(obj)->cfg.cbop_blocksize;
2031 
2032     visit_type_uint16(v, name, &value, errp);
2033 }
2034 
2035 static const PropertyInfo prop_cbop_blksize = {
2036     .name = "cbop_blocksize",
2037     .get = prop_cbop_blksize_get,
2038     .set = prop_cbop_blksize_set,
2039 };
2040 
2041 static void prop_cboz_blksize_set(Object *obj, Visitor *v, const char *name,
2042                                   void *opaque, Error **errp)
2043 {
2044     RISCVCPU *cpu = RISCV_CPU(obj);
2045     uint16_t value;
2046 
2047     if (!visit_type_uint16(v, name, &value, errp)) {
2048         return;
2049     }
2050 
2051     if (value != cpu->cfg.cboz_blocksize && riscv_cpu_is_vendor(obj)) {
2052         cpu_set_prop_err(cpu, name, errp);
2053         error_append_hint(errp, "Current '%s' val: %u\n",
2054                           name, cpu->cfg.cboz_blocksize);
2055         return;
2056     }
2057 
2058     cpu_option_add_user_setting(name, value);
2059     cpu->cfg.cboz_blocksize = value;
2060 }
2061 
2062 static void prop_cboz_blksize_get(Object *obj, Visitor *v, const char *name,
2063                          void *opaque, Error **errp)
2064 {
2065     uint16_t value = RISCV_CPU(obj)->cfg.cboz_blocksize;
2066 
2067     visit_type_uint16(v, name, &value, errp);
2068 }
2069 
2070 static const PropertyInfo prop_cboz_blksize = {
2071     .name = "cboz_blocksize",
2072     .get = prop_cboz_blksize_get,
2073     .set = prop_cboz_blksize_set,
2074 };
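/*
 * Illustrative usage: the three block-size properties pair with the
 * Zicbom/Zicbop/Zicboz extensions; further validation of the chosen sizes
 * is assumed to happen elsewhere at realize time. A hypothetical
 * invocation:
 *
 *     qemu-system-riscv64 -cpu rv64,zicbom=on,cbom_blocksize=64
 */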
2075 
2076 static void prop_mvendorid_set(Object *obj, Visitor *v, const char *name,
2077                                void *opaque, Error **errp)
2078 {
2079     bool dynamic_cpu = riscv_cpu_is_dynamic(obj);
2080     RISCVCPU *cpu = RISCV_CPU(obj);
2081     uint32_t prev_val = cpu->cfg.mvendorid;
2082     uint32_t value;
2083 
2084     if (!visit_type_uint32(v, name, &value, errp)) {
2085         return;
2086     }
2087 
2088     if (!dynamic_cpu && prev_val != value) {
2089         error_setg(errp, "Unable to change %s mvendorid (0x%x)",
2090                    object_get_typename(obj), prev_val);
2091         return;
2092     }
2093 
2094     cpu->cfg.mvendorid = value;
2095 }
2096 
2097 static void prop_mvendorid_get(Object *obj, Visitor *v, const char *name,
2098                                void *opaque, Error **errp)
2099 {
2100     uint32_t value = RISCV_CPU(obj)->cfg.mvendorid;
2101 
2102     visit_type_uint32(v, name, &value, errp);
2103 }
2104 
2105 static const PropertyInfo prop_mvendorid = {
2106     .name = "mvendorid",
2107     .get = prop_mvendorid_get,
2108     .set = prop_mvendorid_set,
2109 };
2110 
2111 static void prop_mimpid_set(Object *obj, Visitor *v, const char *name,
2112                             void *opaque, Error **errp)
2113 {
2114     bool dynamic_cpu = riscv_cpu_is_dynamic(obj);
2115     RISCVCPU *cpu = RISCV_CPU(obj);
2116     uint64_t prev_val = cpu->cfg.mimpid;
2117     uint64_t value;
2118 
2119     if (!visit_type_uint64(v, name, &value, errp)) {
2120         return;
2121     }
2122 
2123     if (!dynamic_cpu && prev_val != value) {
2124         error_setg(errp, "Unable to change %s mimpid (0x%" PRIu64 ")",
2125                    object_get_typename(obj), prev_val);
2126         return;
2127     }
2128 
2129     cpu->cfg.mimpid = value;
2130 }
2131 
2132 static void prop_mimpid_get(Object *obj, Visitor *v, const char *name,
2133                             void *opaque, Error **errp)
2134 {
2135     uint64_t value = RISCV_CPU(obj)->cfg.mimpid;
2136 
2137     visit_type_uint64(v, name, &value, errp);
2138 }
2139 
2140 static const PropertyInfo prop_mimpid = {
2141     .name = "mimpid",
2142     .get = prop_mimpid_get,
2143     .set = prop_mimpid_set,
2144 };
2145 
2146 static void prop_marchid_set(Object *obj, Visitor *v, const char *name,
2147                              void *opaque, Error **errp)
2148 {
2149     bool dynamic_cpu = riscv_cpu_is_dynamic(obj);
2150     RISCVCPU *cpu = RISCV_CPU(obj);
2151     uint64_t prev_val = cpu->cfg.marchid;
2152     uint64_t value, invalid_val;
2153     uint32_t mxlen = 0;
2154 
2155     if (!visit_type_uint64(v, name, &value, errp)) {
2156         return;
2157     }
2158 
2159     if (!dynamic_cpu && prev_val != value) {
2160         error_setg(errp, "Unable to change %s marchid (0x%" PRIu64 ")",
2161                    object_get_typename(obj), prev_val);
2162         return;
2163     }
2164 
2165     switch (riscv_cpu_mxl(&cpu->env)) {
2166     case MXL_RV32:
2167         mxlen = 32;
2168         break;
2169     case MXL_RV64:
2170     case MXL_RV128:
2171         mxlen = 64;
2172         break;
2173     default:
2174         g_assert_not_reached();
2175     }
2176 
2177     invalid_val = 1LL << (mxlen - 1);
2178 
2179     if (value == invalid_val) {
2180         error_setg(errp, "Unable to set marchid with MSB (%u) bit set "
2181                          "and the remaining bits zero", mxlen);
2182         return;
2183     }
2184 
2185     cpu->cfg.marchid = value;
2186 }
2187 
2188 static void prop_marchid_get(Object *obj, Visitor *v, const char *name,
2189                              void *opaque, Error **errp)
2190 {
2191     uint64_t value = RISCV_CPU(obj)->cfg.marchid;
2192 
2193     visit_type_uint64(v, name, &value, errp);
2194 }
2195 
2196 static const PropertyInfo prop_marchid = {
2197     .name = "marchid",
2198     .get = prop_marchid_get,
2199     .set = prop_marchid_set,
2200 };
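/*
 * Illustrative sketch: the only rejected marchid value is the one with
 * just the MSB set. For a 64-bit CPU (mxlen = 64) that is
 * invalid_val = 1LL << 63 = 0x8000000000000000; for RV32 it is 0x80000000.
 * Any other value, including 0, is accepted on dynamic CPUs.
 */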
2201 
2202 /*
2203  * RVA22U64 defines some cache-related 'named features':
2204  * Za64rs, Zic64b, Ziccif, Ziccrse, Ziccamoa
2205  * and Zicclsm. They are always implemented in TCG and
2206  * don't need to be manually enabled by the profile.
2207  */
2208 static RISCVCPUProfile RVA22U64 = {
2209     .parent = NULL,
2210     .name = "rva22u64",
2211     .misa_ext = RVI | RVM | RVA | RVF | RVD | RVC | RVU,
2212     .priv_spec = RISCV_PROFILE_ATTR_UNUSED,
2213     .satp_mode = RISCV_PROFILE_ATTR_UNUSED,
2214     .ext_offsets = {
2215         CPU_CFG_OFFSET(ext_zicsr), CPU_CFG_OFFSET(ext_zihintpause),
2216         CPU_CFG_OFFSET(ext_zba), CPU_CFG_OFFSET(ext_zbb),
2217         CPU_CFG_OFFSET(ext_zbs), CPU_CFG_OFFSET(ext_zfhmin),
2218         CPU_CFG_OFFSET(ext_zkt), CPU_CFG_OFFSET(ext_zicntr),
2219         CPU_CFG_OFFSET(ext_zihpm), CPU_CFG_OFFSET(ext_zicbom),
2220         CPU_CFG_OFFSET(ext_zicbop), CPU_CFG_OFFSET(ext_zicboz),
2221 
2222         /* mandatory named features for this profile */
2223         CPU_CFG_OFFSET(ext_zic64b),
2224 
2225         RISCV_PROFILE_EXT_LIST_END
2226     }
2227 };
2228 
2229 /*
2230  * As with RVA22U64, RVA22S64 also defines 'named features'.
2231  *
2232  * Cache related features that we consider enabled since we don't
2233  * implement cache: Ssccptr
2234  *
2235  * Other named features that we already implement: Sstvecd, Sstvala,
2236  * Sscounterenw
2237  *
2238  * The remaining features/extensions comes from RVA22U64.
2239  */
2240 static RISCVCPUProfile RVA22S64 = {
2241     .parent = &RVA22U64,
2242     .name = "rva22s64",
2243     .misa_ext = RVS,
2244     .priv_spec = PRIV_VERSION_1_12_0,
2245     .satp_mode = VM_1_10_SV39,
2246     .ext_offsets = {
2247         /* rva22s64 exts */
2248         CPU_CFG_OFFSET(ext_zifencei), CPU_CFG_OFFSET(ext_svpbmt),
2249         CPU_CFG_OFFSET(ext_svinval), CPU_CFG_OFFSET(ext_svade),
2250 
2251         RISCV_PROFILE_EXT_LIST_END
2252     }
2253 };
2254 
2255 RISCVCPUProfile *riscv_profiles[] = {
2256     &RVA22U64,
2257     &RVA22S64,
2258     NULL,
2259 };
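/*
 * Illustrative usage: profiles are reachable either through the dedicated
 * profile CPU types registered below (e.g. "-cpu rva22s64") or, assuming
 * per-profile boolean flags are registered elsewhere from this array, by
 * enabling them on a bare CPU:
 *
 *     qemu-system-riscv64 -cpu rv64i,rva22u64=on
 */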
2260 
2261 static RISCVCPUImpliedExtsRule RVA_IMPLIED = {
2262     .is_misa = true,
2263     .ext = RVA,
2264     .implied_multi_exts = {
2265         CPU_CFG_OFFSET(ext_zalrsc), CPU_CFG_OFFSET(ext_zaamo),
2266 
2267         RISCV_IMPLIED_EXTS_RULE_END
2268     },
2269 };
2270 
2271 static RISCVCPUImpliedExtsRule RVD_IMPLIED = {
2272     .is_misa = true,
2273     .ext = RVD,
2274     .implied_misa_exts = RVF,
2275     .implied_multi_exts = { RISCV_IMPLIED_EXTS_RULE_END },
2276 };
2277 
2278 static RISCVCPUImpliedExtsRule RVF_IMPLIED = {
2279     .is_misa = true,
2280     .ext = RVF,
2281     .implied_multi_exts = {
2282         CPU_CFG_OFFSET(ext_zicsr),
2283 
2284         RISCV_IMPLIED_EXTS_RULE_END
2285     },
2286 };
2287 
2288 static RISCVCPUImpliedExtsRule RVM_IMPLIED = {
2289     .is_misa = true,
2290     .ext = RVM,
2291     .implied_multi_exts = {
2292         CPU_CFG_OFFSET(ext_zmmul),
2293 
2294         RISCV_IMPLIED_EXTS_RULE_END
2295     },
2296 };
2297 
2298 static RISCVCPUImpliedExtsRule RVV_IMPLIED = {
2299     .is_misa = true,
2300     .ext = RVV,
2301     .implied_multi_exts = {
2302         CPU_CFG_OFFSET(ext_zve64d),
2303 
2304         RISCV_IMPLIED_EXTS_RULE_END
2305     },
2306 };
2307 
2308 static RISCVCPUImpliedExtsRule ZCB_IMPLIED = {
2309     .ext = CPU_CFG_OFFSET(ext_zcb),
2310     .implied_multi_exts = {
2311         CPU_CFG_OFFSET(ext_zca),
2312 
2313         RISCV_IMPLIED_EXTS_RULE_END
2314     },
2315 };
2316 
2317 static RISCVCPUImpliedExtsRule ZCD_IMPLIED = {
2318     .ext = CPU_CFG_OFFSET(ext_zcd),
2319     .implied_misa_exts = RVD,
2320     .implied_multi_exts = {
2321         CPU_CFG_OFFSET(ext_zca),
2322 
2323         RISCV_IMPLIED_EXTS_RULE_END
2324     },
2325 };
2326 
2327 static RISCVCPUImpliedExtsRule ZCE_IMPLIED = {
2328     .ext = CPU_CFG_OFFSET(ext_zce),
2329     .implied_multi_exts = {
2330         CPU_CFG_OFFSET(ext_zcb), CPU_CFG_OFFSET(ext_zcmp),
2331         CPU_CFG_OFFSET(ext_zcmt),
2332 
2333         RISCV_IMPLIED_EXTS_RULE_END
2334     },
2335 };
2336 
2337 static RISCVCPUImpliedExtsRule ZCF_IMPLIED = {
2338     .ext = CPU_CFG_OFFSET(ext_zcf),
2339     .implied_misa_exts = RVF,
2340     .implied_multi_exts = {
2341         CPU_CFG_OFFSET(ext_zca),
2342 
2343         RISCV_IMPLIED_EXTS_RULE_END
2344     },
2345 };
2346 
2347 static RISCVCPUImpliedExtsRule ZCMP_IMPLIED = {
2348     .ext = CPU_CFG_OFFSET(ext_zcmp),
2349     .implied_multi_exts = {
2350         CPU_CFG_OFFSET(ext_zca),
2351 
2352         RISCV_IMPLIED_EXTS_RULE_END
2353     },
2354 };
2355 
2356 static RISCVCPUImpliedExtsRule ZCMT_IMPLIED = {
2357     .ext = CPU_CFG_OFFSET(ext_zcmt),
2358     .implied_multi_exts = {
2359         CPU_CFG_OFFSET(ext_zca), CPU_CFG_OFFSET(ext_zicsr),
2360 
2361         RISCV_IMPLIED_EXTS_RULE_END
2362     },
2363 };
2364 
2365 static RISCVCPUImpliedExtsRule ZDINX_IMPLIED = {
2366     .ext = CPU_CFG_OFFSET(ext_zdinx),
2367     .implied_multi_exts = {
2368         CPU_CFG_OFFSET(ext_zfinx),
2369 
2370         RISCV_IMPLIED_EXTS_RULE_END
2371     },
2372 };
2373 
2374 static RISCVCPUImpliedExtsRule ZFA_IMPLIED = {
2375     .ext = CPU_CFG_OFFSET(ext_zfa),
2376     .implied_misa_exts = RVF,
2377     .implied_multi_exts = { RISCV_IMPLIED_EXTS_RULE_END },
2378 };
2379 
2380 static RISCVCPUImpliedExtsRule ZFBFMIN_IMPLIED = {
2381     .ext = CPU_CFG_OFFSET(ext_zfbfmin),
2382     .implied_misa_exts = RVF,
2383     .implied_multi_exts = { RISCV_IMPLIED_EXTS_RULE_END },
2384 };
2385 
2386 static RISCVCPUImpliedExtsRule ZFH_IMPLIED = {
2387     .ext = CPU_CFG_OFFSET(ext_zfh),
2388     .implied_multi_exts = {
2389         CPU_CFG_OFFSET(ext_zfhmin),
2390 
2391         RISCV_IMPLIED_EXTS_RULE_END
2392     },
2393 };
2394 
2395 static RISCVCPUImpliedExtsRule ZFHMIN_IMPLIED = {
2396     .ext = CPU_CFG_OFFSET(ext_zfhmin),
2397     .implied_misa_exts = RVF,
2398     .implied_multi_exts = { RISCV_IMPLIED_EXTS_RULE_END },
2399 };
2400 
2401 static RISCVCPUImpliedExtsRule ZFINX_IMPLIED = {
2402     .ext = CPU_CFG_OFFSET(ext_zfinx),
2403     .implied_multi_exts = {
2404         CPU_CFG_OFFSET(ext_zicsr),
2405 
2406         RISCV_IMPLIED_EXTS_RULE_END
2407     },
2408 };
2409 
2410 static RISCVCPUImpliedExtsRule ZHINX_IMPLIED = {
2411     .ext = CPU_CFG_OFFSET(ext_zhinx),
2412     .implied_multi_exts = {
2413         CPU_CFG_OFFSET(ext_zhinxmin),
2414 
2415         RISCV_IMPLIED_EXTS_RULE_END
2416     },
2417 };
2418 
2419 static RISCVCPUImpliedExtsRule ZHINXMIN_IMPLIED = {
2420     .ext = CPU_CFG_OFFSET(ext_zhinxmin),
2421     .implied_multi_exts = {
2422         CPU_CFG_OFFSET(ext_zfinx),
2423 
2424         RISCV_IMPLIED_EXTS_RULE_END
2425     },
2426 };
2427 
2428 static RISCVCPUImpliedExtsRule ZICNTR_IMPLIED = {
2429     .ext = CPU_CFG_OFFSET(ext_zicntr),
2430     .implied_multi_exts = {
2431         CPU_CFG_OFFSET(ext_zicsr),
2432 
2433         RISCV_IMPLIED_EXTS_RULE_END
2434     },
2435 };
2436 
2437 static RISCVCPUImpliedExtsRule ZIHPM_IMPLIED = {
2438     .ext = CPU_CFG_OFFSET(ext_zihpm),
2439     .implied_multi_exts = {
2440         CPU_CFG_OFFSET(ext_zicsr),
2441 
2442         RISCV_IMPLIED_EXTS_RULE_END
2443     },
2444 };
2445 
2446 static RISCVCPUImpliedExtsRule ZK_IMPLIED = {
2447     .ext = CPU_CFG_OFFSET(ext_zk),
2448     .implied_multi_exts = {
2449         CPU_CFG_OFFSET(ext_zkn), CPU_CFG_OFFSET(ext_zkr),
2450         CPU_CFG_OFFSET(ext_zkt),
2451 
2452         RISCV_IMPLIED_EXTS_RULE_END
2453     },
2454 };
2455 
2456 static RISCVCPUImpliedExtsRule ZKN_IMPLIED = {
2457     .ext = CPU_CFG_OFFSET(ext_zkn),
2458     .implied_multi_exts = {
2459         CPU_CFG_OFFSET(ext_zbkb), CPU_CFG_OFFSET(ext_zbkc),
2460         CPU_CFG_OFFSET(ext_zbkx), CPU_CFG_OFFSET(ext_zkne),
2461         CPU_CFG_OFFSET(ext_zknd), CPU_CFG_OFFSET(ext_zknh),
2462 
2463         RISCV_IMPLIED_EXTS_RULE_END
2464     },
2465 };
2466 
2467 static RISCVCPUImpliedExtsRule ZKS_IMPLIED = {
2468     .ext = CPU_CFG_OFFSET(ext_zks),
2469     .implied_multi_exts = {
2470         CPU_CFG_OFFSET(ext_zbkb), CPU_CFG_OFFSET(ext_zbkc),
2471         CPU_CFG_OFFSET(ext_zbkx), CPU_CFG_OFFSET(ext_zksed),
2472         CPU_CFG_OFFSET(ext_zksh),
2473 
2474         RISCV_IMPLIED_EXTS_RULE_END
2475     },
2476 };
2477 
2478 static RISCVCPUImpliedExtsRule ZVBB_IMPLIED = {
2479     .ext = CPU_CFG_OFFSET(ext_zvbb),
2480     .implied_multi_exts = {
2481         CPU_CFG_OFFSET(ext_zvkb),
2482 
2483         RISCV_IMPLIED_EXTS_RULE_END
2484     },
2485 };
2486 
2487 static RISCVCPUImpliedExtsRule ZVE32F_IMPLIED = {
2488     .ext = CPU_CFG_OFFSET(ext_zve32f),
2489     .implied_misa_exts = RVF,
2490     .implied_multi_exts = {
2491         CPU_CFG_OFFSET(ext_zve32x),
2492 
2493         RISCV_IMPLIED_EXTS_RULE_END
2494     },
2495 };
2496 
2497 static RISCVCPUImpliedExtsRule ZVE32X_IMPLIED = {
2498     .ext = CPU_CFG_OFFSET(ext_zve32x),
2499     .implied_multi_exts = {
2500         CPU_CFG_OFFSET(ext_zicsr),
2501 
2502         RISCV_IMPLIED_EXTS_RULE_END
2503     },
2504 };
2505 
2506 static RISCVCPUImpliedExtsRule ZVE64D_IMPLIED = {
2507     .ext = CPU_CFG_OFFSET(ext_zve64d),
2508     .implied_misa_exts = RVD,
2509     .implied_multi_exts = {
2510         CPU_CFG_OFFSET(ext_zve64f),
2511 
2512         RISCV_IMPLIED_EXTS_RULE_END
2513     },
2514 };
2515 
2516 static RISCVCPUImpliedExtsRule ZVE64F_IMPLIED = {
2517     .ext = CPU_CFG_OFFSET(ext_zve64f),
2518     .implied_misa_exts = RVF,
2519     .implied_multi_exts = {
2520         CPU_CFG_OFFSET(ext_zve32f), CPU_CFG_OFFSET(ext_zve64x),
2521 
2522         RISCV_IMPLIED_EXTS_RULE_END
2523     },
2524 };
2525 
2526 static RISCVCPUImpliedExtsRule ZVE64X_IMPLIED = {
2527     .ext = CPU_CFG_OFFSET(ext_zve64x),
2528     .implied_multi_exts = {
2529         CPU_CFG_OFFSET(ext_zve32x),
2530 
2531         RISCV_IMPLIED_EXTS_RULE_END
2532     },
2533 };
2534 
2535 static RISCVCPUImpliedExtsRule ZVFBFMIN_IMPLIED = {
2536     .ext = CPU_CFG_OFFSET(ext_zvfbfmin),
2537     .implied_multi_exts = {
2538         CPU_CFG_OFFSET(ext_zve32f),
2539 
2540         RISCV_IMPLIED_EXTS_RULE_END
2541     },
2542 };
2543 
2544 static RISCVCPUImpliedExtsRule ZVFBFWMA_IMPLIED = {
2545     .ext = CPU_CFG_OFFSET(ext_zvfbfwma),
2546     .implied_multi_exts = {
2547         CPU_CFG_OFFSET(ext_zvfbfmin), CPU_CFG_OFFSET(ext_zfbfmin),
2548 
2549         RISCV_IMPLIED_EXTS_RULE_END
2550     },
2551 };
2552 
2553 static RISCVCPUImpliedExtsRule ZVFH_IMPLIED = {
2554     .ext = CPU_CFG_OFFSET(ext_zvfh),
2555     .implied_multi_exts = {
2556         CPU_CFG_OFFSET(ext_zvfhmin), CPU_CFG_OFFSET(ext_zfhmin),
2557 
2558         RISCV_IMPLIED_EXTS_RULE_END
2559     },
2560 };
2561 
2562 static RISCVCPUImpliedExtsRule ZVFHMIN_IMPLIED = {
2563     .ext = CPU_CFG_OFFSET(ext_zvfhmin),
2564     .implied_multi_exts = {
2565         CPU_CFG_OFFSET(ext_zve32f),
2566 
2567         RISCV_IMPLIED_EXTS_RULE_END
2568     },
2569 };
2570 
2571 static RISCVCPUImpliedExtsRule ZVKN_IMPLIED = {
2572     .ext = CPU_CFG_OFFSET(ext_zvkn),
2573     .implied_multi_exts = {
2574         CPU_CFG_OFFSET(ext_zvkned), CPU_CFG_OFFSET(ext_zvknhb),
2575         CPU_CFG_OFFSET(ext_zvkb), CPU_CFG_OFFSET(ext_zvkt),
2576 
2577         RISCV_IMPLIED_EXTS_RULE_END
2578     },
2579 };
2580 
2581 static RISCVCPUImpliedExtsRule ZVKNC_IMPLIED = {
2582     .ext = CPU_CFG_OFFSET(ext_zvknc),
2583     .implied_multi_exts = {
2584         CPU_CFG_OFFSET(ext_zvkn), CPU_CFG_OFFSET(ext_zvbc),
2585 
2586         RISCV_IMPLIED_EXTS_RULE_END
2587     },
2588 };
2589 
2590 static RISCVCPUImpliedExtsRule ZVKNG_IMPLIED = {
2591     .ext = CPU_CFG_OFFSET(ext_zvkng),
2592     .implied_multi_exts = {
2593         CPU_CFG_OFFSET(ext_zvkn), CPU_CFG_OFFSET(ext_zvkg),
2594 
2595         RISCV_IMPLIED_EXTS_RULE_END
2596     },
2597 };
2598 
2599 static RISCVCPUImpliedExtsRule ZVKNHB_IMPLIED = {
2600     .ext = CPU_CFG_OFFSET(ext_zvknhb),
2601     .implied_multi_exts = {
2602         CPU_CFG_OFFSET(ext_zve64x),
2603 
2604         RISCV_IMPLIED_EXTS_RULE_END
2605     },
2606 };
2607 
2608 static RISCVCPUImpliedExtsRule ZVKS_IMPLIED = {
2609     .ext = CPU_CFG_OFFSET(ext_zvks),
2610     .implied_multi_exts = {
2611         CPU_CFG_OFFSET(ext_zvksed), CPU_CFG_OFFSET(ext_zvksh),
2612         CPU_CFG_OFFSET(ext_zvkb), CPU_CFG_OFFSET(ext_zvkt),
2613 
2614         RISCV_IMPLIED_EXTS_RULE_END
2615     },
2616 };
2617 
2618 static RISCVCPUImpliedExtsRule ZVKSC_IMPLIED = {
2619     .ext = CPU_CFG_OFFSET(ext_zvksc),
2620     .implied_multi_exts = {
2621         CPU_CFG_OFFSET(ext_zvks), CPU_CFG_OFFSET(ext_zvbc),
2622 
2623         RISCV_IMPLIED_EXTS_RULE_END
2624     },
2625 };
2626 
2627 static RISCVCPUImpliedExtsRule ZVKSG_IMPLIED = {
2628     .ext = CPU_CFG_OFFSET(ext_zvksg),
2629     .implied_multi_exts = {
2630         CPU_CFG_OFFSET(ext_zvks), CPU_CFG_OFFSET(ext_zvkg),
2631 
2632         RISCV_IMPLIED_EXTS_RULE_END
2633     },
2634 };
2635 
2636 RISCVCPUImpliedExtsRule *riscv_misa_ext_implied_rules[] = {
2637     &RVA_IMPLIED, &RVD_IMPLIED, &RVF_IMPLIED,
2638     &RVM_IMPLIED, &RVV_IMPLIED, NULL
2639 };
2640 
2641 RISCVCPUImpliedExtsRule *riscv_multi_ext_implied_rules[] = {
2642     &ZCB_IMPLIED, &ZCD_IMPLIED, &ZCE_IMPLIED,
2643     &ZCF_IMPLIED, &ZCMP_IMPLIED, &ZCMT_IMPLIED,
2644     &ZDINX_IMPLIED, &ZFA_IMPLIED, &ZFBFMIN_IMPLIED,
2645     &ZFH_IMPLIED, &ZFHMIN_IMPLIED, &ZFINX_IMPLIED,
2646     &ZHINX_IMPLIED, &ZHINXMIN_IMPLIED, &ZICNTR_IMPLIED,
2647     &ZIHPM_IMPLIED, &ZK_IMPLIED, &ZKN_IMPLIED,
2648     &ZKS_IMPLIED, &ZVBB_IMPLIED, &ZVE32F_IMPLIED,
2649     &ZVE32X_IMPLIED, &ZVE64D_IMPLIED, &ZVE64F_IMPLIED,
2650     &ZVE64X_IMPLIED, &ZVFBFMIN_IMPLIED, &ZVFBFWMA_IMPLIED,
2651     &ZVFH_IMPLIED, &ZVFHMIN_IMPLIED, &ZVKN_IMPLIED,
2652     &ZVKNC_IMPLIED, &ZVKNG_IMPLIED, &ZVKNHB_IMPLIED,
2653     &ZVKS_IMPLIED,  &ZVKSC_IMPLIED, &ZVKSG_IMPLIED,
2654     NULL
2655 };
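/*
 * Illustrative sketch: these tables only record direct implications; the
 * code consuming them (elsewhere) is assumed to apply them until a fixed
 * point is reached. For example, enabling zvksc implies zvks and zvbc,
 * and zvks in turn implies zvksed, zvksh, zvkb and zvkt.
 */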
2656 
2657 static Property riscv_cpu_properties[] = {
2658     DEFINE_PROP_BOOL("debug", RISCVCPU, cfg.debug, true),
2659 
2660     {.name = "pmu-mask", .info = &prop_pmu_mask},
2661     {.name = "pmu-num", .info = &prop_pmu_num}, /* Deprecated */
2662 
2663     {.name = "mmu", .info = &prop_mmu},
2664     {.name = "pmp", .info = &prop_pmp},
2665 
2666     {.name = "priv_spec", .info = &prop_priv_spec},
2667     {.name = "vext_spec", .info = &prop_vext_spec},
2668 
2669     {.name = "vlen", .info = &prop_vlen},
2670     {.name = "elen", .info = &prop_elen},
2671 
2672     {.name = "cbom_blocksize", .info = &prop_cbom_blksize},
2673     {.name = "cbop_blocksize", .info = &prop_cbop_blksize},
2674     {.name = "cboz_blocksize", .info = &prop_cboz_blksize},
2675 
2676      {.name = "mvendorid", .info = &prop_mvendorid},
2677      {.name = "mimpid", .info = &prop_mimpid},
2678      {.name = "marchid", .info = &prop_marchid},
2679 
2680 #ifndef CONFIG_USER_ONLY
2681     DEFINE_PROP_UINT64("resetvec", RISCVCPU, env.resetvec, DEFAULT_RSTVEC),
2682 #endif
2683 
2684     DEFINE_PROP_BOOL("short-isa-string", RISCVCPU, cfg.short_isa_string, false),
2685 
2686     DEFINE_PROP_BOOL("rvv_ta_all_1s", RISCVCPU, cfg.rvv_ta_all_1s, false),
2687     DEFINE_PROP_BOOL("rvv_ma_all_1s", RISCVCPU, cfg.rvv_ma_all_1s, false),
2688 
2689     /*
2690      * write_misa() is marked as experimental for now so mark
2691      * it with 'x-' and default to 'false'.
2692      */
2693     DEFINE_PROP_BOOL("x-misa-w", RISCVCPU, cfg.misa_w, false),
2694     DEFINE_PROP_END_OF_LIST(),
2695 };
2696 
2697 #if defined(TARGET_RISCV64)
2698 static void rva22u64_profile_cpu_init(Object *obj)
2699 {
2700     rv64i_bare_cpu_init(obj);
2701 
2702     RVA22U64.enabled = true;
2703 }
2704 
2705 static void rva22s64_profile_cpu_init(Object *obj)
2706 {
2707     rv64i_bare_cpu_init(obj);
2708 
2709     RVA22S64.enabled = true;
2710 }
2711 #endif
2712 
2713 static const gchar *riscv_gdb_arch_name(CPUState *cs)
2714 {
2715     RISCVCPU *cpu = RISCV_CPU(cs);
2716     CPURISCVState *env = &cpu->env;
2717 
2718     switch (riscv_cpu_mxl(env)) {
2719     case MXL_RV32:
2720         return "riscv:rv32";
2721     case MXL_RV64:
2722     case MXL_RV128:
2723         return "riscv:rv64";
2724     default:
2725         g_assert_not_reached();
2726     }
2727 }
2728 
2729 #ifndef CONFIG_USER_ONLY
2730 static int64_t riscv_get_arch_id(CPUState *cs)
2731 {
2732     RISCVCPU *cpu = RISCV_CPU(cs);
2733 
2734     return cpu->env.mhartid;
2735 }
2736 
2737 #include "hw/core/sysemu-cpu-ops.h"
2738 
2739 static const struct SysemuCPUOps riscv_sysemu_ops = {
2740     .get_phys_page_debug = riscv_cpu_get_phys_page_debug,
2741     .write_elf64_note = riscv_cpu_write_elf64_note,
2742     .write_elf32_note = riscv_cpu_write_elf32_note,
2743     .legacy_vmsd = &vmstate_riscv_cpu,
2744 };
2745 #endif
2746 
2747 static void riscv_cpu_common_class_init(ObjectClass *c, void *data)
2748 {
2749     RISCVCPUClass *mcc = RISCV_CPU_CLASS(c);
2750     CPUClass *cc = CPU_CLASS(c);
2751     DeviceClass *dc = DEVICE_CLASS(c);
2752     ResettableClass *rc = RESETTABLE_CLASS(c);
2753 
2754     device_class_set_parent_realize(dc, riscv_cpu_realize,
2755                                     &mcc->parent_realize);
2756 
2757     resettable_class_set_parent_phases(rc, NULL, riscv_cpu_reset_hold, NULL,
2758                                        &mcc->parent_phases);
2759 
2760     cc->class_by_name = riscv_cpu_class_by_name;
2761     cc->has_work = riscv_cpu_has_work;
2762     cc->mmu_index = riscv_cpu_mmu_index;
2763     cc->dump_state = riscv_cpu_dump_state;
2764     cc->set_pc = riscv_cpu_set_pc;
2765     cc->get_pc = riscv_cpu_get_pc;
2766     cc->gdb_read_register = riscv_cpu_gdb_read_register;
2767     cc->gdb_write_register = riscv_cpu_gdb_write_register;
2768     cc->gdb_stop_before_watchpoint = true;
2769     cc->disas_set_info = riscv_cpu_disas_set_info;
2770 #ifndef CONFIG_USER_ONLY
2771     cc->sysemu_ops = &riscv_sysemu_ops;
2772     cc->get_arch_id = riscv_get_arch_id;
2773 #endif
2774     cc->gdb_arch_name = riscv_gdb_arch_name;
2775 
2776     device_class_set_props(dc, riscv_cpu_properties);
2777 }
2778 
2779 static void riscv_cpu_class_init(ObjectClass *c, void *data)
2780 {
2781     RISCVCPUClass *mcc = RISCV_CPU_CLASS(c);
2782 
2783     mcc->misa_mxl_max = (uint32_t)(uintptr_t)data;
2784     riscv_cpu_validate_misa_mxl(mcc);
2785 }
2786 
2787 static void riscv_isa_string_ext(RISCVCPU *cpu, char **isa_str,
2788                                  int max_str_len)
2789 {
2790     const RISCVIsaExtData *edata;
2791     char *old = *isa_str;
2792     char *new = *isa_str;
2793 
2794     for (edata = isa_edata_arr; edata && edata->name; edata++) {
2795         if (isa_ext_is_enabled(cpu, edata->ext_enable_offset)) {
2796             new = g_strconcat(old, "_", edata->name, NULL);
2797             g_free(old);
2798             old = new;
2799         }
2800     }
2801 
2802     *isa_str = new;
2803 }
2804 
2805 char *riscv_isa_string(RISCVCPU *cpu)
2806 {
2807     RISCVCPUClass *mcc = RISCV_CPU_GET_CLASS(cpu);
2808     int i;
2809     const size_t maxlen = sizeof("rv128") + sizeof(riscv_single_letter_exts);
2810     char *isa_str = g_new(char, maxlen);
2811     int xlen = riscv_cpu_max_xlen(mcc);
2812     char *p = isa_str + snprintf(isa_str, maxlen, "rv%d", xlen);
2813 
2814     for (i = 0; i < sizeof(riscv_single_letter_exts) - 1; i++) {
2815         if (cpu->env.misa_ext & RV(riscv_single_letter_exts[i])) {
2816             *p++ = qemu_tolower(riscv_single_letter_exts[i]);
2817         }
2818     }
2819     *p = '\0';
2820     if (!cpu->cfg.short_isa_string) {
2821         riscv_isa_string_ext(cpu, &isa_str, maxlen);
2822     }
2823     return isa_str;
2824 }
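/*
 * Illustrative sketch: the generated string is "rv<xlen>" plus the enabled
 * single-letter extensions, followed (unless short-isa-string is set) by
 * the enabled multi-letter extensions from isa_edata_arr, each prefixed
 * with '_'. A plausible result for a CPU with IMAFDC plus Zicsr/Zifencei
 * enabled would be:
 *
 *     "rv64imafdc_zicsr_zifencei"
 *
 * The exact tail depends on which extensions are enabled and on the
 * ordering of isa_edata_arr.
 */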
2825 
2826 #ifndef CONFIG_USER_ONLY
2827 static char **riscv_isa_extensions_list(RISCVCPU *cpu, int *count)
2828 {
2829     int maxlen = ARRAY_SIZE(riscv_single_letter_exts) + ARRAY_SIZE(isa_edata_arr);
2830     char **extensions = g_new(char *, maxlen);
2831 
2832     for (int i = 0; i < sizeof(riscv_single_letter_exts) - 1; i++) {
2833         if (cpu->env.misa_ext & RV(riscv_single_letter_exts[i])) {
2834             extensions[*count] = g_new(char, 2);
2835             snprintf(extensions[*count], 2, "%c",
2836                      qemu_tolower(riscv_single_letter_exts[i]));
2837             (*count)++;
2838         }
2839     }
2840 
2841     for (const RISCVIsaExtData *edata = isa_edata_arr; edata->name; edata++) {
2842         if (isa_ext_is_enabled(cpu, edata->ext_enable_offset)) {
2843             extensions[*count] = g_strdup(edata->name);
2844             (*count)++;
2845         }
2846     }
2847 
2848     return extensions;
2849 }
2850 
2851 void riscv_isa_write_fdt(RISCVCPU *cpu, void *fdt, char *nodename)
2852 {
2853     RISCVCPUClass *mcc = RISCV_CPU_GET_CLASS(cpu);
2854     const size_t maxlen = sizeof("rv128i");
2855     g_autofree char *isa_base = g_new(char, maxlen);
2856     g_autofree char *riscv_isa = NULL;
2857     char **isa_extensions;
2858     int count = 0;
2859     int xlen = riscv_cpu_max_xlen(mcc);
2860 
2861     riscv_isa = riscv_isa_string(cpu);
2862     qemu_fdt_setprop_string(fdt, nodename, "riscv,isa", riscv_isa);
2863 
2864     snprintf(isa_base, maxlen, "rv%di", xlen);
2865     qemu_fdt_setprop_string(fdt, nodename, "riscv,isa-base", isa_base);
2866 
2867     isa_extensions = riscv_isa_extensions_list(cpu, &count);
2868     qemu_fdt_setprop_string_array(fdt, nodename, "riscv,isa-extensions",
2869                                   isa_extensions, count);
2870 
2871     for (int i = 0; i < count; i++) {
2872         g_free(isa_extensions[i]);
2873     }
2874 
2875     g_free(isa_extensions);
2876 }
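/*
 * Illustrative sketch: for a typical rv64 CPU this ends up writing device
 * tree properties along the lines of:
 *
 *     riscv,isa            = "rv64imafdc_zicsr_zifencei...";
 *     riscv,isa-base       = "rv64i";
 *     riscv,isa-extensions = "i", "m", "a", "f", "d", "c", "zicsr", ...;
 *
 * with the actual lists depending on the enabled extensions.
 */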
2877 #endif
2878 
2879 #define DEFINE_CPU(type_name, misa_mxl_max, initfn)         \
2880     {                                                       \
2881         .name = (type_name),                                \
2882         .parent = TYPE_RISCV_CPU,                           \
2883         .instance_init = (initfn),                          \
2884         .class_init = riscv_cpu_class_init,                 \
2885         .class_data = (void *)(misa_mxl_max)                \
2886     }
2887 
2888 #define DEFINE_DYNAMIC_CPU(type_name, misa_mxl_max, initfn) \
2889     {                                                       \
2890         .name = (type_name),                                \
2891         .parent = TYPE_RISCV_DYNAMIC_CPU,                   \
2892         .instance_init = (initfn),                          \
2893         .class_init = riscv_cpu_class_init,                 \
2894         .class_data = (void *)(misa_mxl_max)                \
2895     }
2896 
2897 #define DEFINE_VENDOR_CPU(type_name, misa_mxl_max, initfn)  \
2898     {                                                       \
2899         .name = (type_name),                                \
2900         .parent = TYPE_RISCV_VENDOR_CPU,                    \
2901         .instance_init = (initfn),                          \
2902         .class_init = riscv_cpu_class_init,                 \
2903         .class_data = (void *)(misa_mxl_max)                \
2904     }
2905 
2906 #define DEFINE_BARE_CPU(type_name, misa_mxl_max, initfn)    \
2907     {                                                       \
2908         .name = (type_name),                                \
2909         .parent = TYPE_RISCV_BARE_CPU,                      \
2910         .instance_init = (initfn),                          \
2911         .class_init = riscv_cpu_class_init,                 \
2912         .class_data = (void *)(misa_mxl_max)                \
2913     }
2914 
2915 #define DEFINE_PROFILE_CPU(type_name, misa_mxl_max, initfn) \
2916     {                                                       \
2917         .name = (type_name),                                \
2918         .parent = TYPE_RISCV_BARE_CPU,                      \
2919         .instance_init = (initfn),                          \
2920         .class_init = riscv_cpu_class_init,                 \
2921         .class_data = (void *)(misa_mxl_max)                \
2922     }
2923 
2924 static const TypeInfo riscv_cpu_type_infos[] = {
2925     {
2926         .name = TYPE_RISCV_CPU,
2927         .parent = TYPE_CPU,
2928         .instance_size = sizeof(RISCVCPU),
2929         .instance_align = __alignof(RISCVCPU),
2930         .instance_init = riscv_cpu_init,
2931         .instance_post_init = riscv_cpu_post_init,
2932         .abstract = true,
2933         .class_size = sizeof(RISCVCPUClass),
2934         .class_init = riscv_cpu_common_class_init,
2935     },
2936     {
2937         .name = TYPE_RISCV_DYNAMIC_CPU,
2938         .parent = TYPE_RISCV_CPU,
2939         .abstract = true,
2940     },
2941     {
2942         .name = TYPE_RISCV_VENDOR_CPU,
2943         .parent = TYPE_RISCV_CPU,
2944         .abstract = true,
2945     },
2946     {
2947         .name = TYPE_RISCV_BARE_CPU,
2948         .parent = TYPE_RISCV_CPU,
2949         .instance_init = riscv_bare_cpu_init,
2950         .abstract = true,
2951     },
2952 #if defined(TARGET_RISCV32)
2953     DEFINE_DYNAMIC_CPU(TYPE_RISCV_CPU_ANY,       MXL_RV32,  riscv_any_cpu_init),
2954     DEFINE_DYNAMIC_CPU(TYPE_RISCV_CPU_MAX,       MXL_RV32,  riscv_max_cpu_init),
2955     DEFINE_DYNAMIC_CPU(TYPE_RISCV_CPU_BASE32,    MXL_RV32,  rv32_base_cpu_init),
2956     DEFINE_VENDOR_CPU(TYPE_RISCV_CPU_IBEX,       MXL_RV32,  rv32_ibex_cpu_init),
2957     DEFINE_VENDOR_CPU(TYPE_RISCV_CPU_SIFIVE_E31, MXL_RV32,  rv32_sifive_e_cpu_init),
2958     DEFINE_VENDOR_CPU(TYPE_RISCV_CPU_SIFIVE_E34, MXL_RV32,  rv32_imafcu_nommu_cpu_init),
2959     DEFINE_VENDOR_CPU(TYPE_RISCV_CPU_SIFIVE_U34, MXL_RV32,  rv32_sifive_u_cpu_init),
2960     DEFINE_BARE_CPU(TYPE_RISCV_CPU_RV32I,        MXL_RV32,  rv32i_bare_cpu_init),
2961     DEFINE_BARE_CPU(TYPE_RISCV_CPU_RV32E,        MXL_RV32,  rv32e_bare_cpu_init),
2962 #elif defined(TARGET_RISCV64)
2963     DEFINE_DYNAMIC_CPU(TYPE_RISCV_CPU_ANY,       MXL_RV64,  riscv_any_cpu_init),
2964     DEFINE_DYNAMIC_CPU(TYPE_RISCV_CPU_MAX,       MXL_RV64,  riscv_max_cpu_init),
2965     DEFINE_DYNAMIC_CPU(TYPE_RISCV_CPU_BASE64,    MXL_RV64,  rv64_base_cpu_init),
2966     DEFINE_VENDOR_CPU(TYPE_RISCV_CPU_SIFIVE_E51, MXL_RV64,  rv64_sifive_e_cpu_init),
2967     DEFINE_VENDOR_CPU(TYPE_RISCV_CPU_SIFIVE_U54, MXL_RV64,  rv64_sifive_u_cpu_init),
2968     DEFINE_VENDOR_CPU(TYPE_RISCV_CPU_SHAKTI_C,   MXL_RV64,  rv64_sifive_u_cpu_init),
2969     DEFINE_VENDOR_CPU(TYPE_RISCV_CPU_THEAD_C906, MXL_RV64,  rv64_thead_c906_cpu_init),
2970     DEFINE_VENDOR_CPU(TYPE_RISCV_CPU_VEYRON_V1,  MXL_RV64,  rv64_veyron_v1_cpu_init),
2971 #ifdef CONFIG_TCG
2972     DEFINE_DYNAMIC_CPU(TYPE_RISCV_CPU_BASE128,   MXL_RV128, rv128_base_cpu_init),
2973 #endif /* CONFIG_TCG */
2974     DEFINE_BARE_CPU(TYPE_RISCV_CPU_RV64I,        MXL_RV64,  rv64i_bare_cpu_init),
2975     DEFINE_BARE_CPU(TYPE_RISCV_CPU_RV64E,        MXL_RV64,  rv64e_bare_cpu_init),
2976     DEFINE_PROFILE_CPU(TYPE_RISCV_CPU_RVA22U64,  MXL_RV64,  rva22u64_profile_cpu_init),
2977     DEFINE_PROFILE_CPU(TYPE_RISCV_CPU_RVA22S64,  MXL_RV64,  rva22s64_profile_cpu_init),
2978 #endif /* TARGET_RISCV64 */
2979 };
2980 
2981 DEFINE_TYPES(riscv_cpu_type_infos)
2982