xref: /openbmc/qemu/target/riscv/cpu.c (revision d98883d1)
1 /*
2  * QEMU RISC-V CPU
3  *
4  * Copyright (c) 2016-2017 Sagar Karandikar, sagark@eecs.berkeley.edu
5  * Copyright (c) 2017-2018 SiFive, Inc.
6  *
7  * This program is free software; you can redistribute it and/or modify it
8  * under the terms and conditions of the GNU General Public License,
9  * version 2 or later, as published by the Free Software Foundation.
10  *
11  * This program is distributed in the hope it will be useful, but WITHOUT
12  * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
13  * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
14  * more details.
15  *
16  * You should have received a copy of the GNU General Public License along with
17  * this program.  If not, see <http://www.gnu.org/licenses/>.
18  */
19 
20 #include "qemu/osdep.h"
21 #include "qemu/qemu-print.h"
22 #include "qemu/ctype.h"
23 #include "qemu/log.h"
24 #include "cpu.h"
25 #include "cpu_vendorid.h"
26 #include "internals.h"
27 #include "exec/exec-all.h"
28 #include "qapi/error.h"
29 #include "qapi/visitor.h"
30 #include "qemu/error-report.h"
31 #include "hw/qdev-properties.h"
32 #include "hw/core/qdev-prop-internal.h"
33 #include "migration/vmstate.h"
34 #include "fpu/softfloat-helpers.h"
35 #include "sysemu/device_tree.h"
36 #include "sysemu/kvm.h"
37 #include "sysemu/tcg.h"
38 #include "kvm/kvm_riscv.h"
39 #include "tcg/tcg-cpu.h"
40 #include "tcg/tcg.h"
41 
42 /* RISC-V CPU definitions */
43 static const char riscv_single_letter_exts[] = "IEMAFDQCBPVH";
44 const uint32_t misa_bits[] = {RVI, RVE, RVM, RVA, RVF, RVD, RVV,
45                               RVC, RVS, RVU, RVH, RVJ, RVG, RVB, 0};
46 
47 /*
48  * From vector_helper.c
49  * Note that vector data is stored in host-endian 64-bit chunks,
50  * so addressing bytes needs a host-endian fixup.
51  */
52 #if HOST_BIG_ENDIAN
53 #define BYTE(x)   ((x) ^ 7)
54 #else
55 #define BYTE(x)   (x)
56 #endif
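/*
 * For illustration: with vector data stored as host-endian 64-bit chunks,
 * byte 'j' of vector register 'i' is addressed the way
 * riscv_cpu_dump_state() does further down:
 *
 *     uint8_t *p = (uint8_t *)env->vreg;
 *     uint8_t b = p[i * vlenb + BYTE(j)];
 *
 * On a little-endian host BYTE(j) == j; on a big-endian host the low three
 * bits of the index are flipped, e.g. BYTE(0) == 7, so guest byte 0 lands
 * on the last byte of the host 64-bit chunk.
 */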
57 
58 bool riscv_cpu_is_32bit(RISCVCPU *cpu)
59 {
60     return riscv_cpu_mxl(&cpu->env) == MXL_RV32;
61 }
62 
63 /* Hash that stores general user set numeric options */
64 static GHashTable *general_user_opts;
65 
66 static void cpu_option_add_user_setting(const char *optname, uint32_t value)
67 {
68     g_hash_table_insert(general_user_opts, (gpointer)optname,
69                         GUINT_TO_POINTER(value));
70 }
71 
72 bool riscv_cpu_option_set(const char *optname)
73 {
74     return g_hash_table_contains(general_user_opts, optname);
75 }
76 
77 #define ISA_EXT_DATA_ENTRY(_name, _min_ver, _prop) \
78     {#_name, _min_ver, CPU_CFG_OFFSET(_prop)}
79 
80 /*
81  * Here are the ordering rules of extension naming defined by the RISC-V
82  * specification:
83  * 1. All extensions should be separated from other multi-letter extensions
84  *    by an underscore.
85  * 2. The first letter following the 'Z' conventionally indicates the most
86  *    closely related alphabetical extension category, IMAFDQLCBKJTPVH.
87  *    If multiple 'Z' extensions are named, they should be ordered first
88  *    by category, then alphabetically within a category.
89  * 3. Standard supervisor-level extensions (starting with 'S') should be
90  *    listed after standard unprivileged extensions.  If multiple
91  *    supervisor-level extensions are listed, they should be ordered
92  *    alphabetically.
93  * 4. Non-standard extensions (starting with 'X') must be listed after all
94  *    standard extensions. They must be separated from other multi-letter
95  *    extensions by an underscore.
96  *
97  * Single-letter extensions are checked in riscv_cpu_validate_misa_priv()
98  * instead.
99  */
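/*
 * Purely illustrative example of an ISA string that follows the rules
 * above (single-letter extensions, then 'Z' extensions ordered by
 * category, then 'S' extensions, then 'X' vendor extensions):
 *
 *     rv64imafdc_zicsr_zifencei_zba_zbb_sstc_svadu_xtheadba
 */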
100 const RISCVIsaExtData isa_edata_arr[] = {
101     ISA_EXT_DATA_ENTRY(zic64b, PRIV_VERSION_1_12_0, ext_zic64b),
102     ISA_EXT_DATA_ENTRY(zicbom, PRIV_VERSION_1_12_0, ext_zicbom),
103     ISA_EXT_DATA_ENTRY(zicbop, PRIV_VERSION_1_12_0, ext_zicbop),
104     ISA_EXT_DATA_ENTRY(zicboz, PRIV_VERSION_1_12_0, ext_zicboz),
105     ISA_EXT_DATA_ENTRY(ziccamoa, PRIV_VERSION_1_11_0, has_priv_1_11),
106     ISA_EXT_DATA_ENTRY(ziccif, PRIV_VERSION_1_11_0, has_priv_1_11),
107     ISA_EXT_DATA_ENTRY(zicclsm, PRIV_VERSION_1_11_0, has_priv_1_11),
108     ISA_EXT_DATA_ENTRY(ziccrse, PRIV_VERSION_1_11_0, has_priv_1_11),
109     ISA_EXT_DATA_ENTRY(zicond, PRIV_VERSION_1_12_0, ext_zicond),
110     ISA_EXT_DATA_ENTRY(zicntr, PRIV_VERSION_1_12_0, ext_zicntr),
111     ISA_EXT_DATA_ENTRY(zicsr, PRIV_VERSION_1_10_0, ext_zicsr),
112     ISA_EXT_DATA_ENTRY(zifencei, PRIV_VERSION_1_10_0, ext_zifencei),
113     ISA_EXT_DATA_ENTRY(zihintntl, PRIV_VERSION_1_10_0, ext_zihintntl),
114     ISA_EXT_DATA_ENTRY(zihintpause, PRIV_VERSION_1_10_0, ext_zihintpause),
115     ISA_EXT_DATA_ENTRY(zihpm, PRIV_VERSION_1_12_0, ext_zihpm),
116     ISA_EXT_DATA_ENTRY(zimop, PRIV_VERSION_1_13_0, ext_zimop),
117     ISA_EXT_DATA_ENTRY(zmmul, PRIV_VERSION_1_12_0, ext_zmmul),
118     ISA_EXT_DATA_ENTRY(za64rs, PRIV_VERSION_1_12_0, has_priv_1_11),
119     ISA_EXT_DATA_ENTRY(zaamo, PRIV_VERSION_1_12_0, ext_zaamo),
120     ISA_EXT_DATA_ENTRY(zacas, PRIV_VERSION_1_12_0, ext_zacas),
121     ISA_EXT_DATA_ENTRY(zalrsc, PRIV_VERSION_1_12_0, ext_zalrsc),
122     ISA_EXT_DATA_ENTRY(zawrs, PRIV_VERSION_1_12_0, ext_zawrs),
123     ISA_EXT_DATA_ENTRY(zfa, PRIV_VERSION_1_12_0, ext_zfa),
124     ISA_EXT_DATA_ENTRY(zfbfmin, PRIV_VERSION_1_12_0, ext_zfbfmin),
125     ISA_EXT_DATA_ENTRY(zfh, PRIV_VERSION_1_11_0, ext_zfh),
126     ISA_EXT_DATA_ENTRY(zfhmin, PRIV_VERSION_1_11_0, ext_zfhmin),
127     ISA_EXT_DATA_ENTRY(zfinx, PRIV_VERSION_1_12_0, ext_zfinx),
128     ISA_EXT_DATA_ENTRY(zdinx, PRIV_VERSION_1_12_0, ext_zdinx),
129     ISA_EXT_DATA_ENTRY(zca, PRIV_VERSION_1_12_0, ext_zca),
130     ISA_EXT_DATA_ENTRY(zcb, PRIV_VERSION_1_12_0, ext_zcb),
131     ISA_EXT_DATA_ENTRY(zcf, PRIV_VERSION_1_12_0, ext_zcf),
132     ISA_EXT_DATA_ENTRY(zcd, PRIV_VERSION_1_12_0, ext_zcd),
133     ISA_EXT_DATA_ENTRY(zce, PRIV_VERSION_1_12_0, ext_zce),
134     ISA_EXT_DATA_ENTRY(zcmp, PRIV_VERSION_1_12_0, ext_zcmp),
135     ISA_EXT_DATA_ENTRY(zcmt, PRIV_VERSION_1_12_0, ext_zcmt),
136     ISA_EXT_DATA_ENTRY(zba, PRIV_VERSION_1_12_0, ext_zba),
137     ISA_EXT_DATA_ENTRY(zbb, PRIV_VERSION_1_12_0, ext_zbb),
138     ISA_EXT_DATA_ENTRY(zbc, PRIV_VERSION_1_12_0, ext_zbc),
139     ISA_EXT_DATA_ENTRY(zbkb, PRIV_VERSION_1_12_0, ext_zbkb),
140     ISA_EXT_DATA_ENTRY(zbkc, PRIV_VERSION_1_12_0, ext_zbkc),
141     ISA_EXT_DATA_ENTRY(zbkx, PRIV_VERSION_1_12_0, ext_zbkx),
142     ISA_EXT_DATA_ENTRY(zbs, PRIV_VERSION_1_12_0, ext_zbs),
143     ISA_EXT_DATA_ENTRY(zk, PRIV_VERSION_1_12_0, ext_zk),
144     ISA_EXT_DATA_ENTRY(zkn, PRIV_VERSION_1_12_0, ext_zkn),
145     ISA_EXT_DATA_ENTRY(zknd, PRIV_VERSION_1_12_0, ext_zknd),
146     ISA_EXT_DATA_ENTRY(zkne, PRIV_VERSION_1_12_0, ext_zkne),
147     ISA_EXT_DATA_ENTRY(zknh, PRIV_VERSION_1_12_0, ext_zknh),
148     ISA_EXT_DATA_ENTRY(zkr, PRIV_VERSION_1_12_0, ext_zkr),
149     ISA_EXT_DATA_ENTRY(zks, PRIV_VERSION_1_12_0, ext_zks),
150     ISA_EXT_DATA_ENTRY(zksed, PRIV_VERSION_1_12_0, ext_zksed),
151     ISA_EXT_DATA_ENTRY(zksh, PRIV_VERSION_1_12_0, ext_zksh),
152     ISA_EXT_DATA_ENTRY(zkt, PRIV_VERSION_1_12_0, ext_zkt),
153     ISA_EXT_DATA_ENTRY(ztso, PRIV_VERSION_1_12_0, ext_ztso),
154     ISA_EXT_DATA_ENTRY(zvbb, PRIV_VERSION_1_12_0, ext_zvbb),
155     ISA_EXT_DATA_ENTRY(zvbc, PRIV_VERSION_1_12_0, ext_zvbc),
156     ISA_EXT_DATA_ENTRY(zve32f, PRIV_VERSION_1_10_0, ext_zve32f),
157     ISA_EXT_DATA_ENTRY(zve32x, PRIV_VERSION_1_10_0, ext_zve32x),
158     ISA_EXT_DATA_ENTRY(zve64f, PRIV_VERSION_1_10_0, ext_zve64f),
159     ISA_EXT_DATA_ENTRY(zve64d, PRIV_VERSION_1_10_0, ext_zve64d),
160     ISA_EXT_DATA_ENTRY(zve64x, PRIV_VERSION_1_10_0, ext_zve64x),
161     ISA_EXT_DATA_ENTRY(zvfbfmin, PRIV_VERSION_1_12_0, ext_zvfbfmin),
162     ISA_EXT_DATA_ENTRY(zvfbfwma, PRIV_VERSION_1_12_0, ext_zvfbfwma),
163     ISA_EXT_DATA_ENTRY(zvfh, PRIV_VERSION_1_12_0, ext_zvfh),
164     ISA_EXT_DATA_ENTRY(zvfhmin, PRIV_VERSION_1_12_0, ext_zvfhmin),
165     ISA_EXT_DATA_ENTRY(zvkb, PRIV_VERSION_1_12_0, ext_zvkb),
166     ISA_EXT_DATA_ENTRY(zvkg, PRIV_VERSION_1_12_0, ext_zvkg),
167     ISA_EXT_DATA_ENTRY(zvkn, PRIV_VERSION_1_12_0, ext_zvkn),
168     ISA_EXT_DATA_ENTRY(zvknc, PRIV_VERSION_1_12_0, ext_zvknc),
169     ISA_EXT_DATA_ENTRY(zvkned, PRIV_VERSION_1_12_0, ext_zvkned),
170     ISA_EXT_DATA_ENTRY(zvkng, PRIV_VERSION_1_12_0, ext_zvkng),
171     ISA_EXT_DATA_ENTRY(zvknha, PRIV_VERSION_1_12_0, ext_zvknha),
172     ISA_EXT_DATA_ENTRY(zvknhb, PRIV_VERSION_1_12_0, ext_zvknhb),
173     ISA_EXT_DATA_ENTRY(zvks, PRIV_VERSION_1_12_0, ext_zvks),
174     ISA_EXT_DATA_ENTRY(zvksc, PRIV_VERSION_1_12_0, ext_zvksc),
175     ISA_EXT_DATA_ENTRY(zvksed, PRIV_VERSION_1_12_0, ext_zvksed),
176     ISA_EXT_DATA_ENTRY(zvksg, PRIV_VERSION_1_12_0, ext_zvksg),
177     ISA_EXT_DATA_ENTRY(zvksh, PRIV_VERSION_1_12_0, ext_zvksh),
178     ISA_EXT_DATA_ENTRY(zvkt, PRIV_VERSION_1_12_0, ext_zvkt),
179     ISA_EXT_DATA_ENTRY(zhinx, PRIV_VERSION_1_12_0, ext_zhinx),
180     ISA_EXT_DATA_ENTRY(zhinxmin, PRIV_VERSION_1_12_0, ext_zhinxmin),
181     ISA_EXT_DATA_ENTRY(smaia, PRIV_VERSION_1_12_0, ext_smaia),
182     ISA_EXT_DATA_ENTRY(smepmp, PRIV_VERSION_1_12_0, ext_smepmp),
183     ISA_EXT_DATA_ENTRY(smstateen, PRIV_VERSION_1_12_0, ext_smstateen),
184     ISA_EXT_DATA_ENTRY(ssaia, PRIV_VERSION_1_12_0, ext_ssaia),
185     ISA_EXT_DATA_ENTRY(ssccptr, PRIV_VERSION_1_11_0, has_priv_1_11),
186     ISA_EXT_DATA_ENTRY(sscofpmf, PRIV_VERSION_1_12_0, ext_sscofpmf),
187     ISA_EXT_DATA_ENTRY(sscounterenw, PRIV_VERSION_1_12_0, has_priv_1_12),
188     ISA_EXT_DATA_ENTRY(sstc, PRIV_VERSION_1_12_0, ext_sstc),
189     ISA_EXT_DATA_ENTRY(sstvala, PRIV_VERSION_1_12_0, has_priv_1_12),
190     ISA_EXT_DATA_ENTRY(sstvecd, PRIV_VERSION_1_12_0, has_priv_1_12),
191     ISA_EXT_DATA_ENTRY(svade, PRIV_VERSION_1_11_0, ext_svade),
192     ISA_EXT_DATA_ENTRY(svadu, PRIV_VERSION_1_12_0, ext_svadu),
193     ISA_EXT_DATA_ENTRY(svinval, PRIV_VERSION_1_12_0, ext_svinval),
194     ISA_EXT_DATA_ENTRY(svnapot, PRIV_VERSION_1_12_0, ext_svnapot),
195     ISA_EXT_DATA_ENTRY(svpbmt, PRIV_VERSION_1_12_0, ext_svpbmt),
196     ISA_EXT_DATA_ENTRY(xtheadba, PRIV_VERSION_1_11_0, ext_xtheadba),
197     ISA_EXT_DATA_ENTRY(xtheadbb, PRIV_VERSION_1_11_0, ext_xtheadbb),
198     ISA_EXT_DATA_ENTRY(xtheadbs, PRIV_VERSION_1_11_0, ext_xtheadbs),
199     ISA_EXT_DATA_ENTRY(xtheadcmo, PRIV_VERSION_1_11_0, ext_xtheadcmo),
200     ISA_EXT_DATA_ENTRY(xtheadcondmov, PRIV_VERSION_1_11_0, ext_xtheadcondmov),
201     ISA_EXT_DATA_ENTRY(xtheadfmemidx, PRIV_VERSION_1_11_0, ext_xtheadfmemidx),
202     ISA_EXT_DATA_ENTRY(xtheadfmv, PRIV_VERSION_1_11_0, ext_xtheadfmv),
203     ISA_EXT_DATA_ENTRY(xtheadmac, PRIV_VERSION_1_11_0, ext_xtheadmac),
204     ISA_EXT_DATA_ENTRY(xtheadmemidx, PRIV_VERSION_1_11_0, ext_xtheadmemidx),
205     ISA_EXT_DATA_ENTRY(xtheadmempair, PRIV_VERSION_1_11_0, ext_xtheadmempair),
206     ISA_EXT_DATA_ENTRY(xtheadsync, PRIV_VERSION_1_11_0, ext_xtheadsync),
207     ISA_EXT_DATA_ENTRY(xventanacondops, PRIV_VERSION_1_12_0, ext_XVentanaCondOps),
208 
209     DEFINE_PROP_END_OF_LIST(),
210 };
211 
212 bool isa_ext_is_enabled(RISCVCPU *cpu, uint32_t ext_offset)
213 {
214     bool *ext_enabled = (void *)&cpu->cfg + ext_offset;
215 
216     return *ext_enabled;
217 }
218 
219 void isa_ext_update_enabled(RISCVCPU *cpu, uint32_t ext_offset, bool en)
220 {
221     bool *ext_enabled = (void *)&cpu->cfg + ext_offset;
222 
223     *ext_enabled = en;
224 }
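/*
 * Both helpers take 'ext_offset' as a byte offset into RISCVCPUConfig --
 * the CPU_CFG_OFFSET() values stored in isa_edata_arr[] above -- so a
 * flag can be read or flipped without naming the field, e.g.:
 *
 *     bool on = isa_ext_is_enabled(cpu, CPU_CFG_OFFSET(ext_zicbom));
 *     isa_ext_update_enabled(cpu, CPU_CFG_OFFSET(ext_zicbom), false);
 *
 * which is equivalent to accessing cpu->cfg.ext_zicbom directly (this
 * sketch assumes CPU_CFG_OFFSET() is an offsetof() into RISCVCPUConfig,
 * as used by ISA_EXT_DATA_ENTRY()).
 */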
225 
226 bool riscv_cpu_is_vendor(Object *cpu_obj)
227 {
228     return object_dynamic_cast(cpu_obj, TYPE_RISCV_VENDOR_CPU) != NULL;
229 }
230 
231 const char * const riscv_int_regnames[] = {
232     "x0/zero", "x1/ra",  "x2/sp",  "x3/gp",  "x4/tp",  "x5/t0",   "x6/t1",
233     "x7/t2",   "x8/s0",  "x9/s1",  "x10/a0", "x11/a1", "x12/a2",  "x13/a3",
234     "x14/a4",  "x15/a5", "x16/a6", "x17/a7", "x18/s2", "x19/s3",  "x20/s4",
235     "x21/s5",  "x22/s6", "x23/s7", "x24/s8", "x25/s9", "x26/s10", "x27/s11",
236     "x28/t3",  "x29/t4", "x30/t5", "x31/t6"
237 };
238 
239 const char * const riscv_int_regnamesh[] = {
240     "x0h/zeroh", "x1h/rah",  "x2h/sph",   "x3h/gph",   "x4h/tph",  "x5h/t0h",
241     "x6h/t1h",   "x7h/t2h",  "x8h/s0h",   "x9h/s1h",   "x10h/a0h", "x11h/a1h",
242     "x12h/a2h",  "x13h/a3h", "x14h/a4h",  "x15h/a5h",  "x16h/a6h", "x17h/a7h",
243     "x18h/s2h",  "x19h/s3h", "x20h/s4h",  "x21h/s5h",  "x22h/s6h", "x23h/s7h",
244     "x24h/s8h",  "x25h/s9h", "x26h/s10h", "x27h/s11h", "x28h/t3h", "x29h/t4h",
245     "x30h/t5h",  "x31h/t6h"
246 };
247 
248 const char * const riscv_fpr_regnames[] = {
249     "f0/ft0",   "f1/ft1",  "f2/ft2",   "f3/ft3",   "f4/ft4",  "f5/ft5",
250     "f6/ft6",   "f7/ft7",  "f8/fs0",   "f9/fs1",   "f10/fa0", "f11/fa1",
251     "f12/fa2",  "f13/fa3", "f14/fa4",  "f15/fa5",  "f16/fa6", "f17/fa7",
252     "f18/fs2",  "f19/fs3", "f20/fs4",  "f21/fs5",  "f22/fs6", "f23/fs7",
253     "f24/fs8",  "f25/fs9", "f26/fs10", "f27/fs11", "f28/ft8", "f29/ft9",
254     "f30/ft10", "f31/ft11"
255 };
256 
257 const char * const riscv_rvv_regnames[] = {
258   "v0",  "v1",  "v2",  "v3",  "v4",  "v5",  "v6",
259   "v7",  "v8",  "v9",  "v10", "v11", "v12", "v13",
260   "v14", "v15", "v16", "v17", "v18", "v19", "v20",
261   "v21", "v22", "v23", "v24", "v25", "v26", "v27",
262   "v28", "v29", "v30", "v31"
263 };
264 
265 static const char * const riscv_excp_names[] = {
266     "misaligned_fetch",
267     "fault_fetch",
268     "illegal_instruction",
269     "breakpoint",
270     "misaligned_load",
271     "fault_load",
272     "misaligned_store",
273     "fault_store",
274     "user_ecall",
275     "supervisor_ecall",
276     "hypervisor_ecall",
277     "machine_ecall",
278     "exec_page_fault",
279     "load_page_fault",
280     "reserved",
281     "store_page_fault",
282     "reserved",
283     "reserved",
284     "reserved",
285     "reserved",
286     "guest_exec_page_fault",
287     "guest_load_page_fault",
288     "reserved",
289     "guest_store_page_fault",
290 };
291 
292 static const char * const riscv_intr_names[] = {
293     "u_software",
294     "s_software",
295     "vs_software",
296     "m_software",
297     "u_timer",
298     "s_timer",
299     "vs_timer",
300     "m_timer",
301     "u_external",
302     "s_external",
303     "vs_external",
304     "m_external",
305     "reserved",
306     "reserved",
307     "reserved",
308     "reserved"
309 };
310 
311 const char *riscv_cpu_get_trap_name(target_ulong cause, bool async)
312 {
313     if (async) {
314         return (cause < ARRAY_SIZE(riscv_intr_names)) ?
315                riscv_intr_names[cause] : "(unknown)";
316     } else {
317         return (cause < ARRAY_SIZE(riscv_excp_names)) ?
318                riscv_excp_names[cause] : "(unknown)";
319     }
320 }
321 
322 void riscv_cpu_set_misa_ext(CPURISCVState *env, uint32_t ext)
323 {
324     env->misa_ext_mask = env->misa_ext = ext;
325 }
326 
327 int riscv_cpu_max_xlen(RISCVCPUClass *mcc)
328 {
329     return 16 << mcc->misa_mxl_max;
330 }
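/*
 * Worked example: misa_mxl_max follows the misa.MXL encoding, so
 * MXL_RV32 (1) gives 16 << 1 = 32, MXL_RV64 (2) gives 64 and
 * MXL_RV128 (3) gives 128.
 */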
331 
332 #ifndef CONFIG_USER_ONLY
333 static uint8_t satp_mode_from_str(const char *satp_mode_str)
334 {
335     if (!strncmp(satp_mode_str, "mbare", 5)) {
336         return VM_1_10_MBARE;
337     }
338 
339     if (!strncmp(satp_mode_str, "sv32", 4)) {
340         return VM_1_10_SV32;
341     }
342 
343     if (!strncmp(satp_mode_str, "sv39", 4)) {
344         return VM_1_10_SV39;
345     }
346 
347     if (!strncmp(satp_mode_str, "sv48", 4)) {
348         return VM_1_10_SV48;
349     }
350 
351     if (!strncmp(satp_mode_str, "sv57", 4)) {
352         return VM_1_10_SV57;
353     }
354 
355     if (!strncmp(satp_mode_str, "sv64", 4)) {
356         return VM_1_10_SV64;
357     }
358 
359     g_assert_not_reached();
360 }
361 
362 uint8_t satp_mode_max_from_map(uint32_t map)
363 {
364     /*
365      * 'map = 0' makes __builtin_clz() undefined behaviour, and the
366      * nominal (31 - 32) result would wrap around anyway. There's no
367      * good result to return if 'map = 0' (e.g. returning 0 will be
368      * ambiguous with the result for 'map = 1').
369      *
370      * Assert out if map = 0. Callers will have to deal with
371      * it outside of this function.
372      */
373     g_assert(map > 0);
374 
375     /* map here has at least one bit set, so no problem with clz */
376     return 31 - __builtin_clz(map);
377 }
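/*
 * Example: for a map with only the SV39 and SV48 bits set, the highest
 * set bit wins and VM_1_10_SV48 is returned -- '31 - clz(map)' is simply
 * the index of the most-significant set bit.
 */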
378 
379 const char *satp_mode_str(uint8_t satp_mode, bool is_32_bit)
380 {
381     if (is_32_bit) {
382         switch (satp_mode) {
383         case VM_1_10_SV32:
384             return "sv32";
385         case VM_1_10_MBARE:
386             return "none";
387         }
388     } else {
389         switch (satp_mode) {
390         case VM_1_10_SV64:
391             return "sv64";
392         case VM_1_10_SV57:
393             return "sv57";
394         case VM_1_10_SV48:
395             return "sv48";
396         case VM_1_10_SV39:
397             return "sv39";
398         case VM_1_10_MBARE:
399             return "none";
400         }
401     }
402 
403     g_assert_not_reached();
404 }
405 
406 static void set_satp_mode_max_supported(RISCVCPU *cpu,
407                                         uint8_t satp_mode)
408 {
409     bool rv32 = riscv_cpu_mxl(&cpu->env) == MXL_RV32;
410     const bool *valid_vm = rv32 ? valid_vm_1_10_32 : valid_vm_1_10_64;
411 
412     for (int i = 0; i <= satp_mode; ++i) {
413         if (valid_vm[i]) {
414             cpu->cfg.satp_mode.supported |= (1 << i);
415         }
416     }
417 }
418 
419 /* Set the satp mode to the max supported */
420 static void set_satp_mode_default_map(RISCVCPU *cpu)
421 {
422     /*
423      * Bare CPUs do not default to the max available.
424      * Users must set a valid satp_mode on the command
425      * line.
426      */
427     if (object_dynamic_cast(OBJECT(cpu), TYPE_RISCV_BARE_CPU) != NULL) {
428         warn_report("No satp mode set. Defaulting to 'bare'");
429         cpu->cfg.satp_mode.map = (1 << VM_1_10_MBARE);
430         return;
431     }
432 
433     cpu->cfg.satp_mode.map = cpu->cfg.satp_mode.supported;
434 }
435 #endif
436 
437 static void riscv_any_cpu_init(Object *obj)
438 {
439     RISCVCPU *cpu = RISCV_CPU(obj);
440     CPURISCVState *env = &cpu->env;
441     riscv_cpu_set_misa_ext(env, RVI | RVM | RVA | RVF | RVD | RVC | RVU);
442 
443 #ifndef CONFIG_USER_ONLY
444     set_satp_mode_max_supported(RISCV_CPU(obj),
445         riscv_cpu_mxl(&RISCV_CPU(obj)->env) == MXL_RV32 ?
446         VM_1_10_SV32 : VM_1_10_SV57);
447 #endif
448 
449     env->priv_ver = PRIV_VERSION_LATEST;
450 
451     /* inherited from parent obj via riscv_cpu_init() */
452     cpu->cfg.ext_zifencei = true;
453     cpu->cfg.ext_zicsr = true;
454     cpu->cfg.mmu = true;
455     cpu->cfg.pmp = true;
456 }
457 
458 static void riscv_max_cpu_init(Object *obj)
459 {
460     RISCVCPU *cpu = RISCV_CPU(obj);
461     CPURISCVState *env = &cpu->env;
462 
463     cpu->cfg.mmu = true;
464     cpu->cfg.pmp = true;
465 
466     env->priv_ver = PRIV_VERSION_LATEST;
467 #ifndef CONFIG_USER_ONLY
468 #ifdef TARGET_RISCV32
469     set_satp_mode_max_supported(cpu, VM_1_10_SV32);
470 #else
471     set_satp_mode_max_supported(cpu, VM_1_10_SV57);
472 #endif
473 #endif
474 }
475 
476 #if defined(TARGET_RISCV64)
477 static void rv64_base_cpu_init(Object *obj)
478 {
479     RISCVCPU *cpu = RISCV_CPU(obj);
480     CPURISCVState *env = &cpu->env;
481 
482     cpu->cfg.mmu = true;
483     cpu->cfg.pmp = true;
484 
485     /* Set latest version of privileged specification */
486     env->priv_ver = PRIV_VERSION_LATEST;
487 #ifndef CONFIG_USER_ONLY
488     set_satp_mode_max_supported(RISCV_CPU(obj), VM_1_10_SV57);
489 #endif
490 }
491 
492 static void rv64_sifive_u_cpu_init(Object *obj)
493 {
494     RISCVCPU *cpu = RISCV_CPU(obj);
495     CPURISCVState *env = &cpu->env;
496     riscv_cpu_set_misa_ext(env, RVI | RVM | RVA | RVF | RVD | RVC | RVS | RVU);
497     env->priv_ver = PRIV_VERSION_1_10_0;
498 #ifndef CONFIG_USER_ONLY
499     set_satp_mode_max_supported(RISCV_CPU(obj), VM_1_10_SV39);
500 #endif
501 
502     /* inherited from parent obj via riscv_cpu_init() */
503     cpu->cfg.ext_zifencei = true;
504     cpu->cfg.ext_zicsr = true;
505     cpu->cfg.mmu = true;
506     cpu->cfg.pmp = true;
507 }
508 
509 static void rv64_sifive_e_cpu_init(Object *obj)
510 {
511     CPURISCVState *env = &RISCV_CPU(obj)->env;
512     RISCVCPU *cpu = RISCV_CPU(obj);
513 
514     riscv_cpu_set_misa_ext(env, RVI | RVM | RVA | RVC | RVU);
515     env->priv_ver = PRIV_VERSION_1_10_0;
516 #ifndef CONFIG_USER_ONLY
517     set_satp_mode_max_supported(cpu, VM_1_10_MBARE);
518 #endif
519 
520     /* inherited from parent obj via riscv_cpu_init() */
521     cpu->cfg.ext_zifencei = true;
522     cpu->cfg.ext_zicsr = true;
523     cpu->cfg.pmp = true;
524 }
525 
526 static void rv64_thead_c906_cpu_init(Object *obj)
527 {
528     CPURISCVState *env = &RISCV_CPU(obj)->env;
529     RISCVCPU *cpu = RISCV_CPU(obj);
530 
531     riscv_cpu_set_misa_ext(env, RVG | RVC | RVS | RVU);
532     env->priv_ver = PRIV_VERSION_1_11_0;
533 
534     cpu->cfg.ext_zfa = true;
535     cpu->cfg.ext_zfh = true;
536     cpu->cfg.mmu = true;
537     cpu->cfg.ext_xtheadba = true;
538     cpu->cfg.ext_xtheadbb = true;
539     cpu->cfg.ext_xtheadbs = true;
540     cpu->cfg.ext_xtheadcmo = true;
541     cpu->cfg.ext_xtheadcondmov = true;
542     cpu->cfg.ext_xtheadfmemidx = true;
543     cpu->cfg.ext_xtheadmac = true;
544     cpu->cfg.ext_xtheadmemidx = true;
545     cpu->cfg.ext_xtheadmempair = true;
546     cpu->cfg.ext_xtheadsync = true;
547 
548     cpu->cfg.mvendorid = THEAD_VENDOR_ID;
549 #ifndef CONFIG_USER_ONLY
550     set_satp_mode_max_supported(cpu, VM_1_10_SV39);
551     th_register_custom_csrs(cpu);
552 #endif
553 
554     /* inherited from parent obj via riscv_cpu_init() */
555     cpu->cfg.pmp = true;
556 }
557 
558 static void rv64_veyron_v1_cpu_init(Object *obj)
559 {
560     CPURISCVState *env = &RISCV_CPU(obj)->env;
561     RISCVCPU *cpu = RISCV_CPU(obj);
562 
563     riscv_cpu_set_misa_ext(env, RVG | RVC | RVS | RVU | RVH);
564     env->priv_ver = PRIV_VERSION_1_12_0;
565 
566     /* Enable ISA extensions */
567     cpu->cfg.mmu = true;
568     cpu->cfg.ext_zifencei = true;
569     cpu->cfg.ext_zicsr = true;
570     cpu->cfg.pmp = true;
571     cpu->cfg.ext_zicbom = true;
572     cpu->cfg.cbom_blocksize = 64;
573     cpu->cfg.cboz_blocksize = 64;
574     cpu->cfg.ext_zicboz = true;
575     cpu->cfg.ext_smaia = true;
576     cpu->cfg.ext_ssaia = true;
577     cpu->cfg.ext_sscofpmf = true;
578     cpu->cfg.ext_sstc = true;
579     cpu->cfg.ext_svinval = true;
580     cpu->cfg.ext_svnapot = true;
581     cpu->cfg.ext_svpbmt = true;
582     cpu->cfg.ext_smstateen = true;
583     cpu->cfg.ext_zba = true;
584     cpu->cfg.ext_zbb = true;
585     cpu->cfg.ext_zbc = true;
586     cpu->cfg.ext_zbs = true;
587     cpu->cfg.ext_XVentanaCondOps = true;
588 
589     cpu->cfg.mvendorid = VEYRON_V1_MVENDORID;
590     cpu->cfg.marchid = VEYRON_V1_MARCHID;
591     cpu->cfg.mimpid = VEYRON_V1_MIMPID;
592 
593 #ifndef CONFIG_USER_ONLY
594     set_satp_mode_max_supported(cpu, VM_1_10_SV48);
595 #endif
596 }
597 
598 #ifdef CONFIG_TCG
599 static void rv128_base_cpu_init(Object *obj)
600 {
601     RISCVCPU *cpu = RISCV_CPU(obj);
602     CPURISCVState *env = &cpu->env;
603 
604     if (qemu_tcg_mttcg_enabled()) {
605         /* Missing 128-bit aligned atomics */
606         error_report("128-bit RISC-V currently does not work with Multi "
607                      "Threaded TCG. Please use: -accel tcg,thread=single");
608         exit(EXIT_FAILURE);
609     }
610 
611     cpu->cfg.mmu = true;
612     cpu->cfg.pmp = true;
613 
614     /* Set latest version of privileged specification */
615     env->priv_ver = PRIV_VERSION_LATEST;
616 #ifndef CONFIG_USER_ONLY
617     set_satp_mode_max_supported(RISCV_CPU(obj), VM_1_10_SV57);
618 #endif
619 }
620 #endif /* CONFIG_TCG */
621 
622 static void rv64i_bare_cpu_init(Object *obj)
623 {
624     CPURISCVState *env = &RISCV_CPU(obj)->env;
625     riscv_cpu_set_misa_ext(env, RVI);
626 }
627 
628 static void rv64e_bare_cpu_init(Object *obj)
629 {
630     CPURISCVState *env = &RISCV_CPU(obj)->env;
631     riscv_cpu_set_misa_ext(env, RVE);
632 }
633 
634 #else /* !TARGET_RISCV64 */
635 
636 static void rv32_base_cpu_init(Object *obj)
637 {
638     RISCVCPU *cpu = RISCV_CPU(obj);
639     CPURISCVState *env = &cpu->env;
640 
641     cpu->cfg.mmu = true;
642     cpu->cfg.pmp = true;
643 
644     /* Set latest version of privileged specification */
645     env->priv_ver = PRIV_VERSION_LATEST;
646 #ifndef CONFIG_USER_ONLY
647     set_satp_mode_max_supported(RISCV_CPU(obj), VM_1_10_SV32);
648 #endif
649 }
650 
651 static void rv32_sifive_u_cpu_init(Object *obj)
652 {
653     RISCVCPU *cpu = RISCV_CPU(obj);
654     CPURISCVState *env = &cpu->env;
655     riscv_cpu_set_misa_ext(env, RVI | RVM | RVA | RVF | RVD | RVC | RVS | RVU);
656     env->priv_ver = PRIV_VERSION_1_10_0;
657 #ifndef CONFIG_USER_ONLY
658     set_satp_mode_max_supported(RISCV_CPU(obj), VM_1_10_SV32);
659 #endif
660 
661     /* inherited from parent obj via riscv_cpu_init() */
662     cpu->cfg.ext_zifencei = true;
663     cpu->cfg.ext_zicsr = true;
664     cpu->cfg.mmu = true;
665     cpu->cfg.pmp = true;
666 }
667 
668 static void rv32_sifive_e_cpu_init(Object *obj)
669 {
670     CPURISCVState *env = &RISCV_CPU(obj)->env;
671     RISCVCPU *cpu = RISCV_CPU(obj);
672 
673     riscv_cpu_set_misa_ext(env, RVI | RVM | RVA | RVC | RVU);
674     env->priv_ver = PRIV_VERSION_1_10_0;
675 #ifndef CONFIG_USER_ONLY
676     set_satp_mode_max_supported(cpu, VM_1_10_MBARE);
677 #endif
678 
679     /* inherited from parent obj via riscv_cpu_init() */
680     cpu->cfg.ext_zifencei = true;
681     cpu->cfg.ext_zicsr = true;
682     cpu->cfg.pmp = true;
683 }
684 
685 static void rv32_ibex_cpu_init(Object *obj)
686 {
687     CPURISCVState *env = &RISCV_CPU(obj)->env;
688     RISCVCPU *cpu = RISCV_CPU(obj);
689 
690     riscv_cpu_set_misa_ext(env, RVI | RVM | RVC | RVU);
691     env->priv_ver = PRIV_VERSION_1_12_0;
692 #ifndef CONFIG_USER_ONLY
693     set_satp_mode_max_supported(cpu, VM_1_10_MBARE);
694 #endif
695     /* inherited from parent obj via riscv_cpu_init() */
696     cpu->cfg.ext_zifencei = true;
697     cpu->cfg.ext_zicsr = true;
698     cpu->cfg.pmp = true;
699     cpu->cfg.ext_smepmp = true;
700 }
701 
702 static void rv32_imafcu_nommu_cpu_init(Object *obj)
703 {
704     CPURISCVState *env = &RISCV_CPU(obj)->env;
705     RISCVCPU *cpu = RISCV_CPU(obj);
706 
707     riscv_cpu_set_misa_ext(env, RVI | RVM | RVA | RVF | RVC | RVU);
708     env->priv_ver = PRIV_VERSION_1_10_0;
709 #ifndef CONFIG_USER_ONLY
710     set_satp_mode_max_supported(cpu, VM_1_10_MBARE);
711 #endif
712 
713     /* inherited from parent obj via riscv_cpu_init() */
714     cpu->cfg.ext_zifencei = true;
715     cpu->cfg.ext_zicsr = true;
716     cpu->cfg.pmp = true;
717 }
718 
719 static void rv32i_bare_cpu_init(Object *obj)
720 {
721     CPURISCVState *env = &RISCV_CPU(obj)->env;
722     riscv_cpu_set_misa_ext(env, RVI);
723 }
724 
725 static void rv32e_bare_cpu_init(Object *obj)
726 {
727     CPURISCVState *env = &RISCV_CPU(obj)->env;
728     riscv_cpu_set_misa_ext(env, RVE);
729 }
730 #endif
731 
732 static ObjectClass *riscv_cpu_class_by_name(const char *cpu_model)
733 {
734     ObjectClass *oc;
735     char *typename;
736     char **cpuname;
737 
738     cpuname = g_strsplit(cpu_model, ",", 1);
739     typename = g_strdup_printf(RISCV_CPU_TYPE_NAME("%s"), cpuname[0]);
740     oc = object_class_by_name(typename);
741     g_strfreev(cpuname);
742     g_free(typename);
743 
744     return oc;
745 }
746 
747 char *riscv_cpu_get_name(RISCVCPU *cpu)
748 {
749     RISCVCPUClass *rcc = RISCV_CPU_GET_CLASS(cpu);
750     const char *typename = object_class_get_name(OBJECT_CLASS(rcc));
751 
752     g_assert(g_str_has_suffix(typename, RISCV_CPU_TYPE_SUFFIX));
753 
754     return cpu_model_from_type(typename);
755 }
756 
757 static void riscv_cpu_dump_state(CPUState *cs, FILE *f, int flags)
758 {
759     RISCVCPU *cpu = RISCV_CPU(cs);
760     CPURISCVState *env = &cpu->env;
761     int i, j;
762     uint8_t *p;
763 
764 #if !defined(CONFIG_USER_ONLY)
765     if (riscv_has_ext(env, RVH)) {
766         qemu_fprintf(f, " %s %d\n", "V      =  ", env->virt_enabled);
767     }
768 #endif
769     qemu_fprintf(f, " %s " TARGET_FMT_lx "\n", "pc      ", env->pc);
770 #ifndef CONFIG_USER_ONLY
771     {
772         static const int dump_csrs[] = {
773             CSR_MHARTID,
774             CSR_MSTATUS,
775             CSR_MSTATUSH,
776             /*
777              * CSR_SSTATUS is intentionally omitted here as its value
778              * can be figured out by looking at CSR_MSTATUS
779              */
780             CSR_HSTATUS,
781             CSR_VSSTATUS,
782             CSR_MIP,
783             CSR_MIE,
784             CSR_MIDELEG,
785             CSR_HIDELEG,
786             CSR_MEDELEG,
787             CSR_HEDELEG,
788             CSR_MTVEC,
789             CSR_STVEC,
790             CSR_VSTVEC,
791             CSR_MEPC,
792             CSR_SEPC,
793             CSR_VSEPC,
794             CSR_MCAUSE,
795             CSR_SCAUSE,
796             CSR_VSCAUSE,
797             CSR_MTVAL,
798             CSR_STVAL,
799             CSR_HTVAL,
800             CSR_MTVAL2,
801             CSR_MSCRATCH,
802             CSR_SSCRATCH,
803             CSR_SATP,
804             CSR_MMTE,
805             CSR_UPMBASE,
806             CSR_UPMMASK,
807             CSR_SPMBASE,
808             CSR_SPMMASK,
809             CSR_MPMBASE,
810             CSR_MPMMASK,
811         };
812 
813         for (i = 0; i < ARRAY_SIZE(dump_csrs); ++i) {
814             int csrno = dump_csrs[i];
815             target_ulong val = 0;
816             RISCVException res = riscv_csrrw_debug(env, csrno, &val, 0, 0);
817 
818             /*
819              * Rely on the smode, hmode, etc. predicates within csr.c
820              * to do the filtering of the registers that are present.
821              */
822             if (res == RISCV_EXCP_NONE) {
823                 qemu_fprintf(f, " %-8s " TARGET_FMT_lx "\n",
824                              csr_ops[csrno].name, val);
825             }
826         }
827     }
828 #endif
829 
830     for (i = 0; i < 32; i++) {
831         qemu_fprintf(f, " %-8s " TARGET_FMT_lx,
832                      riscv_int_regnames[i], env->gpr[i]);
833         if ((i & 3) == 3) {
834             qemu_fprintf(f, "\n");
835         }
836     }
837     if (flags & CPU_DUMP_FPU) {
838         for (i = 0; i < 32; i++) {
839             qemu_fprintf(f, " %-8s %016" PRIx64,
840                          riscv_fpr_regnames[i], env->fpr[i]);
841             if ((i & 3) == 3) {
842                 qemu_fprintf(f, "\n");
843             }
844         }
845     }
846     if (riscv_has_ext(env, RVV) && (flags & CPU_DUMP_VPU)) {
847         static const int dump_rvv_csrs[] = {
848                     CSR_VSTART,
849                     CSR_VXSAT,
850                     CSR_VXRM,
851                     CSR_VCSR,
852                     CSR_VL,
853                     CSR_VTYPE,
854                     CSR_VLENB,
855                 };
856         for (i = 0; i < ARRAY_SIZE(dump_rvv_csrs); ++i) {
857             int csrno = dump_rvv_csrs[i];
858             target_ulong val = 0;
859             RISCVException res = riscv_csrrw_debug(env, csrno, &val, 0, 0);
860 
861             /*
862              * Rely on the smode, hmode, etc. predicates within csr.c
863              * to do the filtering of the registers that are present.
864              */
865             if (res == RISCV_EXCP_NONE) {
866                 qemu_fprintf(f, " %-8s " TARGET_FMT_lx "\n",
867                              csr_ops[csrno].name, val);
868             }
869         }
870         uint16_t vlenb = cpu->cfg.vlenb;
871 
872         for (i = 0; i < 32; i++) {
873             qemu_fprintf(f, " %-8s ", riscv_rvv_regnames[i]);
874             p = (uint8_t *)env->vreg;
875             for (j = vlenb - 1 ; j >= 0; j--) {
876                 qemu_fprintf(f, "%02x", *(p + i * vlenb + BYTE(j)));
877             }
878             qemu_fprintf(f, "\n");
879         }
880     }
881 }
882 
883 static void riscv_cpu_set_pc(CPUState *cs, vaddr value)
884 {
885     RISCVCPU *cpu = RISCV_CPU(cs);
886     CPURISCVState *env = &cpu->env;
887 
888     if (env->xl == MXL_RV32) {
889         env->pc = (int32_t)value;
890     } else {
891         env->pc = value;
892     }
893 }
894 
895 static vaddr riscv_cpu_get_pc(CPUState *cs)
896 {
897     RISCVCPU *cpu = RISCV_CPU(cs);
898     CPURISCVState *env = &cpu->env;
899 
900     /* Match cpu_get_tb_cpu_state. */
901     if (env->xl == MXL_RV32) {
902         return env->pc & UINT32_MAX;
903     }
904     return env->pc;
905 }
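/*
 * For illustration, on a 64-bit build running an RV32 context
 * (env->xl == MXL_RV32): riscv_cpu_set_pc(cs, 0x80001000) stores the
 * sign-extended value 0xffffffff80001000 in env->pc, and
 * riscv_cpu_get_pc() masks it back to 0x80001000, matching
 * cpu_get_tb_cpu_state().
 */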
906 
907 bool riscv_cpu_has_work(CPUState *cs)
908 {
909 #ifndef CONFIG_USER_ONLY
910     RISCVCPU *cpu = RISCV_CPU(cs);
911     CPURISCVState *env = &cpu->env;
912     /*
913      * Definition of the WFI instruction requires it to ignore the privilege
914      * mode and delegation registers, but respect individual enables
915      */
916     return riscv_cpu_all_pending(env) != 0 ||
917         riscv_cpu_sirq_pending(env) != RISCV_EXCP_NONE ||
918         riscv_cpu_vsirq_pending(env) != RISCV_EXCP_NONE;
919 #else
920     return true;
921 #endif
922 }
923 
924 static int riscv_cpu_mmu_index(CPUState *cs, bool ifetch)
925 {
926     return riscv_env_mmu_index(cpu_env(cs), ifetch);
927 }
928 
929 static void riscv_cpu_reset_hold(Object *obj, ResetType type)
930 {
931 #ifndef CONFIG_USER_ONLY
932     uint8_t iprio;
933     int i, irq, rdzero;
934 #endif
935     CPUState *cs = CPU(obj);
936     RISCVCPU *cpu = RISCV_CPU(cs);
937     RISCVCPUClass *mcc = RISCV_CPU_GET_CLASS(obj);
938     CPURISCVState *env = &cpu->env;
939 
940     if (mcc->parent_phases.hold) {
941         mcc->parent_phases.hold(obj, type);
942     }
943 #ifndef CONFIG_USER_ONLY
944     env->misa_mxl = mcc->misa_mxl_max;
945     env->priv = PRV_M;
946     env->mstatus &= ~(MSTATUS_MIE | MSTATUS_MPRV);
947     if (env->misa_mxl > MXL_RV32) {
948         /*
949          * The reset status of SXL/UXL is undefined, but mstatus is WARL
950          * and we must ensure that the value after init is valid to read.
951          */
952         env->mstatus = set_field(env->mstatus, MSTATUS64_SXL, env->misa_mxl);
953         env->mstatus = set_field(env->mstatus, MSTATUS64_UXL, env->misa_mxl);
954         if (riscv_has_ext(env, RVH)) {
955             env->vsstatus = set_field(env->vsstatus,
956                                       MSTATUS64_SXL, env->misa_mxl);
957             env->vsstatus = set_field(env->vsstatus,
958                                       MSTATUS64_UXL, env->misa_mxl);
959             env->mstatus_hs = set_field(env->mstatus_hs,
960                                         MSTATUS64_SXL, env->misa_mxl);
961             env->mstatus_hs = set_field(env->mstatus_hs,
962                                         MSTATUS64_UXL, env->misa_mxl);
963         }
964     }
965     env->mcause = 0;
966     env->miclaim = MIP_SGEIP;
967     env->pc = env->resetvec;
968     env->bins = 0;
969     env->two_stage_lookup = false;
970 
971     env->menvcfg = (cpu->cfg.ext_svpbmt ? MENVCFG_PBMTE : 0) |
972                    (!cpu->cfg.ext_svade && cpu->cfg.ext_svadu ?
973                     MENVCFG_ADUE : 0);
974     env->henvcfg = 0;
975 
976     /* Initialize default priorities of local interrupts. */
977     for (i = 0; i < ARRAY_SIZE(env->miprio); i++) {
978         iprio = riscv_cpu_default_priority(i);
979         env->miprio[i] = (i == IRQ_M_EXT) ? 0 : iprio;
980         env->siprio[i] = (i == IRQ_S_EXT) ? 0 : iprio;
981         env->hviprio[i] = 0;
982     }
983     i = 0;
984     while (!riscv_cpu_hviprio_index2irq(i, &irq, &rdzero)) {
985         if (!rdzero) {
986             env->hviprio[irq] = env->miprio[irq];
987         }
988         i++;
989     }
990     /* mmte is supposed to have pm.current hardwired to 1 */
991     env->mmte |= (EXT_STATUS_INITIAL | MMTE_M_PM_CURRENT);
992 
993     /*
994      * Bits 2, 6, 10 and 12 of mideleg are read-only one when the Hypervisor
995      * extension is enabled.
996      */
997     if (riscv_has_ext(env, RVH)) {
998         env->mideleg |= HS_MODE_INTERRUPTS;
999     }
1000 
1001     /*
1002      * Clear mseccfg and unlock all the PMP entries upon reset.
1003      * This is allowed as per the priv and smepmp specifications
1004      * and is needed to clear stale entries across reboots.
1005      */
1006     if (riscv_cpu_cfg(env)->ext_smepmp) {
1007         env->mseccfg = 0;
1008     }
1009 
1010     pmp_unlock_entries(env);
1011 #endif
1012     env->xl = riscv_cpu_mxl(env);
1013     riscv_cpu_update_mask(env);
1014     cs->exception_index = RISCV_EXCP_NONE;
1015     env->load_res = -1;
1016     set_default_nan_mode(1, &env->fp_status);
1017 
1018 #ifndef CONFIG_USER_ONLY
1019     if (cpu->cfg.debug) {
1020         riscv_trigger_reset_hold(env);
1021     }
1022 
1023     if (kvm_enabled()) {
1024         kvm_riscv_reset_vcpu(cpu);
1025     }
1026 #endif
1027 }
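/*
 * Example of the menvcfg reset value computed above: a CPU configured
 * with svpbmt=on, svadu=on and svade=off leaves reset with both
 * MENVCFG_PBMTE and MENVCFG_ADUE set, while enabling svade (or disabling
 * svadu) keeps ADUE clear so A/D bits must be managed by software.
 */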
1028 
1029 static void riscv_cpu_disas_set_info(CPUState *s, disassemble_info *info)
1030 {
1031     RISCVCPU *cpu = RISCV_CPU(s);
1032     CPURISCVState *env = &cpu->env;
1033     info->target_info = &cpu->cfg;
1034 
1035     switch (env->xl) {
1036     case MXL_RV32:
1037         info->print_insn = print_insn_riscv32;
1038         break;
1039     case MXL_RV64:
1040         info->print_insn = print_insn_riscv64;
1041         break;
1042     case MXL_RV128:
1043         info->print_insn = print_insn_riscv128;
1044         break;
1045     default:
1046         g_assert_not_reached();
1047     }
1048 }
1049 
1050 #ifndef CONFIG_USER_ONLY
1051 static void riscv_cpu_satp_mode_finalize(RISCVCPU *cpu, Error **errp)
1052 {
1053     bool rv32 = riscv_cpu_is_32bit(cpu);
1054     uint8_t satp_mode_map_max, satp_mode_supported_max;
1055 
1056     /* The CPU wants the OS to decide which satp mode to use */
1057     if (cpu->cfg.satp_mode.supported == 0) {
1058         return;
1059     }
1060 
1061     satp_mode_supported_max =
1062                     satp_mode_max_from_map(cpu->cfg.satp_mode.supported);
1063 
1064     if (cpu->cfg.satp_mode.map == 0) {
1065         if (cpu->cfg.satp_mode.init == 0) {
1066             /* If unset by the user, we fall back to the default satp mode. */
1067             set_satp_mode_default_map(cpu);
1068         } else {
1069             /*
1070              * Find the lowest level that was disabled and then enable the
1071              * first valid level below it, i.e. the next mode that is
1072              * marked valid in valid_vm_1_10_32/64.
1073              */
1074             for (int i = 1; i < 16; ++i) {
1075                 if ((cpu->cfg.satp_mode.init & (1 << i)) &&
1076                     (cpu->cfg.satp_mode.supported & (1 << i))) {
1077                     for (int j = i - 1; j >= 0; --j) {
1078                         if (cpu->cfg.satp_mode.supported & (1 << j)) {
1079                             cpu->cfg.satp_mode.map |= (1 << j);
1080                             break;
1081                         }
1082                     }
1083                     break;
1084                 }
1085             }
1086         }
1087     }
1088 
1089     satp_mode_map_max = satp_mode_max_from_map(cpu->cfg.satp_mode.map);
1090 
1091     /* Make sure the user asked for a supported configuration (HW and QEMU) */
1092     if (satp_mode_map_max > satp_mode_supported_max) {
1093         error_setg(errp, "satp_mode %s is higher than hw max capability %s",
1094                    satp_mode_str(satp_mode_map_max, rv32),
1095                    satp_mode_str(satp_mode_supported_max, rv32));
1096         return;
1097     }
1098 
1099     /*
1100      * Make sure the user did not ask for an invalid configuration as per
1101      * the specification.
1102      */
1103     if (!rv32) {
1104         for (int i = satp_mode_map_max - 1; i >= 0; --i) {
1105             if (!(cpu->cfg.satp_mode.map & (1 << i)) &&
1106                 (cpu->cfg.satp_mode.init & (1 << i)) &&
1107                 (cpu->cfg.satp_mode.supported & (1 << i))) {
1108                 error_setg(errp, "cannot disable %s satp mode if %s "
1109                            "is enabled", satp_mode_str(i, false),
1110                            satp_mode_str(satp_mode_map_max, false));
1111                 return;
1112             }
1113         }
1114     }
1115 
1116     /* Finally expand the map so that all valid modes are set */
1117     for (int i = satp_mode_map_max - 1; i >= 0; --i) {
1118         if (cpu->cfg.satp_mode.supported & (1 << i)) {
1119             cpu->cfg.satp_mode.map |= (1 << i);
1120         }
1121     }
1122 }
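/*
 * Illustrative outcomes of the checks above for a 64-bit CPU whose
 * hardware maximum is sv57 (no other satp options given):
 *
 *     sv48=off          -> nothing was explicitly enabled, so the first
 *                          valid mode below sv48 is used, i.e. sv39
 *     sv57=on,sv48=off  -> rejected: a mode below the requested maximum
 *                          cannot be disabled
 *     sv48=on           -> accepted, then expanded so every supported
 *                          mode below sv48 is set as well
 */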
1123 #endif
1124 
1125 void riscv_cpu_finalize_features(RISCVCPU *cpu, Error **errp)
1126 {
1127     Error *local_err = NULL;
1128 
1129 #ifndef CONFIG_USER_ONLY
1130     riscv_cpu_satp_mode_finalize(cpu, &local_err);
1131     if (local_err != NULL) {
1132         error_propagate(errp, local_err);
1133         return;
1134     }
1135 #endif
1136 
1137     if (tcg_enabled()) {
1138         riscv_tcg_cpu_finalize_features(cpu, &local_err);
1139         if (local_err != NULL) {
1140             error_propagate(errp, local_err);
1141             return;
1142         }
1143         riscv_tcg_cpu_finalize_dynamic_decoder(cpu);
1144     } else if (kvm_enabled()) {
1145         riscv_kvm_cpu_finalize_features(cpu, &local_err);
1146         if (local_err != NULL) {
1147             error_propagate(errp, local_err);
1148             return;
1149         }
1150     }
1151 }
1152 
1153 static void riscv_cpu_realize(DeviceState *dev, Error **errp)
1154 {
1155     CPUState *cs = CPU(dev);
1156     RISCVCPU *cpu = RISCV_CPU(dev);
1157     RISCVCPUClass *mcc = RISCV_CPU_GET_CLASS(dev);
1158     Error *local_err = NULL;
1159 
1160     if (object_dynamic_cast(OBJECT(dev), TYPE_RISCV_CPU_ANY) != NULL) {
1161         warn_report("The 'any' CPU is deprecated and will be "
1162                     "removed in the future.");
1163     }
1164 
1165     cpu_exec_realizefn(cs, &local_err);
1166     if (local_err != NULL) {
1167         error_propagate(errp, local_err);
1168         return;
1169     }
1170 
1171     riscv_cpu_finalize_features(cpu, &local_err);
1172     if (local_err != NULL) {
1173         error_propagate(errp, local_err);
1174         return;
1175     }
1176 
1177     riscv_cpu_register_gdb_regs_for_features(cs);
1178 
1179 #ifndef CONFIG_USER_ONLY
1180     if (cpu->cfg.debug) {
1181         riscv_trigger_realize(&cpu->env);
1182     }
1183 #endif
1184 
1185     qemu_init_vcpu(cs);
1186     cpu_reset(cs);
1187 
1188     mcc->parent_realize(dev, errp);
1189 }
1190 
1191 bool riscv_cpu_accelerator_compatible(RISCVCPU *cpu)
1192 {
1193     if (tcg_enabled()) {
1194         return riscv_cpu_tcg_compatible(cpu);
1195     }
1196 
1197     return true;
1198 }
1199 
1200 #ifndef CONFIG_USER_ONLY
1201 static void cpu_riscv_get_satp(Object *obj, Visitor *v, const char *name,
1202                                void *opaque, Error **errp)
1203 {
1204     RISCVSATPMap *satp_map = opaque;
1205     uint8_t satp = satp_mode_from_str(name);
1206     bool value;
1207 
1208     value = satp_map->map & (1 << satp);
1209 
1210     visit_type_bool(v, name, &value, errp);
1211 }
1212 
1213 static void cpu_riscv_set_satp(Object *obj, Visitor *v, const char *name,
1214                                void *opaque, Error **errp)
1215 {
1216     RISCVSATPMap *satp_map = opaque;
1217     uint8_t satp = satp_mode_from_str(name);
1218     bool value;
1219 
1220     if (!visit_type_bool(v, name, &value, errp)) {
1221         return;
1222     }
1223 
1224     satp_map->map = deposit32(satp_map->map, satp, 1, value);
1225     satp_map->init |= 1 << satp;
1226 }
1227 
1228 void riscv_add_satp_mode_properties(Object *obj)
1229 {
1230     RISCVCPU *cpu = RISCV_CPU(obj);
1231 
1232     if (cpu->env.misa_mxl == MXL_RV32) {
1233         object_property_add(obj, "sv32", "bool", cpu_riscv_get_satp,
1234                             cpu_riscv_set_satp, NULL, &cpu->cfg.satp_mode);
1235     } else {
1236         object_property_add(obj, "sv39", "bool", cpu_riscv_get_satp,
1237                             cpu_riscv_set_satp, NULL, &cpu->cfg.satp_mode);
1238         object_property_add(obj, "sv48", "bool", cpu_riscv_get_satp,
1239                             cpu_riscv_set_satp, NULL, &cpu->cfg.satp_mode);
1240         object_property_add(obj, "sv57", "bool", cpu_riscv_get_satp,
1241                             cpu_riscv_set_satp, NULL, &cpu->cfg.satp_mode);
1242         object_property_add(obj, "sv64", "bool", cpu_riscv_get_satp,
1243                             cpu_riscv_set_satp, NULL, &cpu->cfg.satp_mode);
1244     }
1245 }
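/*
 * These properties let users trim the satp mode from the command line,
 * e.g. (illustrative invocations):
 *
 *     -cpu rv64,sv57=off      # fall back to the next valid mode, sv48
 *     -cpu rv64,sv39=on       # explicitly enable sv39
 *
 * The setter above only records the request (deposit32() writes a single
 * bit of 'map', and 'init' remembers that the user touched that mode);
 * the consistency checks happen later in riscv_cpu_satp_mode_finalize().
 */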
1246 
1247 static void riscv_cpu_set_irq(void *opaque, int irq, int level)
1248 {
1249     RISCVCPU *cpu = RISCV_CPU(opaque);
1250     CPURISCVState *env = &cpu->env;
1251 
1252     if (irq < IRQ_LOCAL_MAX) {
1253         switch (irq) {
1254         case IRQ_U_SOFT:
1255         case IRQ_S_SOFT:
1256         case IRQ_VS_SOFT:
1257         case IRQ_M_SOFT:
1258         case IRQ_U_TIMER:
1259         case IRQ_S_TIMER:
1260         case IRQ_VS_TIMER:
1261         case IRQ_M_TIMER:
1262         case IRQ_U_EXT:
1263         case IRQ_VS_EXT:
1264         case IRQ_M_EXT:
1265             if (kvm_enabled()) {
1266                 kvm_riscv_set_irq(cpu, irq, level);
1267             } else {
1268                 riscv_cpu_update_mip(env, 1 << irq, BOOL_TO_MASK(level));
1269             }
1270             break;
1271         case IRQ_S_EXT:
1272             if (kvm_enabled()) {
1273                 kvm_riscv_set_irq(cpu, irq, level);
1274             } else {
1275                 env->external_seip = level;
1276                 riscv_cpu_update_mip(env, 1 << irq,
1277                                      BOOL_TO_MASK(level | env->software_seip));
1278             }
1279             break;
1280         default:
1281             g_assert_not_reached();
1282         }
1283     } else if (irq < (IRQ_LOCAL_MAX + IRQ_LOCAL_GUEST_MAX)) {
1284         /* Require H-extension for handling guest local interrupts */
1285         if (!riscv_has_ext(env, RVH)) {
1286             g_assert_not_reached();
1287         }
1288 
1289         /* Compute bit position in HGEIP CSR */
1290         irq = irq - IRQ_LOCAL_MAX + 1;
1291         if (env->geilen < irq) {
1292             g_assert_not_reached();
1293         }
1294 
1295         /* Update HGEIP CSR */
1296         env->hgeip &= ~((target_ulong)1 << irq);
1297         if (level) {
1298             env->hgeip |= (target_ulong)1 << irq;
1299         }
1300 
1301         /* Update mip.SGEIP bit */
1302         riscv_cpu_update_mip(env, MIP_SGEIP,
1303                              BOOL_TO_MASK(!!(env->hgeie & env->hgeip)));
1304     } else {
1305         g_assert_not_reached();
1306     }
1307 }
1308 #endif /* CONFIG_USER_ONLY */
1309 
1310 static bool riscv_cpu_is_dynamic(Object *cpu_obj)
1311 {
1312     return object_dynamic_cast(cpu_obj, TYPE_RISCV_DYNAMIC_CPU) != NULL;
1313 }
1314 
1315 static void riscv_cpu_post_init(Object *obj)
1316 {
1317     accel_cpu_instance_init(CPU(obj));
1318 }
1319 
1320 static void riscv_cpu_init(Object *obj)
1321 {
1322     RISCVCPUClass *mcc = RISCV_CPU_GET_CLASS(obj);
1323     RISCVCPU *cpu = RISCV_CPU(obj);
1324     CPURISCVState *env = &cpu->env;
1325 
1326     env->misa_mxl = mcc->misa_mxl_max;
1327 
1328 #ifndef CONFIG_USER_ONLY
1329     qdev_init_gpio_in(DEVICE(obj), riscv_cpu_set_irq,
1330                       IRQ_LOCAL_MAX + IRQ_LOCAL_GUEST_MAX);
1331 #endif /* CONFIG_USER_ONLY */
1332 
1333     general_user_opts = g_hash_table_new(g_str_hash, g_str_equal);
1334 
1335     /*
1336      * The timer and performance counter extensions were supported
1337      * in QEMU before they were added as discrete extensions in the
1338      * ISA. To keep compatibility, we'll always default them to 'true'
1339      * for all CPUs. Each accelerator will decide what to do when
1340      * users disable them.
1341      */
1342     RISCV_CPU(obj)->cfg.ext_zicntr = true;
1343     RISCV_CPU(obj)->cfg.ext_zihpm = true;
1344 
1345     /* Default values for non-bool cpu properties */
1346     cpu->cfg.pmu_mask = MAKE_64BIT_MASK(3, 16);
1347     cpu->cfg.vlenb = 128 >> 3;
1348     cpu->cfg.elen = 64;
1349     cpu->cfg.cbom_blocksize = 64;
1350     cpu->cfg.cbop_blocksize = 64;
1351     cpu->cfg.cboz_blocksize = 64;
1352     cpu->env.vext_ver = VEXT_VERSION_1_00_0;
1353 }
1354 
1355 static void riscv_bare_cpu_init(Object *obj)
1356 {
1357     RISCVCPU *cpu = RISCV_CPU(obj);
1358 
1359     /*
1360      * Bare CPUs do not inherit the timer and performance
1361      * counters from the parent class (see riscv_cpu_init()
1362      * for info on why the parent enables them).
1363      *
1364      * Users have to explicitly enable these counters for
1365      * bare CPUs.
1366      */
1367     cpu->cfg.ext_zicntr = false;
1368     cpu->cfg.ext_zihpm = false;
1369 
1370     /* Set to QEMU's first supported priv version */
1371     cpu->env.priv_ver = PRIV_VERSION_1_10_0;
1372 
1373     /*
1374      * Support all available satp_mode settings. The default
1375      * value will be set to MBARE if the user doesn't set
1376      * satp_mode manually (see set_satp_mode_default_map()).
1377      */
1378 #ifndef CONFIG_USER_ONLY
1379     set_satp_mode_max_supported(cpu, VM_1_10_SV64);
1380 #endif
1381 }
1382 
1383 typedef struct misa_ext_info {
1384     const char *name;
1385     const char *description;
1386 } MISAExtInfo;
1387 
1388 #define MISA_INFO_IDX(_bit) \
1389     __builtin_ctz(_bit)
1390 
1391 #define MISA_EXT_INFO(_bit, _propname, _descr) \
1392     [MISA_INFO_IDX(_bit)] = {.name = _propname, .description = _descr}
1393 
1394 static const MISAExtInfo misa_ext_info_arr[] = {
1395     MISA_EXT_INFO(RVA, "a", "Atomic instructions"),
1396     MISA_EXT_INFO(RVC, "c", "Compressed instructions"),
1397     MISA_EXT_INFO(RVD, "d", "Double-precision floating point"),
1398     MISA_EXT_INFO(RVF, "f", "Single-precision floating point"),
1399     MISA_EXT_INFO(RVI, "i", "Base integer instruction set"),
1400     MISA_EXT_INFO(RVE, "e", "Base integer instruction set (embedded)"),
1401     MISA_EXT_INFO(RVM, "m", "Integer multiplication and division"),
1402     MISA_EXT_INFO(RVS, "s", "Supervisor-level instructions"),
1403     MISA_EXT_INFO(RVU, "u", "User-level instructions"),
1404     MISA_EXT_INFO(RVH, "h", "Hypervisor"),
1405     MISA_EXT_INFO(RVJ, "x-j", "Dynamically translated languages"),
1406     MISA_EXT_INFO(RVV, "v", "Vector operations"),
1407     MISA_EXT_INFO(RVG, "g", "General purpose (IMAFD_Zicsr_Zifencei)"),
1408     MISA_EXT_INFO(RVB, "b", "Bit manipulation (Zba_Zbb_Zbs)")
1409 };
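/*
 * The array above is indexed by misa bit position: RVC, for instance, is
 * misa bit 2 ('C' - 'A'), so MISA_INFO_IDX(RVC) == __builtin_ctz(1 << 2)
 * == 2 and misa_ext_info_arr[2] holds the "c" entry.
 */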
1410 
1411 static void riscv_cpu_validate_misa_mxl(RISCVCPUClass *mcc)
1412 {
1413     CPUClass *cc = CPU_CLASS(mcc);
1414 
1415     /* Validate that MISA_MXL is set properly. */
1416     switch (mcc->misa_mxl_max) {
1417 #ifdef TARGET_RISCV64
1418     case MXL_RV64:
1419     case MXL_RV128:
1420         cc->gdb_core_xml_file = "riscv-64bit-cpu.xml";
1421         break;
1422 #endif
1423     case MXL_RV32:
1424         cc->gdb_core_xml_file = "riscv-32bit-cpu.xml";
1425         break;
1426     default:
1427         g_assert_not_reached();
1428     }
1429 }
1430 
1431 static int riscv_validate_misa_info_idx(uint32_t bit)
1432 {
1433     int idx;
1434 
1435     /*
1436      * Our lowest valid input (RVA) is 1 and
1437      * __builtin_ctz() is UB with zero.
1438      */
1439     g_assert(bit != 0);
1440     idx = MISA_INFO_IDX(bit);
1441 
1442     g_assert(idx < ARRAY_SIZE(misa_ext_info_arr));
1443     return idx;
1444 }
1445 
1446 const char *riscv_get_misa_ext_name(uint32_t bit)
1447 {
1448     int idx = riscv_validate_misa_info_idx(bit);
1449     const char *val = misa_ext_info_arr[idx].name;
1450 
1451     g_assert(val != NULL);
1452     return val;
1453 }
1454 
1455 const char *riscv_get_misa_ext_description(uint32_t bit)
1456 {
1457     int idx = riscv_validate_misa_info_idx(bit);
1458     const char *val = misa_ext_info_arr[idx].description;
1459 
1460     g_assert(val != NULL);
1461     return val;
1462 }
1463 
1464 #define MULTI_EXT_CFG_BOOL(_name, _prop, _defval) \
1465     {.name = _name, .offset = CPU_CFG_OFFSET(_prop), \
1466      .enabled = _defval}
1467 
1468 const RISCVCPUMultiExtConfig riscv_cpu_extensions[] = {
1469     /* Defaults for standard extensions */
1470     MULTI_EXT_CFG_BOOL("sscofpmf", ext_sscofpmf, false),
1471     MULTI_EXT_CFG_BOOL("zifencei", ext_zifencei, true),
1472     MULTI_EXT_CFG_BOOL("zicsr", ext_zicsr, true),
1473     MULTI_EXT_CFG_BOOL("zihintntl", ext_zihintntl, true),
1474     MULTI_EXT_CFG_BOOL("zihintpause", ext_zihintpause, true),
1475     MULTI_EXT_CFG_BOOL("zimop", ext_zimop, false),
1476     MULTI_EXT_CFG_BOOL("zacas", ext_zacas, false),
1477     MULTI_EXT_CFG_BOOL("zaamo", ext_zaamo, false),
1478     MULTI_EXT_CFG_BOOL("zalrsc", ext_zalrsc, false),
1479     MULTI_EXT_CFG_BOOL("zawrs", ext_zawrs, true),
1480     MULTI_EXT_CFG_BOOL("zfa", ext_zfa, true),
1481     MULTI_EXT_CFG_BOOL("zfbfmin", ext_zfbfmin, false),
1482     MULTI_EXT_CFG_BOOL("zfh", ext_zfh, false),
1483     MULTI_EXT_CFG_BOOL("zfhmin", ext_zfhmin, false),
1484     MULTI_EXT_CFG_BOOL("zve32f", ext_zve32f, false),
1485     MULTI_EXT_CFG_BOOL("zve32x", ext_zve32x, false),
1486     MULTI_EXT_CFG_BOOL("zve64f", ext_zve64f, false),
1487     MULTI_EXT_CFG_BOOL("zve64d", ext_zve64d, false),
1488     MULTI_EXT_CFG_BOOL("zve64x", ext_zve64x, false),
1489     MULTI_EXT_CFG_BOOL("zvfbfmin", ext_zvfbfmin, false),
1490     MULTI_EXT_CFG_BOOL("zvfbfwma", ext_zvfbfwma, false),
1491     MULTI_EXT_CFG_BOOL("zvfh", ext_zvfh, false),
1492     MULTI_EXT_CFG_BOOL("zvfhmin", ext_zvfhmin, false),
1493     MULTI_EXT_CFG_BOOL("sstc", ext_sstc, true),
1494 
1495     MULTI_EXT_CFG_BOOL("smaia", ext_smaia, false),
1496     MULTI_EXT_CFG_BOOL("smepmp", ext_smepmp, false),
1497     MULTI_EXT_CFG_BOOL("smstateen", ext_smstateen, false),
1498     MULTI_EXT_CFG_BOOL("ssaia", ext_ssaia, false),
1499     MULTI_EXT_CFG_BOOL("svade", ext_svade, false),
1500     MULTI_EXT_CFG_BOOL("svadu", ext_svadu, true),
1501     MULTI_EXT_CFG_BOOL("svinval", ext_svinval, false),
1502     MULTI_EXT_CFG_BOOL("svnapot", ext_svnapot, false),
1503     MULTI_EXT_CFG_BOOL("svpbmt", ext_svpbmt, false),
1504 
1505     MULTI_EXT_CFG_BOOL("zicntr", ext_zicntr, true),
1506     MULTI_EXT_CFG_BOOL("zihpm", ext_zihpm, true),
1507 
1508     MULTI_EXT_CFG_BOOL("zba", ext_zba, true),
1509     MULTI_EXT_CFG_BOOL("zbb", ext_zbb, true),
1510     MULTI_EXT_CFG_BOOL("zbc", ext_zbc, true),
1511     MULTI_EXT_CFG_BOOL("zbkb", ext_zbkb, false),
1512     MULTI_EXT_CFG_BOOL("zbkc", ext_zbkc, false),
1513     MULTI_EXT_CFG_BOOL("zbkx", ext_zbkx, false),
1514     MULTI_EXT_CFG_BOOL("zbs", ext_zbs, true),
1515     MULTI_EXT_CFG_BOOL("zk", ext_zk, false),
1516     MULTI_EXT_CFG_BOOL("zkn", ext_zkn, false),
1517     MULTI_EXT_CFG_BOOL("zknd", ext_zknd, false),
1518     MULTI_EXT_CFG_BOOL("zkne", ext_zkne, false),
1519     MULTI_EXT_CFG_BOOL("zknh", ext_zknh, false),
1520     MULTI_EXT_CFG_BOOL("zkr", ext_zkr, false),
1521     MULTI_EXT_CFG_BOOL("zks", ext_zks, false),
1522     MULTI_EXT_CFG_BOOL("zksed", ext_zksed, false),
1523     MULTI_EXT_CFG_BOOL("zksh", ext_zksh, false),
1524     MULTI_EXT_CFG_BOOL("zkt", ext_zkt, false),
1525     MULTI_EXT_CFG_BOOL("ztso", ext_ztso, false),
1526 
1527     MULTI_EXT_CFG_BOOL("zdinx", ext_zdinx, false),
1528     MULTI_EXT_CFG_BOOL("zfinx", ext_zfinx, false),
1529     MULTI_EXT_CFG_BOOL("zhinx", ext_zhinx, false),
1530     MULTI_EXT_CFG_BOOL("zhinxmin", ext_zhinxmin, false),
1531 
1532     MULTI_EXT_CFG_BOOL("zicbom", ext_zicbom, true),
1533     MULTI_EXT_CFG_BOOL("zicbop", ext_zicbop, true),
1534     MULTI_EXT_CFG_BOOL("zicboz", ext_zicboz, true),
1535 
1536     MULTI_EXT_CFG_BOOL("zmmul", ext_zmmul, false),
1537 
1538     MULTI_EXT_CFG_BOOL("zca", ext_zca, false),
1539     MULTI_EXT_CFG_BOOL("zcb", ext_zcb, false),
1540     MULTI_EXT_CFG_BOOL("zcd", ext_zcd, false),
1541     MULTI_EXT_CFG_BOOL("zce", ext_zce, false),
1542     MULTI_EXT_CFG_BOOL("zcf", ext_zcf, false),
1543     MULTI_EXT_CFG_BOOL("zcmp", ext_zcmp, false),
1544     MULTI_EXT_CFG_BOOL("zcmt", ext_zcmt, false),
1545     MULTI_EXT_CFG_BOOL("zicond", ext_zicond, false),
1546 
1547     /* Vector cryptography extensions */
1548     MULTI_EXT_CFG_BOOL("zvbb", ext_zvbb, false),
1549     MULTI_EXT_CFG_BOOL("zvbc", ext_zvbc, false),
1550     MULTI_EXT_CFG_BOOL("zvkb", ext_zvkb, false),
1551     MULTI_EXT_CFG_BOOL("zvkg", ext_zvkg, false),
1552     MULTI_EXT_CFG_BOOL("zvkned", ext_zvkned, false),
1553     MULTI_EXT_CFG_BOOL("zvknha", ext_zvknha, false),
1554     MULTI_EXT_CFG_BOOL("zvknhb", ext_zvknhb, false),
1555     MULTI_EXT_CFG_BOOL("zvksed", ext_zvksed, false),
1556     MULTI_EXT_CFG_BOOL("zvksh", ext_zvksh, false),
1557     MULTI_EXT_CFG_BOOL("zvkt", ext_zvkt, false),
1558     MULTI_EXT_CFG_BOOL("zvkn", ext_zvkn, false),
1559     MULTI_EXT_CFG_BOOL("zvknc", ext_zvknc, false),
1560     MULTI_EXT_CFG_BOOL("zvkng", ext_zvkng, false),
1561     MULTI_EXT_CFG_BOOL("zvks", ext_zvks, false),
1562     MULTI_EXT_CFG_BOOL("zvksc", ext_zvksc, false),
1563     MULTI_EXT_CFG_BOOL("zvksg", ext_zvksg, false),
1564 
1565     DEFINE_PROP_END_OF_LIST(),
1566 };
1567 
1568 const RISCVCPUMultiExtConfig riscv_cpu_vendor_exts[] = {
1569     MULTI_EXT_CFG_BOOL("xtheadba", ext_xtheadba, false),
1570     MULTI_EXT_CFG_BOOL("xtheadbb", ext_xtheadbb, false),
1571     MULTI_EXT_CFG_BOOL("xtheadbs", ext_xtheadbs, false),
1572     MULTI_EXT_CFG_BOOL("xtheadcmo", ext_xtheadcmo, false),
1573     MULTI_EXT_CFG_BOOL("xtheadcondmov", ext_xtheadcondmov, false),
1574     MULTI_EXT_CFG_BOOL("xtheadfmemidx", ext_xtheadfmemidx, false),
1575     MULTI_EXT_CFG_BOOL("xtheadfmv", ext_xtheadfmv, false),
1576     MULTI_EXT_CFG_BOOL("xtheadmac", ext_xtheadmac, false),
1577     MULTI_EXT_CFG_BOOL("xtheadmemidx", ext_xtheadmemidx, false),
1578     MULTI_EXT_CFG_BOOL("xtheadmempair", ext_xtheadmempair, false),
1579     MULTI_EXT_CFG_BOOL("xtheadsync", ext_xtheadsync, false),
1580     MULTI_EXT_CFG_BOOL("xventanacondops", ext_XVentanaCondOps, false),
1581 
1582     DEFINE_PROP_END_OF_LIST(),
1583 };
1584 
1585 /* These are experimental so mark with 'x-' */
1586 const RISCVCPUMultiExtConfig riscv_cpu_experimental_exts[] = {
1587     DEFINE_PROP_END_OF_LIST(),
1588 };
1589 
1590 /*
1591  * 'Named features' is the name we give to extensions that we
1592  * don't want to expose to users. They are either immutable
1592  * (always enabled/disabled) or they'll vary depending on
1594  * the resulting CPU state. They have riscv,isa strings
1595  * and priv_ver like regular extensions.
1596  */
1597 const RISCVCPUMultiExtConfig riscv_cpu_named_features[] = {
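         /* zic64b: all cache-block sizes (cbom/cbop/cboz) are 64 bytes */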
1598     MULTI_EXT_CFG_BOOL("zic64b", ext_zic64b, true),
1599 
1600     DEFINE_PROP_END_OF_LIST(),
1601 };
1602 
1603 /* Deprecated entries marked for future removal */
1604 const RISCVCPUMultiExtConfig riscv_cpu_deprecated_exts[] = {
1605     MULTI_EXT_CFG_BOOL("Zifencei", ext_zifencei, true),
1606     MULTI_EXT_CFG_BOOL("Zicsr", ext_zicsr, true),
1607     MULTI_EXT_CFG_BOOL("Zihintntl", ext_zihintntl, true),
1608     MULTI_EXT_CFG_BOOL("Zihintpause", ext_zihintpause, true),
1609     MULTI_EXT_CFG_BOOL("Zawrs", ext_zawrs, true),
1610     MULTI_EXT_CFG_BOOL("Zfa", ext_zfa, true),
1611     MULTI_EXT_CFG_BOOL("Zfh", ext_zfh, false),
1612     MULTI_EXT_CFG_BOOL("Zfhmin", ext_zfhmin, false),
1613     MULTI_EXT_CFG_BOOL("Zve32f", ext_zve32f, false),
1614     MULTI_EXT_CFG_BOOL("Zve64f", ext_zve64f, false),
1615     MULTI_EXT_CFG_BOOL("Zve64d", ext_zve64d, false),
1616 
1617     DEFINE_PROP_END_OF_LIST(),
1618 };
1619 
1620 static void cpu_set_prop_err(RISCVCPU *cpu, const char *propname,
1621                              Error **errp)
1622 {
1623     g_autofree char *cpuname = riscv_cpu_get_name(cpu);
1624     error_setg(errp, "CPU '%s' does not allow changing the value of '%s'",
1625                cpuname, propname);
1626 }
1627 
1628 static void prop_pmu_num_set(Object *obj, Visitor *v, const char *name,
1629                              void *opaque, Error **errp)
1630 {
1631     RISCVCPU *cpu = RISCV_CPU(obj);
1632     uint8_t pmu_num, curr_pmu_num;
1633     uint32_t pmu_mask;
1634 
1635     if (!visit_type_uint8(v, name, &pmu_num, errp)) {
             return;
         }
1636 
1637     curr_pmu_num = ctpop32(cpu->cfg.pmu_mask);
1638 
1639     if (pmu_num != curr_pmu_num && riscv_cpu_is_vendor(obj)) {
1640         cpu_set_prop_err(cpu, name, errp);
1641         error_append_hint(errp, "Current '%s' val: %u\n",
1642                           name, curr_pmu_num);
1643         return;
1644     }
1645 
1646     if (pmu_num > (RV_MAX_MHPMCOUNTERS - 3)) {
1647         error_setg(errp, "Number of counters exceeds maximum available");
1648         return;
1649     }
1650 
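         /*
          * Counters 0..2 (cycle, time, instret) are fixed, so the programmable
          * mhpmcounters start at bit 3 of the mask: e.g. pmu-num=4 becomes
          * pmu-mask=0x78 (bits 3..6).  Illustrative mapping only.
          */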
1651     if (pmu_num == 0) {
1652         pmu_mask = 0;
1653     } else {
1654         pmu_mask = MAKE_64BIT_MASK(3, pmu_num);
1655     }
1656 
1657     warn_report("\"pmu-num\" property is deprecated; use \"pmu-mask\"");
1658     cpu->cfg.pmu_mask = pmu_mask;
1659     cpu_option_add_user_setting("pmu-mask", pmu_mask);
1660 }
1661 
1662 static void prop_pmu_num_get(Object *obj, Visitor *v, const char *name,
1663                              void *opaque, Error **errp)
1664 {
1665     RISCVCPU *cpu = RISCV_CPU(obj);
1666     uint8_t pmu_num = ctpop32(cpu->cfg.pmu_mask);
1667 
1668     visit_type_uint8(v, name, &pmu_num, errp);
1669 }
1670 
1671 static const PropertyInfo prop_pmu_num = {
1672     .name = "pmu-num",
1673     .get = prop_pmu_num_get,
1674     .set = prop_pmu_num_set,
1675 };
1676 
1677 static void prop_pmu_mask_set(Object *obj, Visitor *v, const char *name,
1678                              void *opaque, Error **errp)
1679 {
1680     RISCVCPU *cpu = RISCV_CPU(obj);
1681     uint32_t value;
1682     uint8_t pmu_num;
1683 
1684     if (!visit_type_uint32(v, name, &value, errp)) {
             return;
         }
1685 
1686     if (value != cpu->cfg.pmu_mask && riscv_cpu_is_vendor(obj)) {
1687         cpu_set_prop_err(cpu, name, errp);
1688         error_append_hint(errp, "Current '%s' val: 0x%x\n",
1689                           name, cpu->cfg.pmu_mask);
1690         return;
1691     }
1692 
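         /*
          * pmu-mask selects which mhpmcounters exist: e.g. pmu-mask=0x18
          * enables only mhpmcounter3 and mhpmcounter4 (illustrative).
          */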
1693     pmu_num = ctpop32(value);
1694 
1695     if (pmu_num > (RV_MAX_MHPMCOUNTERS - 3)) {
1696         error_setg(errp, "Number of counters exceeds maximum available");
1697         return;
1698     }
1699 
1700     cpu_option_add_user_setting(name, value);
1701     cpu->cfg.pmu_mask = value;
1702 }
1703 
1704 static void prop_pmu_mask_get(Object *obj, Visitor *v, const char *name,
1705                              void *opaque, Error **errp)
1706 {
1707     uint8_t pmu_mask = RISCV_CPU(obj)->cfg.pmu_mask;
1708 
1709     visit_type_uint8(v, name, &pmu_mask, errp);
1710 }
1711 
1712 static const PropertyInfo prop_pmu_mask = {
1713     .name = "pmu-mask",
1714     .get = prop_pmu_mask_get,
1715     .set = prop_pmu_mask_set,
1716 };
1717 
1718 static void prop_mmu_set(Object *obj, Visitor *v, const char *name,
1719                          void *opaque, Error **errp)
1720 {
1721     RISCVCPU *cpu = RISCV_CPU(obj);
1722     bool value;
1723 
1724     if (!visit_type_bool(v, name, &value, errp)) {
             return;
         }
1725 
1726     if (cpu->cfg.mmu != value && riscv_cpu_is_vendor(obj)) {
1727         cpu_set_prop_err(cpu, "mmu", errp);
1728         return;
1729     }
1730 
1731     cpu_option_add_user_setting(name, value);
1732     cpu->cfg.mmu = value;
1733 }
1734 
1735 static void prop_mmu_get(Object *obj, Visitor *v, const char *name,
1736                          void *opaque, Error **errp)
1737 {
1738     bool value = RISCV_CPU(obj)->cfg.mmu;
1739 
1740     visit_type_bool(v, name, &value, errp);
1741 }
1742 
1743 static const PropertyInfo prop_mmu = {
1744     .name = "mmu",
1745     .get = prop_mmu_get,
1746     .set = prop_mmu_set,
1747 };
1748 
1749 static void prop_pmp_set(Object *obj, Visitor *v, const char *name,
1750                          void *opaque, Error **errp)
1751 {
1752     RISCVCPU *cpu = RISCV_CPU(obj);
1753     bool value;
1754 
1755     if (!visit_type_bool(v, name, &value, errp)) {
             return;
         }
1756 
1757     if (cpu->cfg.pmp != value && riscv_cpu_is_vendor(obj)) {
1758         cpu_set_prop_err(cpu, name, errp);
1759         return;
1760     }
1761 
1762     cpu_option_add_user_setting(name, value);
1763     cpu->cfg.pmp = value;
1764 }
1765 
1766 static void prop_pmp_get(Object *obj, Visitor *v, const char *name,
1767                          void *opaque, Error **errp)
1768 {
1769     bool value = RISCV_CPU(obj)->cfg.pmp;
1770 
1771     visit_type_bool(v, name, &value, errp);
1772 }
1773 
1774 static const PropertyInfo prop_pmp = {
1775     .name = "pmp",
1776     .get = prop_pmp_get,
1777     .set = prop_pmp_set,
1778 };
1779 
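     /*
      * Maps a "priv_spec" property string to the internal PRIV_VERSION_*
      * value, returning -1 for unrecognized strings.  The accepted strings
      * are the PRIV_VER_*_STR values, used on command lines along the lines
      * of "-cpu rv64,priv_spec=v1.12.0" (illustrative; the exact string is
      * whatever PRIV_VER_1_12_0_STR expands to).
      */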
1780 static int priv_spec_from_str(const char *priv_spec_str)
1781 {
1782     int priv_version = -1;
1783 
1784     if (!g_strcmp0(priv_spec_str, PRIV_VER_1_13_0_STR)) {
1785         priv_version = PRIV_VERSION_1_13_0;
1786     } else if (!g_strcmp0(priv_spec_str, PRIV_VER_1_12_0_STR)) {
1787         priv_version = PRIV_VERSION_1_12_0;
1788     } else if (!g_strcmp0(priv_spec_str, PRIV_VER_1_11_0_STR)) {
1789         priv_version = PRIV_VERSION_1_11_0;
1790     } else if (!g_strcmp0(priv_spec_str, PRIV_VER_1_10_0_STR)) {
1791         priv_version = PRIV_VERSION_1_10_0;
1792     }
1793 
1794     return priv_version;
1795 }
1796 
1797 const char *priv_spec_to_str(int priv_version)
1798 {
1799     switch (priv_version) {
1800     case PRIV_VERSION_1_10_0:
1801         return PRIV_VER_1_10_0_STR;
1802     case PRIV_VERSION_1_11_0:
1803         return PRIV_VER_1_11_0_STR;
1804     case PRIV_VERSION_1_12_0:
1805         return PRIV_VER_1_12_0_STR;
1806     case PRIV_VERSION_1_13_0:
1807         return PRIV_VER_1_13_0_STR;
1808     default:
1809         return NULL;
1810     }
1811 }
1812 
1813 static void prop_priv_spec_set(Object *obj, Visitor *v, const char *name,
1814                                void *opaque, Error **errp)
1815 {
1816     RISCVCPU *cpu = RISCV_CPU(obj);
1817     g_autofree char *value = NULL;
1818     int priv_version = -1;
1819 
1820     if (!visit_type_str(v, name, &value, errp)) {
             return;
         }
1821 
1822     priv_version = priv_spec_from_str(value);
1823     if (priv_version < 0) {
1824         error_setg(errp, "Unsupported privilege spec version '%s'", value);
1825         return;
1826     }
1827 
1828     if (priv_version != cpu->env.priv_ver && riscv_cpu_is_vendor(obj)) {
1829         cpu_set_prop_err(cpu, name, errp);
1830         error_append_hint(errp, "Current '%s' val: %s\n", name,
1831                           object_property_get_str(obj, name, NULL));
1832         return;
1833     }
1834 
1835     cpu_option_add_user_setting(name, priv_version);
1836     cpu->env.priv_ver = priv_version;
1837 }
1838 
1839 static void prop_priv_spec_get(Object *obj, Visitor *v, const char *name,
1840                                void *opaque, Error **errp)
1841 {
1842     RISCVCPU *cpu = RISCV_CPU(obj);
1843     const char *value = priv_spec_to_str(cpu->env.priv_ver);
1844 
1845     visit_type_str(v, name, (char **)&value, errp);
1846 }
1847 
1848 static const PropertyInfo prop_priv_spec = {
1849     .name = "priv_spec",
1850     .get = prop_priv_spec_get,
1851     .set = prop_priv_spec_set,
1852 };
1853 
1854 static void prop_vext_spec_set(Object *obj, Visitor *v, const char *name,
1855                                void *opaque, Error **errp)
1856 {
1857     RISCVCPU *cpu = RISCV_CPU(obj);
1858     g_autofree char *value = NULL;
1859 
1860     if (!visit_type_str(v, name, &value, errp)) {
             return;
         }
1861 
1862     if (g_strcmp0(value, VEXT_VER_1_00_0_STR) != 0) {
1863         error_setg(errp, "Unsupported vector spec version '%s'", value);
1864         return;
1865     }
1866 
1867     cpu_option_add_user_setting(name, VEXT_VERSION_1_00_0);
1868     cpu->env.vext_ver = VEXT_VERSION_1_00_0;
1869 }
1870 
1871 static void prop_vext_spec_get(Object *obj, Visitor *v, const char *name,
1872                                void *opaque, Error **errp)
1873 {
1874     const char *value = VEXT_VER_1_00_0_STR;
1875 
1876     visit_type_str(v, name, (char **)&value, errp);
1877 }
1878 
1879 static const PropertyInfo prop_vext_spec = {
1880     .name = "vext_spec",
1881     .get = prop_vext_spec_get,
1882     .set = prop_vext_spec_set,
1883 };
1884 
1885 static void prop_vlen_set(Object *obj, Visitor *v, const char *name,
1886                          void *opaque, Error **errp)
1887 {
1888     RISCVCPU *cpu = RISCV_CPU(obj);
1889     uint16_t value;
1890 
1891     if (!visit_type_uint16(v, name, &value, errp)) {
1892         return;
1893     }
1894 
1895     if (!is_power_of_2(value)) {
1896         error_setg(errp, "Vector extension VLEN must be a power of 2");
1897         return;
1898     }
1899 
1900     if (value != cpu->cfg.vlenb && riscv_cpu_is_vendor(obj)) {
1901         cpu_set_prop_err(cpu, name, errp);
1902         error_append_hint(errp, "Current '%s' val: %u\n",
1903                           name, cpu->cfg.vlenb << 3);
1904         return;
1905     }
1906 
1907     cpu_option_add_user_setting(name, value);
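         /* cfg.vlenb stores VLEN in bytes; the "vlen" property is in bits */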
1908     cpu->cfg.vlenb = value >> 3;
1909 }
1910 
1911 static void prop_vlen_get(Object *obj, Visitor *v, const char *name,
1912                          void *opaque, Error **errp)
1913 {
1914     uint16_t value = RISCV_CPU(obj)->cfg.vlenb << 3;
1915 
1916     visit_type_uint16(v, name, &value, errp);
1917 }
1918 
1919 static const PropertyInfo prop_vlen = {
1920     .name = "vlen",
1921     .get = prop_vlen_get,
1922     .set = prop_vlen_set,
1923 };
1924 
1925 static void prop_elen_set(Object *obj, Visitor *v, const char *name,
1926                          void *opaque, Error **errp)
1927 {
1928     RISCVCPU *cpu = RISCV_CPU(obj);
1929     uint16_t value;
1930 
1931     if (!visit_type_uint16(v, name, &value, errp)) {
1932         return;
1933     }
1934 
1935     if (!is_power_of_2(value)) {
1936         error_setg(errp, "Vector extension ELEN must be a power of 2");
1937         return;
1938     }
1939 
1940     if (value != cpu->cfg.elen && riscv_cpu_is_vendor(obj)) {
1941         cpu_set_prop_err(cpu, name, errp);
1942         error_append_hint(errp, "Current '%s' val: %u\n",
1943                           name, cpu->cfg.elen);
1944         return;
1945     }
1946 
1947     cpu_option_add_user_setting(name, value);
1948     cpu->cfg.elen = value;
1949 }
1950 
1951 static void prop_elen_get(Object *obj, Visitor *v, const char *name,
1952                          void *opaque, Error **errp)
1953 {
1954     uint16_t value = RISCV_CPU(obj)->cfg.elen;
1955 
1956     visit_type_uint16(v, name, &value, errp);
1957 }
1958 
1959 static const PropertyInfo prop_elen = {
1960     .name = "elen",
1961     .get = prop_elen_get,
1962     .set = prop_elen_set,
1963 };
1964 
1965 static void prop_cbom_blksize_set(Object *obj, Visitor *v, const char *name,
1966                                   void *opaque, Error **errp)
1967 {
1968     RISCVCPU *cpu = RISCV_CPU(obj);
1969     uint16_t value;
1970 
1971     if (!visit_type_uint16(v, name, &value, errp)) {
1972         return;
1973     }
1974 
1975     if (value != cpu->cfg.cbom_blocksize && riscv_cpu_is_vendor(obj)) {
1976         cpu_set_prop_err(cpu, name, errp);
1977         error_append_hint(errp, "Current '%s' val: %u\n",
1978                           name, cpu->cfg.cbom_blocksize);
1979         return;
1980     }
1981 
1982     cpu_option_add_user_setting(name, value);
1983     cpu->cfg.cbom_blocksize = value;
1984 }
1985 
1986 static void prop_cbom_blksize_get(Object *obj, Visitor *v, const char *name,
1987                          void *opaque, Error **errp)
1988 {
1989     uint16_t value = RISCV_CPU(obj)->cfg.cbom_blocksize;
1990 
1991     visit_type_uint16(v, name, &value, errp);
1992 }
1993 
1994 static const PropertyInfo prop_cbom_blksize = {
1995     .name = "cbom_blocksize",
1996     .get = prop_cbom_blksize_get,
1997     .set = prop_cbom_blksize_set,
1998 };
1999 
2000 static void prop_cbop_blksize_set(Object *obj, Visitor *v, const char *name,
2001                                   void *opaque, Error **errp)
2002 {
2003     RISCVCPU *cpu = RISCV_CPU(obj);
2004     uint16_t value;
2005 
2006     if (!visit_type_uint16(v, name, &value, errp)) {
2007         return;
2008     }
2009 
2010     if (value != cpu->cfg.cbop_blocksize && riscv_cpu_is_vendor(obj)) {
2011         cpu_set_prop_err(cpu, name, errp);
2012         error_append_hint(errp, "Current '%s' val: %u\n",
2013                           name, cpu->cfg.cbop_blocksize);
2014         return;
2015     }
2016 
2017     cpu_option_add_user_setting(name, value);
2018     cpu->cfg.cbop_blocksize = value;
2019 }
2020 
2021 static void prop_cbop_blksize_get(Object *obj, Visitor *v, const char *name,
2022                          void *opaque, Error **errp)
2023 {
2024     uint16_t value = RISCV_CPU(obj)->cfg.cbop_blocksize;
2025 
2026     visit_type_uint16(v, name, &value, errp);
2027 }
2028 
2029 static const PropertyInfo prop_cbop_blksize = {
2030     .name = "cbop_blocksize",
2031     .get = prop_cbop_blksize_get,
2032     .set = prop_cbop_blksize_set,
2033 };
2034 
2035 static void prop_cboz_blksize_set(Object *obj, Visitor *v, const char *name,
2036                                   void *opaque, Error **errp)
2037 {
2038     RISCVCPU *cpu = RISCV_CPU(obj);
2039     uint16_t value;
2040 
2041     if (!visit_type_uint16(v, name, &value, errp)) {
2042         return;
2043     }
2044 
2045     if (value != cpu->cfg.cboz_blocksize && riscv_cpu_is_vendor(obj)) {
2046         cpu_set_prop_err(cpu, name, errp);
2047         error_append_hint(errp, "Current '%s' val: %u\n",
2048                           name, cpu->cfg.cboz_blocksize);
2049         return;
2050     }
2051 
2052     cpu_option_add_user_setting(name, value);
2053     cpu->cfg.cboz_blocksize = value;
2054 }
2055 
2056 static void prop_cboz_blksize_get(Object *obj, Visitor *v, const char *name,
2057                          void *opaque, Error **errp)
2058 {
2059     uint16_t value = RISCV_CPU(obj)->cfg.cboz_blocksize;
2060 
2061     visit_type_uint16(v, name, &value, errp);
2062 }
2063 
2064 static const PropertyInfo prop_cboz_blksize = {
2065     .name = "cboz_blocksize",
2066     .get = prop_cboz_blksize_get,
2067     .set = prop_cboz_blksize_set,
2068 };
2069 
2070 static void prop_mvendorid_set(Object *obj, Visitor *v, const char *name,
2071                                void *opaque, Error **errp)
2072 {
2073     bool dynamic_cpu = riscv_cpu_is_dynamic(obj);
2074     RISCVCPU *cpu = RISCV_CPU(obj);
2075     uint32_t prev_val = cpu->cfg.mvendorid;
2076     uint32_t value;
2077 
2078     if (!visit_type_uint32(v, name, &value, errp)) {
2079         return;
2080     }
2081 
2082     if (!dynamic_cpu && prev_val != value) {
2083         error_setg(errp, "Unable to change %s mvendorid (0x%x)",
2084                    object_get_typename(obj), prev_val);
2085         return;
2086     }
2087 
2088     cpu->cfg.mvendorid = value;
2089 }
2090 
2091 static void prop_mvendorid_get(Object *obj, Visitor *v, const char *name,
2092                                void *opaque, Error **errp)
2093 {
2094     uint32_t value = RISCV_CPU(obj)->cfg.mvendorid;
2095 
2096     visit_type_uint32(v, name, &value, errp);
2097 }
2098 
2099 static const PropertyInfo prop_mvendorid = {
2100     .name = "mvendorid",
2101     .get = prop_mvendorid_get,
2102     .set = prop_mvendorid_set,
2103 };
2104 
2105 static void prop_mimpid_set(Object *obj, Visitor *v, const char *name,
2106                             void *opaque, Error **errp)
2107 {
2108     bool dynamic_cpu = riscv_cpu_is_dynamic(obj);
2109     RISCVCPU *cpu = RISCV_CPU(obj);
2110     uint64_t prev_val = cpu->cfg.mimpid;
2111     uint64_t value;
2112 
2113     if (!visit_type_uint64(v, name, &value, errp)) {
2114         return;
2115     }
2116 
2117     if (!dynamic_cpu && prev_val != value) {
2118         error_setg(errp, "Unable to change %s mimpid (0x%" PRIx64 ")",
2119                    object_get_typename(obj), prev_val);
2120         return;
2121     }
2122 
2123     cpu->cfg.mimpid = value;
2124 }
2125 
2126 static void prop_mimpid_get(Object *obj, Visitor *v, const char *name,
2127                             void *opaque, Error **errp)
2128 {
2129     uint64_t value = RISCV_CPU(obj)->cfg.mimpid;
2130 
2131     visit_type_uint64(v, name, &value, errp);
2132 }
2133 
2134 static const PropertyInfo prop_mimpid = {
2135     .name = "mimpid",
2136     .get = prop_mimpid_get,
2137     .set = prop_mimpid_set,
2138 };
2139 
2140 static void prop_marchid_set(Object *obj, Visitor *v, const char *name,
2141                              void *opaque, Error **errp)
2142 {
2143     bool dynamic_cpu = riscv_cpu_is_dynamic(obj);
2144     RISCVCPU *cpu = RISCV_CPU(obj);
2145     uint64_t prev_val = cpu->cfg.marchid;
2146     uint64_t value, invalid_val;
2147     uint32_t mxlen = 0;
2148 
2149     if (!visit_type_uint64(v, name, &value, errp)) {
2150         return;
2151     }
2152 
2153     if (!dynamic_cpu && prev_val != value) {
2154         error_setg(errp, "Unable to change %s marchid (0x%" PRIx64 ")",
2155                    object_get_typename(obj), prev_val);
2156         return;
2157     }
2158 
2159     switch (riscv_cpu_mxl(&cpu->env)) {
2160     case MXL_RV32:
2161         mxlen = 32;
2162         break;
2163     case MXL_RV64:
2164     case MXL_RV128:
2165         mxlen = 64;
2166         break;
2167     default:
2168         g_assert_not_reached();
2169     }
2170 
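         /*
          * A marchid with only the MSB set (and all other bits zero) is
          * reserved by the marchid convention in the privileged spec, so
          * reject it here.
          */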
2171     invalid_val = 1ULL << (mxlen - 1);
2172 
2173     if (value == invalid_val) {
2174         error_setg(errp, "Unable to set marchid with the MSB (bit %u) "
2175                          "set and all other bits zero", mxlen - 1);
2176         return;
2177     }
2178 
2179     cpu->cfg.marchid = value;
2180 }
2181 
2182 static void prop_marchid_get(Object *obj, Visitor *v, const char *name,
2183                              void *opaque, Error **errp)
2184 {
2185     uint64_t value = RISCV_CPU(obj)->cfg.marchid;
2186 
2187     visit_type_uint64(v, name, &value, errp);
2188 }
2189 
2190 static const PropertyInfo prop_marchid = {
2191     .name = "marchid",
2192     .get = prop_marchid_get,
2193     .set = prop_marchid_set,
2194 };
2195 
2196 /*
2197  * RVA22U64 defines some 'named features' that are cache
2198  * related: Za64rs, Zic64b, Ziccif, Ziccrse, Ziccamoa
2199  * and Zicclsm. They are always implemented in TCG and
2200  * don't need to be manually enabled by the profile.
2201  */
2202 static RISCVCPUProfile RVA22U64 = {
2203     .parent = NULL,
2204     .name = "rva22u64",
2205     .misa_ext = RVI | RVM | RVA | RVF | RVD | RVC | RVU,
2206     .priv_spec = RISCV_PROFILE_ATTR_UNUSED,
2207     .satp_mode = RISCV_PROFILE_ATTR_UNUSED,
2208     .ext_offsets = {
2209         CPU_CFG_OFFSET(ext_zicsr), CPU_CFG_OFFSET(ext_zihintpause),
2210         CPU_CFG_OFFSET(ext_zba), CPU_CFG_OFFSET(ext_zbb),
2211         CPU_CFG_OFFSET(ext_zbs), CPU_CFG_OFFSET(ext_zfhmin),
2212         CPU_CFG_OFFSET(ext_zkt), CPU_CFG_OFFSET(ext_zicntr),
2213         CPU_CFG_OFFSET(ext_zihpm), CPU_CFG_OFFSET(ext_zicbom),
2214         CPU_CFG_OFFSET(ext_zicbop), CPU_CFG_OFFSET(ext_zicboz),
2215 
2216         /* mandatory named features for this profile */
2217         CPU_CFG_OFFSET(ext_zic64b),
2218 
2219         RISCV_PROFILE_EXT_LIST_END
2220     }
2221 };
2222 
2223 /*
2224  * As with RVA22U64, RVA22S64 also defines 'named features'.
2225  *
2226  * Cache-related features that we consider enabled since we don't
2227  * implement a cache: Ssccptr
2228  *
2229  * Other named features that we already implement: Sstvecd, Sstvala,
2230  * Sscounterenw
2231  *
2232  * The remaining features/extensions come from RVA22U64.
2233  */
2234 static RISCVCPUProfile RVA22S64 = {
2235     .parent = &RVA22U64,
2236     .name = "rva22s64",
2237     .misa_ext = RVS,
2238     .priv_spec = PRIV_VERSION_1_12_0,
2239     .satp_mode = VM_1_10_SV39,
2240     .ext_offsets = {
2241         /* rva22s64 exts */
2242         CPU_CFG_OFFSET(ext_zifencei), CPU_CFG_OFFSET(ext_svpbmt),
2243         CPU_CFG_OFFSET(ext_svinval), CPU_CFG_OFFSET(ext_svade),
2244 
2245         RISCV_PROFILE_EXT_LIST_END
2246     }
2247 };
2248 
2249 RISCVCPUProfile *riscv_profiles[] = {
2250     &RVA22U64,
2251     &RVA22S64,
2252     NULL,
2253 };
2254 
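     /*
      * Implied extension rules: when the extension named in .ext is enabled,
      * the MISA bits in .implied_misa_exts and the multi-letter extensions in
      * .implied_multi_exts are enabled as well.  Each list is terminated with
      * RISCV_IMPLIED_EXTS_RULE_END.  For example, enabling zcd (ZCD_IMPLIED
      * below) also enables the D MISA bit and zca.
      */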
2255 static RISCVCPUImpliedExtsRule RVA_IMPLIED = {
2256     .is_misa = true,
2257     .ext = RVA,
2258     .implied_multi_exts = {
2259         CPU_CFG_OFFSET(ext_zalrsc), CPU_CFG_OFFSET(ext_zaamo),
2260 
2261         RISCV_IMPLIED_EXTS_RULE_END
2262     },
2263 };
2264 
2265 static RISCVCPUImpliedExtsRule RVD_IMPLIED = {
2266     .is_misa = true,
2267     .ext = RVD,
2268     .implied_misa_exts = RVF,
2269     .implied_multi_exts = { RISCV_IMPLIED_EXTS_RULE_END },
2270 };
2271 
2272 static RISCVCPUImpliedExtsRule RVF_IMPLIED = {
2273     .is_misa = true,
2274     .ext = RVF,
2275     .implied_multi_exts = {
2276         CPU_CFG_OFFSET(ext_zicsr),
2277 
2278         RISCV_IMPLIED_EXTS_RULE_END
2279     },
2280 };
2281 
2282 static RISCVCPUImpliedExtsRule RVM_IMPLIED = {
2283     .is_misa = true,
2284     .ext = RVM,
2285     .implied_multi_exts = {
2286         CPU_CFG_OFFSET(ext_zmmul),
2287 
2288         RISCV_IMPLIED_EXTS_RULE_END
2289     },
2290 };
2291 
2292 static RISCVCPUImpliedExtsRule RVV_IMPLIED = {
2293     .is_misa = true,
2294     .ext = RVV,
2295     .implied_multi_exts = {
2296         CPU_CFG_OFFSET(ext_zve64d),
2297 
2298         RISCV_IMPLIED_EXTS_RULE_END
2299     },
2300 };
2301 
2302 static RISCVCPUImpliedExtsRule ZCB_IMPLIED = {
2303     .ext = CPU_CFG_OFFSET(ext_zcb),
2304     .implied_multi_exts = {
2305         CPU_CFG_OFFSET(ext_zca),
2306 
2307         RISCV_IMPLIED_EXTS_RULE_END
2308     },
2309 };
2310 
2311 static RISCVCPUImpliedExtsRule ZCD_IMPLIED = {
2312     .ext = CPU_CFG_OFFSET(ext_zcd),
2313     .implied_misa_exts = RVD,
2314     .implied_multi_exts = {
2315         CPU_CFG_OFFSET(ext_zca),
2316 
2317         RISCV_IMPLIED_EXTS_RULE_END
2318     },
2319 };
2320 
2321 static RISCVCPUImpliedExtsRule ZCE_IMPLIED = {
2322     .ext = CPU_CFG_OFFSET(ext_zce),
2323     .implied_multi_exts = {
2324         CPU_CFG_OFFSET(ext_zcb), CPU_CFG_OFFSET(ext_zcmp),
2325         CPU_CFG_OFFSET(ext_zcmt),
2326 
2327         RISCV_IMPLIED_EXTS_RULE_END
2328     },
2329 };
2330 
2331 static RISCVCPUImpliedExtsRule ZCF_IMPLIED = {
2332     .ext = CPU_CFG_OFFSET(ext_zcf),
2333     .implied_misa_exts = RVF,
2334     .implied_multi_exts = {
2335         CPU_CFG_OFFSET(ext_zca),
2336 
2337         RISCV_IMPLIED_EXTS_RULE_END
2338     },
2339 };
2340 
2341 static RISCVCPUImpliedExtsRule ZCMP_IMPLIED = {
2342     .ext = CPU_CFG_OFFSET(ext_zcmp),
2343     .implied_multi_exts = {
2344         CPU_CFG_OFFSET(ext_zca),
2345 
2346         RISCV_IMPLIED_EXTS_RULE_END
2347     },
2348 };
2349 
2350 static RISCVCPUImpliedExtsRule ZCMT_IMPLIED = {
2351     .ext = CPU_CFG_OFFSET(ext_zcmt),
2352     .implied_multi_exts = {
2353         CPU_CFG_OFFSET(ext_zca), CPU_CFG_OFFSET(ext_zicsr),
2354 
2355         RISCV_IMPLIED_EXTS_RULE_END
2356     },
2357 };
2358 
2359 static RISCVCPUImpliedExtsRule ZDINX_IMPLIED = {
2360     .ext = CPU_CFG_OFFSET(ext_zdinx),
2361     .implied_multi_exts = {
2362         CPU_CFG_OFFSET(ext_zfinx),
2363 
2364         RISCV_IMPLIED_EXTS_RULE_END
2365     },
2366 };
2367 
2368 static RISCVCPUImpliedExtsRule ZFA_IMPLIED = {
2369     .ext = CPU_CFG_OFFSET(ext_zfa),
2370     .implied_misa_exts = RVF,
2371     .implied_multi_exts = { RISCV_IMPLIED_EXTS_RULE_END },
2372 };
2373 
2374 static RISCVCPUImpliedExtsRule ZFBFMIN_IMPLIED = {
2375     .ext = CPU_CFG_OFFSET(ext_zfbfmin),
2376     .implied_misa_exts = RVF,
2377     .implied_multi_exts = { RISCV_IMPLIED_EXTS_RULE_END },
2378 };
2379 
2380 static RISCVCPUImpliedExtsRule ZFH_IMPLIED = {
2381     .ext = CPU_CFG_OFFSET(ext_zfh),
2382     .implied_multi_exts = {
2383         CPU_CFG_OFFSET(ext_zfhmin),
2384 
2385         RISCV_IMPLIED_EXTS_RULE_END
2386     },
2387 };
2388 
2389 static RISCVCPUImpliedExtsRule ZFHMIN_IMPLIED = {
2390     .ext = CPU_CFG_OFFSET(ext_zfhmin),
2391     .implied_misa_exts = RVF,
2392     .implied_multi_exts = { RISCV_IMPLIED_EXTS_RULE_END },
2393 };
2394 
2395 static RISCVCPUImpliedExtsRule ZFINX_IMPLIED = {
2396     .ext = CPU_CFG_OFFSET(ext_zfinx),
2397     .implied_multi_exts = {
2398         CPU_CFG_OFFSET(ext_zicsr),
2399 
2400         RISCV_IMPLIED_EXTS_RULE_END
2401     },
2402 };
2403 
2404 static RISCVCPUImpliedExtsRule ZHINX_IMPLIED = {
2405     .ext = CPU_CFG_OFFSET(ext_zhinx),
2406     .implied_multi_exts = {
2407         CPU_CFG_OFFSET(ext_zhinxmin),
2408 
2409         RISCV_IMPLIED_EXTS_RULE_END
2410     },
2411 };
2412 
2413 static RISCVCPUImpliedExtsRule ZHINXMIN_IMPLIED = {
2414     .ext = CPU_CFG_OFFSET(ext_zhinxmin),
2415     .implied_multi_exts = {
2416         CPU_CFG_OFFSET(ext_zfinx),
2417 
2418         RISCV_IMPLIED_EXTS_RULE_END
2419     },
2420 };
2421 
2422 static RISCVCPUImpliedExtsRule ZICNTR_IMPLIED = {
2423     .ext = CPU_CFG_OFFSET(ext_zicntr),
2424     .implied_multi_exts = {
2425         CPU_CFG_OFFSET(ext_zicsr),
2426 
2427         RISCV_IMPLIED_EXTS_RULE_END
2428     },
2429 };
2430 
2431 static RISCVCPUImpliedExtsRule ZIHPM_IMPLIED = {
2432     .ext = CPU_CFG_OFFSET(ext_zihpm),
2433     .implied_multi_exts = {
2434         CPU_CFG_OFFSET(ext_zicsr),
2435 
2436         RISCV_IMPLIED_EXTS_RULE_END
2437     },
2438 };
2439 
2440 static RISCVCPUImpliedExtsRule ZK_IMPLIED = {
2441     .ext = CPU_CFG_OFFSET(ext_zk),
2442     .implied_multi_exts = {
2443         CPU_CFG_OFFSET(ext_zkn), CPU_CFG_OFFSET(ext_zkr),
2444         CPU_CFG_OFFSET(ext_zkt),
2445 
2446         RISCV_IMPLIED_EXTS_RULE_END
2447     },
2448 };
2449 
2450 static RISCVCPUImpliedExtsRule ZKN_IMPLIED = {
2451     .ext = CPU_CFG_OFFSET(ext_zkn),
2452     .implied_multi_exts = {
2453         CPU_CFG_OFFSET(ext_zbkb), CPU_CFG_OFFSET(ext_zbkc),
2454         CPU_CFG_OFFSET(ext_zbkx), CPU_CFG_OFFSET(ext_zkne),
2455         CPU_CFG_OFFSET(ext_zknd), CPU_CFG_OFFSET(ext_zknh),
2456 
2457         RISCV_IMPLIED_EXTS_RULE_END
2458     },
2459 };
2460 
2461 static RISCVCPUImpliedExtsRule ZKS_IMPLIED = {
2462     .ext = CPU_CFG_OFFSET(ext_zks),
2463     .implied_multi_exts = {
2464         CPU_CFG_OFFSET(ext_zbkb), CPU_CFG_OFFSET(ext_zbkc),
2465         CPU_CFG_OFFSET(ext_zbkx), CPU_CFG_OFFSET(ext_zksed),
2466         CPU_CFG_OFFSET(ext_zksh),
2467 
2468         RISCV_IMPLIED_EXTS_RULE_END
2469     },
2470 };
2471 
2472 static RISCVCPUImpliedExtsRule ZVBB_IMPLIED = {
2473     .ext = CPU_CFG_OFFSET(ext_zvbb),
2474     .implied_multi_exts = {
2475         CPU_CFG_OFFSET(ext_zvkb),
2476 
2477         RISCV_IMPLIED_EXTS_RULE_END
2478     },
2479 };
2480 
2481 static RISCVCPUImpliedExtsRule ZVE32F_IMPLIED = {
2482     .ext = CPU_CFG_OFFSET(ext_zve32f),
2483     .implied_misa_exts = RVF,
2484     .implied_multi_exts = {
2485         CPU_CFG_OFFSET(ext_zve32x),
2486 
2487         RISCV_IMPLIED_EXTS_RULE_END
2488     },
2489 };
2490 
2491 static RISCVCPUImpliedExtsRule ZVE32X_IMPLIED = {
2492     .ext = CPU_CFG_OFFSET(ext_zve32x),
2493     .implied_multi_exts = {
2494         CPU_CFG_OFFSET(ext_zicsr),
2495 
2496         RISCV_IMPLIED_EXTS_RULE_END
2497     },
2498 };
2499 
2500 static RISCVCPUImpliedExtsRule ZVE64D_IMPLIED = {
2501     .ext = CPU_CFG_OFFSET(ext_zve64d),
2502     .implied_misa_exts = RVD,
2503     .implied_multi_exts = {
2504         CPU_CFG_OFFSET(ext_zve64f),
2505 
2506         RISCV_IMPLIED_EXTS_RULE_END
2507     },
2508 };
2509 
2510 static RISCVCPUImpliedExtsRule ZVE64F_IMPLIED = {
2511     .ext = CPU_CFG_OFFSET(ext_zve64f),
2512     .implied_misa_exts = RVF,
2513     .implied_multi_exts = {
2514         CPU_CFG_OFFSET(ext_zve32f), CPU_CFG_OFFSET(ext_zve64x),
2515 
2516         RISCV_IMPLIED_EXTS_RULE_END
2517     },
2518 };
2519 
2520 static RISCVCPUImpliedExtsRule ZVE64X_IMPLIED = {
2521     .ext = CPU_CFG_OFFSET(ext_zve64x),
2522     .implied_multi_exts = {
2523         CPU_CFG_OFFSET(ext_zve32x),
2524 
2525         RISCV_IMPLIED_EXTS_RULE_END
2526     },
2527 };
2528 
2529 static RISCVCPUImpliedExtsRule ZVFBFMIN_IMPLIED = {
2530     .ext = CPU_CFG_OFFSET(ext_zvfbfmin),
2531     .implied_multi_exts = {
2532         CPU_CFG_OFFSET(ext_zve32f),
2533 
2534         RISCV_IMPLIED_EXTS_RULE_END
2535     },
2536 };
2537 
2538 static RISCVCPUImpliedExtsRule ZVFBFWMA_IMPLIED = {
2539     .ext = CPU_CFG_OFFSET(ext_zvfbfwma),
2540     .implied_multi_exts = {
2541         CPU_CFG_OFFSET(ext_zvfbfmin), CPU_CFG_OFFSET(ext_zfbfmin),
2542 
2543         RISCV_IMPLIED_EXTS_RULE_END
2544     },
2545 };
2546 
2547 static RISCVCPUImpliedExtsRule ZVFH_IMPLIED = {
2548     .ext = CPU_CFG_OFFSET(ext_zvfh),
2549     .implied_multi_exts = {
2550         CPU_CFG_OFFSET(ext_zvfhmin), CPU_CFG_OFFSET(ext_zfhmin),
2551 
2552         RISCV_IMPLIED_EXTS_RULE_END
2553     },
2554 };
2555 
2556 static RISCVCPUImpliedExtsRule ZVFHMIN_IMPLIED = {
2557     .ext = CPU_CFG_OFFSET(ext_zvfhmin),
2558     .implied_multi_exts = {
2559         CPU_CFG_OFFSET(ext_zve32f),
2560 
2561         RISCV_IMPLIED_EXTS_RULE_END
2562     },
2563 };
2564 
2565 static RISCVCPUImpliedExtsRule ZVKN_IMPLIED = {
2566     .ext = CPU_CFG_OFFSET(ext_zvkn),
2567     .implied_multi_exts = {
2568         CPU_CFG_OFFSET(ext_zvkned), CPU_CFG_OFFSET(ext_zvknhb),
2569         CPU_CFG_OFFSET(ext_zvkb), CPU_CFG_OFFSET(ext_zvkt),
2570 
2571         RISCV_IMPLIED_EXTS_RULE_END
2572     },
2573 };
2574 
2575 static RISCVCPUImpliedExtsRule ZVKNC_IMPLIED = {
2576     .ext = CPU_CFG_OFFSET(ext_zvknc),
2577     .implied_multi_exts = {
2578         CPU_CFG_OFFSET(ext_zvkn), CPU_CFG_OFFSET(ext_zvbc),
2579 
2580         RISCV_IMPLIED_EXTS_RULE_END
2581     },
2582 };
2583 
2584 static RISCVCPUImpliedExtsRule ZVKNG_IMPLIED = {
2585     .ext = CPU_CFG_OFFSET(ext_zvkng),
2586     .implied_multi_exts = {
2587         CPU_CFG_OFFSET(ext_zvkn), CPU_CFG_OFFSET(ext_zvkg),
2588 
2589         RISCV_IMPLIED_EXTS_RULE_END
2590     },
2591 };
2592 
2593 static RISCVCPUImpliedExtsRule ZVKNHB_IMPLIED = {
2594     .ext = CPU_CFG_OFFSET(ext_zvknhb),
2595     .implied_multi_exts = {
2596         CPU_CFG_OFFSET(ext_zve64x),
2597 
2598         RISCV_IMPLIED_EXTS_RULE_END
2599     },
2600 };
2601 
2602 static RISCVCPUImpliedExtsRule ZVKS_IMPLIED = {
2603     .ext = CPU_CFG_OFFSET(ext_zvks),
2604     .implied_multi_exts = {
2605         CPU_CFG_OFFSET(ext_zvksed), CPU_CFG_OFFSET(ext_zvksh),
2606         CPU_CFG_OFFSET(ext_zvkb), CPU_CFG_OFFSET(ext_zvkt),
2607 
2608         RISCV_IMPLIED_EXTS_RULE_END
2609     },
2610 };
2611 
2612 static RISCVCPUImpliedExtsRule ZVKSC_IMPLIED = {
2613     .ext = CPU_CFG_OFFSET(ext_zvksc),
2614     .implied_multi_exts = {
2615         CPU_CFG_OFFSET(ext_zvks), CPU_CFG_OFFSET(ext_zvbc),
2616 
2617         RISCV_IMPLIED_EXTS_RULE_END
2618     },
2619 };
2620 
2621 static RISCVCPUImpliedExtsRule ZVKSG_IMPLIED = {
2622     .ext = CPU_CFG_OFFSET(ext_zvksg),
2623     .implied_multi_exts = {
2624         CPU_CFG_OFFSET(ext_zvks), CPU_CFG_OFFSET(ext_zvkg),
2625 
2626         RISCV_IMPLIED_EXTS_RULE_END
2627     },
2628 };
2629 
2630 RISCVCPUImpliedExtsRule *riscv_misa_ext_implied_rules[] = {
2631     &RVA_IMPLIED, &RVD_IMPLIED, &RVF_IMPLIED,
2632     &RVM_IMPLIED, &RVV_IMPLIED, NULL
2633 };
2634 
2635 RISCVCPUImpliedExtsRule *riscv_multi_ext_implied_rules[] = {
2636     &ZCB_IMPLIED, &ZCD_IMPLIED, &ZCE_IMPLIED,
2637     &ZCF_IMPLIED, &ZCMP_IMPLIED, &ZCMT_IMPLIED,
2638     &ZDINX_IMPLIED, &ZFA_IMPLIED, &ZFBFMIN_IMPLIED,
2639     &ZFH_IMPLIED, &ZFHMIN_IMPLIED, &ZFINX_IMPLIED,
2640     &ZHINX_IMPLIED, &ZHINXMIN_IMPLIED, &ZICNTR_IMPLIED,
2641     &ZIHPM_IMPLIED, &ZK_IMPLIED, &ZKN_IMPLIED,
2642     &ZKS_IMPLIED, &ZVBB_IMPLIED, &ZVE32F_IMPLIED,
2643     &ZVE32X_IMPLIED, &ZVE64D_IMPLIED, &ZVE64F_IMPLIED,
2644     &ZVE64X_IMPLIED, &ZVFBFMIN_IMPLIED, &ZVFBFWMA_IMPLIED,
2645     &ZVFH_IMPLIED, &ZVFHMIN_IMPLIED, &ZVKN_IMPLIED,
2646     &ZVKNC_IMPLIED, &ZVKNG_IMPLIED, &ZVKNHB_IMPLIED,
2647     &ZVKS_IMPLIED,  &ZVKSC_IMPLIED, &ZVKSG_IMPLIED,
2648     NULL
2649 };
2650 
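     /*
      * Generic CPU properties settable from the command line, e.g. something
      * like "-cpu rv64,pmu-mask=0x18,vlen=256,mmu=true" (illustrative values;
      * which settings are accepted depends on the CPU model chosen).
      */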
2651 static Property riscv_cpu_properties[] = {
2652     DEFINE_PROP_BOOL("debug", RISCVCPU, cfg.debug, true),
2653 
2654     {.name = "pmu-mask", .info = &prop_pmu_mask},
2655     {.name = "pmu-num", .info = &prop_pmu_num}, /* Deprecated */
2656 
2657     {.name = "mmu", .info = &prop_mmu},
2658     {.name = "pmp", .info = &prop_pmp},
2659 
2660     {.name = "priv_spec", .info = &prop_priv_spec},
2661     {.name = "vext_spec", .info = &prop_vext_spec},
2662 
2663     {.name = "vlen", .info = &prop_vlen},
2664     {.name = "elen", .info = &prop_elen},
2665 
2666     {.name = "cbom_blocksize", .info = &prop_cbom_blksize},
2667     {.name = "cbop_blocksize", .info = &prop_cbop_blksize},
2668     {.name = "cboz_blocksize", .info = &prop_cboz_blksize},
2669 
2670     {.name = "mvendorid", .info = &prop_mvendorid},
2671     {.name = "mimpid", .info = &prop_mimpid},
2672     {.name = "marchid", .info = &prop_marchid},
2673 
2674 #ifndef CONFIG_USER_ONLY
2675     DEFINE_PROP_UINT64("resetvec", RISCVCPU, env.resetvec, DEFAULT_RSTVEC),
2676 #endif
2677 
2678     DEFINE_PROP_BOOL("short-isa-string", RISCVCPU, cfg.short_isa_string, false),
2679 
2680     DEFINE_PROP_BOOL("rvv_ta_all_1s", RISCVCPU, cfg.rvv_ta_all_1s, false),
2681     DEFINE_PROP_BOOL("rvv_ma_all_1s", RISCVCPU, cfg.rvv_ma_all_1s, false),
2682 
2683     /*
2684      * write_misa() is marked as experimental for now so mark
2685      * it with 'x-' and default to 'false'.
2686      */
2687     DEFINE_PROP_BOOL("x-misa-w", RISCVCPU, cfg.misa_w, false),
2688     DEFINE_PROP_END_OF_LIST(),
2689 };
2690 
2691 #if defined(TARGET_RISCV64)
2692 static void rva22u64_profile_cpu_init(Object *obj)
2693 {
2694     rv64i_bare_cpu_init(obj);
2695 
2696     RVA22U64.enabled = true;
2697 }
2698 
2699 static void rva22s64_profile_cpu_init(Object *obj)
2700 {
2701     rv64i_bare_cpu_init(obj);
2702 
2703     RVA22S64.enabled = true;
2704 }
2705 #endif
2706 
2707 static const gchar *riscv_gdb_arch_name(CPUState *cs)
2708 {
2709     RISCVCPU *cpu = RISCV_CPU(cs);
2710     CPURISCVState *env = &cpu->env;
2711 
2712     switch (riscv_cpu_mxl(env)) {
2713     case MXL_RV32:
2714         return "riscv:rv32";
2715     case MXL_RV64:
2716     case MXL_RV128:
2717         return "riscv:rv64";
2718     default:
2719         g_assert_not_reached();
2720     }
2721 }
2722 
2723 #ifndef CONFIG_USER_ONLY
2724 static int64_t riscv_get_arch_id(CPUState *cs)
2725 {
2726     RISCVCPU *cpu = RISCV_CPU(cs);
2727 
2728     return cpu->env.mhartid;
2729 }
2730 
2731 #include "hw/core/sysemu-cpu-ops.h"
2732 
2733 static const struct SysemuCPUOps riscv_sysemu_ops = {
2734     .get_phys_page_debug = riscv_cpu_get_phys_page_debug,
2735     .write_elf64_note = riscv_cpu_write_elf64_note,
2736     .write_elf32_note = riscv_cpu_write_elf32_note,
2737     .legacy_vmsd = &vmstate_riscv_cpu,
2738 };
2739 #endif
2740 
2741 static void riscv_cpu_common_class_init(ObjectClass *c, void *data)
2742 {
2743     RISCVCPUClass *mcc = RISCV_CPU_CLASS(c);
2744     CPUClass *cc = CPU_CLASS(c);
2745     DeviceClass *dc = DEVICE_CLASS(c);
2746     ResettableClass *rc = RESETTABLE_CLASS(c);
2747 
2748     device_class_set_parent_realize(dc, riscv_cpu_realize,
2749                                     &mcc->parent_realize);
2750 
2751     resettable_class_set_parent_phases(rc, NULL, riscv_cpu_reset_hold, NULL,
2752                                        &mcc->parent_phases);
2753 
2754     cc->class_by_name = riscv_cpu_class_by_name;
2755     cc->has_work = riscv_cpu_has_work;
2756     cc->mmu_index = riscv_cpu_mmu_index;
2757     cc->dump_state = riscv_cpu_dump_state;
2758     cc->set_pc = riscv_cpu_set_pc;
2759     cc->get_pc = riscv_cpu_get_pc;
2760     cc->gdb_read_register = riscv_cpu_gdb_read_register;
2761     cc->gdb_write_register = riscv_cpu_gdb_write_register;
2762     cc->gdb_stop_before_watchpoint = true;
2763     cc->disas_set_info = riscv_cpu_disas_set_info;
2764 #ifndef CONFIG_USER_ONLY
2765     cc->sysemu_ops = &riscv_sysemu_ops;
2766     cc->get_arch_id = riscv_get_arch_id;
2767 #endif
2768     cc->gdb_arch_name = riscv_gdb_arch_name;
2769 
2770     device_class_set_props(dc, riscv_cpu_properties);
2771 }
2772 
2773 static void riscv_cpu_class_init(ObjectClass *c, void *data)
2774 {
2775     RISCVCPUClass *mcc = RISCV_CPU_CLASS(c);
2776 
2777     mcc->misa_mxl_max = (uint32_t)(uintptr_t)data;
2778     riscv_cpu_validate_misa_mxl(mcc);
2779 }
2780 
2781 static void riscv_isa_string_ext(RISCVCPU *cpu, char **isa_str,
2782                                  int max_str_len)
2783 {
2784     const RISCVIsaExtData *edata;
2785     char *old = *isa_str;
2786     char *new = *isa_str;
2787 
2788     for (edata = isa_edata_arr; edata && edata->name; edata++) {
2789         if (isa_ext_is_enabled(cpu, edata->ext_enable_offset)) {
2790             new = g_strconcat(old, "_", edata->name, NULL);
2791             g_free(old);
2792             old = new;
2793         }
2794     }
2795 
2796     *isa_str = new;
2797 }
2798 
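     /*
      * Builds the riscv,isa string for this CPU, e.g. something like
      * "rv64imafdc_zicsr_zifencei" depending on the enabled extensions
      * (illustrative output only).
      */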
2799 char *riscv_isa_string(RISCVCPU *cpu)
2800 {
2801     RISCVCPUClass *mcc = RISCV_CPU_GET_CLASS(cpu);
2802     int i;
2803     const size_t maxlen = sizeof("rv128") + sizeof(riscv_single_letter_exts);
2804     char *isa_str = g_new(char, maxlen);
2805     int xlen = riscv_cpu_max_xlen(mcc);
2806     char *p = isa_str + snprintf(isa_str, maxlen, "rv%d", xlen);
2807 
2808     for (i = 0; i < sizeof(riscv_single_letter_exts) - 1; i++) {
2809         if (cpu->env.misa_ext & RV(riscv_single_letter_exts[i])) {
2810             *p++ = qemu_tolower(riscv_single_letter_exts[i]);
2811         }
2812     }
2813     *p = '\0';
2814     if (!cpu->cfg.short_isa_string) {
2815         riscv_isa_string_ext(cpu, &isa_str, maxlen);
2816     }
2817     return isa_str;
2818 }
2819 
2820 #ifndef CONFIG_USER_ONLY
2821 static char **riscv_isa_extensions_list(RISCVCPU *cpu, int *count)
2822 {
2823     int maxlen = ARRAY_SIZE(riscv_single_letter_exts) + ARRAY_SIZE(isa_edata_arr);
2824     char **extensions = g_new(char *, maxlen);
2825 
2826     for (int i = 0; i < sizeof(riscv_single_letter_exts) - 1; i++) {
2827         if (cpu->env.misa_ext & RV(riscv_single_letter_exts[i])) {
2828             extensions[*count] = g_new(char, 2);
2829             snprintf(extensions[*count], 2, "%c",
2830                      qemu_tolower(riscv_single_letter_exts[i]));
2831             (*count)++;
2832         }
2833     }
2834 
2835     for (const RISCVIsaExtData *edata = isa_edata_arr; edata->name; edata++) {
2836         if (isa_ext_is_enabled(cpu, edata->ext_enable_offset)) {
2837             extensions[*count] = g_strdup(edata->name);
2838             (*count)++;
2839         }
2840     }
2841 
2842     return extensions;
2843 }
2844 
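     /*
      * Writes the ISA description into the given device tree node: the
      * legacy "riscv,isa" string plus the newer "riscv,isa-base" (e.g.
      * "rv64i") and "riscv,isa-extensions" string list, one entry per
      * enabled extension.
      */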
2845 void riscv_isa_write_fdt(RISCVCPU *cpu, void *fdt, char *nodename)
2846 {
2847     RISCVCPUClass *mcc = RISCV_CPU_GET_CLASS(cpu);
2848     const size_t maxlen = sizeof("rv128i");
2849     g_autofree char *isa_base = g_new(char, maxlen);
2850     g_autofree char *riscv_isa = NULL;
2851     char **isa_extensions;
2852     int count = 0;
2853     int xlen = riscv_cpu_max_xlen(mcc);
2854 
2855     riscv_isa = riscv_isa_string(cpu);
2856     qemu_fdt_setprop_string(fdt, nodename, "riscv,isa", riscv_isa);
2857 
2858     snprintf(isa_base, maxlen, "rv%di", xlen);
2859     qemu_fdt_setprop_string(fdt, nodename, "riscv,isa-base", isa_base);
2860 
2861     isa_extensions = riscv_isa_extensions_list(cpu, &count);
2862     qemu_fdt_setprop_string_array(fdt, nodename, "riscv,isa-extensions",
2863                                   isa_extensions, count);
2864 
2865     for (int i = 0; i < count; i++) {
2866         g_free(isa_extensions[i]);
2867     }
2868 
2869     g_free(isa_extensions);
2870 }
2871 #endif
2872 
2873 #define DEFINE_CPU(type_name, misa_mxl_max, initfn)         \
2874     {                                                       \
2875         .name = (type_name),                                \
2876         .parent = TYPE_RISCV_CPU,                           \
2877         .instance_init = (initfn),                          \
2878         .class_init = riscv_cpu_class_init,                 \
2879         .class_data = (void *)(misa_mxl_max)                \
2880     }
2881 
2882 #define DEFINE_DYNAMIC_CPU(type_name, misa_mxl_max, initfn) \
2883     {                                                       \
2884         .name = (type_name),                                \
2885         .parent = TYPE_RISCV_DYNAMIC_CPU,                   \
2886         .instance_init = (initfn),                          \
2887         .class_init = riscv_cpu_class_init,                 \
2888         .class_data = (void *)(misa_mxl_max)                \
2889     }
2890 
2891 #define DEFINE_VENDOR_CPU(type_name, misa_mxl_max, initfn)  \
2892     {                                                       \
2893         .name = (type_name),                                \
2894         .parent = TYPE_RISCV_VENDOR_CPU,                    \
2895         .instance_init = (initfn),                          \
2896         .class_init = riscv_cpu_class_init,                 \
2897         .class_data = (void *)(misa_mxl_max)                \
2898     }
2899 
2900 #define DEFINE_BARE_CPU(type_name, misa_mxl_max, initfn)    \
2901     {                                                       \
2902         .name = (type_name),                                \
2903         .parent = TYPE_RISCV_BARE_CPU,                      \
2904         .instance_init = (initfn),                          \
2905         .class_init = riscv_cpu_class_init,                 \
2906         .class_data = (void *)(misa_mxl_max)                \
2907     }
2908 
2909 #define DEFINE_PROFILE_CPU(type_name, misa_mxl_max, initfn) \
2910     {                                                       \
2911         .name = (type_name),                                \
2912         .parent = TYPE_RISCV_BARE_CPU,                      \
2913         .instance_init = (initfn),                          \
2914         .class_init = riscv_cpu_class_init,                 \
2915         .class_data = (void *)(misa_mxl_max)                \
2916     }
2917 
2918 static const TypeInfo riscv_cpu_type_infos[] = {
2919     {
2920         .name = TYPE_RISCV_CPU,
2921         .parent = TYPE_CPU,
2922         .instance_size = sizeof(RISCVCPU),
2923         .instance_align = __alignof(RISCVCPU),
2924         .instance_init = riscv_cpu_init,
2925         .instance_post_init = riscv_cpu_post_init,
2926         .abstract = true,
2927         .class_size = sizeof(RISCVCPUClass),
2928         .class_init = riscv_cpu_common_class_init,
2929     },
2930     {
2931         .name = TYPE_RISCV_DYNAMIC_CPU,
2932         .parent = TYPE_RISCV_CPU,
2933         .abstract = true,
2934     },
2935     {
2936         .name = TYPE_RISCV_VENDOR_CPU,
2937         .parent = TYPE_RISCV_CPU,
2938         .abstract = true,
2939     },
2940     {
2941         .name = TYPE_RISCV_BARE_CPU,
2942         .parent = TYPE_RISCV_CPU,
2943         .instance_init = riscv_bare_cpu_init,
2944         .abstract = true,
2945     },
2946 #if defined(TARGET_RISCV32)
2947     DEFINE_DYNAMIC_CPU(TYPE_RISCV_CPU_ANY,       MXL_RV32,  riscv_any_cpu_init),
2948     DEFINE_DYNAMIC_CPU(TYPE_RISCV_CPU_MAX,       MXL_RV32,  riscv_max_cpu_init),
2949     DEFINE_DYNAMIC_CPU(TYPE_RISCV_CPU_BASE32,    MXL_RV32,  rv32_base_cpu_init),
2950     DEFINE_VENDOR_CPU(TYPE_RISCV_CPU_IBEX,       MXL_RV32,  rv32_ibex_cpu_init),
2951     DEFINE_VENDOR_CPU(TYPE_RISCV_CPU_SIFIVE_E31, MXL_RV32,  rv32_sifive_e_cpu_init),
2952     DEFINE_VENDOR_CPU(TYPE_RISCV_CPU_SIFIVE_E34, MXL_RV32,  rv32_imafcu_nommu_cpu_init),
2953     DEFINE_VENDOR_CPU(TYPE_RISCV_CPU_SIFIVE_U34, MXL_RV32,  rv32_sifive_u_cpu_init),
2954     DEFINE_BARE_CPU(TYPE_RISCV_CPU_RV32I,        MXL_RV32,  rv32i_bare_cpu_init),
2955     DEFINE_BARE_CPU(TYPE_RISCV_CPU_RV32E,        MXL_RV32,  rv32e_bare_cpu_init),
2956 #elif defined(TARGET_RISCV64)
2957     DEFINE_DYNAMIC_CPU(TYPE_RISCV_CPU_ANY,       MXL_RV64,  riscv_any_cpu_init),
2958     DEFINE_DYNAMIC_CPU(TYPE_RISCV_CPU_MAX,       MXL_RV64,  riscv_max_cpu_init),
2959     DEFINE_DYNAMIC_CPU(TYPE_RISCV_CPU_BASE64,    MXL_RV64,  rv64_base_cpu_init),
2960     DEFINE_VENDOR_CPU(TYPE_RISCV_CPU_SIFIVE_E51, MXL_RV64,  rv64_sifive_e_cpu_init),
2961     DEFINE_VENDOR_CPU(TYPE_RISCV_CPU_SIFIVE_U54, MXL_RV64,  rv64_sifive_u_cpu_init),
2962     DEFINE_VENDOR_CPU(TYPE_RISCV_CPU_SHAKTI_C,   MXL_RV64,  rv64_sifive_u_cpu_init),
2963     DEFINE_VENDOR_CPU(TYPE_RISCV_CPU_THEAD_C906, MXL_RV64,  rv64_thead_c906_cpu_init),
2964     DEFINE_VENDOR_CPU(TYPE_RISCV_CPU_VEYRON_V1,  MXL_RV64,  rv64_veyron_v1_cpu_init),
2965 #ifdef CONFIG_TCG
2966     DEFINE_DYNAMIC_CPU(TYPE_RISCV_CPU_BASE128,   MXL_RV128, rv128_base_cpu_init),
2967 #endif /* CONFIG_TCG */
2968     DEFINE_BARE_CPU(TYPE_RISCV_CPU_RV64I,        MXL_RV64,  rv64i_bare_cpu_init),
2969     DEFINE_BARE_CPU(TYPE_RISCV_CPU_RV64E,        MXL_RV64,  rv64e_bare_cpu_init),
2970     DEFINE_PROFILE_CPU(TYPE_RISCV_CPU_RVA22U64,  MXL_RV64,  rva22u64_profile_cpu_init),
2971     DEFINE_PROFILE_CPU(TYPE_RISCV_CPU_RVA22S64,  MXL_RV64,  rva22s64_profile_cpu_init),
2972 #endif /* TARGET_RISCV64 */
2973 };
2974 
2975 DEFINE_TYPES(riscv_cpu_type_infos)
2976