xref: /openbmc/qemu/target/riscv/cpu.c (revision 2f95279a)
1 /*
2  * QEMU RISC-V CPU
3  *
4  * Copyright (c) 2016-2017 Sagar Karandikar, sagark@eecs.berkeley.edu
5  * Copyright (c) 2017-2018 SiFive, Inc.
6  *
7  * This program is free software; you can redistribute it and/or modify it
8  * under the terms and conditions of the GNU General Public License,
9  * version 2 or later, as published by the Free Software Foundation.
10  *
11  * This program is distributed in the hope it will be useful, but WITHOUT
12  * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
13  * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
14  * more details.
15  *
16  * You should have received a copy of the GNU General Public License along with
17  * this program.  If not, see <http://www.gnu.org/licenses/>.
18  */
19 
20 #include "qemu/osdep.h"
21 #include "qemu/qemu-print.h"
22 #include "qemu/ctype.h"
23 #include "qemu/log.h"
24 #include "cpu.h"
25 #include "cpu_vendorid.h"
26 #include "internals.h"
27 #include "exec/exec-all.h"
28 #include "qapi/error.h"
29 #include "qapi/visitor.h"
30 #include "qemu/error-report.h"
31 #include "hw/qdev-properties.h"
32 #include "hw/core/qdev-prop-internal.h"
33 #include "migration/vmstate.h"
34 #include "fpu/softfloat-helpers.h"
35 #include "sysemu/device_tree.h"
36 #include "sysemu/kvm.h"
37 #include "sysemu/tcg.h"
38 #include "kvm/kvm_riscv.h"
39 #include "tcg/tcg-cpu.h"
40 #include "tcg/tcg.h"
41 
42 /* RISC-V CPU definitions */
43 static const char riscv_single_letter_exts[] = "IEMAFDQCBPVH";
44 const uint32_t misa_bits[] = {RVI, RVE, RVM, RVA, RVF, RVD, RVV,
45                               RVC, RVS, RVU, RVH, RVJ, RVG, RVB, 0};
46 
47 /*
48  * From vector_helper.c
49  * Note that vector data is stored in host-endian 64-bit chunks,
50  * so addressing bytes needs a host-endian fixup.
51  */
52 #if HOST_BIG_ENDIAN
53 #define BYTE(x)   ((x) ^ 7)
54 #else
55 #define BYTE(x)   (x)
56 #endif
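/*
 * e.g. on a big-endian host BYTE(0) == 7: the byte at little-endian
 * offset 0 of a vector register lives at offset 7 of its first 64-bit
 * host chunk (illustrative note, see vector_helper.c for details).
 */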
57 
58 bool riscv_cpu_is_32bit(RISCVCPU *cpu)
59 {
60     return riscv_cpu_mxl(&cpu->env) == MXL_RV32;
61 }
62 
63 /* Hash table that stores the numeric options set by the user */
64 static GHashTable *general_user_opts;
65 
66 static void cpu_option_add_user_setting(const char *optname, uint32_t value)
67 {
68     g_hash_table_insert(general_user_opts, (gpointer)optname,
69                         GUINT_TO_POINTER(value));
70 }
71 
72 bool riscv_cpu_option_set(const char *optname)
73 {
74     return g_hash_table_contains(general_user_opts, optname);
75 }
76 
77 #define ISA_EXT_DATA_ENTRY(_name, _min_ver, _prop) \
78     {#_name, _min_ver, CPU_CFG_OFFSET(_prop)}
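/*
 * e.g. ISA_EXT_DATA_ENTRY(zicsr, PRIV_VERSION_1_10_0, ext_zicsr) expands
 * to {"zicsr", PRIV_VERSION_1_10_0, CPU_CFG_OFFSET(ext_zicsr)}.
 */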
79 
80 /*
81  * Here are the ordering rules for extension naming, as defined by the
82  * RISC-V specification:
83  * 1. All extensions should be separated from other multi-letter extensions
84  *    by an underscore.
85  * 2. The first letter following the 'Z' conventionally indicates the most
86  *    closely related alphabetical extension category, IMAFDQLCBKJTPVH.
87  *    If multiple 'Z' extensions are named, they should be ordered first
88  *    by category, then alphabetically within a category.
89  * 3. Standard supervisor-level extensions (starting with 'S') should be
90  *    listed after standard unprivileged extensions.  If multiple
91  *    supervisor-level extensions are listed, they should be ordered
92  *    alphabetically.
93  * 4. Non-standard extensions (starting with 'X') must be listed after all
94  *    standard extensions. They must be separated from other multi-letter
95  *    extensions by an underscore.
96  *
97  * Single letter extensions are checked in riscv_cpu_validate_misa_priv()
98  * instead.
99  */
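/*
 * An ISA string following these rules might look like (illustrative
 * example only): "rv64imafdc_zicsr_zifencei_zba_zbb_svinval_xtheadba".
 */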
100 const RISCVIsaExtData isa_edata_arr[] = {
101     ISA_EXT_DATA_ENTRY(zic64b, PRIV_VERSION_1_12_0, ext_zic64b),
102     ISA_EXT_DATA_ENTRY(zicbom, PRIV_VERSION_1_12_0, ext_zicbom),
103     ISA_EXT_DATA_ENTRY(zicbop, PRIV_VERSION_1_12_0, ext_zicbop),
104     ISA_EXT_DATA_ENTRY(zicboz, PRIV_VERSION_1_12_0, ext_zicboz),
105     ISA_EXT_DATA_ENTRY(ziccamoa, PRIV_VERSION_1_11_0, has_priv_1_11),
106     ISA_EXT_DATA_ENTRY(ziccif, PRIV_VERSION_1_11_0, has_priv_1_11),
107     ISA_EXT_DATA_ENTRY(zicclsm, PRIV_VERSION_1_11_0, has_priv_1_11),
108     ISA_EXT_DATA_ENTRY(ziccrse, PRIV_VERSION_1_11_0, has_priv_1_11),
109     ISA_EXT_DATA_ENTRY(zicond, PRIV_VERSION_1_12_0, ext_zicond),
110     ISA_EXT_DATA_ENTRY(zicntr, PRIV_VERSION_1_12_0, ext_zicntr),
111     ISA_EXT_DATA_ENTRY(zicsr, PRIV_VERSION_1_10_0, ext_zicsr),
112     ISA_EXT_DATA_ENTRY(zifencei, PRIV_VERSION_1_10_0, ext_zifencei),
113     ISA_EXT_DATA_ENTRY(zihintntl, PRIV_VERSION_1_10_0, ext_zihintntl),
114     ISA_EXT_DATA_ENTRY(zihintpause, PRIV_VERSION_1_10_0, ext_zihintpause),
115     ISA_EXT_DATA_ENTRY(zihpm, PRIV_VERSION_1_12_0, ext_zihpm),
116     ISA_EXT_DATA_ENTRY(zmmul, PRIV_VERSION_1_12_0, ext_zmmul),
117     ISA_EXT_DATA_ENTRY(za64rs, PRIV_VERSION_1_12_0, has_priv_1_11),
118     ISA_EXT_DATA_ENTRY(zaamo, PRIV_VERSION_1_12_0, ext_zaamo),
119     ISA_EXT_DATA_ENTRY(zacas, PRIV_VERSION_1_12_0, ext_zacas),
120     ISA_EXT_DATA_ENTRY(zalrsc, PRIV_VERSION_1_12_0, ext_zalrsc),
121     ISA_EXT_DATA_ENTRY(zawrs, PRIV_VERSION_1_12_0, ext_zawrs),
122     ISA_EXT_DATA_ENTRY(zfa, PRIV_VERSION_1_12_0, ext_zfa),
123     ISA_EXT_DATA_ENTRY(zfbfmin, PRIV_VERSION_1_12_0, ext_zfbfmin),
124     ISA_EXT_DATA_ENTRY(zfh, PRIV_VERSION_1_11_0, ext_zfh),
125     ISA_EXT_DATA_ENTRY(zfhmin, PRIV_VERSION_1_11_0, ext_zfhmin),
126     ISA_EXT_DATA_ENTRY(zfinx, PRIV_VERSION_1_12_0, ext_zfinx),
127     ISA_EXT_DATA_ENTRY(zdinx, PRIV_VERSION_1_12_0, ext_zdinx),
128     ISA_EXT_DATA_ENTRY(zca, PRIV_VERSION_1_12_0, ext_zca),
129     ISA_EXT_DATA_ENTRY(zcb, PRIV_VERSION_1_12_0, ext_zcb),
130     ISA_EXT_DATA_ENTRY(zcf, PRIV_VERSION_1_12_0, ext_zcf),
131     ISA_EXT_DATA_ENTRY(zcd, PRIV_VERSION_1_12_0, ext_zcd),
132     ISA_EXT_DATA_ENTRY(zce, PRIV_VERSION_1_12_0, ext_zce),
133     ISA_EXT_DATA_ENTRY(zcmp, PRIV_VERSION_1_12_0, ext_zcmp),
134     ISA_EXT_DATA_ENTRY(zcmt, PRIV_VERSION_1_12_0, ext_zcmt),
135     ISA_EXT_DATA_ENTRY(zba, PRIV_VERSION_1_12_0, ext_zba),
136     ISA_EXT_DATA_ENTRY(zbb, PRIV_VERSION_1_12_0, ext_zbb),
137     ISA_EXT_DATA_ENTRY(zbc, PRIV_VERSION_1_12_0, ext_zbc),
138     ISA_EXT_DATA_ENTRY(zbkb, PRIV_VERSION_1_12_0, ext_zbkb),
139     ISA_EXT_DATA_ENTRY(zbkc, PRIV_VERSION_1_12_0, ext_zbkc),
140     ISA_EXT_DATA_ENTRY(zbkx, PRIV_VERSION_1_12_0, ext_zbkx),
141     ISA_EXT_DATA_ENTRY(zbs, PRIV_VERSION_1_12_0, ext_zbs),
142     ISA_EXT_DATA_ENTRY(zk, PRIV_VERSION_1_12_0, ext_zk),
143     ISA_EXT_DATA_ENTRY(zkn, PRIV_VERSION_1_12_0, ext_zkn),
144     ISA_EXT_DATA_ENTRY(zknd, PRIV_VERSION_1_12_0, ext_zknd),
145     ISA_EXT_DATA_ENTRY(zkne, PRIV_VERSION_1_12_0, ext_zkne),
146     ISA_EXT_DATA_ENTRY(zknh, PRIV_VERSION_1_12_0, ext_zknh),
147     ISA_EXT_DATA_ENTRY(zkr, PRIV_VERSION_1_12_0, ext_zkr),
148     ISA_EXT_DATA_ENTRY(zks, PRIV_VERSION_1_12_0, ext_zks),
149     ISA_EXT_DATA_ENTRY(zksed, PRIV_VERSION_1_12_0, ext_zksed),
150     ISA_EXT_DATA_ENTRY(zksh, PRIV_VERSION_1_12_0, ext_zksh),
151     ISA_EXT_DATA_ENTRY(zkt, PRIV_VERSION_1_12_0, ext_zkt),
152     ISA_EXT_DATA_ENTRY(ztso, PRIV_VERSION_1_12_0, ext_ztso),
153     ISA_EXT_DATA_ENTRY(zvbb, PRIV_VERSION_1_12_0, ext_zvbb),
154     ISA_EXT_DATA_ENTRY(zvbc, PRIV_VERSION_1_12_0, ext_zvbc),
155     ISA_EXT_DATA_ENTRY(zve32f, PRIV_VERSION_1_10_0, ext_zve32f),
156     ISA_EXT_DATA_ENTRY(zve32x, PRIV_VERSION_1_10_0, ext_zve32x),
157     ISA_EXT_DATA_ENTRY(zve64f, PRIV_VERSION_1_10_0, ext_zve64f),
158     ISA_EXT_DATA_ENTRY(zve64d, PRIV_VERSION_1_10_0, ext_zve64d),
159     ISA_EXT_DATA_ENTRY(zve64x, PRIV_VERSION_1_10_0, ext_zve64x),
160     ISA_EXT_DATA_ENTRY(zvfbfmin, PRIV_VERSION_1_12_0, ext_zvfbfmin),
161     ISA_EXT_DATA_ENTRY(zvfbfwma, PRIV_VERSION_1_12_0, ext_zvfbfwma),
162     ISA_EXT_DATA_ENTRY(zvfh, PRIV_VERSION_1_12_0, ext_zvfh),
163     ISA_EXT_DATA_ENTRY(zvfhmin, PRIV_VERSION_1_12_0, ext_zvfhmin),
164     ISA_EXT_DATA_ENTRY(zvkb, PRIV_VERSION_1_12_0, ext_zvkb),
165     ISA_EXT_DATA_ENTRY(zvkg, PRIV_VERSION_1_12_0, ext_zvkg),
166     ISA_EXT_DATA_ENTRY(zvkn, PRIV_VERSION_1_12_0, ext_zvkn),
167     ISA_EXT_DATA_ENTRY(zvknc, PRIV_VERSION_1_12_0, ext_zvknc),
168     ISA_EXT_DATA_ENTRY(zvkned, PRIV_VERSION_1_12_0, ext_zvkned),
169     ISA_EXT_DATA_ENTRY(zvkng, PRIV_VERSION_1_12_0, ext_zvkng),
170     ISA_EXT_DATA_ENTRY(zvknha, PRIV_VERSION_1_12_0, ext_zvknha),
171     ISA_EXT_DATA_ENTRY(zvknhb, PRIV_VERSION_1_12_0, ext_zvknhb),
172     ISA_EXT_DATA_ENTRY(zvks, PRIV_VERSION_1_12_0, ext_zvks),
173     ISA_EXT_DATA_ENTRY(zvksc, PRIV_VERSION_1_12_0, ext_zvksc),
174     ISA_EXT_DATA_ENTRY(zvksed, PRIV_VERSION_1_12_0, ext_zvksed),
175     ISA_EXT_DATA_ENTRY(zvksg, PRIV_VERSION_1_12_0, ext_zvksg),
176     ISA_EXT_DATA_ENTRY(zvksh, PRIV_VERSION_1_12_0, ext_zvksh),
177     ISA_EXT_DATA_ENTRY(zvkt, PRIV_VERSION_1_12_0, ext_zvkt),
178     ISA_EXT_DATA_ENTRY(zhinx, PRIV_VERSION_1_12_0, ext_zhinx),
179     ISA_EXT_DATA_ENTRY(zhinxmin, PRIV_VERSION_1_12_0, ext_zhinxmin),
180     ISA_EXT_DATA_ENTRY(smaia, PRIV_VERSION_1_12_0, ext_smaia),
181     ISA_EXT_DATA_ENTRY(smepmp, PRIV_VERSION_1_12_0, ext_smepmp),
182     ISA_EXT_DATA_ENTRY(smstateen, PRIV_VERSION_1_12_0, ext_smstateen),
183     ISA_EXT_DATA_ENTRY(ssaia, PRIV_VERSION_1_12_0, ext_ssaia),
184     ISA_EXT_DATA_ENTRY(ssccptr, PRIV_VERSION_1_11_0, has_priv_1_11),
185     ISA_EXT_DATA_ENTRY(sscofpmf, PRIV_VERSION_1_12_0, ext_sscofpmf),
186     ISA_EXT_DATA_ENTRY(sscounterenw, PRIV_VERSION_1_12_0, has_priv_1_12),
187     ISA_EXT_DATA_ENTRY(sstc, PRIV_VERSION_1_12_0, ext_sstc),
188     ISA_EXT_DATA_ENTRY(sstvala, PRIV_VERSION_1_12_0, has_priv_1_12),
189     ISA_EXT_DATA_ENTRY(sstvecd, PRIV_VERSION_1_12_0, has_priv_1_12),
190     ISA_EXT_DATA_ENTRY(svade, PRIV_VERSION_1_11_0, ext_svade),
191     ISA_EXT_DATA_ENTRY(svadu, PRIV_VERSION_1_12_0, ext_svadu),
192     ISA_EXT_DATA_ENTRY(svinval, PRIV_VERSION_1_12_0, ext_svinval),
193     ISA_EXT_DATA_ENTRY(svnapot, PRIV_VERSION_1_12_0, ext_svnapot),
194     ISA_EXT_DATA_ENTRY(svpbmt, PRIV_VERSION_1_12_0, ext_svpbmt),
195     ISA_EXT_DATA_ENTRY(xtheadba, PRIV_VERSION_1_11_0, ext_xtheadba),
196     ISA_EXT_DATA_ENTRY(xtheadbb, PRIV_VERSION_1_11_0, ext_xtheadbb),
197     ISA_EXT_DATA_ENTRY(xtheadbs, PRIV_VERSION_1_11_0, ext_xtheadbs),
198     ISA_EXT_DATA_ENTRY(xtheadcmo, PRIV_VERSION_1_11_0, ext_xtheadcmo),
199     ISA_EXT_DATA_ENTRY(xtheadcondmov, PRIV_VERSION_1_11_0, ext_xtheadcondmov),
200     ISA_EXT_DATA_ENTRY(xtheadfmemidx, PRIV_VERSION_1_11_0, ext_xtheadfmemidx),
201     ISA_EXT_DATA_ENTRY(xtheadfmv, PRIV_VERSION_1_11_0, ext_xtheadfmv),
202     ISA_EXT_DATA_ENTRY(xtheadmac, PRIV_VERSION_1_11_0, ext_xtheadmac),
203     ISA_EXT_DATA_ENTRY(xtheadmemidx, PRIV_VERSION_1_11_0, ext_xtheadmemidx),
204     ISA_EXT_DATA_ENTRY(xtheadmempair, PRIV_VERSION_1_11_0, ext_xtheadmempair),
205     ISA_EXT_DATA_ENTRY(xtheadsync, PRIV_VERSION_1_11_0, ext_xtheadsync),
206     ISA_EXT_DATA_ENTRY(xventanacondops, PRIV_VERSION_1_12_0, ext_XVentanaCondOps),
207 
208     DEFINE_PROP_END_OF_LIST(),
209 };
210 
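/*
 * 'ext_offset' below is a byte offset into RISCVCPUConfig (produced by
 * CPU_CFG_OFFSET()), hence the plain pointer arithmetic on &cpu->cfg.
 */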
211 bool isa_ext_is_enabled(RISCVCPU *cpu, uint32_t ext_offset)
212 {
213     bool *ext_enabled = (void *)&cpu->cfg + ext_offset;
214 
215     return *ext_enabled;
216 }
217 
218 void isa_ext_update_enabled(RISCVCPU *cpu, uint32_t ext_offset, bool en)
219 {
220     bool *ext_enabled = (void *)&cpu->cfg + ext_offset;
221 
222     *ext_enabled = en;
223 }
224 
225 bool riscv_cpu_is_vendor(Object *cpu_obj)
226 {
227     return object_dynamic_cast(cpu_obj, TYPE_RISCV_VENDOR_CPU) != NULL;
228 }
229 
230 const char * const riscv_int_regnames[] = {
231     "x0/zero", "x1/ra",  "x2/sp",  "x3/gp",  "x4/tp",  "x5/t0",   "x6/t1",
232     "x7/t2",   "x8/s0",  "x9/s1",  "x10/a0", "x11/a1", "x12/a2",  "x13/a3",
233     "x14/a4",  "x15/a5", "x16/a6", "x17/a7", "x18/s2", "x19/s3",  "x20/s4",
234     "x21/s5",  "x22/s6", "x23/s7", "x24/s8", "x25/s9", "x26/s10", "x27/s11",
235     "x28/t3",  "x29/t4", "x30/t5", "x31/t6"
236 };
237 
238 const char * const riscv_int_regnamesh[] = {
239     "x0h/zeroh", "x1h/rah",  "x2h/sph",   "x3h/gph",   "x4h/tph",  "x5h/t0h",
240     "x6h/t1h",   "x7h/t2h",  "x8h/s0h",   "x9h/s1h",   "x10h/a0h", "x11h/a1h",
241     "x12h/a2h",  "x13h/a3h", "x14h/a4h",  "x15h/a5h",  "x16h/a6h", "x17h/a7h",
242     "x18h/s2h",  "x19h/s3h", "x20h/s4h",  "x21h/s5h",  "x22h/s6h", "x23h/s7h",
243     "x24h/s8h",  "x25h/s9h", "x26h/s10h", "x27h/s11h", "x28h/t3h", "x29h/t4h",
244     "x30h/t5h",  "x31h/t6h"
245 };
246 
247 const char * const riscv_fpr_regnames[] = {
248     "f0/ft0",   "f1/ft1",  "f2/ft2",   "f3/ft3",   "f4/ft4",  "f5/ft5",
249     "f6/ft6",   "f7/ft7",  "f8/fs0",   "f9/fs1",   "f10/fa0", "f11/fa1",
250     "f12/fa2",  "f13/fa3", "f14/fa4",  "f15/fa5",  "f16/fa6", "f17/fa7",
251     "f18/fs2",  "f19/fs3", "f20/fs4",  "f21/fs5",  "f22/fs6", "f23/fs7",
252     "f24/fs8",  "f25/fs9", "f26/fs10", "f27/fs11", "f28/ft8", "f29/ft9",
253     "f30/ft10", "f31/ft11"
254 };
255 
256 const char * const riscv_rvv_regnames[] = {
257   "v0",  "v1",  "v2",  "v3",  "v4",  "v5",  "v6",
258   "v7",  "v8",  "v9",  "v10", "v11", "v12", "v13",
259   "v14", "v15", "v16", "v17", "v18", "v19", "v20",
260   "v21", "v22", "v23", "v24", "v25", "v26", "v27",
261   "v28", "v29", "v30", "v31"
262 };
263 
264 static const char * const riscv_excp_names[] = {
265     "misaligned_fetch",
266     "fault_fetch",
267     "illegal_instruction",
268     "breakpoint",
269     "misaligned_load",
270     "fault_load",
271     "misaligned_store",
272     "fault_store",
273     "user_ecall",
274     "supervisor_ecall",
275     "hypervisor_ecall",
276     "machine_ecall",
277     "exec_page_fault",
278     "load_page_fault",
279     "reserved",
280     "store_page_fault",
281     "reserved",
282     "reserved",
283     "reserved",
284     "reserved",
285     "guest_exec_page_fault",
286     "guest_load_page_fault",
287     "reserved",
288     "guest_store_page_fault",
289 };
290 
291 static const char * const riscv_intr_names[] = {
292     "u_software",
293     "s_software",
294     "vs_software",
295     "m_software",
296     "u_timer",
297     "s_timer",
298     "vs_timer",
299     "m_timer",
300     "u_external",
301     "s_external",
302     "vs_external",
303     "m_external",
304     "reserved",
305     "reserved",
306     "reserved",
307     "reserved"
308 };
309 
310 const char *riscv_cpu_get_trap_name(target_ulong cause, bool async)
311 {
312     if (async) {
313         return (cause < ARRAY_SIZE(riscv_intr_names)) ?
314                riscv_intr_names[cause] : "(unknown)";
315     } else {
316         return (cause < ARRAY_SIZE(riscv_excp_names)) ?
317                riscv_excp_names[cause] : "(unknown)";
318     }
319 }
320 
321 void riscv_cpu_set_misa_ext(CPURISCVState *env, uint32_t ext)
322 {
323     env->misa_ext_mask = env->misa_ext = ext;
324 }
325 
326 int riscv_cpu_max_xlen(RISCVCPUClass *mcc)
327 {
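    /* misa.MXL encodes 1/2/3 for RV32/RV64/RV128, so 16 << MXL yields 32/64/128 */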
328     return 16 << mcc->misa_mxl_max;
329 }
330 
331 #ifndef CONFIG_USER_ONLY
332 static uint8_t satp_mode_from_str(const char *satp_mode_str)
333 {
334     if (!strncmp(satp_mode_str, "mbare", 5)) {
335         return VM_1_10_MBARE;
336     }
337 
338     if (!strncmp(satp_mode_str, "sv32", 4)) {
339         return VM_1_10_SV32;
340     }
341 
342     if (!strncmp(satp_mode_str, "sv39", 4)) {
343         return VM_1_10_SV39;
344     }
345 
346     if (!strncmp(satp_mode_str, "sv48", 4)) {
347         return VM_1_10_SV48;
348     }
349 
350     if (!strncmp(satp_mode_str, "sv57", 4)) {
351         return VM_1_10_SV57;
352     }
353 
354     if (!strncmp(satp_mode_str, "sv64", 4)) {
355         return VM_1_10_SV64;
356     }
357 
358     g_assert_not_reached();
359 }
360 
361 uint8_t satp_mode_max_from_map(uint32_t map)
362 {
363     /*
364      * __builtin_clz() is undefined for 'map = 0', and even a result of
365      * 32 would give 31 - 32 = -1, wrapping in the uint8_t return type.
366      * There's no good result to return for 'map = 0' anyway (e.g.
367      * returning 0 would be ambiguous with the result for 'map = 1').
368      *
369      * Assert out if map = 0. Callers will have to deal with
370      * it outside of this function.
371      */
372     g_assert(map > 0);
373 
374     /* map here has at least one bit set, so no problem with clz */
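    /* e.g. map = 0b101 gives 31 - 29 = 2, the index of the highest set bit */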
375     return 31 - __builtin_clz(map);
376 }
377 
378 const char *satp_mode_str(uint8_t satp_mode, bool is_32_bit)
379 {
380     if (is_32_bit) {
381         switch (satp_mode) {
382         case VM_1_10_SV32:
383             return "sv32";
384         case VM_1_10_MBARE:
385             return "none";
386         }
387     } else {
388         switch (satp_mode) {
389         case VM_1_10_SV64:
390             return "sv64";
391         case VM_1_10_SV57:
392             return "sv57";
393         case VM_1_10_SV48:
394             return "sv48";
395         case VM_1_10_SV39:
396             return "sv39";
397         case VM_1_10_MBARE:
398             return "none";
399         }
400     }
401 
402     g_assert_not_reached();
403 }
404 
405 static void set_satp_mode_max_supported(RISCVCPU *cpu,
406                                         uint8_t satp_mode)
407 {
408     bool rv32 = riscv_cpu_mxl(&cpu->env) == MXL_RV32;
409     const bool *valid_vm = rv32 ? valid_vm_1_10_32 : valid_vm_1_10_64;
410 
411     for (int i = 0; i <= satp_mode; ++i) {
412         if (valid_vm[i]) {
413             cpu->cfg.satp_mode.supported |= (1 << i);
414         }
415     }
416 }
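/*
 * e.g. calling set_satp_mode_max_supported(cpu, VM_1_10_SV48) on a 64-bit
 * CPU marks MBARE, SV39 and SV48 as supported; reserved encodings in
 * between are skipped by the valid_vm check.
 */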
417 
418 /* Default the satp mode map to every supported mode */
419 static void set_satp_mode_default_map(RISCVCPU *cpu)
420 {
421     /*
422      * Bare CPUs do not default to the max available.
423      * Users must set a valid satp_mode on the command
424      * line.
425      */
426     if (object_dynamic_cast(OBJECT(cpu), TYPE_RISCV_BARE_CPU) != NULL) {
427         warn_report("No satp mode set. Defaulting to 'bare'");
428         cpu->cfg.satp_mode.map = (1 << VM_1_10_MBARE);
429         return;
430     }
431 
432     cpu->cfg.satp_mode.map = cpu->cfg.satp_mode.supported;
433 }
434 #endif
435 
436 static void riscv_any_cpu_init(Object *obj)
437 {
438     RISCVCPU *cpu = RISCV_CPU(obj);
439     CPURISCVState *env = &cpu->env;
440     riscv_cpu_set_misa_ext(env, RVI | RVM | RVA | RVF | RVD | RVC | RVU);
441 
442 #ifndef CONFIG_USER_ONLY
443     set_satp_mode_max_supported(RISCV_CPU(obj),
444         riscv_cpu_mxl(&RISCV_CPU(obj)->env) == MXL_RV32 ?
445         VM_1_10_SV32 : VM_1_10_SV57);
446 #endif
447 
448     env->priv_ver = PRIV_VERSION_LATEST;
449 
450     /* inherited from parent obj via riscv_cpu_init() */
451     cpu->cfg.ext_zifencei = true;
452     cpu->cfg.ext_zicsr = true;
453     cpu->cfg.mmu = true;
454     cpu->cfg.pmp = true;
455 }
456 
457 static void riscv_max_cpu_init(Object *obj)
458 {
459     RISCVCPU *cpu = RISCV_CPU(obj);
460     CPURISCVState *env = &cpu->env;
461 
462     cpu->cfg.mmu = true;
463     cpu->cfg.pmp = true;
464 
465     env->priv_ver = PRIV_VERSION_LATEST;
466 #ifndef CONFIG_USER_ONLY
467 #ifdef TARGET_RISCV32
468     set_satp_mode_max_supported(cpu, VM_1_10_SV32);
469 #else
470     set_satp_mode_max_supported(cpu, VM_1_10_SV57);
471 #endif
472 #endif
473 }
474 
475 #if defined(TARGET_RISCV64)
476 static void rv64_base_cpu_init(Object *obj)
477 {
478     RISCVCPU *cpu = RISCV_CPU(obj);
479     CPURISCVState *env = &cpu->env;
480 
481     cpu->cfg.mmu = true;
482     cpu->cfg.pmp = true;
483 
484     /* Set latest version of privileged specification */
485     env->priv_ver = PRIV_VERSION_LATEST;
486 #ifndef CONFIG_USER_ONLY
487     set_satp_mode_max_supported(RISCV_CPU(obj), VM_1_10_SV57);
488 #endif
489 }
490 
491 static void rv64_sifive_u_cpu_init(Object *obj)
492 {
493     RISCVCPU *cpu = RISCV_CPU(obj);
494     CPURISCVState *env = &cpu->env;
495     riscv_cpu_set_misa_ext(env, RVI | RVM | RVA | RVF | RVD | RVC | RVS | RVU);
496     env->priv_ver = PRIV_VERSION_1_10_0;
497 #ifndef CONFIG_USER_ONLY
498     set_satp_mode_max_supported(RISCV_CPU(obj), VM_1_10_SV39);
499 #endif
500 
501     /* inherited from parent obj via riscv_cpu_init() */
502     cpu->cfg.ext_zifencei = true;
503     cpu->cfg.ext_zicsr = true;
504     cpu->cfg.mmu = true;
505     cpu->cfg.pmp = true;
506 }
507 
508 static void rv64_sifive_e_cpu_init(Object *obj)
509 {
510     CPURISCVState *env = &RISCV_CPU(obj)->env;
511     RISCVCPU *cpu = RISCV_CPU(obj);
512 
513     riscv_cpu_set_misa_ext(env, RVI | RVM | RVA | RVC | RVU);
514     env->priv_ver = PRIV_VERSION_1_10_0;
515 #ifndef CONFIG_USER_ONLY
516     set_satp_mode_max_supported(cpu, VM_1_10_MBARE);
517 #endif
518 
519     /* inherited from parent obj via riscv_cpu_init() */
520     cpu->cfg.ext_zifencei = true;
521     cpu->cfg.ext_zicsr = true;
522     cpu->cfg.pmp = true;
523 }
524 
525 static void rv64_thead_c906_cpu_init(Object *obj)
526 {
527     CPURISCVState *env = &RISCV_CPU(obj)->env;
528     RISCVCPU *cpu = RISCV_CPU(obj);
529 
530     riscv_cpu_set_misa_ext(env, RVG | RVC | RVS | RVU);
531     env->priv_ver = PRIV_VERSION_1_11_0;
532 
533     cpu->cfg.ext_zfa = true;
534     cpu->cfg.ext_zfh = true;
535     cpu->cfg.mmu = true;
536     cpu->cfg.ext_xtheadba = true;
537     cpu->cfg.ext_xtheadbb = true;
538     cpu->cfg.ext_xtheadbs = true;
539     cpu->cfg.ext_xtheadcmo = true;
540     cpu->cfg.ext_xtheadcondmov = true;
541     cpu->cfg.ext_xtheadfmemidx = true;
542     cpu->cfg.ext_xtheadmac = true;
543     cpu->cfg.ext_xtheadmemidx = true;
544     cpu->cfg.ext_xtheadmempair = true;
545     cpu->cfg.ext_xtheadsync = true;
546 
547     cpu->cfg.mvendorid = THEAD_VENDOR_ID;
548 #ifndef CONFIG_USER_ONLY
549     set_satp_mode_max_supported(cpu, VM_1_10_SV39);
550     th_register_custom_csrs(cpu);
551 #endif
552 
553     /* inherited from parent obj via riscv_cpu_init() */
554     cpu->cfg.pmp = true;
555 }
556 
557 static void rv64_veyron_v1_cpu_init(Object *obj)
558 {
559     CPURISCVState *env = &RISCV_CPU(obj)->env;
560     RISCVCPU *cpu = RISCV_CPU(obj);
561 
562     riscv_cpu_set_misa_ext(env, RVG | RVC | RVS | RVU | RVH);
563     env->priv_ver = PRIV_VERSION_1_12_0;
564 
565     /* Enable ISA extensions */
566     cpu->cfg.mmu = true;
567     cpu->cfg.ext_zifencei = true;
568     cpu->cfg.ext_zicsr = true;
569     cpu->cfg.pmp = true;
570     cpu->cfg.ext_zicbom = true;
571     cpu->cfg.cbom_blocksize = 64;
572     cpu->cfg.cboz_blocksize = 64;
573     cpu->cfg.ext_zicboz = true;
574     cpu->cfg.ext_smaia = true;
575     cpu->cfg.ext_ssaia = true;
576     cpu->cfg.ext_sscofpmf = true;
577     cpu->cfg.ext_sstc = true;
578     cpu->cfg.ext_svinval = true;
579     cpu->cfg.ext_svnapot = true;
580     cpu->cfg.ext_svpbmt = true;
581     cpu->cfg.ext_smstateen = true;
582     cpu->cfg.ext_zba = true;
583     cpu->cfg.ext_zbb = true;
584     cpu->cfg.ext_zbc = true;
585     cpu->cfg.ext_zbs = true;
586     cpu->cfg.ext_XVentanaCondOps = true;
587 
588     cpu->cfg.mvendorid = VEYRON_V1_MVENDORID;
589     cpu->cfg.marchid = VEYRON_V1_MARCHID;
590     cpu->cfg.mimpid = VEYRON_V1_MIMPID;
591 
592 #ifndef CONFIG_USER_ONLY
593     set_satp_mode_max_supported(cpu, VM_1_10_SV48);
594 #endif
595 }
596 
597 #ifdef CONFIG_TCG
598 static void rv128_base_cpu_init(Object *obj)
599 {
600     RISCVCPU *cpu = RISCV_CPU(obj);
601     CPURISCVState *env = &cpu->env;
602 
603     if (qemu_tcg_mttcg_enabled()) {
604         /* Missing 128-bit aligned atomics */
605         error_report("128-bit RISC-V currently does not work with Multi "
606                      "Threaded TCG. Please use: -accel tcg,thread=single");
607         exit(EXIT_FAILURE);
608     }
609 
610     cpu->cfg.mmu = true;
611     cpu->cfg.pmp = true;
612 
613     /* Set latest version of privileged specification */
614     env->priv_ver = PRIV_VERSION_LATEST;
615 #ifndef CONFIG_USER_ONLY
616     set_satp_mode_max_supported(RISCV_CPU(obj), VM_1_10_SV57);
617 #endif
618 }
619 #endif /* CONFIG_TCG */
620 
621 static void rv64i_bare_cpu_init(Object *obj)
622 {
623     CPURISCVState *env = &RISCV_CPU(obj)->env;
624     riscv_cpu_set_misa_ext(env, RVI);
625 }
626 
627 static void rv64e_bare_cpu_init(Object *obj)
628 {
629     CPURISCVState *env = &RISCV_CPU(obj)->env;
630     riscv_cpu_set_misa_ext(env, RVE);
631 }
632 
633 #else /* !TARGET_RISCV64 */
634 
635 static void rv32_base_cpu_init(Object *obj)
636 {
637     RISCVCPU *cpu = RISCV_CPU(obj);
638     CPURISCVState *env = &cpu->env;
639 
640     cpu->cfg.mmu = true;
641     cpu->cfg.pmp = true;
642 
643     /* Set latest version of privileged specification */
644     env->priv_ver = PRIV_VERSION_LATEST;
645 #ifndef CONFIG_USER_ONLY
646     set_satp_mode_max_supported(RISCV_CPU(obj), VM_1_10_SV32);
647 #endif
648 }
649 
650 static void rv32_sifive_u_cpu_init(Object *obj)
651 {
652     RISCVCPU *cpu = RISCV_CPU(obj);
653     CPURISCVState *env = &cpu->env;
654     riscv_cpu_set_misa_ext(env, RVI | RVM | RVA | RVF | RVD | RVC | RVS | RVU);
655     env->priv_ver = PRIV_VERSION_1_10_0;
656 #ifndef CONFIG_USER_ONLY
657     set_satp_mode_max_supported(RISCV_CPU(obj), VM_1_10_SV32);
658 #endif
659 
660     /* inherited from parent obj via riscv_cpu_init() */
661     cpu->cfg.ext_zifencei = true;
662     cpu->cfg.ext_zicsr = true;
663     cpu->cfg.mmu = true;
664     cpu->cfg.pmp = true;
665 }
666 
667 static void rv32_sifive_e_cpu_init(Object *obj)
668 {
669     CPURISCVState *env = &RISCV_CPU(obj)->env;
670     RISCVCPU *cpu = RISCV_CPU(obj);
671 
672     riscv_cpu_set_misa_ext(env, RVI | RVM | RVA | RVC | RVU);
673     env->priv_ver = PRIV_VERSION_1_10_0;
674 #ifndef CONFIG_USER_ONLY
675     set_satp_mode_max_supported(cpu, VM_1_10_MBARE);
676 #endif
677 
678     /* inherited from parent obj via riscv_cpu_init() */
679     cpu->cfg.ext_zifencei = true;
680     cpu->cfg.ext_zicsr = true;
681     cpu->cfg.pmp = true;
682 }
683 
684 static void rv32_ibex_cpu_init(Object *obj)
685 {
686     CPURISCVState *env = &RISCV_CPU(obj)->env;
687     RISCVCPU *cpu = RISCV_CPU(obj);
688 
689     riscv_cpu_set_misa_ext(env, RVI | RVM | RVC | RVU);
690     env->priv_ver = PRIV_VERSION_1_12_0;
691 #ifndef CONFIG_USER_ONLY
692     set_satp_mode_max_supported(cpu, VM_1_10_MBARE);
693 #endif
694     /* inherited from parent obj via riscv_cpu_init() */
695     cpu->cfg.ext_zifencei = true;
696     cpu->cfg.ext_zicsr = true;
697     cpu->cfg.pmp = true;
698     cpu->cfg.ext_smepmp = true;
699 }
700 
701 static void rv32_imafcu_nommu_cpu_init(Object *obj)
702 {
703     CPURISCVState *env = &RISCV_CPU(obj)->env;
704     RISCVCPU *cpu = RISCV_CPU(obj);
705 
706     riscv_cpu_set_misa_ext(env, RVI | RVM | RVA | RVF | RVC | RVU);
707     env->priv_ver = PRIV_VERSION_1_10_0;
708 #ifndef CONFIG_USER_ONLY
709     set_satp_mode_max_supported(cpu, VM_1_10_MBARE);
710 #endif
711 
712     /* inherited from parent obj via riscv_cpu_init() */
713     cpu->cfg.ext_zifencei = true;
714     cpu->cfg.ext_zicsr = true;
715     cpu->cfg.pmp = true;
716 }
717 
718 static void rv32i_bare_cpu_init(Object *obj)
719 {
720     CPURISCVState *env = &RISCV_CPU(obj)->env;
721     riscv_cpu_set_misa_ext(env, RVI);
722 }
723 
724 static void rv32e_bare_cpu_init(Object *obj)
725 {
726     CPURISCVState *env = &RISCV_CPU(obj)->env;
727     riscv_cpu_set_misa_ext(env, RVE);
728 }
729 #endif
730 
731 static ObjectClass *riscv_cpu_class_by_name(const char *cpu_model)
732 {
733     ObjectClass *oc;
734     char *typename;
735     char **cpuname;
736 
737     cpuname = g_strsplit(cpu_model, ",", 1);
738     typename = g_strdup_printf(RISCV_CPU_TYPE_NAME("%s"), cpuname[0]);
739     oc = object_class_by_name(typename);
740     g_strfreev(cpuname);
741     g_free(typename);
742 
743     return oc;
744 }
745 
746 char *riscv_cpu_get_name(RISCVCPU *cpu)
747 {
748     RISCVCPUClass *rcc = RISCV_CPU_GET_CLASS(cpu);
749     const char *typename = object_class_get_name(OBJECT_CLASS(rcc));
750 
751     g_assert(g_str_has_suffix(typename, RISCV_CPU_TYPE_SUFFIX));
752 
753     return cpu_model_from_type(typename);
754 }
755 
756 static void riscv_cpu_dump_state(CPUState *cs, FILE *f, int flags)
757 {
758     RISCVCPU *cpu = RISCV_CPU(cs);
759     CPURISCVState *env = &cpu->env;
760     int i, j;
761     uint8_t *p;
762 
763 #if !defined(CONFIG_USER_ONLY)
764     if (riscv_has_ext(env, RVH)) {
765         qemu_fprintf(f, " %s %d\n", "V      =  ", env->virt_enabled);
766     }
767 #endif
768     qemu_fprintf(f, " %s " TARGET_FMT_lx "\n", "pc      ", env->pc);
769 #ifndef CONFIG_USER_ONLY
770     {
771         static const int dump_csrs[] = {
772             CSR_MHARTID,
773             CSR_MSTATUS,
774             CSR_MSTATUSH,
775             /*
776              * CSR_SSTATUS is intentionally omitted here as its value
777              * can be figured out by looking at CSR_MSTATUS
778              */
779             CSR_HSTATUS,
780             CSR_VSSTATUS,
781             CSR_MIP,
782             CSR_MIE,
783             CSR_MIDELEG,
784             CSR_HIDELEG,
785             CSR_MEDELEG,
786             CSR_HEDELEG,
787             CSR_MTVEC,
788             CSR_STVEC,
789             CSR_VSTVEC,
790             CSR_MEPC,
791             CSR_SEPC,
792             CSR_VSEPC,
793             CSR_MCAUSE,
794             CSR_SCAUSE,
795             CSR_VSCAUSE,
796             CSR_MTVAL,
797             CSR_STVAL,
798             CSR_HTVAL,
799             CSR_MTVAL2,
800             CSR_MSCRATCH,
801             CSR_SSCRATCH,
802             CSR_SATP,
803             CSR_MMTE,
804             CSR_UPMBASE,
805             CSR_UPMMASK,
806             CSR_SPMBASE,
807             CSR_SPMMASK,
808             CSR_MPMBASE,
809             CSR_MPMMASK,
810         };
811 
812         for (i = 0; i < ARRAY_SIZE(dump_csrs); ++i) {
813             int csrno = dump_csrs[i];
814             target_ulong val = 0;
815             RISCVException res = riscv_csrrw_debug(env, csrno, &val, 0, 0);
816 
817             /*
818      * Rely on the smode, hmode, etc. predicates within csr.c
819              * to do the filtering of the registers that are present.
820              */
821             if (res == RISCV_EXCP_NONE) {
822                 qemu_fprintf(f, " %-8s " TARGET_FMT_lx "\n",
823                              csr_ops[csrno].name, val);
824             }
825         }
826     }
827 #endif
828 
829     for (i = 0; i < 32; i++) {
830         qemu_fprintf(f, " %-8s " TARGET_FMT_lx,
831                      riscv_int_regnames[i], env->gpr[i]);
832         if ((i & 3) == 3) {
833             qemu_fprintf(f, "\n");
834         }
835     }
836     if (flags & CPU_DUMP_FPU) {
837         for (i = 0; i < 32; i++) {
838             qemu_fprintf(f, " %-8s %016" PRIx64,
839                          riscv_fpr_regnames[i], env->fpr[i]);
840             if ((i & 3) == 3) {
841                 qemu_fprintf(f, "\n");
842             }
843         }
844     }
845     if (riscv_has_ext(env, RVV) && (flags & CPU_DUMP_VPU)) {
846         static const int dump_rvv_csrs[] = {
847                     CSR_VSTART,
848                     CSR_VXSAT,
849                     CSR_VXRM,
850                     CSR_VCSR,
851                     CSR_VL,
852                     CSR_VTYPE,
853                     CSR_VLENB,
854                 };
855         for (i = 0; i < ARRAY_SIZE(dump_rvv_csrs); ++i) {
856             int csrno = dump_rvv_csrs[i];
857             target_ulong val = 0;
858             RISCVException res = riscv_csrrw_debug(env, csrno, &val, 0, 0);
859 
860             /*
861      * Rely on the smode, hmode, etc. predicates within csr.c
862              * to do the filtering of the registers that are present.
863              */
864             if (res == RISCV_EXCP_NONE) {
865                 qemu_fprintf(f, " %-8s " TARGET_FMT_lx "\n",
866                              csr_ops[csrno].name, val);
867             }
868         }
869         uint16_t vlenb = cpu->cfg.vlenb;
870 
871         for (i = 0; i < 32; i++) {
872             qemu_fprintf(f, " %-8s ", riscv_rvv_regnames[i]);
873             p = (uint8_t *)env->vreg;
874             for (j = vlenb - 1 ; j >= 0; j--) {
875                 qemu_fprintf(f, "%02x", *(p + i * vlenb + BYTE(j)));
876             }
877             qemu_fprintf(f, "\n");
878         }
879     }
880 }
881 
882 static void riscv_cpu_set_pc(CPUState *cs, vaddr value)
883 {
884     RISCVCPU *cpu = RISCV_CPU(cs);
885     CPURISCVState *env = &cpu->env;
886 
887     if (env->xl == MXL_RV32) {
888         env->pc = (int32_t)value;
889     } else {
890         env->pc = value;
891     }
892 }
893 
894 static vaddr riscv_cpu_get_pc(CPUState *cs)
895 {
896     RISCVCPU *cpu = RISCV_CPU(cs);
897     CPURISCVState *env = &cpu->env;
898 
899     /* Match cpu_get_tb_cpu_state. */
900     if (env->xl == MXL_RV32) {
901         return env->pc & UINT32_MAX;
902     }
903     return env->pc;
904 }
905 
906 static bool riscv_cpu_has_work(CPUState *cs)
907 {
908 #ifndef CONFIG_USER_ONLY
909     RISCVCPU *cpu = RISCV_CPU(cs);
910     CPURISCVState *env = &cpu->env;
911     /*
912      * Definition of the WFI instruction requires it to ignore the privilege
913      * mode and delegation registers, but respect individual enables
914      */
915     return riscv_cpu_all_pending(env) != 0 ||
916         riscv_cpu_sirq_pending(env) != RISCV_EXCP_NONE ||
917         riscv_cpu_vsirq_pending(env) != RISCV_EXCP_NONE;
918 #else
919     return true;
920 #endif
921 }
922 
923 static int riscv_cpu_mmu_index(CPUState *cs, bool ifetch)
924 {
925     return riscv_env_mmu_index(cpu_env(cs), ifetch);
926 }
927 
928 static void riscv_cpu_reset_hold(Object *obj, ResetType type)
929 {
930 #ifndef CONFIG_USER_ONLY
931     uint8_t iprio;
932     int i, irq, rdzero;
933 #endif
934     CPUState *cs = CPU(obj);
935     RISCVCPU *cpu = RISCV_CPU(cs);
936     RISCVCPUClass *mcc = RISCV_CPU_GET_CLASS(obj);
937     CPURISCVState *env = &cpu->env;
938 
939     if (mcc->parent_phases.hold) {
940         mcc->parent_phases.hold(obj, type);
941     }
942 #ifndef CONFIG_USER_ONLY
943     env->misa_mxl = mcc->misa_mxl_max;
944     env->priv = PRV_M;
945     env->mstatus &= ~(MSTATUS_MIE | MSTATUS_MPRV);
946     if (env->misa_mxl > MXL_RV32) {
947         /*
948          * The reset status of SXL/UXL is undefined, but mstatus is WARL
949          * and we must ensure that the value after init is valid for read.
950          */
951         env->mstatus = set_field(env->mstatus, MSTATUS64_SXL, env->misa_mxl);
952         env->mstatus = set_field(env->mstatus, MSTATUS64_UXL, env->misa_mxl);
953         if (riscv_has_ext(env, RVH)) {
954             env->vsstatus = set_field(env->vsstatus,
955                                       MSTATUS64_SXL, env->misa_mxl);
956             env->vsstatus = set_field(env->vsstatus,
957                                       MSTATUS64_UXL, env->misa_mxl);
958             env->mstatus_hs = set_field(env->mstatus_hs,
959                                         MSTATUS64_SXL, env->misa_mxl);
960             env->mstatus_hs = set_field(env->mstatus_hs,
961                                         MSTATUS64_UXL, env->misa_mxl);
962         }
963     }
964     env->mcause = 0;
965     env->miclaim = MIP_SGEIP;
966     env->pc = env->resetvec;
967     env->bins = 0;
968     env->two_stage_lookup = false;
969 
970     env->menvcfg = (cpu->cfg.ext_svpbmt ? MENVCFG_PBMTE : 0) |
971                    (!cpu->cfg.ext_svade && cpu->cfg.ext_svadu ?
972                     MENVCFG_ADUE : 0);
973     env->henvcfg = 0;
974 
975     /* Initialize default priorities of local interrupts. */
976     for (i = 0; i < ARRAY_SIZE(env->miprio); i++) {
977         iprio = riscv_cpu_default_priority(i);
978         env->miprio[i] = (i == IRQ_M_EXT) ? 0 : iprio;
979         env->siprio[i] = (i == IRQ_S_EXT) ? 0 : iprio;
980         env->hviprio[i] = 0;
981     }
982     i = 0;
983     while (!riscv_cpu_hviprio_index2irq(i, &irq, &rdzero)) {
984         if (!rdzero) {
985             env->hviprio[irq] = env->miprio[irq];
986         }
987         i++;
988     }
989     /* mmte is supposed to have pm.current hardwired to 1 */
990     env->mmte |= (EXT_STATUS_INITIAL | MMTE_M_PM_CURRENT);
991 
992     /*
993      * Bits 2, 6, 10 and 12 of mideleg are read-only 1 when the Hypervisor
994      * extension is enabled.
995      */
996     if (riscv_has_ext(env, RVH)) {
997         env->mideleg |= HS_MODE_INTERRUPTS;
998     }
999 
1000     /*
1001      * Clear mseccfg and unlock all the PMP entries upon reset.
1002      * This is allowed as per the priv and smepmp specifications
1003      * and is needed to clear stale entries across reboots.
1004      */
1005     if (riscv_cpu_cfg(env)->ext_smepmp) {
1006         env->mseccfg = 0;
1007     }
1008 
1009     pmp_unlock_entries(env);
1010 #endif
1011     env->xl = riscv_cpu_mxl(env);
1012     riscv_cpu_update_mask(env);
1013     cs->exception_index = RISCV_EXCP_NONE;
1014     env->load_res = -1;
1015     set_default_nan_mode(1, &env->fp_status);
1016 
1017 #ifndef CONFIG_USER_ONLY
1018     if (cpu->cfg.debug) {
1019         riscv_trigger_reset_hold(env);
1020     }
1021 
1022     if (kvm_enabled()) {
1023         kvm_riscv_reset_vcpu(cpu);
1024     }
1025 #endif
1026 }
1027 
1028 static void riscv_cpu_disas_set_info(CPUState *s, disassemble_info *info)
1029 {
1030     RISCVCPU *cpu = RISCV_CPU(s);
1031     CPURISCVState *env = &cpu->env;
1032     info->target_info = &cpu->cfg;
1033 
1034     switch (env->xl) {
1035     case MXL_RV32:
1036         info->print_insn = print_insn_riscv32;
1037         break;
1038     case MXL_RV64:
1039         info->print_insn = print_insn_riscv64;
1040         break;
1041     case MXL_RV128:
1042         info->print_insn = print_insn_riscv128;
1043         break;
1044     default:
1045         g_assert_not_reached();
1046     }
1047 }
1048 
1049 #ifndef CONFIG_USER_ONLY
1050 static void riscv_cpu_satp_mode_finalize(RISCVCPU *cpu, Error **errp)
1051 {
1052     bool rv32 = riscv_cpu_is_32bit(cpu);
1053     uint8_t satp_mode_map_max, satp_mode_supported_max;
1054 
1055     /* The CPU wants the OS to decide which satp mode to use */
1056     if (cpu->cfg.satp_mode.supported == 0) {
1057         return;
1058     }
1059 
1060     satp_mode_supported_max =
1061                     satp_mode_max_from_map(cpu->cfg.satp_mode.supported);
1062 
1063     if (cpu->cfg.satp_mode.map == 0) {
1064         if (cpu->cfg.satp_mode.init == 0) {
1065             /* If unset by the user, we fall back to the default satp mode. */
1066             set_satp_mode_default_map(cpu);
1067         } else {
1068             /*
1069              * Find the lowest level that the user disabled, then enable
1070              * the first valid level below it, as found in
1071              * valid_vm_1_10_32/64.
1072              */
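            /* e.g. disabling only sv48 leaves sv39 as the highest enabled mode */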
1073             for (int i = 1; i < 16; ++i) {
1074                 if ((cpu->cfg.satp_mode.init & (1 << i)) &&
1075                     (cpu->cfg.satp_mode.supported & (1 << i))) {
1076                     for (int j = i - 1; j >= 0; --j) {
1077                         if (cpu->cfg.satp_mode.supported & (1 << j)) {
1078                             cpu->cfg.satp_mode.map |= (1 << j);
1079                             break;
1080                         }
1081                     }
1082                     break;
1083                 }
1084             }
1085         }
1086     }
1087 
1088     satp_mode_map_max = satp_mode_max_from_map(cpu->cfg.satp_mode.map);
1089 
1090     /* Make sure the user asked for a supported configuration (HW and qemu) */
1091     if (satp_mode_map_max > satp_mode_supported_max) {
1092         error_setg(errp, "satp_mode %s is higher than hw max capability %s",
1093                    satp_mode_str(satp_mode_map_max, rv32),
1094                    satp_mode_str(satp_mode_supported_max, rv32));
1095         return;
1096     }
1097 
1098     /*
1099      * Make sure the user did not ask for an invalid configuration as per
1100      * the specification.
1101      */
1102     if (!rv32) {
1103         for (int i = satp_mode_map_max - 1; i >= 0; --i) {
1104             if (!(cpu->cfg.satp_mode.map & (1 << i)) &&
1105                 (cpu->cfg.satp_mode.init & (1 << i)) &&
1106                 (cpu->cfg.satp_mode.supported & (1 << i))) {
1107                 error_setg(errp, "cannot disable %s satp mode if %s "
1108                            "is enabled", satp_mode_str(i, false),
1109                            satp_mode_str(satp_mode_map_max, false));
1110                 return;
1111             }
1112         }
1113     }
1114 
1115     /* Finally expand the map so that all valid modes are set */
1116     for (int i = satp_mode_map_max - 1; i >= 0; --i) {
1117         if (cpu->cfg.satp_mode.supported & (1 << i)) {
1118             cpu->cfg.satp_mode.map |= (1 << i);
1119         }
1120     }
1121 }
1122 #endif
1123 
1124 void riscv_cpu_finalize_features(RISCVCPU *cpu, Error **errp)
1125 {
1126     Error *local_err = NULL;
1127 
1128 #ifndef CONFIG_USER_ONLY
1129     riscv_cpu_satp_mode_finalize(cpu, &local_err);
1130     if (local_err != NULL) {
1131         error_propagate(errp, local_err);
1132         return;
1133     }
1134 #endif
1135 
1136     if (tcg_enabled()) {
1137         riscv_tcg_cpu_finalize_features(cpu, &local_err);
1138         if (local_err != NULL) {
1139             error_propagate(errp, local_err);
1140             return;
1141         }
1142         riscv_tcg_cpu_finalize_dynamic_decoder(cpu);
1143     } else if (kvm_enabled()) {
1144         riscv_kvm_cpu_finalize_features(cpu, &local_err);
1145         if (local_err != NULL) {
1146             error_propagate(errp, local_err);
1147             return;
1148         }
1149     }
1150 }
1151 
1152 static void riscv_cpu_realize(DeviceState *dev, Error **errp)
1153 {
1154     CPUState *cs = CPU(dev);
1155     RISCVCPU *cpu = RISCV_CPU(dev);
1156     RISCVCPUClass *mcc = RISCV_CPU_GET_CLASS(dev);
1157     Error *local_err = NULL;
1158 
1159     if (object_dynamic_cast(OBJECT(dev), TYPE_RISCV_CPU_ANY) != NULL) {
1160         warn_report("The 'any' CPU is deprecated and will be "
1161                     "removed in the future.");
1162     }
1163 
1164     cpu_exec_realizefn(cs, &local_err);
1165     if (local_err != NULL) {
1166         error_propagate(errp, local_err);
1167         return;
1168     }
1169 
1170     riscv_cpu_finalize_features(cpu, &local_err);
1171     if (local_err != NULL) {
1172         error_propagate(errp, local_err);
1173         return;
1174     }
1175 
1176     riscv_cpu_register_gdb_regs_for_features(cs);
1177 
1178 #ifndef CONFIG_USER_ONLY
1179     if (cpu->cfg.debug) {
1180         riscv_trigger_realize(&cpu->env);
1181     }
1182 #endif
1183 
1184     qemu_init_vcpu(cs);
1185     cpu_reset(cs);
1186 
1187     mcc->parent_realize(dev, errp);
1188 }
1189 
1190 bool riscv_cpu_accelerator_compatible(RISCVCPU *cpu)
1191 {
1192     if (tcg_enabled()) {
1193         return riscv_cpu_tcg_compatible(cpu);
1194     }
1195 
1196     return true;
1197 }
1198 
1199 #ifndef CONFIG_USER_ONLY
1200 static void cpu_riscv_get_satp(Object *obj, Visitor *v, const char *name,
1201                                void *opaque, Error **errp)
1202 {
1203     RISCVSATPMap *satp_map = opaque;
1204     uint8_t satp = satp_mode_from_str(name);
1205     bool value;
1206 
1207     value = satp_map->map & (1 << satp);
1208 
1209     visit_type_bool(v, name, &value, errp);
1210 }
1211 
1212 static void cpu_riscv_set_satp(Object *obj, Visitor *v, const char *name,
1213                                void *opaque, Error **errp)
1214 {
1215     RISCVSATPMap *satp_map = opaque;
1216     uint8_t satp = satp_mode_from_str(name);
1217     bool value;
1218 
1219     if (!visit_type_bool(v, name, &value, errp)) {
1220         return;
1221     }
1222 
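    /*
     * deposit32() writes 'value' into the single bit at position 'satp';
     * 'init' records which modes the user explicitly configured.
     */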
1223     satp_map->map = deposit32(satp_map->map, satp, 1, value);
1224     satp_map->init |= 1 << satp;
1225 }
1226 
1227 void riscv_add_satp_mode_properties(Object *obj)
1228 {
1229     RISCVCPU *cpu = RISCV_CPU(obj);
1230 
1231     if (cpu->env.misa_mxl == MXL_RV32) {
1232         object_property_add(obj, "sv32", "bool", cpu_riscv_get_satp,
1233                             cpu_riscv_set_satp, NULL, &cpu->cfg.satp_mode);
1234     } else {
1235         object_property_add(obj, "sv39", "bool", cpu_riscv_get_satp,
1236                             cpu_riscv_set_satp, NULL, &cpu->cfg.satp_mode);
1237         object_property_add(obj, "sv48", "bool", cpu_riscv_get_satp,
1238                             cpu_riscv_set_satp, NULL, &cpu->cfg.satp_mode);
1239         object_property_add(obj, "sv57", "bool", cpu_riscv_get_satp,
1240                             cpu_riscv_set_satp, NULL, &cpu->cfg.satp_mode);
1241         object_property_add(obj, "sv64", "bool", cpu_riscv_get_satp,
1242                             cpu_riscv_set_satp, NULL, &cpu->cfg.satp_mode);
1243     }
1244 }
1245 
1246 static void riscv_cpu_set_irq(void *opaque, int irq, int level)
1247 {
1248     RISCVCPU *cpu = RISCV_CPU(opaque);
1249     CPURISCVState *env = &cpu->env;
1250 
1251     if (irq < IRQ_LOCAL_MAX) {
1252         switch (irq) {
1253         case IRQ_U_SOFT:
1254         case IRQ_S_SOFT:
1255         case IRQ_VS_SOFT:
1256         case IRQ_M_SOFT:
1257         case IRQ_U_TIMER:
1258         case IRQ_S_TIMER:
1259         case IRQ_VS_TIMER:
1260         case IRQ_M_TIMER:
1261         case IRQ_U_EXT:
1262         case IRQ_VS_EXT:
1263         case IRQ_M_EXT:
1264             if (kvm_enabled()) {
1265                 kvm_riscv_set_irq(cpu, irq, level);
1266             } else {
1267                 riscv_cpu_update_mip(env, 1 << irq, BOOL_TO_MASK(level));
1268             }
1269             break;
1270         case IRQ_S_EXT:
1271             if (kvm_enabled()) {
1272                 kvm_riscv_set_irq(cpu, irq, level);
1273             } else {
1274                 env->external_seip = level;
1275                 riscv_cpu_update_mip(env, 1 << irq,
1276                                      BOOL_TO_MASK(level | env->software_seip));
1277             }
1278             break;
1279         default:
1280             g_assert_not_reached();
1281         }
1282     } else if (irq < (IRQ_LOCAL_MAX + IRQ_LOCAL_GUEST_MAX)) {
1283         /* Require H-extension for handling guest local interrupts */
1284         if (!riscv_has_ext(env, RVH)) {
1285             g_assert_not_reached();
1286         }
1287 
1288         /* Compute bit position in HGEIP CSR */
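        /* guest external interrupt 0 maps to hgeip bit 1, since bit 0 is unused */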
1289         irq = irq - IRQ_LOCAL_MAX + 1;
1290         if (env->geilen < irq) {
1291             g_assert_not_reached();
1292         }
1293 
1294         /* Update HGEIP CSR */
1295         env->hgeip &= ~((target_ulong)1 << irq);
1296         if (level) {
1297             env->hgeip |= (target_ulong)1 << irq;
1298         }
1299 
1300         /* Update mip.SGEIP bit */
1301         riscv_cpu_update_mip(env, MIP_SGEIP,
1302                              BOOL_TO_MASK(!!(env->hgeie & env->hgeip)));
1303     } else {
1304         g_assert_not_reached();
1305     }
1306 }
1307 #endif /* CONFIG_USER_ONLY */
1308 
1309 static bool riscv_cpu_is_dynamic(Object *cpu_obj)
1310 {
1311     return object_dynamic_cast(cpu_obj, TYPE_RISCV_DYNAMIC_CPU) != NULL;
1312 }
1313 
1314 static void riscv_cpu_post_init(Object *obj)
1315 {
1316     accel_cpu_instance_init(CPU(obj));
1317 }
1318 
1319 static void riscv_cpu_init(Object *obj)
1320 {
1321     RISCVCPUClass *mcc = RISCV_CPU_GET_CLASS(obj);
1322     RISCVCPU *cpu = RISCV_CPU(obj);
1323     CPURISCVState *env = &cpu->env;
1324 
1325     env->misa_mxl = mcc->misa_mxl_max;
1326 
1327 #ifndef CONFIG_USER_ONLY
1328     qdev_init_gpio_in(DEVICE(obj), riscv_cpu_set_irq,
1329                       IRQ_LOCAL_MAX + IRQ_LOCAL_GUEST_MAX);
1330 #endif /* CONFIG_USER_ONLY */
1331 
1332     general_user_opts = g_hash_table_new(g_str_hash, g_str_equal);
1333 
1334     /*
1335      * The timer and performance counter extensions were supported
1336      * in QEMU before they were added as discrete extensions in the
1337      * ISA. To keep compatibility we'll always default them to 'true'
1338      * for all CPUs. Each accelerator will decide what to do when
1339      * users disable them.
1340      */
1341     RISCV_CPU(obj)->cfg.ext_zicntr = true;
1342     RISCV_CPU(obj)->cfg.ext_zihpm = true;
1343 
1344     /* Default values for non-bool cpu properties */
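    /*
     * pmu_mask covers hpmcounter3..18 (16 counters); vlenb is VLEN in
     * bytes, so 16 means VLEN = 128 bits.
     */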
1345     cpu->cfg.pmu_mask = MAKE_64BIT_MASK(3, 16);
1346     cpu->cfg.vlenb = 128 >> 3;
1347     cpu->cfg.elen = 64;
1348     cpu->cfg.cbom_blocksize = 64;
1349     cpu->cfg.cbop_blocksize = 64;
1350     cpu->cfg.cboz_blocksize = 64;
1351     cpu->env.vext_ver = VEXT_VERSION_1_00_0;
1352 }
1353 
1354 static void riscv_bare_cpu_init(Object *obj)
1355 {
1356     RISCVCPU *cpu = RISCV_CPU(obj);
1357 
1358     /*
1359      * Bare CPUs do not inherit the timer and performance
1360      * counters from the parent class (see riscv_cpu_init()
1361      * for info on why the parent enables them).
1362      *
1363      * Users have to explicitly enable these counters for
1364      * bare CPUs.
1365      */
1366     cpu->cfg.ext_zicntr = false;
1367     cpu->cfg.ext_zihpm = false;
1368 
1369     /* Set to QEMU's first supported priv version */
1370     cpu->env.priv_ver = PRIV_VERSION_1_10_0;
1371 
1372     /*
1373      * Support all available satp_mode settings. The default
1374      * value will be set to MBARE if the user doesn't set
1375      * satp_mode manually (see set_satp_mode_default_map()).
1376      */
1377 #ifndef CONFIG_USER_ONLY
1378     set_satp_mode_max_supported(cpu, VM_1_10_SV64);
1379 #endif
1380 }
1381 
1382 typedef struct misa_ext_info {
1383     const char *name;
1384     const char *description;
1385 } MISAExtInfo;
1386 
1387 #define MISA_INFO_IDX(_bit) \
1388     __builtin_ctz(_bit)
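/* e.g. MISA_INFO_IDX(RVA) == 0 and MISA_INFO_IDX(RVI) == 8 (misa bits 'A'/'I') */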
1389 
1390 #define MISA_EXT_INFO(_bit, _propname, _descr) \
1391     [MISA_INFO_IDX(_bit)] = {.name = _propname, .description = _descr}
1392 
1393 static const MISAExtInfo misa_ext_info_arr[] = {
1394     MISA_EXT_INFO(RVA, "a", "Atomic instructions"),
1395     MISA_EXT_INFO(RVC, "c", "Compressed instructions"),
1396     MISA_EXT_INFO(RVD, "d", "Double-precision floating point"),
1397     MISA_EXT_INFO(RVF, "f", "Single-precision floating point"),
1398     MISA_EXT_INFO(RVI, "i", "Base integer instruction set"),
1399     MISA_EXT_INFO(RVE, "e", "Base integer instruction set (embedded)"),
1400     MISA_EXT_INFO(RVM, "m", "Integer multiplication and division"),
1401     MISA_EXT_INFO(RVS, "s", "Supervisor-level instructions"),
1402     MISA_EXT_INFO(RVU, "u", "User-level instructions"),
1403     MISA_EXT_INFO(RVH, "h", "Hypervisor"),
1404     MISA_EXT_INFO(RVJ, "x-j", "Dynamic translated languages"),
1405     MISA_EXT_INFO(RVV, "v", "Vector operations"),
1406     MISA_EXT_INFO(RVG, "g", "General purpose (IMAFD_Zicsr_Zifencei)"),
1407     MISA_EXT_INFO(RVB, "b", "Bit manipulation (Zba_Zbb_Zbs)")
1408 };
1409 
1410 static void riscv_cpu_validate_misa_mxl(RISCVCPUClass *mcc)
1411 {
1412     CPUClass *cc = CPU_CLASS(mcc);
1413 
1414     /* Validate that MISA_MXL is set properly. */
1415     switch (mcc->misa_mxl_max) {
1416 #ifdef TARGET_RISCV64
1417     case MXL_RV64:
1418     case MXL_RV128:
1419         cc->gdb_core_xml_file = "riscv-64bit-cpu.xml";
1420         break;
1421 #endif
1422     case MXL_RV32:
1423         cc->gdb_core_xml_file = "riscv-32bit-cpu.xml";
1424         break;
1425     default:
1426         g_assert_not_reached();
1427     }
1428 }
1429 
1430 static int riscv_validate_misa_info_idx(uint32_t bit)
1431 {
1432     int idx;
1433 
1434     /*
1435      * Our lowest valid input (RVA) is 1 and
1436      * __builtin_ctz() is UB with zero.
1437      */
1438     g_assert(bit != 0);
1439     idx = MISA_INFO_IDX(bit);
1440 
1441     g_assert(idx < ARRAY_SIZE(misa_ext_info_arr));
1442     return idx;
1443 }
1444 
1445 const char *riscv_get_misa_ext_name(uint32_t bit)
1446 {
1447     int idx = riscv_validate_misa_info_idx(bit);
1448     const char *val = misa_ext_info_arr[idx].name;
1449 
1450     g_assert(val != NULL);
1451     return val;
1452 }
1453 
1454 const char *riscv_get_misa_ext_description(uint32_t bit)
1455 {
1456     int idx = riscv_validate_misa_info_idx(bit);
1457     const char *val = misa_ext_info_arr[idx].description;
1458 
1459     g_assert(val != NULL);
1460     return val;
1461 }
1462 
1463 #define MULTI_EXT_CFG_BOOL(_name, _prop, _defval) \
1464     {.name = _name, .offset = CPU_CFG_OFFSET(_prop), \
1465      .enabled = _defval}
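/*
 * e.g. MULTI_EXT_CFG_BOOL("zba", ext_zba, true) expands to
 * {.name = "zba", .offset = CPU_CFG_OFFSET(ext_zba), .enabled = true}.
 */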
1466 
1467 const RISCVCPUMultiExtConfig riscv_cpu_extensions[] = {
1468     /* Defaults for standard extensions */
1469     MULTI_EXT_CFG_BOOL("sscofpmf", ext_sscofpmf, false),
1470     MULTI_EXT_CFG_BOOL("zifencei", ext_zifencei, true),
1471     MULTI_EXT_CFG_BOOL("zicsr", ext_zicsr, true),
1472     MULTI_EXT_CFG_BOOL("zihintntl", ext_zihintntl, true),
1473     MULTI_EXT_CFG_BOOL("zihintpause", ext_zihintpause, true),
1474     MULTI_EXT_CFG_BOOL("zacas", ext_zacas, false),
1475     MULTI_EXT_CFG_BOOL("zaamo", ext_zaamo, false),
1476     MULTI_EXT_CFG_BOOL("zalrsc", ext_zalrsc, false),
1477     MULTI_EXT_CFG_BOOL("zawrs", ext_zawrs, true),
1478     MULTI_EXT_CFG_BOOL("zfa", ext_zfa, true),
1479     MULTI_EXT_CFG_BOOL("zfbfmin", ext_zfbfmin, false),
1480     MULTI_EXT_CFG_BOOL("zfh", ext_zfh, false),
1481     MULTI_EXT_CFG_BOOL("zfhmin", ext_zfhmin, false),
1482     MULTI_EXT_CFG_BOOL("zve32f", ext_zve32f, false),
1483     MULTI_EXT_CFG_BOOL("zve32x", ext_zve32x, false),
1484     MULTI_EXT_CFG_BOOL("zve64f", ext_zve64f, false),
1485     MULTI_EXT_CFG_BOOL("zve64d", ext_zve64d, false),
1486     MULTI_EXT_CFG_BOOL("zve64x", ext_zve64x, false),
1487     MULTI_EXT_CFG_BOOL("zvfbfmin", ext_zvfbfmin, false),
1488     MULTI_EXT_CFG_BOOL("zvfbfwma", ext_zvfbfwma, false),
1489     MULTI_EXT_CFG_BOOL("zvfh", ext_zvfh, false),
1490     MULTI_EXT_CFG_BOOL("zvfhmin", ext_zvfhmin, false),
1491     MULTI_EXT_CFG_BOOL("sstc", ext_sstc, true),
1492 
1493     MULTI_EXT_CFG_BOOL("smaia", ext_smaia, false),
1494     MULTI_EXT_CFG_BOOL("smepmp", ext_smepmp, false),
1495     MULTI_EXT_CFG_BOOL("smstateen", ext_smstateen, false),
1496     MULTI_EXT_CFG_BOOL("ssaia", ext_ssaia, false),
1497     MULTI_EXT_CFG_BOOL("svade", ext_svade, false),
1498     MULTI_EXT_CFG_BOOL("svadu", ext_svadu, true),
1499     MULTI_EXT_CFG_BOOL("svinval", ext_svinval, false),
1500     MULTI_EXT_CFG_BOOL("svnapot", ext_svnapot, false),
1501     MULTI_EXT_CFG_BOOL("svpbmt", ext_svpbmt, false),
1502 
1503     MULTI_EXT_CFG_BOOL("zicntr", ext_zicntr, true),
1504     MULTI_EXT_CFG_BOOL("zihpm", ext_zihpm, true),
1505 
1506     MULTI_EXT_CFG_BOOL("zba", ext_zba, true),
1507     MULTI_EXT_CFG_BOOL("zbb", ext_zbb, true),
1508     MULTI_EXT_CFG_BOOL("zbc", ext_zbc, true),
1509     MULTI_EXT_CFG_BOOL("zbkb", ext_zbkb, false),
1510     MULTI_EXT_CFG_BOOL("zbkc", ext_zbkc, false),
1511     MULTI_EXT_CFG_BOOL("zbkx", ext_zbkx, false),
1512     MULTI_EXT_CFG_BOOL("zbs", ext_zbs, true),
1513     MULTI_EXT_CFG_BOOL("zk", ext_zk, false),
1514     MULTI_EXT_CFG_BOOL("zkn", ext_zkn, false),
1515     MULTI_EXT_CFG_BOOL("zknd", ext_zknd, false),
1516     MULTI_EXT_CFG_BOOL("zkne", ext_zkne, false),
1517     MULTI_EXT_CFG_BOOL("zknh", ext_zknh, false),
1518     MULTI_EXT_CFG_BOOL("zkr", ext_zkr, false),
1519     MULTI_EXT_CFG_BOOL("zks", ext_zks, false),
1520     MULTI_EXT_CFG_BOOL("zksed", ext_zksed, false),
1521     MULTI_EXT_CFG_BOOL("zksh", ext_zksh, false),
1522     MULTI_EXT_CFG_BOOL("zkt", ext_zkt, false),
1523     MULTI_EXT_CFG_BOOL("ztso", ext_ztso, false),
1524 
1525     MULTI_EXT_CFG_BOOL("zdinx", ext_zdinx, false),
1526     MULTI_EXT_CFG_BOOL("zfinx", ext_zfinx, false),
1527     MULTI_EXT_CFG_BOOL("zhinx", ext_zhinx, false),
1528     MULTI_EXT_CFG_BOOL("zhinxmin", ext_zhinxmin, false),
1529 
1530     MULTI_EXT_CFG_BOOL("zicbom", ext_zicbom, true),
1531     MULTI_EXT_CFG_BOOL("zicbop", ext_zicbop, true),
1532     MULTI_EXT_CFG_BOOL("zicboz", ext_zicboz, true),
1533 
1534     MULTI_EXT_CFG_BOOL("zmmul", ext_zmmul, false),
1535 
1536     MULTI_EXT_CFG_BOOL("zca", ext_zca, false),
1537     MULTI_EXT_CFG_BOOL("zcb", ext_zcb, false),
1538     MULTI_EXT_CFG_BOOL("zcd", ext_zcd, false),
1539     MULTI_EXT_CFG_BOOL("zce", ext_zce, false),
1540     MULTI_EXT_CFG_BOOL("zcf", ext_zcf, false),
1541     MULTI_EXT_CFG_BOOL("zcmp", ext_zcmp, false),
1542     MULTI_EXT_CFG_BOOL("zcmt", ext_zcmt, false),
1543     MULTI_EXT_CFG_BOOL("zicond", ext_zicond, false),
1544 
1545     /* Vector cryptography extensions */
1546     MULTI_EXT_CFG_BOOL("zvbb", ext_zvbb, false),
1547     MULTI_EXT_CFG_BOOL("zvbc", ext_zvbc, false),
1548     MULTI_EXT_CFG_BOOL("zvkb", ext_zvkb, false),
1549     MULTI_EXT_CFG_BOOL("zvkg", ext_zvkg, false),
1550     MULTI_EXT_CFG_BOOL("zvkned", ext_zvkned, false),
1551     MULTI_EXT_CFG_BOOL("zvknha", ext_zvknha, false),
1552     MULTI_EXT_CFG_BOOL("zvknhb", ext_zvknhb, false),
1553     MULTI_EXT_CFG_BOOL("zvksed", ext_zvksed, false),
1554     MULTI_EXT_CFG_BOOL("zvksh", ext_zvksh, false),
1555     MULTI_EXT_CFG_BOOL("zvkt", ext_zvkt, false),
1556     MULTI_EXT_CFG_BOOL("zvkn", ext_zvkn, false),
1557     MULTI_EXT_CFG_BOOL("zvknc", ext_zvknc, false),
1558     MULTI_EXT_CFG_BOOL("zvkng", ext_zvkng, false),
1559     MULTI_EXT_CFG_BOOL("zvks", ext_zvks, false),
1560     MULTI_EXT_CFG_BOOL("zvksc", ext_zvksc, false),
1561     MULTI_EXT_CFG_BOOL("zvksg", ext_zvksg, false),
1562 
1563     DEFINE_PROP_END_OF_LIST(),
1564 };
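
/*
 * Each entry above is normally exposed as a boolean CPU property, so the
 * defaults can be overridden per CPU on the command line, for instance
 * (illustrative only):
 *
 *   -cpu rv64,zbkb=true,zicbom=false
 *
 * The property setters also remember which options the user touched so
 * that later validation can tell explicit choices apart from defaults.
 */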
1565 
1566 const RISCVCPUMultiExtConfig riscv_cpu_vendor_exts[] = {
1567     MULTI_EXT_CFG_BOOL("xtheadba", ext_xtheadba, false),
1568     MULTI_EXT_CFG_BOOL("xtheadbb", ext_xtheadbb, false),
1569     MULTI_EXT_CFG_BOOL("xtheadbs", ext_xtheadbs, false),
1570     MULTI_EXT_CFG_BOOL("xtheadcmo", ext_xtheadcmo, false),
1571     MULTI_EXT_CFG_BOOL("xtheadcondmov", ext_xtheadcondmov, false),
1572     MULTI_EXT_CFG_BOOL("xtheadfmemidx", ext_xtheadfmemidx, false),
1573     MULTI_EXT_CFG_BOOL("xtheadfmv", ext_xtheadfmv, false),
1574     MULTI_EXT_CFG_BOOL("xtheadmac", ext_xtheadmac, false),
1575     MULTI_EXT_CFG_BOOL("xtheadmemidx", ext_xtheadmemidx, false),
1576     MULTI_EXT_CFG_BOOL("xtheadmempair", ext_xtheadmempair, false),
1577     MULTI_EXT_CFG_BOOL("xtheadsync", ext_xtheadsync, false),
1578     MULTI_EXT_CFG_BOOL("xventanacondops", ext_XVentanaCondOps, false),
1579 
1580     DEFINE_PROP_END_OF_LIST(),
1581 };
1582 
1583 /* These are experimental, so mark them with 'x-' */
1584 const RISCVCPUMultiExtConfig riscv_cpu_experimental_exts[] = {
1585     DEFINE_PROP_END_OF_LIST(),
1586 };
1587 
1588 /*
1589  * 'Named features' is the name we give to extensions that we
1590  * don't want to expose to users. They are either immutable
1591  * (always enabled/disabled) or their value varies depending on
1592  * the resulting CPU state. Like regular extensions, they have
1593  * riscv,isa strings and priv_ver requirements.
1594  */
1595 const RISCVCPUMultiExtConfig riscv_cpu_named_features[] = {
1596     MULTI_EXT_CFG_BOOL("zic64b", ext_zic64b, true),
1597 
1598     DEFINE_PROP_END_OF_LIST(),
1599 };
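
/*
 * Illustration: 'zic64b' advertises 64-byte cache blocks. Whether it
 * stays enabled is decided during realize-time validation (tcg-cpu.c),
 * roughly equivalent to:
 *
 *   cpu->cfg.ext_zic64b = cpu->cfg.cbom_blocksize == 64 &&
 *                         cpu->cfg.cbop_blocksize == 64 &&
 *                         cpu->cfg.cboz_blocksize == 64;
 */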
1600 
1601 /* Deprecated entries marked for future removal */
1602 const RISCVCPUMultiExtConfig riscv_cpu_deprecated_exts[] = {
1603     MULTI_EXT_CFG_BOOL("Zifencei", ext_zifencei, true),
1604     MULTI_EXT_CFG_BOOL("Zicsr", ext_zicsr, true),
1605     MULTI_EXT_CFG_BOOL("Zihintntl", ext_zihintntl, true),
1606     MULTI_EXT_CFG_BOOL("Zihintpause", ext_zihintpause, true),
1607     MULTI_EXT_CFG_BOOL("Zawrs", ext_zawrs, true),
1608     MULTI_EXT_CFG_BOOL("Zfa", ext_zfa, true),
1609     MULTI_EXT_CFG_BOOL("Zfh", ext_zfh, false),
1610     MULTI_EXT_CFG_BOOL("Zfhmin", ext_zfhmin, false),
1611     MULTI_EXT_CFG_BOOL("Zve32f", ext_zve32f, false),
1612     MULTI_EXT_CFG_BOOL("Zve64f", ext_zve64f, false),
1613     MULTI_EXT_CFG_BOOL("Zve64d", ext_zve64d, false),
1614 
1615     DEFINE_PROP_END_OF_LIST(),
1616 };
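
/*
 * The capitalized spellings above duplicate lowercase properties declared
 * earlier; they are presumably kept only so that existing command lines
 * keep working. The lowercase names are the canonical ones.
 */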
1617 
1618 static void cpu_set_prop_err(RISCVCPU *cpu, const char *propname,
1619                              Error **errp)
1620 {
1621     g_autofree char *cpuname = riscv_cpu_get_name(cpu);
1622     error_setg(errp, "CPU '%s' does not allow changing the value of '%s'",
1623                cpuname, propname);
1624 }
1625 
1626 static void prop_pmu_num_set(Object *obj, Visitor *v, const char *name,
1627                              void *opaque, Error **errp)
1628 {
1629     RISCVCPU *cpu = RISCV_CPU(obj);
1630     uint8_t pmu_num, curr_pmu_num;
1631     uint32_t pmu_mask;
1632 
1633     visit_type_uint8(v, name, &pmu_num, errp);
1634 
1635     curr_pmu_num = ctpop32(cpu->cfg.pmu_mask);
1636 
1637     if (pmu_num != curr_pmu_num && riscv_cpu_is_vendor(obj)) {
1638         cpu_set_prop_err(cpu, name, errp);
1639         error_append_hint(errp, "Current '%s' val: %u\n",
1640                           name, curr_pmu_num);
1641         return;
1642     }
1643 
1644     if (pmu_num > (RV_MAX_MHPMCOUNTERS - 3)) {
1645         error_setg(errp, "Number of counters exceeds maximum available");
1646         return;
1647     }
1648 
1649     if (pmu_num == 0) {
1650         pmu_mask = 0;
1651     } else {
1652         pmu_mask = MAKE_64BIT_MASK(3, pmu_num);
1653     }
1654 
1655     warn_report("\"pmu-num\" property is deprecated; use \"pmu-mask\"");
1656     cpu->cfg.pmu_mask = pmu_mask;
1657     cpu_option_add_user_setting("pmu-mask", pmu_mask);
1658 }
1659 
1660 static void prop_pmu_num_get(Object *obj, Visitor *v, const char *name,
1661                              void *opaque, Error **errp)
1662 {
1663     RISCVCPU *cpu = RISCV_CPU(obj);
1664     uint8_t pmu_num = ctpop32(cpu->cfg.pmu_mask);
1665 
1666     visit_type_uint8(v, name, &pmu_num, errp);
1667 }
1668 
1669 static const PropertyInfo prop_pmu_num = {
1670     .name = "pmu-num",
1671     .get = prop_pmu_num_get,
1672     .set = prop_pmu_num_set,
1673 };
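
/*
 * Worked example for the legacy "pmu-num" property: pmu-num=6 becomes
 * pmu_mask = MAKE_64BIT_MASK(3, 6) = 0x1f8, i.e. the programmable
 * counters mhpmcounter3..mhpmcounter8.
 */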
1674 
1675 static void prop_pmu_mask_set(Object *obj, Visitor *v, const char *name,
1676                              void *opaque, Error **errp)
1677 {
1678     RISCVCPU *cpu = RISCV_CPU(obj);
1679     uint32_t value;
1680     uint8_t pmu_num;
1681 
1682     visit_type_uint32(v, name, &value, errp);
1683 
1684     if (value != cpu->cfg.pmu_mask && riscv_cpu_is_vendor(obj)) {
1685         cpu_set_prop_err(cpu, name, errp);
1686         error_append_hint(errp, "Current '%s' val: %x\n",
1687                           name, cpu->cfg.pmu_mask);
1688         return;
1689     }
1690 
1691     pmu_num = ctpop32(value);
1692 
1693     if (pmu_num > (RV_MAX_MHPMCOUNTERS - 3)) {
1694         error_setg(errp, "Number of counters exceeds maximum available");
1695         return;
1696     }
1697 
1698     cpu_option_add_user_setting(name, value);
1699     cpu->cfg.pmu_mask = value;
1700 }
1701 
1702 static void prop_pmu_mask_get(Object *obj, Visitor *v, const char *name,
1703                              void *opaque, Error **errp)
1704 {
1705     uint32_t pmu_mask = RISCV_CPU(obj)->cfg.pmu_mask;
1706 
1707     visit_type_uint32(v, name, &pmu_mask, errp);
1708 }
1709 
1710 static const PropertyInfo prop_pmu_mask = {
1711     .name = "pmu-mask",
1712     .get = prop_pmu_mask_get,
1713     .set = prop_pmu_mask_set,
1714 };
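
/*
 * "pmu-mask" selects the programmable counters directly, one bit per
 * mhpmcounter. An illustrative command line enabling mhpmcounter3..8:
 *
 *   -cpu rv64,pmu-mask=0x1f8
 */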
1715 
1716 static void prop_mmu_set(Object *obj, Visitor *v, const char *name,
1717                          void *opaque, Error **errp)
1718 {
1719     RISCVCPU *cpu = RISCV_CPU(obj);
1720     bool value;
1721 
1722     visit_type_bool(v, name, &value, errp);
1723 
1724     if (cpu->cfg.mmu != value && riscv_cpu_is_vendor(obj)) {
1725         cpu_set_prop_err(cpu, "mmu", errp);
1726         return;
1727     }
1728 
1729     cpu_option_add_user_setting(name, value);
1730     cpu->cfg.mmu = value;
1731 }
1732 
1733 static void prop_mmu_get(Object *obj, Visitor *v, const char *name,
1734                          void *opaque, Error **errp)
1735 {
1736     bool value = RISCV_CPU(obj)->cfg.mmu;
1737 
1738     visit_type_bool(v, name, &value, errp);
1739 }
1740 
1741 static const PropertyInfo prop_mmu = {
1742     .name = "mmu",
1743     .get = prop_mmu_get,
1744     .set = prop_mmu_set,
1745 };
1746 
1747 static void prop_pmp_set(Object *obj, Visitor *v, const char *name,
1748                          void *opaque, Error **errp)
1749 {
1750     RISCVCPU *cpu = RISCV_CPU(obj);
1751     bool value;
1752 
1753     visit_type_bool(v, name, &value, errp);
1754 
1755     if (cpu->cfg.pmp != value && riscv_cpu_is_vendor(obj)) {
1756         cpu_set_prop_err(cpu, name, errp);
1757         return;
1758     }
1759 
1760     cpu_option_add_user_setting(name, value);
1761     cpu->cfg.pmp = value;
1762 }
1763 
1764 static void prop_pmp_get(Object *obj, Visitor *v, const char *name,
1765                          void *opaque, Error **errp)
1766 {
1767     bool value = RISCV_CPU(obj)->cfg.pmp;
1768 
1769     visit_type_bool(v, name, &value, errp);
1770 }
1771 
1772 static const PropertyInfo prop_pmp = {
1773     .name = "pmp",
1774     .get = prop_pmp_get,
1775     .set = prop_pmp_set,
1776 };
1777 
1778 static int priv_spec_from_str(const char *priv_spec_str)
1779 {
1780     int priv_version = -1;
1781 
1782     if (!g_strcmp0(priv_spec_str, PRIV_VER_1_13_0_STR)) {
1783         priv_version = PRIV_VERSION_1_13_0;
1784     } else if (!g_strcmp0(priv_spec_str, PRIV_VER_1_12_0_STR)) {
1785         priv_version = PRIV_VERSION_1_12_0;
1786     } else if (!g_strcmp0(priv_spec_str, PRIV_VER_1_11_0_STR)) {
1787         priv_version = PRIV_VERSION_1_11_0;
1788     } else if (!g_strcmp0(priv_spec_str, PRIV_VER_1_10_0_STR)) {
1789         priv_version = PRIV_VERSION_1_10_0;
1790     }
1791 
1792     return priv_version;
1793 }
1794 
1795 const char *priv_spec_to_str(int priv_version)
1796 {
1797     switch (priv_version) {
1798     case PRIV_VERSION_1_10_0:
1799         return PRIV_VER_1_10_0_STR;
1800     case PRIV_VERSION_1_11_0:
1801         return PRIV_VER_1_11_0_STR;
1802     case PRIV_VERSION_1_12_0:
1803         return PRIV_VER_1_12_0_STR;
1804     case PRIV_VERSION_1_13_0:
1805         return PRIV_VER_1_13_0_STR;
1806     default:
1807         return NULL;
1808     }
1809 }
1810 
1811 static void prop_priv_spec_set(Object *obj, Visitor *v, const char *name,
1812                                void *opaque, Error **errp)
1813 {
1814     RISCVCPU *cpu = RISCV_CPU(obj);
1815     g_autofree char *value = NULL;
1816     int priv_version = -1;
1817 
1818     visit_type_str(v, name, &value, errp);
1819 
1820     priv_version = priv_spec_from_str(value);
1821     if (priv_version < 0) {
1822         error_setg(errp, "Unsupported privilege spec version '%s'", value);
1823         return;
1824     }
1825 
1826     if (priv_version != cpu->env.priv_ver && riscv_cpu_is_vendor(obj)) {
1827         cpu_set_prop_err(cpu, name, errp);
1828         error_append_hint(errp, "Current '%s' val: %s\n", name,
1829                           object_property_get_str(obj, name, NULL));
1830         return;
1831     }
1832 
1833     cpu_option_add_user_setting(name, priv_version);
1834     cpu->env.priv_ver = priv_version;
1835 }
1836 
1837 static void prop_priv_spec_get(Object *obj, Visitor *v, const char *name,
1838                                void *opaque, Error **errp)
1839 {
1840     RISCVCPU *cpu = RISCV_CPU(obj);
1841     const char *value = priv_spec_to_str(cpu->env.priv_ver);
1842 
1843     visit_type_str(v, name, (char **)&value, errp);
1844 }
1845 
1846 static const PropertyInfo prop_priv_spec = {
1847     .name = "priv_spec",
1848     .get = prop_priv_spec_get,
1849     .set = prop_priv_spec_set,
1850 };
1851 
1852 static void prop_vext_spec_set(Object *obj, Visitor *v, const char *name,
1853                                void *opaque, Error **errp)
1854 {
1855     RISCVCPU *cpu = RISCV_CPU(obj);
1856     g_autofree char *value = NULL;
1857 
1858     visit_type_str(v, name, &value, errp);
1859 
1860     if (g_strcmp0(value, VEXT_VER_1_00_0_STR) != 0) {
1861         error_setg(errp, "Unsupported vector spec version '%s'", value);
1862         return;
1863     }
1864 
1865     cpu_option_add_user_setting(name, VEXT_VERSION_1_00_0);
1866     cpu->env.vext_ver = VEXT_VERSION_1_00_0;
1867 }
1868 
1869 static void prop_vext_spec_get(Object *obj, Visitor *v, const char *name,
1870                                void *opaque, Error **errp)
1871 {
1872     const char *value = VEXT_VER_1_00_0_STR;
1873 
1874     visit_type_str(v, name, (char **)&value, errp);
1875 }
1876 
1877 static const PropertyInfo prop_vext_spec = {
1878     .name = "vext_spec",
1879     .get = prop_vext_spec_get,
1880     .set = prop_vext_spec_set,
1881 };
1882 
1883 static void prop_vlen_set(Object *obj, Visitor *v, const char *name,
1884                          void *opaque, Error **errp)
1885 {
1886     RISCVCPU *cpu = RISCV_CPU(obj);
1887     uint16_t value;
1888 
1889     if (!visit_type_uint16(v, name, &value, errp)) {
1890         return;
1891     }
1892 
1893     if (!is_power_of_2(value)) {
1894         error_setg(errp, "Vector extension VLEN must be a power of 2");
1895         return;
1896     }
1897 
1898     if (value != cpu->cfg.vlenb && riscv_cpu_is_vendor(obj)) {
1899         cpu_set_prop_err(cpu, name, errp);
1900         error_append_hint(errp, "Current '%s' val: %u\n",
1901                           name, cpu->cfg.vlenb << 3);
1902         return;
1903     }
1904 
1905     cpu_option_add_user_setting(name, value);
1906     cpu->cfg.vlenb = value >> 3;
1907 }
1908 
1909 static void prop_vlen_get(Object *obj, Visitor *v, const char *name,
1910                          void *opaque, Error **errp)
1911 {
1912     uint16_t value = RISCV_CPU(obj)->cfg.vlenb << 3;
1913 
1914     visit_type_uint16(v, name, &value, errp);
1915 }
1916 
1917 static const PropertyInfo prop_vlen = {
1918     .name = "vlen",
1919     .get = prop_vlen_get,
1920     .set = prop_vlen_set,
1921 };
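
/*
 * "vlen" is given in bits but stored internally in bytes (cfg.vlenb),
 * so vlen=256 is kept as vlenb=32. An illustrative invocation:
 *
 *   -cpu rv64,v=true,vlen=256,elen=64
 */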
1922 
1923 static void prop_elen_set(Object *obj, Visitor *v, const char *name,
1924                          void *opaque, Error **errp)
1925 {
1926     RISCVCPU *cpu = RISCV_CPU(obj);
1927     uint16_t value;
1928 
1929     if (!visit_type_uint16(v, name, &value, errp)) {
1930         return;
1931     }
1932 
1933     if (!is_power_of_2(value)) {
1934         error_setg(errp, "Vector extension ELEN must be a power of 2");
1935         return;
1936     }
1937 
1938     if (value != cpu->cfg.elen && riscv_cpu_is_vendor(obj)) {
1939         cpu_set_prop_err(cpu, name, errp);
1940         error_append_hint(errp, "Current '%s' val: %u\n",
1941                           name, cpu->cfg.elen);
1942         return;
1943     }
1944 
1945     cpu_option_add_user_setting(name, value);
1946     cpu->cfg.elen = value;
1947 }
1948 
1949 static void prop_elen_get(Object *obj, Visitor *v, const char *name,
1950                          void *opaque, Error **errp)
1951 {
1952     uint16_t value = RISCV_CPU(obj)->cfg.elen;
1953 
1954     visit_type_uint16(v, name, &value, errp);
1955 }
1956 
1957 static const PropertyInfo prop_elen = {
1958     .name = "elen",
1959     .get = prop_elen_get,
1960     .set = prop_elen_set,
1961 };
1962 
1963 static void prop_cbom_blksize_set(Object *obj, Visitor *v, const char *name,
1964                                   void *opaque, Error **errp)
1965 {
1966     RISCVCPU *cpu = RISCV_CPU(obj);
1967     uint16_t value;
1968 
1969     if (!visit_type_uint16(v, name, &value, errp)) {
1970         return;
1971     }
1972 
1973     if (value != cpu->cfg.cbom_blocksize && riscv_cpu_is_vendor(obj)) {
1974         cpu_set_prop_err(cpu, name, errp);
1975         error_append_hint(errp, "Current '%s' val: %u\n",
1976                           name, cpu->cfg.cbom_blocksize);
1977         return;
1978     }
1979 
1980     cpu_option_add_user_setting(name, value);
1981     cpu->cfg.cbom_blocksize = value;
1982 }
1983 
1984 static void prop_cbom_blksize_get(Object *obj, Visitor *v, const char *name,
1985                          void *opaque, Error **errp)
1986 {
1987     uint16_t value = RISCV_CPU(obj)->cfg.cbom_blocksize;
1988 
1989     visit_type_uint16(v, name, &value, errp);
1990 }
1991 
1992 static const PropertyInfo prop_cbom_blksize = {
1993     .name = "cbom_blocksize",
1994     .get = prop_cbom_blksize_get,
1995     .set = prop_cbom_blksize_set,
1996 };
1997 
1998 static void prop_cbop_blksize_set(Object *obj, Visitor *v, const char *name,
1999                                   void *opaque, Error **errp)
2000 {
2001     RISCVCPU *cpu = RISCV_CPU(obj);
2002     uint16_t value;
2003 
2004     if (!visit_type_uint16(v, name, &value, errp)) {
2005         return;
2006     }
2007 
2008     if (value != cpu->cfg.cbop_blocksize && riscv_cpu_is_vendor(obj)) {
2009         cpu_set_prop_err(cpu, name, errp);
2010         error_append_hint(errp, "Current '%s' val: %u\n",
2011                           name, cpu->cfg.cbop_blocksize);
2012         return;
2013     }
2014 
2015     cpu_option_add_user_setting(name, value);
2016     cpu->cfg.cbop_blocksize = value;
2017 }
2018 
2019 static void prop_cbop_blksize_get(Object *obj, Visitor *v, const char *name,
2020                          void *opaque, Error **errp)
2021 {
2022     uint16_t value = RISCV_CPU(obj)->cfg.cbop_blocksize;
2023 
2024     visit_type_uint16(v, name, &value, errp);
2025 }
2026 
2027 static const PropertyInfo prop_cbop_blksize = {
2028     .name = "cbop_blocksize",
2029     .get = prop_cbop_blksize_get,
2030     .set = prop_cbop_blksize_set,
2031 };
2032 
2033 static void prop_cboz_blksize_set(Object *obj, Visitor *v, const char *name,
2034                                   void *opaque, Error **errp)
2035 {
2036     RISCVCPU *cpu = RISCV_CPU(obj);
2037     uint16_t value;
2038 
2039     if (!visit_type_uint16(v, name, &value, errp)) {
2040         return;
2041     }
2042 
2043     if (value != cpu->cfg.cboz_blocksize && riscv_cpu_is_vendor(obj)) {
2044         cpu_set_prop_err(cpu, name, errp);
2045         error_append_hint(errp, "Current '%s' val: %u\n",
2046                           name, cpu->cfg.cboz_blocksize);
2047         return;
2048     }
2049 
2050     cpu_option_add_user_setting(name, value);
2051     cpu->cfg.cboz_blocksize = value;
2052 }
2053 
2054 static void prop_cboz_blksize_get(Object *obj, Visitor *v, const char *name,
2055                          void *opaque, Error **errp)
2056 {
2057     uint16_t value = RISCV_CPU(obj)->cfg.cboz_blocksize;
2058 
2059     visit_type_uint16(v, name, &value, errp);
2060 }
2061 
2062 static const PropertyInfo prop_cboz_blksize = {
2063     .name = "cboz_blocksize",
2064     .get = prop_cboz_blksize_get,
2065     .set = prop_cboz_blksize_set,
2066 };
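
/*
 * The three cache-block-size properties above take the size in bytes.
 * An illustrative override (values other than 64 are expected to clear
 * the zic64b named feature mentioned earlier):
 *
 *   -cpu rv64,cbom_blocksize=32,cboz_blocksize=32
 */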
2067 
2068 static void prop_mvendorid_set(Object *obj, Visitor *v, const char *name,
2069                                void *opaque, Error **errp)
2070 {
2071     bool dynamic_cpu = riscv_cpu_is_dynamic(obj);
2072     RISCVCPU *cpu = RISCV_CPU(obj);
2073     uint32_t prev_val = cpu->cfg.mvendorid;
2074     uint32_t value;
2075 
2076     if (!visit_type_uint32(v, name, &value, errp)) {
2077         return;
2078     }
2079 
2080     if (!dynamic_cpu && prev_val != value) {
2081         error_setg(errp, "Unable to change %s mvendorid (0x%x)",
2082                    object_get_typename(obj), prev_val);
2083         return;
2084     }
2085 
2086     cpu->cfg.mvendorid = value;
2087 }
2088 
2089 static void prop_mvendorid_get(Object *obj, Visitor *v, const char *name,
2090                                void *opaque, Error **errp)
2091 {
2092     uint32_t value = RISCV_CPU(obj)->cfg.mvendorid;
2093 
2094     visit_type_uint32(v, name, &value, errp);
2095 }
2096 
2097 static const PropertyInfo prop_mvendorid = {
2098     .name = "mvendorid",
2099     .get = prop_mvendorid_get,
2100     .set = prop_mvendorid_set,
2101 };
2102 
2103 static void prop_mimpid_set(Object *obj, Visitor *v, const char *name,
2104                             void *opaque, Error **errp)
2105 {
2106     bool dynamic_cpu = riscv_cpu_is_dynamic(obj);
2107     RISCVCPU *cpu = RISCV_CPU(obj);
2108     uint64_t prev_val = cpu->cfg.mimpid;
2109     uint64_t value;
2110 
2111     if (!visit_type_uint64(v, name, &value, errp)) {
2112         return;
2113     }
2114 
2115     if (!dynamic_cpu && prev_val != value) {
2116         error_setg(errp, "Unable to change %s mimpid (0x%" PRIx64 ")",
2117                    object_get_typename(obj), prev_val);
2118         return;
2119     }
2120 
2121     cpu->cfg.mimpid = value;
2122 }
2123 
2124 static void prop_mimpid_get(Object *obj, Visitor *v, const char *name,
2125                             void *opaque, Error **errp)
2126 {
2127     uint64_t value = RISCV_CPU(obj)->cfg.mimpid;
2128 
2129     visit_type_uint64(v, name, &value, errp);
2130 }
2131 
2132 static const PropertyInfo prop_mimpid = {
2133     .name = "mimpid",
2134     .get = prop_mimpid_get,
2135     .set = prop_mimpid_set,
2136 };
2137 
2138 static void prop_marchid_set(Object *obj, Visitor *v, const char *name,
2139                              void *opaque, Error **errp)
2140 {
2141     bool dynamic_cpu = riscv_cpu_is_dynamic(obj);
2142     RISCVCPU *cpu = RISCV_CPU(obj);
2143     uint64_t prev_val = cpu->cfg.marchid;
2144     uint64_t value, invalid_val;
2145     uint32_t mxlen = 0;
2146 
2147     if (!visit_type_uint64(v, name, &value, errp)) {
2148         return;
2149     }
2150 
2151     if (!dynamic_cpu && prev_val != value) {
2152         error_setg(errp, "Unable to change %s marchid (0x%" PRIx64 ")",
2153                    object_get_typename(obj), prev_val);
2154         return;
2155     }
2156 
2157     switch (riscv_cpu_mxl(&cpu->env)) {
2158     case MXL_RV32:
2159         mxlen = 32;
2160         break;
2161     case MXL_RV64:
2162     case MXL_RV128:
2163         mxlen = 64;
2164         break;
2165     default:
2166         g_assert_not_reached();
2167     }
2168 
2169     invalid_val = 1ULL << (mxlen - 1);
2170 
2171     if (value == invalid_val) {
2172         error_setg(errp, "Unable to set marchid to a value with only "
2173                          "the MSB of the %u-bit field set", mxlen);
2174         return;
2175     }
2176 
2177     cpu->cfg.marchid = value;
2178 }
2179 
2180 static void prop_marchid_get(Object *obj, Visitor *v, const char *name,
2181                              void *opaque, Error **errp)
2182 {
2183     uint64_t value = RISCV_CPU(obj)->cfg.marchid;
2184 
2185     visit_type_uint64(v, name, &value, errp);
2186 }
2187 
2188 static const PropertyInfo prop_marchid = {
2189     .name = "marchid",
2190     .get = prop_marchid_get,
2191     .set = prop_marchid_set,
2192 };
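
/*
 * The machine-ID registers can only be changed on dynamic CPU models;
 * vendor and bare CPUs keep their fixed values. A purely illustrative
 * example:
 *
 *   -cpu rv64,mvendorid=0x0,marchid=0x0,mimpid=0x1
 *
 * The one rejected marchid pattern is a value whose MSB is set while
 * every other bit is zero.
 */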
2193 
2194 /*
2195  * RVA22U64 defines some cache-related 'named features':
2196  * Za64rs, Zic64b, Ziccif, Ziccrse, Ziccamoa and Zicclsm.
2197  * They are always implemented in TCG and don't need to be
2198  * manually enabled by the profile.
2199  */
2200 static RISCVCPUProfile RVA22U64 = {
2201     .parent = NULL,
2202     .name = "rva22u64",
2203     .misa_ext = RVI | RVM | RVA | RVF | RVD | RVC | RVU,
2204     .priv_spec = RISCV_PROFILE_ATTR_UNUSED,
2205     .satp_mode = RISCV_PROFILE_ATTR_UNUSED,
2206     .ext_offsets = {
2207         CPU_CFG_OFFSET(ext_zicsr), CPU_CFG_OFFSET(ext_zihintpause),
2208         CPU_CFG_OFFSET(ext_zba), CPU_CFG_OFFSET(ext_zbb),
2209         CPU_CFG_OFFSET(ext_zbs), CPU_CFG_OFFSET(ext_zfhmin),
2210         CPU_CFG_OFFSET(ext_zkt), CPU_CFG_OFFSET(ext_zicntr),
2211         CPU_CFG_OFFSET(ext_zihpm), CPU_CFG_OFFSET(ext_zicbom),
2212         CPU_CFG_OFFSET(ext_zicbop), CPU_CFG_OFFSET(ext_zicboz),
2213 
2214         /* mandatory named features for this profile */
2215         CPU_CFG_OFFSET(ext_zic64b),
2216 
2217         RISCV_PROFILE_EXT_LIST_END
2218     }
2219 };
2220 
2221 /*
2222  * As with RVA22U64, RVA22S64 also defines 'named features'.
2223  *
2224  * Cache-related features that we consider enabled since we don't
2225  * emulate caches: Ssccptr
2226  *
2227  * Other named features that we already implement: Sstvecd, Sstvala,
2228  * Sscounterenw
2229  *
2230  * The remaining features/extensions come from RVA22U64.
2231  */
2232 static RISCVCPUProfile RVA22S64 = {
2233     .parent = &RVA22U64,
2234     .name = "rva22s64",
2235     .misa_ext = RVS,
2236     .priv_spec = PRIV_VERSION_1_12_0,
2237     .satp_mode = VM_1_10_SV39,
2238     .ext_offsets = {
2239         /* rva22s64 exts */
2240         CPU_CFG_OFFSET(ext_zifencei), CPU_CFG_OFFSET(ext_svpbmt),
2241         CPU_CFG_OFFSET(ext_svinval), CPU_CFG_OFFSET(ext_svade),
2242 
2243         RISCV_PROFILE_EXT_LIST_END
2244     }
2245 };
2246 
2247 RISCVCPUProfile *riscv_profiles[] = {
2248     &RVA22U64,
2249     &RVA22S64,
2250     NULL,
2251 };
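
/*
 * Besides the dedicated rva22u64/rva22s64 CPU types registered at the
 * bottom of this file, each profile is also expected to be exposed as a
 * boolean flag on bare CPUs, e.g. (illustrative, TCG only):
 *
 *   -cpu rv64i,rva22s64=true
 *
 * Enabling a profile turns on its misa bits and extension list and,
 * through the .parent link, the parent profile as well.
 */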
2252 
2253 static RISCVCPUImpliedExtsRule RVA_IMPLIED = {
2254     .is_misa = true,
2255     .ext = RVA,
2256     .implied_multi_exts = {
2257         CPU_CFG_OFFSET(ext_zalrsc), CPU_CFG_OFFSET(ext_zaamo),
2258 
2259         RISCV_IMPLIED_EXTS_RULE_END
2260     },
2261 };
2262 
2263 static RISCVCPUImpliedExtsRule RVD_IMPLIED = {
2264     .is_misa = true,
2265     .ext = RVD,
2266     .implied_misa_exts = RVF,
2267     .implied_multi_exts = { RISCV_IMPLIED_EXTS_RULE_END },
2268 };
2269 
2270 static RISCVCPUImpliedExtsRule RVF_IMPLIED = {
2271     .is_misa = true,
2272     .ext = RVF,
2273     .implied_multi_exts = {
2274         CPU_CFG_OFFSET(ext_zicsr),
2275 
2276         RISCV_IMPLIED_EXTS_RULE_END
2277     },
2278 };
2279 
2280 static RISCVCPUImpliedExtsRule RVM_IMPLIED = {
2281     .is_misa = true,
2282     .ext = RVM,
2283     .implied_multi_exts = {
2284         CPU_CFG_OFFSET(ext_zmmul),
2285 
2286         RISCV_IMPLIED_EXTS_RULE_END
2287     },
2288 };
2289 
2290 static RISCVCPUImpliedExtsRule RVV_IMPLIED = {
2291     .is_misa = true,
2292     .ext = RVV,
2293     .implied_multi_exts = {
2294         CPU_CFG_OFFSET(ext_zve64d),
2295 
2296         RISCV_IMPLIED_EXTS_RULE_END
2297     },
2298 };
2299 
2300 static RISCVCPUImpliedExtsRule ZCB_IMPLIED = {
2301     .ext = CPU_CFG_OFFSET(ext_zcb),
2302     .implied_multi_exts = {
2303         CPU_CFG_OFFSET(ext_zca),
2304 
2305         RISCV_IMPLIED_EXTS_RULE_END
2306     },
2307 };
2308 
2309 static RISCVCPUImpliedExtsRule ZCD_IMPLIED = {
2310     .ext = CPU_CFG_OFFSET(ext_zcd),
2311     .implied_misa_exts = RVD,
2312     .implied_multi_exts = {
2313         CPU_CFG_OFFSET(ext_zca),
2314 
2315         RISCV_IMPLIED_EXTS_RULE_END
2316     },
2317 };
2318 
2319 static RISCVCPUImpliedExtsRule ZCE_IMPLIED = {
2320     .ext = CPU_CFG_OFFSET(ext_zce),
2321     .implied_multi_exts = {
2322         CPU_CFG_OFFSET(ext_zcb), CPU_CFG_OFFSET(ext_zcmp),
2323         CPU_CFG_OFFSET(ext_zcmt),
2324 
2325         RISCV_IMPLIED_EXTS_RULE_END
2326     },
2327 };
2328 
2329 static RISCVCPUImpliedExtsRule ZCF_IMPLIED = {
2330     .ext = CPU_CFG_OFFSET(ext_zcf),
2331     .implied_misa_exts = RVF,
2332     .implied_multi_exts = {
2333         CPU_CFG_OFFSET(ext_zca),
2334 
2335         RISCV_IMPLIED_EXTS_RULE_END
2336     },
2337 };
2338 
2339 static RISCVCPUImpliedExtsRule ZCMP_IMPLIED = {
2340     .ext = CPU_CFG_OFFSET(ext_zcmp),
2341     .implied_multi_exts = {
2342         CPU_CFG_OFFSET(ext_zca),
2343 
2344         RISCV_IMPLIED_EXTS_RULE_END
2345     },
2346 };
2347 
2348 static RISCVCPUImpliedExtsRule ZCMT_IMPLIED = {
2349     .ext = CPU_CFG_OFFSET(ext_zcmt),
2350     .implied_multi_exts = {
2351         CPU_CFG_OFFSET(ext_zca), CPU_CFG_OFFSET(ext_zicsr),
2352 
2353         RISCV_IMPLIED_EXTS_RULE_END
2354     },
2355 };
2356 
2357 static RISCVCPUImpliedExtsRule ZDINX_IMPLIED = {
2358     .ext = CPU_CFG_OFFSET(ext_zdinx),
2359     .implied_multi_exts = {
2360         CPU_CFG_OFFSET(ext_zfinx),
2361 
2362         RISCV_IMPLIED_EXTS_RULE_END
2363     },
2364 };
2365 
2366 static RISCVCPUImpliedExtsRule ZFA_IMPLIED = {
2367     .ext = CPU_CFG_OFFSET(ext_zfa),
2368     .implied_misa_exts = RVF,
2369     .implied_multi_exts = { RISCV_IMPLIED_EXTS_RULE_END },
2370 };
2371 
2372 static RISCVCPUImpliedExtsRule ZFBFMIN_IMPLIED = {
2373     .ext = CPU_CFG_OFFSET(ext_zfbfmin),
2374     .implied_misa_exts = RVF,
2375     .implied_multi_exts = { RISCV_IMPLIED_EXTS_RULE_END },
2376 };
2377 
2378 static RISCVCPUImpliedExtsRule ZFH_IMPLIED = {
2379     .ext = CPU_CFG_OFFSET(ext_zfh),
2380     .implied_multi_exts = {
2381         CPU_CFG_OFFSET(ext_zfhmin),
2382 
2383         RISCV_IMPLIED_EXTS_RULE_END
2384     },
2385 };
2386 
2387 static RISCVCPUImpliedExtsRule ZFHMIN_IMPLIED = {
2388     .ext = CPU_CFG_OFFSET(ext_zfhmin),
2389     .implied_misa_exts = RVF,
2390     .implied_multi_exts = { RISCV_IMPLIED_EXTS_RULE_END },
2391 };
2392 
2393 static RISCVCPUImpliedExtsRule ZFINX_IMPLIED = {
2394     .ext = CPU_CFG_OFFSET(ext_zfinx),
2395     .implied_multi_exts = {
2396         CPU_CFG_OFFSET(ext_zicsr),
2397 
2398         RISCV_IMPLIED_EXTS_RULE_END
2399     },
2400 };
2401 
2402 static RISCVCPUImpliedExtsRule ZHINX_IMPLIED = {
2403     .ext = CPU_CFG_OFFSET(ext_zhinx),
2404     .implied_multi_exts = {
2405         CPU_CFG_OFFSET(ext_zhinxmin),
2406 
2407         RISCV_IMPLIED_EXTS_RULE_END
2408     },
2409 };
2410 
2411 static RISCVCPUImpliedExtsRule ZHINXMIN_IMPLIED = {
2412     .ext = CPU_CFG_OFFSET(ext_zhinxmin),
2413     .implied_multi_exts = {
2414         CPU_CFG_OFFSET(ext_zfinx),
2415 
2416         RISCV_IMPLIED_EXTS_RULE_END
2417     },
2418 };
2419 
2420 static RISCVCPUImpliedExtsRule ZICNTR_IMPLIED = {
2421     .ext = CPU_CFG_OFFSET(ext_zicntr),
2422     .implied_multi_exts = {
2423         CPU_CFG_OFFSET(ext_zicsr),
2424 
2425         RISCV_IMPLIED_EXTS_RULE_END
2426     },
2427 };
2428 
2429 static RISCVCPUImpliedExtsRule ZIHPM_IMPLIED = {
2430     .ext = CPU_CFG_OFFSET(ext_zihpm),
2431     .implied_multi_exts = {
2432         CPU_CFG_OFFSET(ext_zicsr),
2433 
2434         RISCV_IMPLIED_EXTS_RULE_END
2435     },
2436 };
2437 
2438 static RISCVCPUImpliedExtsRule ZK_IMPLIED = {
2439     .ext = CPU_CFG_OFFSET(ext_zk),
2440     .implied_multi_exts = {
2441         CPU_CFG_OFFSET(ext_zkn), CPU_CFG_OFFSET(ext_zkr),
2442         CPU_CFG_OFFSET(ext_zkt),
2443 
2444         RISCV_IMPLIED_EXTS_RULE_END
2445     },
2446 };
2447 
2448 static RISCVCPUImpliedExtsRule ZKN_IMPLIED = {
2449     .ext = CPU_CFG_OFFSET(ext_zkn),
2450     .implied_multi_exts = {
2451         CPU_CFG_OFFSET(ext_zbkb), CPU_CFG_OFFSET(ext_zbkc),
2452         CPU_CFG_OFFSET(ext_zbkx), CPU_CFG_OFFSET(ext_zkne),
2453         CPU_CFG_OFFSET(ext_zknd), CPU_CFG_OFFSET(ext_zknh),
2454 
2455         RISCV_IMPLIED_EXTS_RULE_END
2456     },
2457 };
2458 
2459 static RISCVCPUImpliedExtsRule ZKS_IMPLIED = {
2460     .ext = CPU_CFG_OFFSET(ext_zks),
2461     .implied_multi_exts = {
2462         CPU_CFG_OFFSET(ext_zbkb), CPU_CFG_OFFSET(ext_zbkc),
2463         CPU_CFG_OFFSET(ext_zbkx), CPU_CFG_OFFSET(ext_zksed),
2464         CPU_CFG_OFFSET(ext_zksh),
2465 
2466         RISCV_IMPLIED_EXTS_RULE_END
2467     },
2468 };
2469 
2470 static RISCVCPUImpliedExtsRule ZVBB_IMPLIED = {
2471     .ext = CPU_CFG_OFFSET(ext_zvbb),
2472     .implied_multi_exts = {
2473         CPU_CFG_OFFSET(ext_zvkb),
2474 
2475         RISCV_IMPLIED_EXTS_RULE_END
2476     },
2477 };
2478 
2479 static RISCVCPUImpliedExtsRule ZVE32F_IMPLIED = {
2480     .ext = CPU_CFG_OFFSET(ext_zve32f),
2481     .implied_misa_exts = RVF,
2482     .implied_multi_exts = {
2483         CPU_CFG_OFFSET(ext_zve32x),
2484 
2485         RISCV_IMPLIED_EXTS_RULE_END
2486     },
2487 };
2488 
2489 static RISCVCPUImpliedExtsRule ZVE32X_IMPLIED = {
2490     .ext = CPU_CFG_OFFSET(ext_zve32x),
2491     .implied_multi_exts = {
2492         CPU_CFG_OFFSET(ext_zicsr),
2493 
2494         RISCV_IMPLIED_EXTS_RULE_END
2495     },
2496 };
2497 
2498 static RISCVCPUImpliedExtsRule ZVE64D_IMPLIED = {
2499     .ext = CPU_CFG_OFFSET(ext_zve64d),
2500     .implied_misa_exts = RVD,
2501     .implied_multi_exts = {
2502         CPU_CFG_OFFSET(ext_zve64f),
2503 
2504         RISCV_IMPLIED_EXTS_RULE_END
2505     },
2506 };
2507 
2508 static RISCVCPUImpliedExtsRule ZVE64F_IMPLIED = {
2509     .ext = CPU_CFG_OFFSET(ext_zve64f),
2510     .implied_misa_exts = RVF,
2511     .implied_multi_exts = {
2512         CPU_CFG_OFFSET(ext_zve32f), CPU_CFG_OFFSET(ext_zve64x),
2513 
2514         RISCV_IMPLIED_EXTS_RULE_END
2515     },
2516 };
2517 
2518 static RISCVCPUImpliedExtsRule ZVE64X_IMPLIED = {
2519     .ext = CPU_CFG_OFFSET(ext_zve64x),
2520     .implied_multi_exts = {
2521         CPU_CFG_OFFSET(ext_zve32x),
2522 
2523         RISCV_IMPLIED_EXTS_RULE_END
2524     },
2525 };
2526 
2527 static RISCVCPUImpliedExtsRule ZVFBFMIN_IMPLIED = {
2528     .ext = CPU_CFG_OFFSET(ext_zvfbfmin),
2529     .implied_multi_exts = {
2530         CPU_CFG_OFFSET(ext_zve32f),
2531 
2532         RISCV_IMPLIED_EXTS_RULE_END
2533     },
2534 };
2535 
2536 static RISCVCPUImpliedExtsRule ZVFBFWMA_IMPLIED = {
2537     .ext = CPU_CFG_OFFSET(ext_zvfbfwma),
2538     .implied_multi_exts = {
2539         CPU_CFG_OFFSET(ext_zvfbfmin), CPU_CFG_OFFSET(ext_zfbfmin),
2540 
2541         RISCV_IMPLIED_EXTS_RULE_END
2542     },
2543 };
2544 
2545 static RISCVCPUImpliedExtsRule ZVFH_IMPLIED = {
2546     .ext = CPU_CFG_OFFSET(ext_zvfh),
2547     .implied_multi_exts = {
2548         CPU_CFG_OFFSET(ext_zvfhmin), CPU_CFG_OFFSET(ext_zfhmin),
2549 
2550         RISCV_IMPLIED_EXTS_RULE_END
2551     },
2552 };
2553 
2554 static RISCVCPUImpliedExtsRule ZVFHMIN_IMPLIED = {
2555     .ext = CPU_CFG_OFFSET(ext_zvfhmin),
2556     .implied_multi_exts = {
2557         CPU_CFG_OFFSET(ext_zve32f),
2558 
2559         RISCV_IMPLIED_EXTS_RULE_END
2560     },
2561 };
2562 
2563 static RISCVCPUImpliedExtsRule ZVKN_IMPLIED = {
2564     .ext = CPU_CFG_OFFSET(ext_zvkn),
2565     .implied_multi_exts = {
2566         CPU_CFG_OFFSET(ext_zvkned), CPU_CFG_OFFSET(ext_zvknhb),
2567         CPU_CFG_OFFSET(ext_zvkb), CPU_CFG_OFFSET(ext_zvkt),
2568 
2569         RISCV_IMPLIED_EXTS_RULE_END
2570     },
2571 };
2572 
2573 static RISCVCPUImpliedExtsRule ZVKNC_IMPLIED = {
2574     .ext = CPU_CFG_OFFSET(ext_zvknc),
2575     .implied_multi_exts = {
2576         CPU_CFG_OFFSET(ext_zvkn), CPU_CFG_OFFSET(ext_zvbc),
2577 
2578         RISCV_IMPLIED_EXTS_RULE_END
2579     },
2580 };
2581 
2582 static RISCVCPUImpliedExtsRule ZVKNG_IMPLIED = {
2583     .ext = CPU_CFG_OFFSET(ext_zvkng),
2584     .implied_multi_exts = {
2585         CPU_CFG_OFFSET(ext_zvkn), CPU_CFG_OFFSET(ext_zvkg),
2586 
2587         RISCV_IMPLIED_EXTS_RULE_END
2588     },
2589 };
2590 
2591 static RISCVCPUImpliedExtsRule ZVKNHB_IMPLIED = {
2592     .ext = CPU_CFG_OFFSET(ext_zvknhb),
2593     .implied_multi_exts = {
2594         CPU_CFG_OFFSET(ext_zve64x),
2595 
2596         RISCV_IMPLIED_EXTS_RULE_END
2597     },
2598 };
2599 
2600 static RISCVCPUImpliedExtsRule ZVKS_IMPLIED = {
2601     .ext = CPU_CFG_OFFSET(ext_zvks),
2602     .implied_multi_exts = {
2603         CPU_CFG_OFFSET(ext_zvksed), CPU_CFG_OFFSET(ext_zvksh),
2604         CPU_CFG_OFFSET(ext_zvkb), CPU_CFG_OFFSET(ext_zvkt),
2605 
2606         RISCV_IMPLIED_EXTS_RULE_END
2607     },
2608 };
2609 
2610 static RISCVCPUImpliedExtsRule ZVKSC_IMPLIED = {
2611     .ext = CPU_CFG_OFFSET(ext_zvksc),
2612     .implied_multi_exts = {
2613         CPU_CFG_OFFSET(ext_zvks), CPU_CFG_OFFSET(ext_zvbc),
2614 
2615         RISCV_IMPLIED_EXTS_RULE_END
2616     },
2617 };
2618 
2619 static RISCVCPUImpliedExtsRule ZVKSG_IMPLIED = {
2620     .ext = CPU_CFG_OFFSET(ext_zvksg),
2621     .implied_multi_exts = {
2622         CPU_CFG_OFFSET(ext_zvks), CPU_CFG_OFFSET(ext_zvkg),
2623 
2624         RISCV_IMPLIED_EXTS_RULE_END
2625     },
2626 };
2627 
2628 RISCVCPUImpliedExtsRule *riscv_misa_ext_implied_rules[] = {
2629     &RVA_IMPLIED, &RVD_IMPLIED, &RVF_IMPLIED,
2630     &RVM_IMPLIED, &RVV_IMPLIED, NULL
2631 };
2632 
2633 RISCVCPUImpliedExtsRule *riscv_multi_ext_implied_rules[] = {
2634     &ZCB_IMPLIED, &ZCD_IMPLIED, &ZCE_IMPLIED,
2635     &ZCF_IMPLIED, &ZCMP_IMPLIED, &ZCMT_IMPLIED,
2636     &ZDINX_IMPLIED, &ZFA_IMPLIED, &ZFBFMIN_IMPLIED,
2637     &ZFH_IMPLIED, &ZFHMIN_IMPLIED, &ZFINX_IMPLIED,
2638     &ZHINX_IMPLIED, &ZHINXMIN_IMPLIED, &ZICNTR_IMPLIED,
2639     &ZIHPM_IMPLIED, &ZK_IMPLIED, &ZKN_IMPLIED,
2640     &ZKS_IMPLIED, &ZVBB_IMPLIED, &ZVE32F_IMPLIED,
2641     &ZVE32X_IMPLIED, &ZVE64D_IMPLIED, &ZVE64F_IMPLIED,
2642     &ZVE64X_IMPLIED, &ZVFBFMIN_IMPLIED, &ZVFBFWMA_IMPLIED,
2643     &ZVFH_IMPLIED, &ZVFHMIN_IMPLIED, &ZVKN_IMPLIED,
2644     &ZVKNC_IMPLIED, &ZVKNG_IMPLIED, &ZVKNHB_IMPLIED,
2645     &ZVKS_IMPLIED,  &ZVKSC_IMPLIED, &ZVKSG_IMPLIED,
2646     NULL
2647 };
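
/*
 * These rules chain. Worked example: enabling zvknc pulls in zvkn and
 * zvbc; zvkn in turn pulls in zvkned, zvknhb, zvkb and zvkt; zvknhb then
 * requires zve64x, which itself implies zve32x.
 */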
2648 
2649 static Property riscv_cpu_properties[] = {
2650     DEFINE_PROP_BOOL("debug", RISCVCPU, cfg.debug, true),
2651 
2652     {.name = "pmu-mask", .info = &prop_pmu_mask},
2653     {.name = "pmu-num", .info = &prop_pmu_num}, /* Deprecated */
2654 
2655     {.name = "mmu", .info = &prop_mmu},
2656     {.name = "pmp", .info = &prop_pmp},
2657 
2658     {.name = "priv_spec", .info = &prop_priv_spec},
2659     {.name = "vext_spec", .info = &prop_vext_spec},
2660 
2661     {.name = "vlen", .info = &prop_vlen},
2662     {.name = "elen", .info = &prop_elen},
2663 
2664     {.name = "cbom_blocksize", .info = &prop_cbom_blksize},
2665     {.name = "cbop_blocksize", .info = &prop_cbop_blksize},
2666     {.name = "cboz_blocksize", .info = &prop_cboz_blksize},
2667 
2668     {.name = "mvendorid", .info = &prop_mvendorid},
2669     {.name = "mimpid", .info = &prop_mimpid},
2670     {.name = "marchid", .info = &prop_marchid},
2671 
2672 #ifndef CONFIG_USER_ONLY
2673     DEFINE_PROP_UINT64("resetvec", RISCVCPU, env.resetvec, DEFAULT_RSTVEC),
2674 #endif
2675 
2676     DEFINE_PROP_BOOL("short-isa-string", RISCVCPU, cfg.short_isa_string, false),
2677 
2678     DEFINE_PROP_BOOL("rvv_ta_all_1s", RISCVCPU, cfg.rvv_ta_all_1s, false),
2679     DEFINE_PROP_BOOL("rvv_ma_all_1s", RISCVCPU, cfg.rvv_ma_all_1s, false),
2680 
2681     /*
2682      * write_misa() is marked as experimental for now, so mark
2683      * it with the 'x-' prefix and default it to 'false'.
2684      */
2685     DEFINE_PROP_BOOL("x-misa-w", RISCVCPU, cfg.misa_w, false),
2686     DEFINE_PROP_END_OF_LIST(),
2687 };
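
/*
 * Most of the entries above use hand-written PropertyInfo setters so they
 * can refuse changes on vendor CPUs (or, for the machine IDs, on any
 * non-dynamic CPU) and record explicit user choices; the plain
 * DEFINE_PROP_* entries such as "debug" carry no such checks.
 * Illustrative consequence: '-cpu sifive-u54,pmp=false' is rejected,
 * while '-cpu rv64,pmp=false' is accepted.
 */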
2688 
2689 #if defined(TARGET_RISCV64)
2690 static void rva22u64_profile_cpu_init(Object *obj)
2691 {
2692     rv64i_bare_cpu_init(obj);
2693 
2694     RVA22U64.enabled = true;
2695 }
2696 
2697 static void rva22s64_profile_cpu_init(Object *obj)
2698 {
2699     rv64i_bare_cpu_init(obj);
2700 
2701     RVA22S64.enabled = true;
2702 }
2703 #endif
2704 
2705 static const gchar *riscv_gdb_arch_name(CPUState *cs)
2706 {
2707     RISCVCPU *cpu = RISCV_CPU(cs);
2708     CPURISCVState *env = &cpu->env;
2709 
2710     switch (riscv_cpu_mxl(env)) {
2711     case MXL_RV32:
2712         return "riscv:rv32";
2713     case MXL_RV64:
2714     case MXL_RV128:
2715         return "riscv:rv64";
2716     default:
2717         g_assert_not_reached();
2718     }
2719 }
2720 
2721 #ifndef CONFIG_USER_ONLY
2722 static int64_t riscv_get_arch_id(CPUState *cs)
2723 {
2724     RISCVCPU *cpu = RISCV_CPU(cs);
2725 
2726     return cpu->env.mhartid;
2727 }
2728 
2729 #include "hw/core/sysemu-cpu-ops.h"
2730 
2731 static const struct SysemuCPUOps riscv_sysemu_ops = {
2732     .get_phys_page_debug = riscv_cpu_get_phys_page_debug,
2733     .write_elf64_note = riscv_cpu_write_elf64_note,
2734     .write_elf32_note = riscv_cpu_write_elf32_note,
2735     .legacy_vmsd = &vmstate_riscv_cpu,
2736 };
2737 #endif
2738 
2739 static void riscv_cpu_common_class_init(ObjectClass *c, void *data)
2740 {
2741     RISCVCPUClass *mcc = RISCV_CPU_CLASS(c);
2742     CPUClass *cc = CPU_CLASS(c);
2743     DeviceClass *dc = DEVICE_CLASS(c);
2744     ResettableClass *rc = RESETTABLE_CLASS(c);
2745 
2746     device_class_set_parent_realize(dc, riscv_cpu_realize,
2747                                     &mcc->parent_realize);
2748 
2749     resettable_class_set_parent_phases(rc, NULL, riscv_cpu_reset_hold, NULL,
2750                                        &mcc->parent_phases);
2751 
2752     cc->class_by_name = riscv_cpu_class_by_name;
2753     cc->has_work = riscv_cpu_has_work;
2754     cc->mmu_index = riscv_cpu_mmu_index;
2755     cc->dump_state = riscv_cpu_dump_state;
2756     cc->set_pc = riscv_cpu_set_pc;
2757     cc->get_pc = riscv_cpu_get_pc;
2758     cc->gdb_read_register = riscv_cpu_gdb_read_register;
2759     cc->gdb_write_register = riscv_cpu_gdb_write_register;
2760     cc->gdb_stop_before_watchpoint = true;
2761     cc->disas_set_info = riscv_cpu_disas_set_info;
2762 #ifndef CONFIG_USER_ONLY
2763     cc->sysemu_ops = &riscv_sysemu_ops;
2764     cc->get_arch_id = riscv_get_arch_id;
2765 #endif
2766     cc->gdb_arch_name = riscv_gdb_arch_name;
2767 
2768     device_class_set_props(dc, riscv_cpu_properties);
2769 }
2770 
2771 static void riscv_cpu_class_init(ObjectClass *c, void *data)
2772 {
2773     RISCVCPUClass *mcc = RISCV_CPU_CLASS(c);
2774 
2775     mcc->misa_mxl_max = (uint32_t)(uintptr_t)data;
2776     riscv_cpu_validate_misa_mxl(mcc);
2777 }
2778 
2779 static void riscv_isa_string_ext(RISCVCPU *cpu, char **isa_str,
2780                                  int max_str_len)
2781 {
2782     const RISCVIsaExtData *edata;
2783     char *old = *isa_str;
2784     char *new = *isa_str;
2785 
2786     for (edata = isa_edata_arr; edata && edata->name; edata++) {
2787         if (isa_ext_is_enabled(cpu, edata->ext_enable_offset)) {
2788             new = g_strconcat(old, "_", edata->name, NULL);
2789             g_free(old);
2790             old = new;
2791         }
2792     }
2793 
2794     *isa_str = new;
2795 }
2796 
2797 char *riscv_isa_string(RISCVCPU *cpu)
2798 {
2799     RISCVCPUClass *mcc = RISCV_CPU_GET_CLASS(cpu);
2800     int i;
2801     const size_t maxlen = sizeof("rv128") + sizeof(riscv_single_letter_exts);
2802     char *isa_str = g_new(char, maxlen);
2803     int xlen = riscv_cpu_max_xlen(mcc);
2804     char *p = isa_str + snprintf(isa_str, maxlen, "rv%d", xlen);
2805 
2806     for (i = 0; i < sizeof(riscv_single_letter_exts) - 1; i++) {
2807         if (cpu->env.misa_ext & RV(riscv_single_letter_exts[i])) {
2808             *p++ = qemu_tolower(riscv_single_letter_exts[i]);
2809         }
2810     }
2811     *p = '\0';
2812     if (!cpu->cfg.short_isa_string) {
2813         riscv_isa_string_ext(cpu, &isa_str, maxlen);
2814     }
2815     return isa_str;
2816 }
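
/*
 * Illustrative result: an rv64 CPU with IMAFDC plus Zicsr and Zifencei
 * produces something like "rv64imafdc_zicsr_zifencei"; with
 * short-isa-string=true only the single-letter part, "rv64imafdc",
 * is returned.
 */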
2817 
2818 #ifndef CONFIG_USER_ONLY
2819 static char **riscv_isa_extensions_list(RISCVCPU *cpu, int *count)
2820 {
2821     int maxlen = ARRAY_SIZE(riscv_single_letter_exts) + ARRAY_SIZE(isa_edata_arr);
2822     char **extensions = g_new(char *, maxlen);
2823 
2824     for (int i = 0; i < sizeof(riscv_single_letter_exts) - 1; i++) {
2825         if (cpu->env.misa_ext & RV(riscv_single_letter_exts[i])) {
2826             extensions[*count] = g_new(char, 2);
2827             snprintf(extensions[*count], 2, "%c",
2828                      qemu_tolower(riscv_single_letter_exts[i]));
2829             (*count)++;
2830         }
2831     }
2832 
2833     for (const RISCVIsaExtData *edata = isa_edata_arr; edata->name; edata++) {
2834         if (isa_ext_is_enabled(cpu, edata->ext_enable_offset)) {
2835             extensions[*count] = g_strdup(edata->name);
2836             (*count)++;
2837         }
2838     }
2839 
2840     return extensions;
2841 }
2842 
2843 void riscv_isa_write_fdt(RISCVCPU *cpu, void *fdt, char *nodename)
2844 {
2845     RISCVCPUClass *mcc = RISCV_CPU_GET_CLASS(cpu);
2846     const size_t maxlen = sizeof("rv128i");
2847     g_autofree char *isa_base = g_new(char, maxlen);
2848     g_autofree char *riscv_isa;
2849     char **isa_extensions;
2850     int count = 0;
2851     int xlen = riscv_cpu_max_xlen(mcc);
2852 
2853     riscv_isa = riscv_isa_string(cpu);
2854     qemu_fdt_setprop_string(fdt, nodename, "riscv,isa", riscv_isa);
2855 
2856     snprintf(isa_base, maxlen, "rv%di", xlen);
2857     qemu_fdt_setprop_string(fdt, nodename, "riscv,isa-base", isa_base);
2858 
2859     isa_extensions = riscv_isa_extensions_list(cpu, &count);
2860     qemu_fdt_setprop_string_array(fdt, nodename, "riscv,isa-extensions",
2861                                   isa_extensions, count);
2862 
2863     for (int i = 0; i < count; i++) {
2864         g_free(isa_extensions[i]);
2865     }
2866 
2867     g_free(isa_extensions);
2868 }
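
/*
 * Illustrative device tree output for such a CPU node:
 *
 *   riscv,isa            = "rv64imafdc_zicsr_zifencei";
 *   riscv,isa-base       = "rv64i";
 *   riscv,isa-extensions = "i", "m", "a", "f", "d", "c", "zicsr", "zifencei";
 */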
2869 #endif
2870 
2871 #define DEFINE_CPU(type_name, misa_mxl_max, initfn)         \
2872     {                                                       \
2873         .name = (type_name),                                \
2874         .parent = TYPE_RISCV_CPU,                           \
2875         .instance_init = (initfn),                          \
2876         .class_init = riscv_cpu_class_init,                 \
2877         .class_data = (void *)(misa_mxl_max)                \
2878     }
2879 
2880 #define DEFINE_DYNAMIC_CPU(type_name, misa_mxl_max, initfn) \
2881     {                                                       \
2882         .name = (type_name),                                \
2883         .parent = TYPE_RISCV_DYNAMIC_CPU,                   \
2884         .instance_init = (initfn),                          \
2885         .class_init = riscv_cpu_class_init,                 \
2886         .class_data = (void *)(misa_mxl_max)                \
2887     }
2888 
2889 #define DEFINE_VENDOR_CPU(type_name, misa_mxl_max, initfn)  \
2890     {                                                       \
2891         .name = (type_name),                                \
2892         .parent = TYPE_RISCV_VENDOR_CPU,                    \
2893         .instance_init = (initfn),                          \
2894         .class_init = riscv_cpu_class_init,                 \
2895         .class_data = (void *)(misa_mxl_max)                \
2896     }
2897 
2898 #define DEFINE_BARE_CPU(type_name, misa_mxl_max, initfn)    \
2899     {                                                       \
2900         .name = (type_name),                                \
2901         .parent = TYPE_RISCV_BARE_CPU,                      \
2902         .instance_init = (initfn),                          \
2903         .class_init = riscv_cpu_class_init,                 \
2904         .class_data = (void *)(misa_mxl_max)                \
2905     }
2906 
2907 #define DEFINE_PROFILE_CPU(type_name, misa_mxl_max, initfn) \
2908     {                                                       \
2909         .name = (type_name),                                \
2910         .parent = TYPE_RISCV_BARE_CPU,                      \
2911         .instance_init = (initfn),                          \
2912         .class_init = riscv_cpu_class_init,                 \
2913         .class_data = (void *)(misa_mxl_max)                \
2914     }
2915 
2916 static const TypeInfo riscv_cpu_type_infos[] = {
2917     {
2918         .name = TYPE_RISCV_CPU,
2919         .parent = TYPE_CPU,
2920         .instance_size = sizeof(RISCVCPU),
2921         .instance_align = __alignof(RISCVCPU),
2922         .instance_init = riscv_cpu_init,
2923         .instance_post_init = riscv_cpu_post_init,
2924         .abstract = true,
2925         .class_size = sizeof(RISCVCPUClass),
2926         .class_init = riscv_cpu_common_class_init,
2927     },
2928     {
2929         .name = TYPE_RISCV_DYNAMIC_CPU,
2930         .parent = TYPE_RISCV_CPU,
2931         .abstract = true,
2932     },
2933     {
2934         .name = TYPE_RISCV_VENDOR_CPU,
2935         .parent = TYPE_RISCV_CPU,
2936         .abstract = true,
2937     },
2938     {
2939         .name = TYPE_RISCV_BARE_CPU,
2940         .parent = TYPE_RISCV_CPU,
2941         .instance_init = riscv_bare_cpu_init,
2942         .abstract = true,
2943     },
2944 #if defined(TARGET_RISCV32)
2945     DEFINE_DYNAMIC_CPU(TYPE_RISCV_CPU_ANY,       MXL_RV32,  riscv_any_cpu_init),
2946     DEFINE_DYNAMIC_CPU(TYPE_RISCV_CPU_MAX,       MXL_RV32,  riscv_max_cpu_init),
2947     DEFINE_DYNAMIC_CPU(TYPE_RISCV_CPU_BASE32,    MXL_RV32,  rv32_base_cpu_init),
2948     DEFINE_VENDOR_CPU(TYPE_RISCV_CPU_IBEX,       MXL_RV32,  rv32_ibex_cpu_init),
2949     DEFINE_VENDOR_CPU(TYPE_RISCV_CPU_SIFIVE_E31, MXL_RV32,  rv32_sifive_e_cpu_init),
2950     DEFINE_VENDOR_CPU(TYPE_RISCV_CPU_SIFIVE_E34, MXL_RV32,  rv32_imafcu_nommu_cpu_init),
2951     DEFINE_VENDOR_CPU(TYPE_RISCV_CPU_SIFIVE_U34, MXL_RV32,  rv32_sifive_u_cpu_init),
2952     DEFINE_BARE_CPU(TYPE_RISCV_CPU_RV32I,        MXL_RV32,  rv32i_bare_cpu_init),
2953     DEFINE_BARE_CPU(TYPE_RISCV_CPU_RV32E,        MXL_RV32,  rv32e_bare_cpu_init),
2954 #elif defined(TARGET_RISCV64)
2955     DEFINE_DYNAMIC_CPU(TYPE_RISCV_CPU_ANY,       MXL_RV64,  riscv_any_cpu_init),
2956     DEFINE_DYNAMIC_CPU(TYPE_RISCV_CPU_MAX,       MXL_RV64,  riscv_max_cpu_init),
2957     DEFINE_DYNAMIC_CPU(TYPE_RISCV_CPU_BASE64,    MXL_RV64,  rv64_base_cpu_init),
2958     DEFINE_VENDOR_CPU(TYPE_RISCV_CPU_SIFIVE_E51, MXL_RV64,  rv64_sifive_e_cpu_init),
2959     DEFINE_VENDOR_CPU(TYPE_RISCV_CPU_SIFIVE_U54, MXL_RV64,  rv64_sifive_u_cpu_init),
2960     DEFINE_VENDOR_CPU(TYPE_RISCV_CPU_SHAKTI_C,   MXL_RV64,  rv64_sifive_u_cpu_init),
2961     DEFINE_VENDOR_CPU(TYPE_RISCV_CPU_THEAD_C906, MXL_RV64,  rv64_thead_c906_cpu_init),
2962     DEFINE_VENDOR_CPU(TYPE_RISCV_CPU_VEYRON_V1,  MXL_RV64,  rv64_veyron_v1_cpu_init),
2963 #ifdef CONFIG_TCG
2964     DEFINE_DYNAMIC_CPU(TYPE_RISCV_CPU_BASE128,   MXL_RV128, rv128_base_cpu_init),
2965 #endif /* CONFIG_TCG */
2966     DEFINE_BARE_CPU(TYPE_RISCV_CPU_RV64I,        MXL_RV64,  rv64i_bare_cpu_init),
2967     DEFINE_BARE_CPU(TYPE_RISCV_CPU_RV64E,        MXL_RV64,  rv64e_bare_cpu_init),
2968     DEFINE_PROFILE_CPU(TYPE_RISCV_CPU_RVA22U64,  MXL_RV64,  rva22u64_profile_cpu_init),
2969     DEFINE_PROFILE_CPU(TYPE_RISCV_CPU_RVA22S64,  MXL_RV64,  rva22s64_profile_cpu_init),
2970 #endif /* TARGET_RISCV64 */
2971 };
2972 
2973 DEFINE_TYPES(riscv_cpu_type_infos)
2974