xref: /openbmc/qemu/target/riscv/cpu.c (revision 67e98eba)
1 /*
2  * QEMU RISC-V CPU
3  *
4  * Copyright (c) 2016-2017 Sagar Karandikar, sagark@eecs.berkeley.edu
5  * Copyright (c) 2017-2018 SiFive, Inc.
6  *
7  * This program is free software; you can redistribute it and/or modify it
8  * under the terms and conditions of the GNU General Public License,
9  * version 2 or later, as published by the Free Software Foundation.
10  *
11  * This program is distributed in the hope it will be useful, but WITHOUT
12  * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
13  * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
14  * more details.
15  *
16  * You should have received a copy of the GNU General Public License along with
17  * this program.  If not, see <http://www.gnu.org/licenses/>.
18  */
19 
20 #include "qemu/osdep.h"
21 #include "qemu/qemu-print.h"
22 #include "qemu/ctype.h"
23 #include "qemu/log.h"
24 #include "cpu.h"
25 #include "cpu_vendorid.h"
26 #include "internals.h"
27 #include "exec/exec-all.h"
28 #include "qapi/error.h"
29 #include "qapi/visitor.h"
30 #include "qemu/error-report.h"
31 #include "hw/qdev-properties.h"
32 #include "hw/core/qdev-prop-internal.h"
33 #include "migration/vmstate.h"
34 #include "fpu/softfloat-helpers.h"
35 #include "sysemu/device_tree.h"
36 #include "sysemu/kvm.h"
37 #include "sysemu/tcg.h"
38 #include "kvm/kvm_riscv.h"
39 #include "tcg/tcg-cpu.h"
40 #include "tcg/tcg.h"
41 
42 /* RISC-V CPU definitions */
43 static const char riscv_single_letter_exts[] = "IEMAFDQCBPVH";
44 const uint32_t misa_bits[] = {RVI, RVE, RVM, RVA, RVF, RVD, RVV,
45                               RVC, RVS, RVU, RVH, RVJ, RVG, RVB, 0};
46 
47 /*
48  * From vector_helper.c
49  * Note that vector data is stored in host-endian 64-bit chunks,
50  * so addressing bytes needs a host-endian fixup.
51  */
52 #if HOST_BIG_ENDIAN
53 #define BYTE(x)   ((x) ^ 7)
54 #else
55 #define BYTE(x)   (x)
56 #endif
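
/*
 * A minimal usage sketch: with vlenb bytes per vector register, byte
 * 'j' of register 'i' can be read as
 *
 *     uint8_t b = ((uint8_t *)env->vreg)[i * vlenb + BYTE(j)];
 *
 * which mirrors how the dump loop in riscv_cpu_dump_state() below walks
 * the register file independently of host endianness.
 */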
57 
58 bool riscv_cpu_is_32bit(RISCVCPU *cpu)
59 {
60     return riscv_cpu_mxl(&cpu->env) == MXL_RV32;
61 }
62 
63 /* Hash that stores general user-set numeric options */
64 static GHashTable *general_user_opts;
65 
66 static void cpu_option_add_user_setting(const char *optname, uint32_t value)
67 {
68     g_hash_table_insert(general_user_opts, (gpointer)optname,
69                         GUINT_TO_POINTER(value));
70 }
71 
72 bool riscv_cpu_option_set(const char *optname)
73 {
74     return g_hash_table_contains(general_user_opts, optname);
75 }
76 
77 #define ISA_EXT_DATA_ENTRY(_name, _min_ver, _prop) \
78     {#_name, _min_ver, CPU_CFG_OFFSET(_prop)}
79 
80 /*
81  * Here are the ordering rules for extension naming defined by the
82  * RISC-V specification:
83  * 1. All extensions should be separated from other multi-letter extensions
84  *    by an underscore.
85  * 2. The first letter following the 'Z' conventionally indicates the most
86  *    closely related alphabetical extension category, IMAFDQLCBKJTPVH.
87  *    If multiple 'Z' extensions are named, they should be ordered first
88  *    by category, then alphabetically within a category.
89  * 3. Standard supervisor-level extensions (starting with 'S') should be
90  *    listed after standard unprivileged extensions.  If multiple
91  *    supervisor-level extensions are listed, they should be ordered
92  *    alphabetically.
93  * 4. Non-standard extensions (starting with 'X') must be listed after all
94  *    standard extensions. They must be separated from other multi-letter
95  *    extensions by an underscore.
96  *
97  * Single-letter extensions are checked in riscv_cpu_validate_misa_priv()
98  * instead.
99  */
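
/*
 * A hypothetical riscv,isa string that follows the rules above:
 *
 *     rv64imafdc_zicsr_zifencei_zba_zbb_svinval_xtheadba
 *
 * single-letter extensions first, then 'Z' extensions grouped by
 * category (I before B) and sorted within a category, then the
 * supervisor-level 'S' extension, then the vendor 'X' extension, with
 * all multi-letter names separated by underscores.
 */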
100 const RISCVIsaExtData isa_edata_arr[] = {
101     ISA_EXT_DATA_ENTRY(zic64b, PRIV_VERSION_1_12_0, ext_zic64b),
102     ISA_EXT_DATA_ENTRY(zicbom, PRIV_VERSION_1_12_0, ext_zicbom),
103     ISA_EXT_DATA_ENTRY(zicbop, PRIV_VERSION_1_12_0, ext_zicbop),
104     ISA_EXT_DATA_ENTRY(zicboz, PRIV_VERSION_1_12_0, ext_zicboz),
105     ISA_EXT_DATA_ENTRY(ziccamoa, PRIV_VERSION_1_11_0, has_priv_1_11),
106     ISA_EXT_DATA_ENTRY(ziccif, PRIV_VERSION_1_11_0, has_priv_1_11),
107     ISA_EXT_DATA_ENTRY(zicclsm, PRIV_VERSION_1_11_0, has_priv_1_11),
108     ISA_EXT_DATA_ENTRY(ziccrse, PRIV_VERSION_1_11_0, has_priv_1_11),
109     ISA_EXT_DATA_ENTRY(zicond, PRIV_VERSION_1_12_0, ext_zicond),
110     ISA_EXT_DATA_ENTRY(zicntr, PRIV_VERSION_1_12_0, ext_zicntr),
111     ISA_EXT_DATA_ENTRY(zicsr, PRIV_VERSION_1_10_0, ext_zicsr),
112     ISA_EXT_DATA_ENTRY(zifencei, PRIV_VERSION_1_10_0, ext_zifencei),
113     ISA_EXT_DATA_ENTRY(zihintntl, PRIV_VERSION_1_10_0, ext_zihintntl),
114     ISA_EXT_DATA_ENTRY(zihintpause, PRIV_VERSION_1_10_0, ext_zihintpause),
115     ISA_EXT_DATA_ENTRY(zihpm, PRIV_VERSION_1_12_0, ext_zihpm),
116     ISA_EXT_DATA_ENTRY(zimop, PRIV_VERSION_1_13_0, ext_zimop),
117     ISA_EXT_DATA_ENTRY(zmmul, PRIV_VERSION_1_12_0, ext_zmmul),
118     ISA_EXT_DATA_ENTRY(za64rs, PRIV_VERSION_1_12_0, has_priv_1_11),
119     ISA_EXT_DATA_ENTRY(zaamo, PRIV_VERSION_1_12_0, ext_zaamo),
120     ISA_EXT_DATA_ENTRY(zacas, PRIV_VERSION_1_12_0, ext_zacas),
121     ISA_EXT_DATA_ENTRY(zalrsc, PRIV_VERSION_1_12_0, ext_zalrsc),
122     ISA_EXT_DATA_ENTRY(zawrs, PRIV_VERSION_1_12_0, ext_zawrs),
123     ISA_EXT_DATA_ENTRY(zfa, PRIV_VERSION_1_12_0, ext_zfa),
124     ISA_EXT_DATA_ENTRY(zfbfmin, PRIV_VERSION_1_12_0, ext_zfbfmin),
125     ISA_EXT_DATA_ENTRY(zfh, PRIV_VERSION_1_11_0, ext_zfh),
126     ISA_EXT_DATA_ENTRY(zfhmin, PRIV_VERSION_1_11_0, ext_zfhmin),
127     ISA_EXT_DATA_ENTRY(zfinx, PRIV_VERSION_1_12_0, ext_zfinx),
128     ISA_EXT_DATA_ENTRY(zdinx, PRIV_VERSION_1_12_0, ext_zdinx),
129     ISA_EXT_DATA_ENTRY(zca, PRIV_VERSION_1_12_0, ext_zca),
130     ISA_EXT_DATA_ENTRY(zcb, PRIV_VERSION_1_12_0, ext_zcb),
131     ISA_EXT_DATA_ENTRY(zcf, PRIV_VERSION_1_12_0, ext_zcf),
132     ISA_EXT_DATA_ENTRY(zcd, PRIV_VERSION_1_12_0, ext_zcd),
133     ISA_EXT_DATA_ENTRY(zce, PRIV_VERSION_1_12_0, ext_zce),
134     ISA_EXT_DATA_ENTRY(zcmop, PRIV_VERSION_1_13_0, ext_zcmop),
135     ISA_EXT_DATA_ENTRY(zcmp, PRIV_VERSION_1_12_0, ext_zcmp),
136     ISA_EXT_DATA_ENTRY(zcmt, PRIV_VERSION_1_12_0, ext_zcmt),
137     ISA_EXT_DATA_ENTRY(zba, PRIV_VERSION_1_12_0, ext_zba),
138     ISA_EXT_DATA_ENTRY(zbb, PRIV_VERSION_1_12_0, ext_zbb),
139     ISA_EXT_DATA_ENTRY(zbc, PRIV_VERSION_1_12_0, ext_zbc),
140     ISA_EXT_DATA_ENTRY(zbkb, PRIV_VERSION_1_12_0, ext_zbkb),
141     ISA_EXT_DATA_ENTRY(zbkc, PRIV_VERSION_1_12_0, ext_zbkc),
142     ISA_EXT_DATA_ENTRY(zbkx, PRIV_VERSION_1_12_0, ext_zbkx),
143     ISA_EXT_DATA_ENTRY(zbs, PRIV_VERSION_1_12_0, ext_zbs),
144     ISA_EXT_DATA_ENTRY(zk, PRIV_VERSION_1_12_0, ext_zk),
145     ISA_EXT_DATA_ENTRY(zkn, PRIV_VERSION_1_12_0, ext_zkn),
146     ISA_EXT_DATA_ENTRY(zknd, PRIV_VERSION_1_12_0, ext_zknd),
147     ISA_EXT_DATA_ENTRY(zkne, PRIV_VERSION_1_12_0, ext_zkne),
148     ISA_EXT_DATA_ENTRY(zknh, PRIV_VERSION_1_12_0, ext_zknh),
149     ISA_EXT_DATA_ENTRY(zkr, PRIV_VERSION_1_12_0, ext_zkr),
150     ISA_EXT_DATA_ENTRY(zks, PRIV_VERSION_1_12_0, ext_zks),
151     ISA_EXT_DATA_ENTRY(zksed, PRIV_VERSION_1_12_0, ext_zksed),
152     ISA_EXT_DATA_ENTRY(zksh, PRIV_VERSION_1_12_0, ext_zksh),
153     ISA_EXT_DATA_ENTRY(zkt, PRIV_VERSION_1_12_0, ext_zkt),
154     ISA_EXT_DATA_ENTRY(ztso, PRIV_VERSION_1_12_0, ext_ztso),
155     ISA_EXT_DATA_ENTRY(zvbb, PRIV_VERSION_1_12_0, ext_zvbb),
156     ISA_EXT_DATA_ENTRY(zvbc, PRIV_VERSION_1_12_0, ext_zvbc),
157     ISA_EXT_DATA_ENTRY(zve32f, PRIV_VERSION_1_10_0, ext_zve32f),
158     ISA_EXT_DATA_ENTRY(zve32x, PRIV_VERSION_1_10_0, ext_zve32x),
159     ISA_EXT_DATA_ENTRY(zve64f, PRIV_VERSION_1_10_0, ext_zve64f),
160     ISA_EXT_DATA_ENTRY(zve64d, PRIV_VERSION_1_10_0, ext_zve64d),
161     ISA_EXT_DATA_ENTRY(zve64x, PRIV_VERSION_1_10_0, ext_zve64x),
162     ISA_EXT_DATA_ENTRY(zvfbfmin, PRIV_VERSION_1_12_0, ext_zvfbfmin),
163     ISA_EXT_DATA_ENTRY(zvfbfwma, PRIV_VERSION_1_12_0, ext_zvfbfwma),
164     ISA_EXT_DATA_ENTRY(zvfh, PRIV_VERSION_1_12_0, ext_zvfh),
165     ISA_EXT_DATA_ENTRY(zvfhmin, PRIV_VERSION_1_12_0, ext_zvfhmin),
166     ISA_EXT_DATA_ENTRY(zvkb, PRIV_VERSION_1_12_0, ext_zvkb),
167     ISA_EXT_DATA_ENTRY(zvkg, PRIV_VERSION_1_12_0, ext_zvkg),
168     ISA_EXT_DATA_ENTRY(zvkn, PRIV_VERSION_1_12_0, ext_zvkn),
169     ISA_EXT_DATA_ENTRY(zvknc, PRIV_VERSION_1_12_0, ext_zvknc),
170     ISA_EXT_DATA_ENTRY(zvkned, PRIV_VERSION_1_12_0, ext_zvkned),
171     ISA_EXT_DATA_ENTRY(zvkng, PRIV_VERSION_1_12_0, ext_zvkng),
172     ISA_EXT_DATA_ENTRY(zvknha, PRIV_VERSION_1_12_0, ext_zvknha),
173     ISA_EXT_DATA_ENTRY(zvknhb, PRIV_VERSION_1_12_0, ext_zvknhb),
174     ISA_EXT_DATA_ENTRY(zvks, PRIV_VERSION_1_12_0, ext_zvks),
175     ISA_EXT_DATA_ENTRY(zvksc, PRIV_VERSION_1_12_0, ext_zvksc),
176     ISA_EXT_DATA_ENTRY(zvksed, PRIV_VERSION_1_12_0, ext_zvksed),
177     ISA_EXT_DATA_ENTRY(zvksg, PRIV_VERSION_1_12_0, ext_zvksg),
178     ISA_EXT_DATA_ENTRY(zvksh, PRIV_VERSION_1_12_0, ext_zvksh),
179     ISA_EXT_DATA_ENTRY(zvkt, PRIV_VERSION_1_12_0, ext_zvkt),
180     ISA_EXT_DATA_ENTRY(zhinx, PRIV_VERSION_1_12_0, ext_zhinx),
181     ISA_EXT_DATA_ENTRY(zhinxmin, PRIV_VERSION_1_12_0, ext_zhinxmin),
182     ISA_EXT_DATA_ENTRY(smaia, PRIV_VERSION_1_12_0, ext_smaia),
183     ISA_EXT_DATA_ENTRY(smepmp, PRIV_VERSION_1_12_0, ext_smepmp),
184     ISA_EXT_DATA_ENTRY(smstateen, PRIV_VERSION_1_12_0, ext_smstateen),
185     ISA_EXT_DATA_ENTRY(ssaia, PRIV_VERSION_1_12_0, ext_ssaia),
186     ISA_EXT_DATA_ENTRY(ssccptr, PRIV_VERSION_1_11_0, has_priv_1_11),
187     ISA_EXT_DATA_ENTRY(sscofpmf, PRIV_VERSION_1_12_0, ext_sscofpmf),
188     ISA_EXT_DATA_ENTRY(sscounterenw, PRIV_VERSION_1_12_0, has_priv_1_12),
189     ISA_EXT_DATA_ENTRY(sstc, PRIV_VERSION_1_12_0, ext_sstc),
190     ISA_EXT_DATA_ENTRY(sstvala, PRIV_VERSION_1_12_0, has_priv_1_12),
191     ISA_EXT_DATA_ENTRY(sstvecd, PRIV_VERSION_1_12_0, has_priv_1_12),
192     ISA_EXT_DATA_ENTRY(svade, PRIV_VERSION_1_11_0, ext_svade),
193     ISA_EXT_DATA_ENTRY(svadu, PRIV_VERSION_1_12_0, ext_svadu),
194     ISA_EXT_DATA_ENTRY(svinval, PRIV_VERSION_1_12_0, ext_svinval),
195     ISA_EXT_DATA_ENTRY(svnapot, PRIV_VERSION_1_12_0, ext_svnapot),
196     ISA_EXT_DATA_ENTRY(svpbmt, PRIV_VERSION_1_12_0, ext_svpbmt),
197     ISA_EXT_DATA_ENTRY(xtheadba, PRIV_VERSION_1_11_0, ext_xtheadba),
198     ISA_EXT_DATA_ENTRY(xtheadbb, PRIV_VERSION_1_11_0, ext_xtheadbb),
199     ISA_EXT_DATA_ENTRY(xtheadbs, PRIV_VERSION_1_11_0, ext_xtheadbs),
200     ISA_EXT_DATA_ENTRY(xtheadcmo, PRIV_VERSION_1_11_0, ext_xtheadcmo),
201     ISA_EXT_DATA_ENTRY(xtheadcondmov, PRIV_VERSION_1_11_0, ext_xtheadcondmov),
202     ISA_EXT_DATA_ENTRY(xtheadfmemidx, PRIV_VERSION_1_11_0, ext_xtheadfmemidx),
203     ISA_EXT_DATA_ENTRY(xtheadfmv, PRIV_VERSION_1_11_0, ext_xtheadfmv),
204     ISA_EXT_DATA_ENTRY(xtheadmac, PRIV_VERSION_1_11_0, ext_xtheadmac),
205     ISA_EXT_DATA_ENTRY(xtheadmemidx, PRIV_VERSION_1_11_0, ext_xtheadmemidx),
206     ISA_EXT_DATA_ENTRY(xtheadmempair, PRIV_VERSION_1_11_0, ext_xtheadmempair),
207     ISA_EXT_DATA_ENTRY(xtheadsync, PRIV_VERSION_1_11_0, ext_xtheadsync),
208     ISA_EXT_DATA_ENTRY(xventanacondops, PRIV_VERSION_1_12_0, ext_XVentanaCondOps),
209 
210     DEFINE_PROP_END_OF_LIST(),
211 };
212 
213 bool isa_ext_is_enabled(RISCVCPU *cpu, uint32_t ext_offset)
214 {
215     bool *ext_enabled = (void *)&cpu->cfg + ext_offset;
216 
217     return *ext_enabled;
218 }
219 
220 void isa_ext_update_enabled(RISCVCPU *cpu, uint32_t ext_offset, bool en)
221 {
222     bool *ext_enabled = (void *)&cpu->cfg + ext_offset;
223 
224     *ext_enabled = en;
225 }
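
/*
 * A minimal sketch of how these offset-based accessors are meant to be
 * used together with CPU_CFG_OFFSET() (the same macro used by
 * ISA_EXT_DATA_ENTRY() above), e.g.:
 *
 *     if (isa_ext_is_enabled(cpu, CPU_CFG_OFFSET(ext_zba))) {
 *         isa_ext_update_enabled(cpu, CPU_CFG_OFFSET(ext_zba), false);
 *     }
 *
 * so callers can iterate over isa_edata_arr[] without naming each
 * cpu->cfg field explicitly.
 */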
226 
227 bool riscv_cpu_is_vendor(Object *cpu_obj)
228 {
229     return object_dynamic_cast(cpu_obj, TYPE_RISCV_VENDOR_CPU) != NULL;
230 }
231 
232 const char * const riscv_int_regnames[] = {
233     "x0/zero", "x1/ra",  "x2/sp",  "x3/gp",  "x4/tp",  "x5/t0",   "x6/t1",
234     "x7/t2",   "x8/s0",  "x9/s1",  "x10/a0", "x11/a1", "x12/a2",  "x13/a3",
235     "x14/a4",  "x15/a5", "x16/a6", "x17/a7", "x18/s2", "x19/s3",  "x20/s4",
236     "x21/s5",  "x22/s6", "x23/s7", "x24/s8", "x25/s9", "x26/s10", "x27/s11",
237     "x28/t3",  "x29/t4", "x30/t5", "x31/t6"
238 };
239 
240 const char * const riscv_int_regnamesh[] = {
241     "x0h/zeroh", "x1h/rah",  "x2h/sph",   "x3h/gph",   "x4h/tph",  "x5h/t0h",
242     "x6h/t1h",   "x7h/t2h",  "x8h/s0h",   "x9h/s1h",   "x10h/a0h", "x11h/a1h",
243     "x12h/a2h",  "x13h/a3h", "x14h/a4h",  "x15h/a5h",  "x16h/a6h", "x17h/a7h",
244     "x18h/s2h",  "x19h/s3h", "x20h/s4h",  "x21h/s5h",  "x22h/s6h", "x23h/s7h",
245     "x24h/s8h",  "x25h/s9h", "x26h/s10h", "x27h/s11h", "x28h/t3h", "x29h/t4h",
246     "x30h/t5h",  "x31h/t6h"
247 };
248 
249 const char * const riscv_fpr_regnames[] = {
250     "f0/ft0",   "f1/ft1",  "f2/ft2",   "f3/ft3",   "f4/ft4",  "f5/ft5",
251     "f6/ft6",   "f7/ft7",  "f8/fs0",   "f9/fs1",   "f10/fa0", "f11/fa1",
252     "f12/fa2",  "f13/fa3", "f14/fa4",  "f15/fa5",  "f16/fa6", "f17/fa7",
253     "f18/fs2",  "f19/fs3", "f20/fs4",  "f21/fs5",  "f22/fs6", "f23/fs7",
254     "f24/fs8",  "f25/fs9", "f26/fs10", "f27/fs11", "f28/ft8", "f29/ft9",
255     "f30/ft10", "f31/ft11"
256 };
257 
258 const char * const riscv_rvv_regnames[] = {
259   "v0",  "v1",  "v2",  "v3",  "v4",  "v5",  "v6",
260   "v7",  "v8",  "v9",  "v10", "v11", "v12", "v13",
261   "v14", "v15", "v16", "v17", "v18", "v19", "v20",
262   "v21", "v22", "v23", "v24", "v25", "v26", "v27",
263   "v28", "v29", "v30", "v31"
264 };
265 
266 static const char * const riscv_excp_names[] = {
267     "misaligned_fetch",
268     "fault_fetch",
269     "illegal_instruction",
270     "breakpoint",
271     "misaligned_load",
272     "fault_load",
273     "misaligned_store",
274     "fault_store",
275     "user_ecall",
276     "supervisor_ecall",
277     "hypervisor_ecall",
278     "machine_ecall",
279     "exec_page_fault",
280     "load_page_fault",
281     "reserved",
282     "store_page_fault",
283     "reserved",
284     "reserved",
285     "reserved",
286     "reserved",
287     "guest_exec_page_fault",
288     "guest_load_page_fault",
289     "reserved",
290     "guest_store_page_fault",
291 };
292 
293 static const char * const riscv_intr_names[] = {
294     "u_software",
295     "s_software",
296     "vs_software",
297     "m_software",
298     "u_timer",
299     "s_timer",
300     "vs_timer",
301     "m_timer",
302     "u_external",
303     "s_external",
304     "vs_external",
305     "m_external",
306     "reserved",
307     "reserved",
308     "reserved",
309     "reserved"
310 };
311 
312 const char *riscv_cpu_get_trap_name(target_ulong cause, bool async)
313 {
314     if (async) {
315         return (cause < ARRAY_SIZE(riscv_intr_names)) ?
316                riscv_intr_names[cause] : "(unknown)";
317     } else {
318         return (cause < ARRAY_SIZE(riscv_excp_names)) ?
319                riscv_excp_names[cause] : "(unknown)";
320     }
321 }
322 
323 void riscv_cpu_set_misa_ext(CPURISCVState *env, uint32_t ext)
324 {
325     env->misa_ext_mask = env->misa_ext = ext;
326 }
327 
328 int riscv_cpu_max_xlen(RISCVCPUClass *mcc)
329 {
330     return 16 << mcc->misa_mxl_max;
331 }
332 
333 #ifndef CONFIG_USER_ONLY
334 static uint8_t satp_mode_from_str(const char *satp_mode_str)
335 {
336     if (!strncmp(satp_mode_str, "mbare", 5)) {
337         return VM_1_10_MBARE;
338     }
339 
340     if (!strncmp(satp_mode_str, "sv32", 4)) {
341         return VM_1_10_SV32;
342     }
343 
344     if (!strncmp(satp_mode_str, "sv39", 4)) {
345         return VM_1_10_SV39;
346     }
347 
348     if (!strncmp(satp_mode_str, "sv48", 4)) {
349         return VM_1_10_SV48;
350     }
351 
352     if (!strncmp(satp_mode_str, "sv57", 4)) {
353         return VM_1_10_SV57;
354     }
355 
356     if (!strncmp(satp_mode_str, "sv64", 4)) {
357         return VM_1_10_SV64;
358     }
359 
360     g_assert_not_reached();
361 }
362 
363 uint8_t satp_mode_max_from_map(uint32_t map)
364 {
365     /*
366      * 'map = 0' would make __builtin_clz() undefined and, even if it
367      * returned 32, (31 - 32) would wrap around in the uint8_t return
368      * type. There's no good result to return if 'map = 0' (e.g.
369      * returning 0 would be ambiguous with the result for 'map = 1').
370      *
371      * Assert out if map = 0. Callers will have to deal with
372      * it outside of this function.
373      */
374     g_assert(map > 0);
375 
376     /* map here has at least one bit set, so no problem with clz */
377     return 31 - __builtin_clz(map);
378 }
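
/*
 * Worked example (illustrative): for map = 0x101 (bits 0 and 8 set),
 * __builtin_clz(0x101) is 23 on a 32-bit int, so the function returns
 * 31 - 23 = 8, i.e. the highest satp mode bit present in the map.
 */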
379 
380 const char *satp_mode_str(uint8_t satp_mode, bool is_32_bit)
381 {
382     if (is_32_bit) {
383         switch (satp_mode) {
384         case VM_1_10_SV32:
385             return "sv32";
386         case VM_1_10_MBARE:
387             return "none";
388         }
389     } else {
390         switch (satp_mode) {
391         case VM_1_10_SV64:
392             return "sv64";
393         case VM_1_10_SV57:
394             return "sv57";
395         case VM_1_10_SV48:
396             return "sv48";
397         case VM_1_10_SV39:
398             return "sv39";
399         case VM_1_10_MBARE:
400             return "none";
401         }
402     }
403 
404     g_assert_not_reached();
405 }
406 
407 static void set_satp_mode_max_supported(RISCVCPU *cpu,
408                                         uint8_t satp_mode)
409 {
410     bool rv32 = riscv_cpu_mxl(&cpu->env) == MXL_RV32;
411     const bool *valid_vm = rv32 ? valid_vm_1_10_32 : valid_vm_1_10_64;
412 
413     for (int i = 0; i <= satp_mode; ++i) {
414         if (valid_vm[i]) {
415             cpu->cfg.satp_mode.supported |= (1 << i);
416         }
417     }
418 }
419 
420 /* Set the satp mode to the max supported */
421 static void set_satp_mode_default_map(RISCVCPU *cpu)
422 {
423     /*
424      * Bare CPUs do not default to the maximum available satp mode.
425      * Users must set a valid satp_mode on the command
426      * line.
427      */
428     if (object_dynamic_cast(OBJECT(cpu), TYPE_RISCV_BARE_CPU) != NULL) {
429         warn_report("No satp mode set. Defaulting to 'bare'");
430         cpu->cfg.satp_mode.map = (1 << VM_1_10_MBARE);
431         return;
432     }
433 
434     cpu->cfg.satp_mode.map = cpu->cfg.satp_mode.supported;
435 }
436 #endif
437 
438 static void riscv_any_cpu_init(Object *obj)
439 {
440     RISCVCPU *cpu = RISCV_CPU(obj);
441     CPURISCVState *env = &cpu->env;
442     riscv_cpu_set_misa_ext(env, RVI | RVM | RVA | RVF | RVD | RVC | RVU);
443 
444 #ifndef CONFIG_USER_ONLY
445     set_satp_mode_max_supported(RISCV_CPU(obj),
446         riscv_cpu_mxl(&RISCV_CPU(obj)->env) == MXL_RV32 ?
447         VM_1_10_SV32 : VM_1_10_SV57);
448 #endif
449 
450     env->priv_ver = PRIV_VERSION_LATEST;
451 
452     /* inherited from parent obj via riscv_cpu_init() */
453     cpu->cfg.ext_zifencei = true;
454     cpu->cfg.ext_zicsr = true;
455     cpu->cfg.mmu = true;
456     cpu->cfg.pmp = true;
457 }
458 
459 static void riscv_max_cpu_init(Object *obj)
460 {
461     RISCVCPU *cpu = RISCV_CPU(obj);
462     CPURISCVState *env = &cpu->env;
463 
464     cpu->cfg.mmu = true;
465     cpu->cfg.pmp = true;
466 
467     env->priv_ver = PRIV_VERSION_LATEST;
468 #ifndef CONFIG_USER_ONLY
469 #ifdef TARGET_RISCV32
470     set_satp_mode_max_supported(cpu, VM_1_10_SV32);
471 #else
472     set_satp_mode_max_supported(cpu, VM_1_10_SV57);
473 #endif
474 #endif
475 }
476 
477 #if defined(TARGET_RISCV64)
478 static void rv64_base_cpu_init(Object *obj)
479 {
480     RISCVCPU *cpu = RISCV_CPU(obj);
481     CPURISCVState *env = &cpu->env;
482 
483     cpu->cfg.mmu = true;
484     cpu->cfg.pmp = true;
485 
486     /* Set latest version of privileged specification */
487     env->priv_ver = PRIV_VERSION_LATEST;
488 #ifndef CONFIG_USER_ONLY
489     set_satp_mode_max_supported(RISCV_CPU(obj), VM_1_10_SV57);
490 #endif
491 }
492 
493 static void rv64_sifive_u_cpu_init(Object *obj)
494 {
495     RISCVCPU *cpu = RISCV_CPU(obj);
496     CPURISCVState *env = &cpu->env;
497     riscv_cpu_set_misa_ext(env, RVI | RVM | RVA | RVF | RVD | RVC | RVS | RVU);
498     env->priv_ver = PRIV_VERSION_1_10_0;
499 #ifndef CONFIG_USER_ONLY
500     set_satp_mode_max_supported(RISCV_CPU(obj), VM_1_10_SV39);
501 #endif
502 
503     /* inherited from parent obj via riscv_cpu_init() */
504     cpu->cfg.ext_zifencei = true;
505     cpu->cfg.ext_zicsr = true;
506     cpu->cfg.mmu = true;
507     cpu->cfg.pmp = true;
508 }
509 
510 static void rv64_sifive_e_cpu_init(Object *obj)
511 {
512     CPURISCVState *env = &RISCV_CPU(obj)->env;
513     RISCVCPU *cpu = RISCV_CPU(obj);
514 
515     riscv_cpu_set_misa_ext(env, RVI | RVM | RVA | RVC | RVU);
516     env->priv_ver = PRIV_VERSION_1_10_0;
517 #ifndef CONFIG_USER_ONLY
518     set_satp_mode_max_supported(cpu, VM_1_10_MBARE);
519 #endif
520 
521     /* inherited from parent obj via riscv_cpu_init() */
522     cpu->cfg.ext_zifencei = true;
523     cpu->cfg.ext_zicsr = true;
524     cpu->cfg.pmp = true;
525 }
526 
527 static void rv64_thead_c906_cpu_init(Object *obj)
528 {
529     CPURISCVState *env = &RISCV_CPU(obj)->env;
530     RISCVCPU *cpu = RISCV_CPU(obj);
531 
532     riscv_cpu_set_misa_ext(env, RVG | RVC | RVS | RVU);
533     env->priv_ver = PRIV_VERSION_1_11_0;
534 
535     cpu->cfg.ext_zfa = true;
536     cpu->cfg.ext_zfh = true;
537     cpu->cfg.mmu = true;
538     cpu->cfg.ext_xtheadba = true;
539     cpu->cfg.ext_xtheadbb = true;
540     cpu->cfg.ext_xtheadbs = true;
541     cpu->cfg.ext_xtheadcmo = true;
542     cpu->cfg.ext_xtheadcondmov = true;
543     cpu->cfg.ext_xtheadfmemidx = true;
544     cpu->cfg.ext_xtheadmac = true;
545     cpu->cfg.ext_xtheadmemidx = true;
546     cpu->cfg.ext_xtheadmempair = true;
547     cpu->cfg.ext_xtheadsync = true;
548 
549     cpu->cfg.mvendorid = THEAD_VENDOR_ID;
550 #ifndef CONFIG_USER_ONLY
551     set_satp_mode_max_supported(cpu, VM_1_10_SV39);
552     th_register_custom_csrs(cpu);
553 #endif
554 
555     /* inherited from parent obj via riscv_cpu_init() */
556     cpu->cfg.pmp = true;
557 }
558 
559 static void rv64_veyron_v1_cpu_init(Object *obj)
560 {
561     CPURISCVState *env = &RISCV_CPU(obj)->env;
562     RISCVCPU *cpu = RISCV_CPU(obj);
563 
564     riscv_cpu_set_misa_ext(env, RVG | RVC | RVS | RVU | RVH);
565     env->priv_ver = PRIV_VERSION_1_12_0;
566 
567     /* Enable ISA extensions */
568     cpu->cfg.mmu = true;
569     cpu->cfg.ext_zifencei = true;
570     cpu->cfg.ext_zicsr = true;
571     cpu->cfg.pmp = true;
572     cpu->cfg.ext_zicbom = true;
573     cpu->cfg.cbom_blocksize = 64;
574     cpu->cfg.cboz_blocksize = 64;
575     cpu->cfg.ext_zicboz = true;
576     cpu->cfg.ext_smaia = true;
577     cpu->cfg.ext_ssaia = true;
578     cpu->cfg.ext_sscofpmf = true;
579     cpu->cfg.ext_sstc = true;
580     cpu->cfg.ext_svinval = true;
581     cpu->cfg.ext_svnapot = true;
582     cpu->cfg.ext_svpbmt = true;
583     cpu->cfg.ext_smstateen = true;
584     cpu->cfg.ext_zba = true;
585     cpu->cfg.ext_zbb = true;
586     cpu->cfg.ext_zbc = true;
587     cpu->cfg.ext_zbs = true;
588     cpu->cfg.ext_XVentanaCondOps = true;
589 
590     cpu->cfg.mvendorid = VEYRON_V1_MVENDORID;
591     cpu->cfg.marchid = VEYRON_V1_MARCHID;
592     cpu->cfg.mimpid = VEYRON_V1_MIMPID;
593 
594 #ifndef CONFIG_USER_ONLY
595     set_satp_mode_max_supported(cpu, VM_1_10_SV48);
596 #endif
597 }
598 
599 #ifdef CONFIG_TCG
600 static void rv128_base_cpu_init(Object *obj)
601 {
602     RISCVCPU *cpu = RISCV_CPU(obj);
603     CPURISCVState *env = &cpu->env;
604 
605     if (qemu_tcg_mttcg_enabled()) {
606         /* Missing 128-bit aligned atomics */
607         error_report("128-bit RISC-V currently does not work with Multi "
608                      "Threaded TCG. Please use: -accel tcg,thread=single");
609         exit(EXIT_FAILURE);
610     }
611 
612     cpu->cfg.mmu = true;
613     cpu->cfg.pmp = true;
614 
615     /* Set latest version of privileged specification */
616     env->priv_ver = PRIV_VERSION_LATEST;
617 #ifndef CONFIG_USER_ONLY
618     set_satp_mode_max_supported(RISCV_CPU(obj), VM_1_10_SV57);
619 #endif
620 }
621 #endif /* CONFIG_TCG */
622 
623 static void rv64i_bare_cpu_init(Object *obj)
624 {
625     CPURISCVState *env = &RISCV_CPU(obj)->env;
626     riscv_cpu_set_misa_ext(env, RVI);
627 }
628 
629 static void rv64e_bare_cpu_init(Object *obj)
630 {
631     CPURISCVState *env = &RISCV_CPU(obj)->env;
632     riscv_cpu_set_misa_ext(env, RVE);
633 }
634 
635 #else /* !TARGET_RISCV64 */
636 
637 static void rv32_base_cpu_init(Object *obj)
638 {
639     RISCVCPU *cpu = RISCV_CPU(obj);
640     CPURISCVState *env = &cpu->env;
641 
642     cpu->cfg.mmu = true;
643     cpu->cfg.pmp = true;
644 
645     /* Set latest version of privileged specification */
646     env->priv_ver = PRIV_VERSION_LATEST;
647 #ifndef CONFIG_USER_ONLY
648     set_satp_mode_max_supported(RISCV_CPU(obj), VM_1_10_SV32);
649 #endif
650 }
651 
652 static void rv32_sifive_u_cpu_init(Object *obj)
653 {
654     RISCVCPU *cpu = RISCV_CPU(obj);
655     CPURISCVState *env = &cpu->env;
656     riscv_cpu_set_misa_ext(env, RVI | RVM | RVA | RVF | RVD | RVC | RVS | RVU);
657     env->priv_ver = PRIV_VERSION_1_10_0;
658 #ifndef CONFIG_USER_ONLY
659     set_satp_mode_max_supported(RISCV_CPU(obj), VM_1_10_SV32);
660 #endif
661 
662     /* inherited from parent obj via riscv_cpu_init() */
663     cpu->cfg.ext_zifencei = true;
664     cpu->cfg.ext_zicsr = true;
665     cpu->cfg.mmu = true;
666     cpu->cfg.pmp = true;
667 }
668 
669 static void rv32_sifive_e_cpu_init(Object *obj)
670 {
671     CPURISCVState *env = &RISCV_CPU(obj)->env;
672     RISCVCPU *cpu = RISCV_CPU(obj);
673 
674     riscv_cpu_set_misa_ext(env, RVI | RVM | RVA | RVC | RVU);
675     env->priv_ver = PRIV_VERSION_1_10_0;
676 #ifndef CONFIG_USER_ONLY
677     set_satp_mode_max_supported(cpu, VM_1_10_MBARE);
678 #endif
679 
680     /* inherited from parent obj via riscv_cpu_init() */
681     cpu->cfg.ext_zifencei = true;
682     cpu->cfg.ext_zicsr = true;
683     cpu->cfg.pmp = true;
684 }
685 
686 static void rv32_ibex_cpu_init(Object *obj)
687 {
688     CPURISCVState *env = &RISCV_CPU(obj)->env;
689     RISCVCPU *cpu = RISCV_CPU(obj);
690 
691     riscv_cpu_set_misa_ext(env, RVI | RVM | RVC | RVU);
692     env->priv_ver = PRIV_VERSION_1_12_0;
693 #ifndef CONFIG_USER_ONLY
694     set_satp_mode_max_supported(cpu, VM_1_10_MBARE);
695 #endif
696     /* inherited from parent obj via riscv_cpu_init() */
697     cpu->cfg.ext_zifencei = true;
698     cpu->cfg.ext_zicsr = true;
699     cpu->cfg.pmp = true;
700     cpu->cfg.ext_smepmp = true;
701 }
702 
703 static void rv32_imafcu_nommu_cpu_init(Object *obj)
704 {
705     CPURISCVState *env = &RISCV_CPU(obj)->env;
706     RISCVCPU *cpu = RISCV_CPU(obj);
707 
708     riscv_cpu_set_misa_ext(env, RVI | RVM | RVA | RVF | RVC | RVU);
709     env->priv_ver = PRIV_VERSION_1_10_0;
710 #ifndef CONFIG_USER_ONLY
711     set_satp_mode_max_supported(cpu, VM_1_10_MBARE);
712 #endif
713 
714     /* inherited from parent obj via riscv_cpu_init() */
715     cpu->cfg.ext_zifencei = true;
716     cpu->cfg.ext_zicsr = true;
717     cpu->cfg.pmp = true;
718 }
719 
720 static void rv32i_bare_cpu_init(Object *obj)
721 {
722     CPURISCVState *env = &RISCV_CPU(obj)->env;
723     riscv_cpu_set_misa_ext(env, RVI);
724 }
725 
726 static void rv32e_bare_cpu_init(Object *obj)
727 {
728     CPURISCVState *env = &RISCV_CPU(obj)->env;
729     riscv_cpu_set_misa_ext(env, RVE);
730 }
731 #endif
732 
733 static ObjectClass *riscv_cpu_class_by_name(const char *cpu_model)
734 {
735     ObjectClass *oc;
736     char *typename;
737     char **cpuname;
738 
739     cpuname = g_strsplit(cpu_model, ",", 1);
740     typename = g_strdup_printf(RISCV_CPU_TYPE_NAME("%s"), cpuname[0]);
741     oc = object_class_by_name(typename);
742     g_strfreev(cpuname);
743     g_free(typename);
744 
745     return oc;
746 }
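
/*
 * A minimal sketch, assuming the usual RISCV_CPU_TYPE_NAME() suffix:
 * looking up the model "rv64" builds a QOM typename along the lines of
 * "rv64-riscv-cpu" and resolves it with object_class_by_name(),
 * returning NULL if no such CPU type is registered.
 */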
747 
748 char *riscv_cpu_get_name(RISCVCPU *cpu)
749 {
750     RISCVCPUClass *rcc = RISCV_CPU_GET_CLASS(cpu);
751     const char *typename = object_class_get_name(OBJECT_CLASS(rcc));
752 
753     g_assert(g_str_has_suffix(typename, RISCV_CPU_TYPE_SUFFIX));
754 
755     return cpu_model_from_type(typename);
756 }
757 
758 static void riscv_cpu_dump_state(CPUState *cs, FILE *f, int flags)
759 {
760     RISCVCPU *cpu = RISCV_CPU(cs);
761     CPURISCVState *env = &cpu->env;
762     int i, j;
763     uint8_t *p;
764 
765 #if !defined(CONFIG_USER_ONLY)
766     if (riscv_has_ext(env, RVH)) {
767         qemu_fprintf(f, " %s %d\n", "V      =  ", env->virt_enabled);
768     }
769 #endif
770     qemu_fprintf(f, " %s " TARGET_FMT_lx "\n", "pc      ", env->pc);
771 #ifndef CONFIG_USER_ONLY
772     {
773         static const int dump_csrs[] = {
774             CSR_MHARTID,
775             CSR_MSTATUS,
776             CSR_MSTATUSH,
777             /*
778              * CSR_SSTATUS is intentionally omitted here as its value
779              * can be figured out by looking at CSR_MSTATUS
780              */
781             CSR_HSTATUS,
782             CSR_VSSTATUS,
783             CSR_MIP,
784             CSR_MIE,
785             CSR_MIDELEG,
786             CSR_HIDELEG,
787             CSR_MEDELEG,
788             CSR_HEDELEG,
789             CSR_MTVEC,
790             CSR_STVEC,
791             CSR_VSTVEC,
792             CSR_MEPC,
793             CSR_SEPC,
794             CSR_VSEPC,
795             CSR_MCAUSE,
796             CSR_SCAUSE,
797             CSR_VSCAUSE,
798             CSR_MTVAL,
799             CSR_STVAL,
800             CSR_HTVAL,
801             CSR_MTVAL2,
802             CSR_MSCRATCH,
803             CSR_SSCRATCH,
804             CSR_SATP,
805             CSR_MMTE,
806             CSR_UPMBASE,
807             CSR_UPMMASK,
808             CSR_SPMBASE,
809             CSR_SPMMASK,
810             CSR_MPMBASE,
811             CSR_MPMMASK,
812         };
813 
814         for (i = 0; i < ARRAY_SIZE(dump_csrs); ++i) {
815             int csrno = dump_csrs[i];
816             target_ulong val = 0;
817             RISCVException res = riscv_csrrw_debug(env, csrno, &val, 0, 0);
818 
819             /*
820              * Rely on the smode, hmode, etc. predicates within csr.c
821              * to do the filtering of the registers that are present.
822              */
823             if (res == RISCV_EXCP_NONE) {
824                 qemu_fprintf(f, " %-8s " TARGET_FMT_lx "\n",
825                              csr_ops[csrno].name, val);
826             }
827         }
828     }
829 #endif
830 
831     for (i = 0; i < 32; i++) {
832         qemu_fprintf(f, " %-8s " TARGET_FMT_lx,
833                      riscv_int_regnames[i], env->gpr[i]);
834         if ((i & 3) == 3) {
835             qemu_fprintf(f, "\n");
836         }
837     }
838     if (flags & CPU_DUMP_FPU) {
839         for (i = 0; i < 32; i++) {
840             qemu_fprintf(f, " %-8s %016" PRIx64,
841                          riscv_fpr_regnames[i], env->fpr[i]);
842             if ((i & 3) == 3) {
843                 qemu_fprintf(f, "\n");
844             }
845         }
846     }
847     if (riscv_has_ext(env, RVV) && (flags & CPU_DUMP_VPU)) {
848         static const int dump_rvv_csrs[] = {
849                     CSR_VSTART,
850                     CSR_VXSAT,
851                     CSR_VXRM,
852                     CSR_VCSR,
853                     CSR_VL,
854                     CSR_VTYPE,
855                     CSR_VLENB,
856                 };
857         for (i = 0; i < ARRAY_SIZE(dump_rvv_csrs); ++i) {
858             int csrno = dump_rvv_csrs[i];
859             target_ulong val = 0;
860             RISCVException res = riscv_csrrw_debug(env, csrno, &val, 0, 0);
861 
862             /*
863              * Rely on the smode, hmode, etc. predicates within csr.c
864              * to do the filtering of the registers that are present.
865              */
866             if (res == RISCV_EXCP_NONE) {
867                 qemu_fprintf(f, " %-8s " TARGET_FMT_lx "\n",
868                              csr_ops[csrno].name, val);
869             }
870         }
871         uint16_t vlenb = cpu->cfg.vlenb;
872 
873         for (i = 0; i < 32; i++) {
874             qemu_fprintf(f, " %-8s ", riscv_rvv_regnames[i]);
875             p = (uint8_t *)env->vreg;
876             for (j = vlenb - 1 ; j >= 0; j--) {
877                 qemu_fprintf(f, "%02x", *(p + i * vlenb + BYTE(j)));
878             }
879             qemu_fprintf(f, "\n");
880         }
881     }
882 }
883 
884 static void riscv_cpu_set_pc(CPUState *cs, vaddr value)
885 {
886     RISCVCPU *cpu = RISCV_CPU(cs);
887     CPURISCVState *env = &cpu->env;
888 
889     if (env->xl == MXL_RV32) {
890         env->pc = (int32_t)value;
891     } else {
892         env->pc = value;
893     }
894 }
895 
896 static vaddr riscv_cpu_get_pc(CPUState *cs)
897 {
898     RISCVCPU *cpu = RISCV_CPU(cs);
899     CPURISCVState *env = &cpu->env;
900 
901     /* Match cpu_get_tb_cpu_state. */
902     if (env->xl == MXL_RV32) {
903         return env->pc & UINT32_MAX;
904     }
905     return env->pc;
906 }
907 
908 bool riscv_cpu_has_work(CPUState *cs)
909 {
910 #ifndef CONFIG_USER_ONLY
911     RISCVCPU *cpu = RISCV_CPU(cs);
912     CPURISCVState *env = &cpu->env;
913     /*
914      * Definition of the WFI instruction requires it to ignore the privilege
915      * mode and delegation registers, but respect individual enables.
916      */
917     return riscv_cpu_all_pending(env) != 0 ||
918         riscv_cpu_sirq_pending(env) != RISCV_EXCP_NONE ||
919         riscv_cpu_vsirq_pending(env) != RISCV_EXCP_NONE;
920 #else
921     return true;
922 #endif
923 }
924 
925 static int riscv_cpu_mmu_index(CPUState *cs, bool ifetch)
926 {
927     return riscv_env_mmu_index(cpu_env(cs), ifetch);
928 }
929 
930 static void riscv_cpu_reset_hold(Object *obj, ResetType type)
931 {
932 #ifndef CONFIG_USER_ONLY
933     uint8_t iprio;
934     int i, irq, rdzero;
935 #endif
936     CPUState *cs = CPU(obj);
937     RISCVCPU *cpu = RISCV_CPU(cs);
938     RISCVCPUClass *mcc = RISCV_CPU_GET_CLASS(obj);
939     CPURISCVState *env = &cpu->env;
940 
941     if (mcc->parent_phases.hold) {
942         mcc->parent_phases.hold(obj, type);
943     }
944 #ifndef CONFIG_USER_ONLY
945     env->misa_mxl = mcc->misa_mxl_max;
946     env->priv = PRV_M;
947     env->mstatus &= ~(MSTATUS_MIE | MSTATUS_MPRV);
948     if (env->misa_mxl > MXL_RV32) {
949         /*
950          * The reset status of SXL/UXL is undefined, but mstatus is WARL
951      * and we must ensure that the value after reset is valid when read.
952          */
953         env->mstatus = set_field(env->mstatus, MSTATUS64_SXL, env->misa_mxl);
954         env->mstatus = set_field(env->mstatus, MSTATUS64_UXL, env->misa_mxl);
955         if (riscv_has_ext(env, RVH)) {
956             env->vsstatus = set_field(env->vsstatus,
957                                       MSTATUS64_SXL, env->misa_mxl);
958             env->vsstatus = set_field(env->vsstatus,
959                                       MSTATUS64_UXL, env->misa_mxl);
960             env->mstatus_hs = set_field(env->mstatus_hs,
961                                         MSTATUS64_SXL, env->misa_mxl);
962             env->mstatus_hs = set_field(env->mstatus_hs,
963                                         MSTATUS64_UXL, env->misa_mxl);
964         }
965     }
966     env->mcause = 0;
967     env->miclaim = MIP_SGEIP;
968     env->pc = env->resetvec;
969     env->bins = 0;
970     env->two_stage_lookup = false;
971 
972     env->menvcfg = (cpu->cfg.ext_svpbmt ? MENVCFG_PBMTE : 0) |
973                    (!cpu->cfg.ext_svade && cpu->cfg.ext_svadu ?
974                     MENVCFG_ADUE : 0);
975     env->henvcfg = 0;
976 
977     /* Initialize default priorities of local interrupts. */
978     for (i = 0; i < ARRAY_SIZE(env->miprio); i++) {
979         iprio = riscv_cpu_default_priority(i);
980         env->miprio[i] = (i == IRQ_M_EXT) ? 0 : iprio;
981         env->siprio[i] = (i == IRQ_S_EXT) ? 0 : iprio;
982         env->hviprio[i] = 0;
983     }
984     i = 0;
985     while (!riscv_cpu_hviprio_index2irq(i, &irq, &rdzero)) {
986         if (!rdzero) {
987             env->hviprio[irq] = env->miprio[irq];
988         }
989         i++;
990     }
991     /* mmte is supposed to have pm.current hardwired to 1 */
992     env->mmte |= (EXT_STATUS_INITIAL | MMTE_M_PM_CURRENT);
993 
994     /*
995      * Bits 10, 6, 2 and 12 of mideleg are read-only 1 when the Hypervisor
996      * extension is enabled.
997      */
998     if (riscv_has_ext(env, RVH)) {
999         env->mideleg |= HS_MODE_INTERRUPTS;
1000     }
1001 
1002     /*
1003      * Clear mseccfg and unlock all the PMP entries upon reset.
1004      * This is allowed as per the priv and smepmp specifications
1005      * and is needed to clear stale entries across reboots.
1006      */
1007     if (riscv_cpu_cfg(env)->ext_smepmp) {
1008         env->mseccfg = 0;
1009     }
1010 
1011     pmp_unlock_entries(env);
1012 #endif
1013     env->xl = riscv_cpu_mxl(env);
1014     riscv_cpu_update_mask(env);
1015     cs->exception_index = RISCV_EXCP_NONE;
1016     env->load_res = -1;
1017     set_default_nan_mode(1, &env->fp_status);
1018 
1019 #ifndef CONFIG_USER_ONLY
1020     if (cpu->cfg.debug) {
1021         riscv_trigger_reset_hold(env);
1022     }
1023 
1024     if (kvm_enabled()) {
1025         kvm_riscv_reset_vcpu(cpu);
1026     }
1027 #endif
1028 }
1029 
1030 static void riscv_cpu_disas_set_info(CPUState *s, disassemble_info *info)
1031 {
1032     RISCVCPU *cpu = RISCV_CPU(s);
1033     CPURISCVState *env = &cpu->env;
1034     info->target_info = &cpu->cfg;
1035 
1036     switch (env->xl) {
1037     case MXL_RV32:
1038         info->print_insn = print_insn_riscv32;
1039         break;
1040     case MXL_RV64:
1041         info->print_insn = print_insn_riscv64;
1042         break;
1043     case MXL_RV128:
1044         info->print_insn = print_insn_riscv128;
1045         break;
1046     default:
1047         g_assert_not_reached();
1048     }
1049 }
1050 
1051 #ifndef CONFIG_USER_ONLY
1052 static void riscv_cpu_satp_mode_finalize(RISCVCPU *cpu, Error **errp)
1053 {
1054     bool rv32 = riscv_cpu_is_32bit(cpu);
1055     uint8_t satp_mode_map_max, satp_mode_supported_max;
1056 
1057     /* The CPU wants the OS to decide which satp mode to use */
1058     if (cpu->cfg.satp_mode.supported == 0) {
1059         return;
1060     }
1061 
1062     satp_mode_supported_max =
1063                     satp_mode_max_from_map(cpu->cfg.satp_mode.supported);
1064 
1065     if (cpu->cfg.satp_mode.map == 0) {
1066         if (cpu->cfg.satp_mode.init == 0) {
1067             /* If unset by the user, we fall back to the default satp mode. */
1068             set_satp_mode_default_map(cpu);
1069         } else {
1070             /*
1071              * Find the lowest level that was disabled and then enable the
1072              * first valid level below which can be found in
1073              * valid_vm_1_10_32/64.
1074              */
1075             for (int i = 1; i < 16; ++i) {
1076                 if ((cpu->cfg.satp_mode.init & (1 << i)) &&
1077                     (cpu->cfg.satp_mode.supported & (1 << i))) {
1078                     for (int j = i - 1; j >= 0; --j) {
1079                         if (cpu->cfg.satp_mode.supported & (1 << j)) {
1080                             cpu->cfg.satp_mode.map |= (1 << j);
1081                             break;
1082                         }
1083                     }
1084                     break;
1085                 }
1086             }
1087         }
1088     }
1089 
1090     satp_mode_map_max = satp_mode_max_from_map(cpu->cfg.satp_mode.map);
1091 
1092     /* Make sure the user asked for a supported configuration (HW and QEMU) */
1093     if (satp_mode_map_max > satp_mode_supported_max) {
1094         error_setg(errp, "satp_mode %s is higher than hw max capability %s",
1095                    satp_mode_str(satp_mode_map_max, rv32),
1096                    satp_mode_str(satp_mode_supported_max, rv32));
1097         return;
1098     }
1099 
1100     /*
1101      * Make sure the user did not ask for an invalid configuration as per
1102      * the specification.
1103      */
1104     if (!rv32) {
1105         for (int i = satp_mode_map_max - 1; i >= 0; --i) {
1106             if (!(cpu->cfg.satp_mode.map & (1 << i)) &&
1107                 (cpu->cfg.satp_mode.init & (1 << i)) &&
1108                 (cpu->cfg.satp_mode.supported & (1 << i))) {
1109                 error_setg(errp, "cannot disable %s satp mode if %s "
1110                            "is enabled", satp_mode_str(i, false),
1111                            satp_mode_str(satp_mode_map_max, false));
1112                 return;
1113             }
1114         }
1115     }
1116 
1117     /* Finally expand the map so that all valid modes are set */
1118     for (int i = satp_mode_map_max - 1; i >= 0; --i) {
1119         if (cpu->cfg.satp_mode.supported & (1 << i)) {
1120             cpu->cfg.satp_mode.map |= (1 << i);
1121         }
1122     }
1123 }
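
/*
 * Worked example (illustrative): on a 64-bit CPU whose supported map
 * reaches sv57, a user who only passes "sv48=on" ends up with
 * satp_mode.map covering sv48, sv39 and mbare after the expansion loop
 * above, while sv57 stays cleared because it was never requested.
 */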
1124 #endif
1125 
1126 void riscv_cpu_finalize_features(RISCVCPU *cpu, Error **errp)
1127 {
1128     Error *local_err = NULL;
1129 
1130 #ifndef CONFIG_USER_ONLY
1131     riscv_cpu_satp_mode_finalize(cpu, &local_err);
1132     if (local_err != NULL) {
1133         error_propagate(errp, local_err);
1134         return;
1135     }
1136 #endif
1137 
1138     if (tcg_enabled()) {
1139         riscv_tcg_cpu_finalize_features(cpu, &local_err);
1140         if (local_err != NULL) {
1141             error_propagate(errp, local_err);
1142             return;
1143         }
1144         riscv_tcg_cpu_finalize_dynamic_decoder(cpu);
1145     } else if (kvm_enabled()) {
1146         riscv_kvm_cpu_finalize_features(cpu, &local_err);
1147         if (local_err != NULL) {
1148             error_propagate(errp, local_err);
1149             return;
1150         }
1151     }
1152 }
1153 
1154 static void riscv_cpu_realize(DeviceState *dev, Error **errp)
1155 {
1156     CPUState *cs = CPU(dev);
1157     RISCVCPU *cpu = RISCV_CPU(dev);
1158     RISCVCPUClass *mcc = RISCV_CPU_GET_CLASS(dev);
1159     Error *local_err = NULL;
1160 
1161     if (object_dynamic_cast(OBJECT(dev), TYPE_RISCV_CPU_ANY) != NULL) {
1162         warn_report("The 'any' CPU is deprecated and will be "
1163                     "removed in the future.");
1164     }
1165 
1166     cpu_exec_realizefn(cs, &local_err);
1167     if (local_err != NULL) {
1168         error_propagate(errp, local_err);
1169         return;
1170     }
1171 
1172     riscv_cpu_finalize_features(cpu, &local_err);
1173     if (local_err != NULL) {
1174         error_propagate(errp, local_err);
1175         return;
1176     }
1177 
1178     riscv_cpu_register_gdb_regs_for_features(cs);
1179 
1180 #ifndef CONFIG_USER_ONLY
1181     if (cpu->cfg.debug) {
1182         riscv_trigger_realize(&cpu->env);
1183     }
1184 #endif
1185 
1186     qemu_init_vcpu(cs);
1187     cpu_reset(cs);
1188 
1189     mcc->parent_realize(dev, errp);
1190 }
1191 
1192 bool riscv_cpu_accelerator_compatible(RISCVCPU *cpu)
1193 {
1194     if (tcg_enabled()) {
1195         return riscv_cpu_tcg_compatible(cpu);
1196     }
1197 
1198     return true;
1199 }
1200 
1201 #ifndef CONFIG_USER_ONLY
1202 static void cpu_riscv_get_satp(Object *obj, Visitor *v, const char *name,
1203                                void *opaque, Error **errp)
1204 {
1205     RISCVSATPMap *satp_map = opaque;
1206     uint8_t satp = satp_mode_from_str(name);
1207     bool value;
1208 
1209     value = satp_map->map & (1 << satp);
1210 
1211     visit_type_bool(v, name, &value, errp);
1212 }
1213 
1214 static void cpu_riscv_set_satp(Object *obj, Visitor *v, const char *name,
1215                                void *opaque, Error **errp)
1216 {
1217     RISCVSATPMap *satp_map = opaque;
1218     uint8_t satp = satp_mode_from_str(name);
1219     bool value;
1220 
1221     if (!visit_type_bool(v, name, &value, errp)) {
1222         return;
1223     }
1224 
1225     satp_map->map = deposit32(satp_map->map, satp, 1, value);
1226     satp_map->init |= 1 << satp;
1227 }
1228 
1229 void riscv_add_satp_mode_properties(Object *obj)
1230 {
1231     RISCVCPU *cpu = RISCV_CPU(obj);
1232 
1233     if (cpu->env.misa_mxl == MXL_RV32) {
1234         object_property_add(obj, "sv32", "bool", cpu_riscv_get_satp,
1235                             cpu_riscv_set_satp, NULL, &cpu->cfg.satp_mode);
1236     } else {
1237         object_property_add(obj, "sv39", "bool", cpu_riscv_get_satp,
1238                             cpu_riscv_set_satp, NULL, &cpu->cfg.satp_mode);
1239         object_property_add(obj, "sv48", "bool", cpu_riscv_get_satp,
1240                             cpu_riscv_set_satp, NULL, &cpu->cfg.satp_mode);
1241         object_property_add(obj, "sv57", "bool", cpu_riscv_get_satp,
1242                             cpu_riscv_set_satp, NULL, &cpu->cfg.satp_mode);
1243         object_property_add(obj, "sv64", "bool", cpu_riscv_get_satp,
1244                             cpu_riscv_set_satp, NULL, &cpu->cfg.satp_mode);
1245     }
1246 }
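
/*
 * Illustrative usage, assuming the standard -cpu property syntax: the
 * boolean properties registered above are what make command lines such
 * as
 *
 *     -cpu rv64,sv39=on
 *
 * work; each property toggles one bit of cpu->cfg.satp_mode.map via
 * cpu_riscv_set_satp() and records the user's choice in .init.
 */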
1247 
1248 static void riscv_cpu_set_irq(void *opaque, int irq, int level)
1249 {
1250     RISCVCPU *cpu = RISCV_CPU(opaque);
1251     CPURISCVState *env = &cpu->env;
1252 
1253     if (irq < IRQ_LOCAL_MAX) {
1254         switch (irq) {
1255         case IRQ_U_SOFT:
1256         case IRQ_S_SOFT:
1257         case IRQ_VS_SOFT:
1258         case IRQ_M_SOFT:
1259         case IRQ_U_TIMER:
1260         case IRQ_S_TIMER:
1261         case IRQ_VS_TIMER:
1262         case IRQ_M_TIMER:
1263         case IRQ_U_EXT:
1264         case IRQ_VS_EXT:
1265         case IRQ_M_EXT:
1266             if (kvm_enabled()) {
1267                 kvm_riscv_set_irq(cpu, irq, level);
1268             } else {
1269                 riscv_cpu_update_mip(env, 1 << irq, BOOL_TO_MASK(level));
1270             }
1271             break;
1272         case IRQ_S_EXT:
1273             if (kvm_enabled()) {
1274                 kvm_riscv_set_irq(cpu, irq, level);
1275             } else {
1276                 env->external_seip = level;
1277                 riscv_cpu_update_mip(env, 1 << irq,
1278                                      BOOL_TO_MASK(level | env->software_seip));
1279             }
1280             break;
1281         default:
1282             g_assert_not_reached();
1283         }
1284     } else if (irq < (IRQ_LOCAL_MAX + IRQ_LOCAL_GUEST_MAX)) {
1285         /* Require H-extension for handling guest local interrupts */
1286         if (!riscv_has_ext(env, RVH)) {
1287             g_assert_not_reached();
1288         }
1289 
1290         /* Compute bit position in HGEIP CSR */
1291         irq = irq - IRQ_LOCAL_MAX + 1;
1292         if (env->geilen < irq) {
1293             g_assert_not_reached();
1294         }
1295 
1296         /* Update HGEIP CSR */
1297         env->hgeip &= ~((target_ulong)1 << irq);
1298         if (level) {
1299             env->hgeip |= (target_ulong)1 << irq;
1300         }
1301 
1302         /* Update mip.SGEIP bit */
1303         riscv_cpu_update_mip(env, MIP_SGEIP,
1304                              BOOL_TO_MASK(!!(env->hgeie & env->hgeip)));
1305     } else {
1306         g_assert_not_reached();
1307     }
1308 }
1309 #endif /* CONFIG_USER_ONLY */
1310 
1311 static bool riscv_cpu_is_dynamic(Object *cpu_obj)
1312 {
1313     return object_dynamic_cast(cpu_obj, TYPE_RISCV_DYNAMIC_CPU) != NULL;
1314 }
1315 
1316 static void riscv_cpu_post_init(Object *obj)
1317 {
1318     accel_cpu_instance_init(CPU(obj));
1319 }
1320 
1321 static void riscv_cpu_init(Object *obj)
1322 {
1323     RISCVCPUClass *mcc = RISCV_CPU_GET_CLASS(obj);
1324     RISCVCPU *cpu = RISCV_CPU(obj);
1325     CPURISCVState *env = &cpu->env;
1326 
1327     env->misa_mxl = mcc->misa_mxl_max;
1328 
1329 #ifndef CONFIG_USER_ONLY
1330     qdev_init_gpio_in(DEVICE(obj), riscv_cpu_set_irq,
1331                       IRQ_LOCAL_MAX + IRQ_LOCAL_GUEST_MAX);
1332 #endif /* CONFIG_USER_ONLY */
1333 
1334     general_user_opts = g_hash_table_new(g_str_hash, g_str_equal);
1335 
1336     /*
1337      * The timer and performance counter extensions were supported
1338      * in QEMU before they were added as discrete extensions in the
1339      * ISA. To keep compatibility, we'll always default them to 'true'
1340      * for all CPUs. Each accelerator will decide what to do when
1341      * users disable them.
1342      */
1343     RISCV_CPU(obj)->cfg.ext_zicntr = true;
1344     RISCV_CPU(obj)->cfg.ext_zihpm = true;
1345 
1346     /* Default values for non-bool cpu properties */
1347     cpu->cfg.pmu_mask = MAKE_64BIT_MASK(3, 16);
1348     cpu->cfg.vlenb = 128 >> 3;
1349     cpu->cfg.elen = 64;
1350     cpu->cfg.cbom_blocksize = 64;
1351     cpu->cfg.cbop_blocksize = 64;
1352     cpu->cfg.cboz_blocksize = 64;
1353     cpu->env.vext_ver = VEXT_VERSION_1_00_0;
1354 }
1355 
1356 static void riscv_bare_cpu_init(Object *obj)
1357 {
1358     RISCVCPU *cpu = RISCV_CPU(obj);
1359 
1360     /*
1361      * Bare CPUs do not inherit the timer and performance
1362      * counters from the parent class (see riscv_cpu_init()
1363      * for info on why the parent enables them).
1364      *
1365      * Users have to explicitly enable these counters for
1366      * bare CPUs.
1367      */
1368     cpu->cfg.ext_zicntr = false;
1369     cpu->cfg.ext_zihpm = false;
1370 
1371     /* Set to QEMU's first supported priv version */
1372     cpu->env.priv_ver = PRIV_VERSION_1_10_0;
1373 
1374     /*
1375      * Support all available satp_mode settings. The default
1376      * value will be set to MBARE if the user doesn't set
1377      * satp_mode manually (see set_satp_mode_default_map()).
1378      */
1379 #ifndef CONFIG_USER_ONLY
1380     set_satp_mode_max_supported(cpu, VM_1_10_SV64);
1381 #endif
1382 }
1383 
1384 typedef struct misa_ext_info {
1385     const char *name;
1386     const char *description;
1387 } MISAExtInfo;
1388 
1389 #define MISA_INFO_IDX(_bit) \
1390     __builtin_ctz(_bit)
1391 
1392 #define MISA_EXT_INFO(_bit, _propname, _descr) \
1393     [MISA_INFO_IDX(_bit)] = {.name = _propname, .description = _descr}
1394 
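/*
 * Worked example (illustrative): RVA is the lowest MISA bit (value 1),
 * so MISA_INFO_IDX(RVA) == __builtin_ctz(1) == 0 and the "a" entry
 * below lands at index 0 of misa_ext_info_arr[].
 */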
1395 static const MISAExtInfo misa_ext_info_arr[] = {
1396     MISA_EXT_INFO(RVA, "a", "Atomic instructions"),
1397     MISA_EXT_INFO(RVC, "c", "Compressed instructions"),
1398     MISA_EXT_INFO(RVD, "d", "Double-precision floating-point"),
1399     MISA_EXT_INFO(RVF, "f", "Single-precision floating-point"),
1400     MISA_EXT_INFO(RVI, "i", "Base integer instruction set"),
1401     MISA_EXT_INFO(RVE, "e", "Base integer instruction set (embedded)"),
1402     MISA_EXT_INFO(RVM, "m", "Integer multiplication and division"),
1403     MISA_EXT_INFO(RVS, "s", "Supervisor-level instructions"),
1404     MISA_EXT_INFO(RVU, "u", "User-level instructions"),
1405     MISA_EXT_INFO(RVH, "h", "Hypervisor"),
1406     MISA_EXT_INFO(RVJ, "x-j", "Dynamically translated languages"),
1407     MISA_EXT_INFO(RVV, "v", "Vector operations"),
1408     MISA_EXT_INFO(RVG, "g", "General purpose (IMAFD_Zicsr_Zifencei)"),
1409     MISA_EXT_INFO(RVB, "b", "Bit manipulation (Zba_Zbb_Zbs)")
1410 };
1411 
1412 static void riscv_cpu_validate_misa_mxl(RISCVCPUClass *mcc)
1413 {
1414     CPUClass *cc = CPU_CLASS(mcc);
1415 
1416     /* Validate that MISA_MXL is set properly. */
1417     switch (mcc->misa_mxl_max) {
1418 #ifdef TARGET_RISCV64
1419     case MXL_RV64:
1420     case MXL_RV128:
1421         cc->gdb_core_xml_file = "riscv-64bit-cpu.xml";
1422         break;
1423 #endif
1424     case MXL_RV32:
1425         cc->gdb_core_xml_file = "riscv-32bit-cpu.xml";
1426         break;
1427     default:
1428         g_assert_not_reached();
1429     }
1430 }
1431 
1432 static int riscv_validate_misa_info_idx(uint32_t bit)
1433 {
1434     int idx;
1435 
1436     /*
1437      * Our lowest valid input (RVA) is 1 and
1438      * __builtin_ctz() is UB with zero.
1439      */
1440     g_assert(bit != 0);
1441     idx = MISA_INFO_IDX(bit);
1442 
1443     g_assert(idx < ARRAY_SIZE(misa_ext_info_arr));
1444     return idx;
1445 }
1446 
1447 const char *riscv_get_misa_ext_name(uint32_t bit)
1448 {
1449     int idx = riscv_validate_misa_info_idx(bit);
1450     const char *val = misa_ext_info_arr[idx].name;
1451 
1452     g_assert(val != NULL);
1453     return val;
1454 }
1455 
1456 const char *riscv_get_misa_ext_description(uint32_t bit)
1457 {
1458     int idx = riscv_validate_misa_info_idx(bit);
1459     const char *val = misa_ext_info_arr[idx].description;
1460 
1461     g_assert(val != NULL);
1462     return val;
1463 }
1464 
1465 #define MULTI_EXT_CFG_BOOL(_name, _prop, _defval) \
1466     {.name = _name, .offset = CPU_CFG_OFFSET(_prop), \
1467      .enabled = _defval}
1468 
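/*
 * A minimal expansion sketch: MULTI_EXT_CFG_BOOL("zba", ext_zba, true)
 * produces
 *
 *     {.name = "zba", .offset = CPU_CFG_OFFSET(ext_zba), .enabled = true}
 *
 * i.e. a property name, the offset of the backing cpu->cfg field and
 * its default value.
 */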
1469 const RISCVCPUMultiExtConfig riscv_cpu_extensions[] = {
1470     /* Defaults for standard extensions */
1471     MULTI_EXT_CFG_BOOL("sscofpmf", ext_sscofpmf, false),
1472     MULTI_EXT_CFG_BOOL("zifencei", ext_zifencei, true),
1473     MULTI_EXT_CFG_BOOL("zicsr", ext_zicsr, true),
1474     MULTI_EXT_CFG_BOOL("zihintntl", ext_zihintntl, true),
1475     MULTI_EXT_CFG_BOOL("zihintpause", ext_zihintpause, true),
1476     MULTI_EXT_CFG_BOOL("zimop", ext_zimop, false),
1477     MULTI_EXT_CFG_BOOL("zcmop", ext_zcmop, false),
1478     MULTI_EXT_CFG_BOOL("zacas", ext_zacas, false),
1479     MULTI_EXT_CFG_BOOL("zaamo", ext_zaamo, false),
1480     MULTI_EXT_CFG_BOOL("zalrsc", ext_zalrsc, false),
1481     MULTI_EXT_CFG_BOOL("zawrs", ext_zawrs, true),
1482     MULTI_EXT_CFG_BOOL("zfa", ext_zfa, true),
1483     MULTI_EXT_CFG_BOOL("zfbfmin", ext_zfbfmin, false),
1484     MULTI_EXT_CFG_BOOL("zfh", ext_zfh, false),
1485     MULTI_EXT_CFG_BOOL("zfhmin", ext_zfhmin, false),
1486     MULTI_EXT_CFG_BOOL("zve32f", ext_zve32f, false),
1487     MULTI_EXT_CFG_BOOL("zve32x", ext_zve32x, false),
1488     MULTI_EXT_CFG_BOOL("zve64f", ext_zve64f, false),
1489     MULTI_EXT_CFG_BOOL("zve64d", ext_zve64d, false),
1490     MULTI_EXT_CFG_BOOL("zve64x", ext_zve64x, false),
1491     MULTI_EXT_CFG_BOOL("zvfbfmin", ext_zvfbfmin, false),
1492     MULTI_EXT_CFG_BOOL("zvfbfwma", ext_zvfbfwma, false),
1493     MULTI_EXT_CFG_BOOL("zvfh", ext_zvfh, false),
1494     MULTI_EXT_CFG_BOOL("zvfhmin", ext_zvfhmin, false),
1495     MULTI_EXT_CFG_BOOL("sstc", ext_sstc, true),
1496 
1497     MULTI_EXT_CFG_BOOL("smaia", ext_smaia, false),
1498     MULTI_EXT_CFG_BOOL("smepmp", ext_smepmp, false),
1499     MULTI_EXT_CFG_BOOL("smstateen", ext_smstateen, false),
1500     MULTI_EXT_CFG_BOOL("ssaia", ext_ssaia, false),
1501     MULTI_EXT_CFG_BOOL("svade", ext_svade, false),
1502     MULTI_EXT_CFG_BOOL("svadu", ext_svadu, true),
1503     MULTI_EXT_CFG_BOOL("svinval", ext_svinval, false),
1504     MULTI_EXT_CFG_BOOL("svnapot", ext_svnapot, false),
1505     MULTI_EXT_CFG_BOOL("svpbmt", ext_svpbmt, false),
1506 
1507     MULTI_EXT_CFG_BOOL("zicntr", ext_zicntr, true),
1508     MULTI_EXT_CFG_BOOL("zihpm", ext_zihpm, true),
1509 
1510     MULTI_EXT_CFG_BOOL("zba", ext_zba, true),
1511     MULTI_EXT_CFG_BOOL("zbb", ext_zbb, true),
1512     MULTI_EXT_CFG_BOOL("zbc", ext_zbc, true),
1513     MULTI_EXT_CFG_BOOL("zbkb", ext_zbkb, false),
1514     MULTI_EXT_CFG_BOOL("zbkc", ext_zbkc, false),
1515     MULTI_EXT_CFG_BOOL("zbkx", ext_zbkx, false),
1516     MULTI_EXT_CFG_BOOL("zbs", ext_zbs, true),
1517     MULTI_EXT_CFG_BOOL("zk", ext_zk, false),
1518     MULTI_EXT_CFG_BOOL("zkn", ext_zkn, false),
1519     MULTI_EXT_CFG_BOOL("zknd", ext_zknd, false),
1520     MULTI_EXT_CFG_BOOL("zkne", ext_zkne, false),
1521     MULTI_EXT_CFG_BOOL("zknh", ext_zknh, false),
1522     MULTI_EXT_CFG_BOOL("zkr", ext_zkr, false),
1523     MULTI_EXT_CFG_BOOL("zks", ext_zks, false),
1524     MULTI_EXT_CFG_BOOL("zksed", ext_zksed, false),
1525     MULTI_EXT_CFG_BOOL("zksh", ext_zksh, false),
1526     MULTI_EXT_CFG_BOOL("zkt", ext_zkt, false),
1527     MULTI_EXT_CFG_BOOL("ztso", ext_ztso, false),
1528 
1529     MULTI_EXT_CFG_BOOL("zdinx", ext_zdinx, false),
1530     MULTI_EXT_CFG_BOOL("zfinx", ext_zfinx, false),
1531     MULTI_EXT_CFG_BOOL("zhinx", ext_zhinx, false),
1532     MULTI_EXT_CFG_BOOL("zhinxmin", ext_zhinxmin, false),
1533 
1534     MULTI_EXT_CFG_BOOL("zicbom", ext_zicbom, true),
1535     MULTI_EXT_CFG_BOOL("zicbop", ext_zicbop, true),
1536     MULTI_EXT_CFG_BOOL("zicboz", ext_zicboz, true),
1537 
1538     MULTI_EXT_CFG_BOOL("zmmul", ext_zmmul, false),
1539 
1540     MULTI_EXT_CFG_BOOL("zca", ext_zca, false),
1541     MULTI_EXT_CFG_BOOL("zcb", ext_zcb, false),
1542     MULTI_EXT_CFG_BOOL("zcd", ext_zcd, false),
1543     MULTI_EXT_CFG_BOOL("zce", ext_zce, false),
1544     MULTI_EXT_CFG_BOOL("zcf", ext_zcf, false),
1545     MULTI_EXT_CFG_BOOL("zcmp", ext_zcmp, false),
1546     MULTI_EXT_CFG_BOOL("zcmt", ext_zcmt, false),
1547     MULTI_EXT_CFG_BOOL("zicond", ext_zicond, false),
1548 
1549     /* Vector cryptography extensions */
1550     MULTI_EXT_CFG_BOOL("zvbb", ext_zvbb, false),
1551     MULTI_EXT_CFG_BOOL("zvbc", ext_zvbc, false),
1552     MULTI_EXT_CFG_BOOL("zvkb", ext_zvkb, false),
1553     MULTI_EXT_CFG_BOOL("zvkg", ext_zvkg, false),
1554     MULTI_EXT_CFG_BOOL("zvkned", ext_zvkned, false),
1555     MULTI_EXT_CFG_BOOL("zvknha", ext_zvknha, false),
1556     MULTI_EXT_CFG_BOOL("zvknhb", ext_zvknhb, false),
1557     MULTI_EXT_CFG_BOOL("zvksed", ext_zvksed, false),
1558     MULTI_EXT_CFG_BOOL("zvksh", ext_zvksh, false),
1559     MULTI_EXT_CFG_BOOL("zvkt", ext_zvkt, false),
1560     MULTI_EXT_CFG_BOOL("zvkn", ext_zvkn, false),
1561     MULTI_EXT_CFG_BOOL("zvknc", ext_zvknc, false),
1562     MULTI_EXT_CFG_BOOL("zvkng", ext_zvkng, false),
1563     MULTI_EXT_CFG_BOOL("zvks", ext_zvks, false),
1564     MULTI_EXT_CFG_BOOL("zvksc", ext_zvksc, false),
1565     MULTI_EXT_CFG_BOOL("zvksg", ext_zvksg, false),
1566 
1567     DEFINE_PROP_END_OF_LIST(),
1568 };
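/*
 * Illustrative sketch (not part of the build): each MULTI_EXT_CFG_BOOL
 * entry above presumably expands to a RISCVCPUMultiExtConfig initializer
 * along the lines of
 *
 *     { .name = "zba", .offset = CPU_CFG_OFFSET(ext_zba), .enabled = true }
 *
 * pairing the user-visible property name with the offset of its bool flag
 * in RISCVCPUConfig and its default value. These entries are registered as
 * QOM properties, so extensions can be toggled on the command line, e.g.
 * "-cpu rv64,zba=false,zvbb=true".
 */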
1569 
1570 const RISCVCPUMultiExtConfig riscv_cpu_vendor_exts[] = {
1571     MULTI_EXT_CFG_BOOL("xtheadba", ext_xtheadba, false),
1572     MULTI_EXT_CFG_BOOL("xtheadbb", ext_xtheadbb, false),
1573     MULTI_EXT_CFG_BOOL("xtheadbs", ext_xtheadbs, false),
1574     MULTI_EXT_CFG_BOOL("xtheadcmo", ext_xtheadcmo, false),
1575     MULTI_EXT_CFG_BOOL("xtheadcondmov", ext_xtheadcondmov, false),
1576     MULTI_EXT_CFG_BOOL("xtheadfmemidx", ext_xtheadfmemidx, false),
1577     MULTI_EXT_CFG_BOOL("xtheadfmv", ext_xtheadfmv, false),
1578     MULTI_EXT_CFG_BOOL("xtheadmac", ext_xtheadmac, false),
1579     MULTI_EXT_CFG_BOOL("xtheadmemidx", ext_xtheadmemidx, false),
1580     MULTI_EXT_CFG_BOOL("xtheadmempair", ext_xtheadmempair, false),
1581     MULTI_EXT_CFG_BOOL("xtheadsync", ext_xtheadsync, false),
1582     MULTI_EXT_CFG_BOOL("xventanacondops", ext_XVentanaCondOps, false),
1583 
1584     DEFINE_PROP_END_OF_LIST(),
1585 };
1586 
1587 /* These are experimental so mark with 'x-' */
1588 const RISCVCPUMultiExtConfig riscv_cpu_experimental_exts[] = {
1589     DEFINE_PROP_END_OF_LIST(),
1590 };
1591 
1592 /*
1593  * 'Named features' is the name we give to extensions that we
1594  * don't want to expose to users. They are either immutable
1595  * (always enabled/disabled) or they'll vary depending on
1596  * the resulting CPU state. They have riscv,isa strings
1597  * and priv_ver like regular extensions.
1598  */
1599 const RISCVCPUMultiExtConfig riscv_cpu_named_features[] = {
1600     MULTI_EXT_CFG_BOOL("zic64b", ext_zic64b, true),
1601 
1602     DEFINE_PROP_END_OF_LIST(),
1603 };
1604 
1605 /* Deprecated entries marked for future removal */
1606 const RISCVCPUMultiExtConfig riscv_cpu_deprecated_exts[] = {
1607     MULTI_EXT_CFG_BOOL("Zifencei", ext_zifencei, true),
1608     MULTI_EXT_CFG_BOOL("Zicsr", ext_zicsr, true),
1609     MULTI_EXT_CFG_BOOL("Zihintntl", ext_zihintntl, true),
1610     MULTI_EXT_CFG_BOOL("Zihintpause", ext_zihintpause, true),
1611     MULTI_EXT_CFG_BOOL("Zawrs", ext_zawrs, true),
1612     MULTI_EXT_CFG_BOOL("Zfa", ext_zfa, true),
1613     MULTI_EXT_CFG_BOOL("Zfh", ext_zfh, false),
1614     MULTI_EXT_CFG_BOOL("Zfhmin", ext_zfhmin, false),
1615     MULTI_EXT_CFG_BOOL("Zve32f", ext_zve32f, false),
1616     MULTI_EXT_CFG_BOOL("Zve64f", ext_zve64f, false),
1617     MULTI_EXT_CFG_BOOL("Zve64d", ext_zve64d, false),
1618 
1619     DEFINE_PROP_END_OF_LIST(),
1620 };
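/*
 * Note (illustrative): the capitalized spellings above such as "Zifencei"
 * are presumably kept only so that existing command lines like
 * "-cpu rv64,Zifencei=true" keep working until removal; new configurations
 * should use the lowercase properties from riscv_cpu_extensions[], e.g.
 * "zifencei=true".
 */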
1621 
1622 static void cpu_set_prop_err(RISCVCPU *cpu, const char *propname,
1623                              Error **errp)
1624 {
1625     g_autofree char *cpuname = riscv_cpu_get_name(cpu);
1626     error_setg(errp, "CPU '%s' does not allow changing the value of '%s'",
1627                cpuname, propname);
1628 }
1629 
1630 static void prop_pmu_num_set(Object *obj, Visitor *v, const char *name,
1631                              void *opaque, Error **errp)
1632 {
1633     RISCVCPU *cpu = RISCV_CPU(obj);
1634     uint8_t pmu_num, curr_pmu_num;
1635     uint32_t pmu_mask;
1636 
1637     visit_type_uint8(v, name, &pmu_num, errp);
1638 
1639     curr_pmu_num = ctpop32(cpu->cfg.pmu_mask);
1640 
1641     if (pmu_num != curr_pmu_num && riscv_cpu_is_vendor(obj)) {
1642         cpu_set_prop_err(cpu, name, errp);
1643         error_append_hint(errp, "Current '%s' val: %u\n",
1644                           name, curr_pmu_num);
1645         return;
1646     }
1647 
1648     if (pmu_num > (RV_MAX_MHPMCOUNTERS - 3)) {
1649         error_setg(errp, "Number of counters exceeds maximum available");
1650         return;
1651     }
1652 
1653     if (pmu_num == 0) {
1654         pmu_mask = 0;
1655     } else {
1656         pmu_mask = MAKE_64BIT_MASK(3, pmu_num);
1657     }
1658 
1659     warn_report("\"pmu-num\" property is deprecated; use \"pmu-mask\"");
1660     cpu->cfg.pmu_mask = pmu_mask;
1661     cpu_option_add_user_setting("pmu-mask", pmu_mask);
1662 }
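/*
 * Worked example for the legacy conversion above: "pmu-num=4" yields
 * pmu_mask = MAKE_64BIT_MASK(3, 4) = 0x78, i.e. bits 3..6 set, exposing
 * mhpmcounter3..mhpmcounter6 while the fixed counters (cycle, time,
 * instret) in slots 0..2 stay outside the mask. The same result can be
 * requested directly with "pmu-mask=0x78".
 */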
1663 
1664 static void prop_pmu_num_get(Object *obj, Visitor *v, const char *name,
1665                              void *opaque, Error **errp)
1666 {
1667     RISCVCPU *cpu = RISCV_CPU(obj);
1668     uint8_t pmu_num = ctpop32(cpu->cfg.pmu_mask);
1669 
1670     visit_type_uint8(v, name, &pmu_num, errp);
1671 }
1672 
1673 static const PropertyInfo prop_pmu_num = {
1674     .name = "pmu-num",
1675     .get = prop_pmu_num_get,
1676     .set = prop_pmu_num_set,
1677 };
1678 
1679 static void prop_pmu_mask_set(Object *obj, Visitor *v, const char *name,
1680                              void *opaque, Error **errp)
1681 {
1682     RISCVCPU *cpu = RISCV_CPU(obj);
1683     uint32_t value;
1684     uint8_t pmu_num;
1685 
1686     visit_type_uint32(v, name, &value, errp);
1687 
1688     if (value != cpu->cfg.pmu_mask && riscv_cpu_is_vendor(obj)) {
1689         cpu_set_prop_err(cpu, name, errp);
1690         error_append_hint(errp, "Current '%s' val: %x\n",
1691                           name, cpu->cfg.pmu_mask);
1692         return;
1693     }
1694 
1695     pmu_num = ctpop32(value);
1696 
1697     if (pmu_num > (RV_MAX_MHPMCOUNTERS - 3)) {
1698         error_setg(errp, "Number of counters exceeds maximum available");
1699         return;
1700     }
1701 
1702     cpu_option_add_user_setting(name, value);
1703     cpu->cfg.pmu_mask = value;
1704 }
1705 
1706 static void prop_pmu_mask_get(Object *obj, Visitor *v, const char *name,
1707                              void *opaque, Error **errp)
1708 {
1709     uint32_t pmu_mask = RISCV_CPU(obj)->cfg.pmu_mask;
1710 
1711     visit_type_uint32(v, name, &pmu_mask, errp);
1712 }
1713 
1714 static const PropertyInfo prop_pmu_mask = {
1715     .name = "pmu-mask",
1716     .get = prop_pmu_mask_get,
1717     .set = prop_pmu_mask_set,
1718 };
1719 
1720 static void prop_mmu_set(Object *obj, Visitor *v, const char *name,
1721                          void *opaque, Error **errp)
1722 {
1723     RISCVCPU *cpu = RISCV_CPU(obj);
1724     bool value;
1725 
1726     visit_type_bool(v, name, &value, errp);
1727 
1728     if (cpu->cfg.mmu != value && riscv_cpu_is_vendor(obj)) {
1729         cpu_set_prop_err(cpu, "mmu", errp);
1730         return;
1731     }
1732 
1733     cpu_option_add_user_setting(name, value);
1734     cpu->cfg.mmu = value;
1735 }
1736 
1737 static void prop_mmu_get(Object *obj, Visitor *v, const char *name,
1738                          void *opaque, Error **errp)
1739 {
1740     bool value = RISCV_CPU(obj)->cfg.mmu;
1741 
1742     visit_type_bool(v, name, &value, errp);
1743 }
1744 
1745 static const PropertyInfo prop_mmu = {
1746     .name = "mmu",
1747     .get = prop_mmu_get,
1748     .set = prop_mmu_set,
1749 };
1750 
1751 static void prop_pmp_set(Object *obj, Visitor *v, const char *name,
1752                          void *opaque, Error **errp)
1753 {
1754     RISCVCPU *cpu = RISCV_CPU(obj);
1755     bool value;
1756 
1757     visit_type_bool(v, name, &value, errp);
1758 
1759     if (cpu->cfg.pmp != value && riscv_cpu_is_vendor(obj)) {
1760         cpu_set_prop_err(cpu, name, errp);
1761         return;
1762     }
1763 
1764     cpu_option_add_user_setting(name, value);
1765     cpu->cfg.pmp = value;
1766 }
1767 
1768 static void prop_pmp_get(Object *obj, Visitor *v, const char *name,
1769                          void *opaque, Error **errp)
1770 {
1771     bool value = RISCV_CPU(obj)->cfg.pmp;
1772 
1773     visit_type_bool(v, name, &value, errp);
1774 }
1775 
1776 static const PropertyInfo prop_pmp = {
1777     .name = "pmp",
1778     .get = prop_pmp_get,
1779     .set = prop_pmp_set,
1780 };
1781 
1782 static int priv_spec_from_str(const char *priv_spec_str)
1783 {
1784     int priv_version = -1;
1785 
1786     if (!g_strcmp0(priv_spec_str, PRIV_VER_1_13_0_STR)) {
1787         priv_version = PRIV_VERSION_1_13_0;
1788     } else if (!g_strcmp0(priv_spec_str, PRIV_VER_1_12_0_STR)) {
1789         priv_version = PRIV_VERSION_1_12_0;
1790     } else if (!g_strcmp0(priv_spec_str, PRIV_VER_1_11_0_STR)) {
1791         priv_version = PRIV_VERSION_1_11_0;
1792     } else if (!g_strcmp0(priv_spec_str, PRIV_VER_1_10_0_STR)) {
1793         priv_version = PRIV_VERSION_1_10_0;
1794     }
1795 
1796     return priv_version;
1797 }
1798 
1799 const char *priv_spec_to_str(int priv_version)
1800 {
1801     switch (priv_version) {
1802     case PRIV_VERSION_1_10_0:
1803         return PRIV_VER_1_10_0_STR;
1804     case PRIV_VERSION_1_11_0:
1805         return PRIV_VER_1_11_0_STR;
1806     case PRIV_VERSION_1_12_0:
1807         return PRIV_VER_1_12_0_STR;
1808     case PRIV_VERSION_1_13_0:
1809         return PRIV_VER_1_13_0_STR;
1810     default:
1811         return NULL;
1812     }
1813 }
1814 
1815 static void prop_priv_spec_set(Object *obj, Visitor *v, const char *name,
1816                                void *opaque, Error **errp)
1817 {
1818     RISCVCPU *cpu = RISCV_CPU(obj);
1819     g_autofree char *value = NULL;
1820     int priv_version = -1;
1821 
1822     visit_type_str(v, name, &value, errp);
1823 
1824     priv_version = priv_spec_from_str(value);
1825     if (priv_version < 0) {
1826         error_setg(errp, "Unsupported privilege spec version '%s'", value);
1827         return;
1828     }
1829 
1830     if (priv_version != cpu->env.priv_ver && riscv_cpu_is_vendor(obj)) {
1831         cpu_set_prop_err(cpu, name, errp);
1832         error_append_hint(errp, "Current '%s' val: %s\n", name,
1833                           object_property_get_str(obj, name, NULL));
1834         return;
1835     }
1836 
1837     cpu_option_add_user_setting(name, priv_version);
1838     cpu->env.priv_ver = priv_version;
1839 }
1840 
1841 static void prop_priv_spec_get(Object *obj, Visitor *v, const char *name,
1842                                void *opaque, Error **errp)
1843 {
1844     RISCVCPU *cpu = RISCV_CPU(obj);
1845     const char *value = priv_spec_to_str(cpu->env.priv_ver);
1846 
1847     visit_type_str(v, name, (char **)&value, errp);
1848 }
1849 
1850 static const PropertyInfo prop_priv_spec = {
1851     .name = "priv_spec",
1852     .get = prop_priv_spec_get,
1853     .set = prop_priv_spec_set,
1854 };
1855 
1856 static void prop_vext_spec_set(Object *obj, Visitor *v, const char *name,
1857                                void *opaque, Error **errp)
1858 {
1859     RISCVCPU *cpu = RISCV_CPU(obj);
1860     g_autofree char *value = NULL;
1861 
1862     visit_type_str(v, name, &value, errp);
1863 
1864     if (g_strcmp0(value, VEXT_VER_1_00_0_STR) != 0) {
1865         error_setg(errp, "Unsupported vector spec version '%s'", value);
1866         return;
1867     }
1868 
1869     cpu_option_add_user_setting(name, VEXT_VERSION_1_00_0);
1870     cpu->env.vext_ver = VEXT_VERSION_1_00_0;
1871 }
1872 
1873 static void prop_vext_spec_get(Object *obj, Visitor *v, const char *name,
1874                                void *opaque, Error **errp)
1875 {
1876     const char *value = VEXT_VER_1_00_0_STR;
1877 
1878     visit_type_str(v, name, (char **)&value, errp);
1879 }
1880 
1881 static const PropertyInfo prop_vext_spec = {
1882     .name = "vext_spec",
1883     .get = prop_vext_spec_get,
1884     .set = prop_vext_spec_set,
1885 };
1886 
1887 static void prop_vlen_set(Object *obj, Visitor *v, const char *name,
1888                          void *opaque, Error **errp)
1889 {
1890     RISCVCPU *cpu = RISCV_CPU(obj);
1891     uint16_t value;
1892 
1893     if (!visit_type_uint16(v, name, &value, errp)) {
1894         return;
1895     }
1896 
1897     if (!is_power_of_2(value)) {
1898         error_setg(errp, "Vector extension VLEN must be power of 2");
1899         return;
1900     }
1901 
1902     if (value != (cpu->cfg.vlenb << 3) && riscv_cpu_is_vendor(obj)) {
1903         cpu_set_prop_err(cpu, name, errp);
1904         error_append_hint(errp, "Current '%s' val: %u\n",
1905                           name, cpu->cfg.vlenb << 3);
1906         return;
1907     }
1908 
1909     cpu_option_add_user_setting(name, value);
1910     cpu->cfg.vlenb = value >> 3;
1911 }
1912 
1913 static void prop_vlen_get(Object *obj, Visitor *v, const char *name,
1914                          void *opaque, Error **errp)
1915 {
1916     uint16_t value = RISCV_CPU(obj)->cfg.vlenb << 3;
1917 
1918     visit_type_uint16(v, name, &value, errp);
1919 }
1920 
1921 static const PropertyInfo prop_vlen = {
1922     .name = "vlen",
1923     .get = prop_vlen_get,
1924     .set = prop_vlen_set,
1925 };
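/*
 * Worked example for the "vlen" property above: the value is given in
 * bits but stored in bytes in cfg.vlenb, so "-cpu rv64,v=true,vlen=256"
 * results in cfg.vlenb == 32, and the getter reports 256 again by
 * shifting back with "vlenb << 3".
 */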
1926 
1927 static void prop_elen_set(Object *obj, Visitor *v, const char *name,
1928                          void *opaque, Error **errp)
1929 {
1930     RISCVCPU *cpu = RISCV_CPU(obj);
1931     uint16_t value;
1932 
1933     if (!visit_type_uint16(v, name, &value, errp)) {
1934         return;
1935     }
1936 
1937     if (!is_power_of_2(value)) {
1938         error_setg(errp, "Vector extension ELEN must be power of 2");
1939         return;
1940     }
1941 
1942     if (value != cpu->cfg.elen && riscv_cpu_is_vendor(obj)) {
1943         cpu_set_prop_err(cpu, name, errp);
1944         error_append_hint(errp, "Current '%s' val: %u\n",
1945                           name, cpu->cfg.elen);
1946         return;
1947     }
1948 
1949     cpu_option_add_user_setting(name, value);
1950     cpu->cfg.elen = value;
1951 }
1952 
1953 static void prop_elen_get(Object *obj, Visitor *v, const char *name,
1954                          void *opaque, Error **errp)
1955 {
1956     uint16_t value = RISCV_CPU(obj)->cfg.elen;
1957 
1958     visit_type_uint16(v, name, &value, errp);
1959 }
1960 
1961 static const PropertyInfo prop_elen = {
1962     .name = "elen",
1963     .get = prop_elen_get,
1964     .set = prop_elen_set,
1965 };
1966 
1967 static void prop_cbom_blksize_set(Object *obj, Visitor *v, const char *name,
1968                                   void *opaque, Error **errp)
1969 {
1970     RISCVCPU *cpu = RISCV_CPU(obj);
1971     uint16_t value;
1972 
1973     if (!visit_type_uint16(v, name, &value, errp)) {
1974         return;
1975     }
1976 
1977     if (value != cpu->cfg.cbom_blocksize && riscv_cpu_is_vendor(obj)) {
1978         cpu_set_prop_err(cpu, name, errp);
1979         error_append_hint(errp, "Current '%s' val: %u\n",
1980                           name, cpu->cfg.cbom_blocksize);
1981         return;
1982     }
1983 
1984     cpu_option_add_user_setting(name, value);
1985     cpu->cfg.cbom_blocksize = value;
1986 }
1987 
1988 static void prop_cbom_blksize_get(Object *obj, Visitor *v, const char *name,
1989                          void *opaque, Error **errp)
1990 {
1991     uint16_t value = RISCV_CPU(obj)->cfg.cbom_blocksize;
1992 
1993     visit_type_uint16(v, name, &value, errp);
1994 }
1995 
1996 static const PropertyInfo prop_cbom_blksize = {
1997     .name = "cbom_blocksize",
1998     .get = prop_cbom_blksize_get,
1999     .set = prop_cbom_blksize_set,
2000 };
2001 
2002 static void prop_cbop_blksize_set(Object *obj, Visitor *v, const char *name,
2003                                   void *opaque, Error **errp)
2004 {
2005     RISCVCPU *cpu = RISCV_CPU(obj);
2006     uint16_t value;
2007 
2008     if (!visit_type_uint16(v, name, &value, errp)) {
2009         return;
2010     }
2011 
2012     if (value != cpu->cfg.cbop_blocksize && riscv_cpu_is_vendor(obj)) {
2013         cpu_set_prop_err(cpu, name, errp);
2014         error_append_hint(errp, "Current '%s' val: %u\n",
2015                           name, cpu->cfg.cbop_blocksize);
2016         return;
2017     }
2018 
2019     cpu_option_add_user_setting(name, value);
2020     cpu->cfg.cbop_blocksize = value;
2021 }
2022 
2023 static void prop_cbop_blksize_get(Object *obj, Visitor *v, const char *name,
2024                          void *opaque, Error **errp)
2025 {
2026     uint16_t value = RISCV_CPU(obj)->cfg.cbop_blocksize;
2027 
2028     visit_type_uint16(v, name, &value, errp);
2029 }
2030 
2031 static const PropertyInfo prop_cbop_blksize = {
2032     .name = "cbop_blocksize",
2033     .get = prop_cbop_blksize_get,
2034     .set = prop_cbop_blksize_set,
2035 };
2036 
2037 static void prop_cboz_blksize_set(Object *obj, Visitor *v, const char *name,
2038                                   void *opaque, Error **errp)
2039 {
2040     RISCVCPU *cpu = RISCV_CPU(obj);
2041     uint16_t value;
2042 
2043     if (!visit_type_uint16(v, name, &value, errp)) {
2044         return;
2045     }
2046 
2047     if (value != cpu->cfg.cboz_blocksize && riscv_cpu_is_vendor(obj)) {
2048         cpu_set_prop_err(cpu, name, errp);
2049         error_append_hint(errp, "Current '%s' val: %u\n",
2050                           name, cpu->cfg.cboz_blocksize);
2051         return;
2052     }
2053 
2054     cpu_option_add_user_setting(name, value);
2055     cpu->cfg.cboz_blocksize = value;
2056 }
2057 
2058 static void prop_cboz_blksize_get(Object *obj, Visitor *v, const char *name,
2059                          void *opaque, Error **errp)
2060 {
2061     uint16_t value = RISCV_CPU(obj)->cfg.cboz_blocksize;
2062 
2063     visit_type_uint16(v, name, &value, errp);
2064 }
2065 
2066 static const PropertyInfo prop_cboz_blksize = {
2067     .name = "cboz_blocksize",
2068     .get = prop_cboz_blksize_get,
2069     .set = prop_cboz_blksize_set,
2070 };
2071 
2072 static void prop_mvendorid_set(Object *obj, Visitor *v, const char *name,
2073                                void *opaque, Error **errp)
2074 {
2075     bool dynamic_cpu = riscv_cpu_is_dynamic(obj);
2076     RISCVCPU *cpu = RISCV_CPU(obj);
2077     uint32_t prev_val = cpu->cfg.mvendorid;
2078     uint32_t value;
2079 
2080     if (!visit_type_uint32(v, name, &value, errp)) {
2081         return;
2082     }
2083 
2084     if (!dynamic_cpu && prev_val != value) {
2085         error_setg(errp, "Unable to change %s mvendorid (0x%x)",
2086                    object_get_typename(obj), prev_val);
2087         return;
2088     }
2089 
2090     cpu->cfg.mvendorid = value;
2091 }
2092 
2093 static void prop_mvendorid_get(Object *obj, Visitor *v, const char *name,
2094                                void *opaque, Error **errp)
2095 {
2096     uint32_t value = RISCV_CPU(obj)->cfg.mvendorid;
2097 
2098     visit_type_uint32(v, name, &value, errp);
2099 }
2100 
2101 static const PropertyInfo prop_mvendorid = {
2102     .name = "mvendorid",
2103     .get = prop_mvendorid_get,
2104     .set = prop_mvendorid_set,
2105 };
2106 
2107 static void prop_mimpid_set(Object *obj, Visitor *v, const char *name,
2108                             void *opaque, Error **errp)
2109 {
2110     bool dynamic_cpu = riscv_cpu_is_dynamic(obj);
2111     RISCVCPU *cpu = RISCV_CPU(obj);
2112     uint64_t prev_val = cpu->cfg.mimpid;
2113     uint64_t value;
2114 
2115     if (!visit_type_uint64(v, name, &value, errp)) {
2116         return;
2117     }
2118 
2119     if (!dynamic_cpu && prev_val != value) {
2120         error_setg(errp, "Unable to change %s mimpid (0x%" PRIx64 ")",
2121                    object_get_typename(obj), prev_val);
2122         return;
2123     }
2124 
2125     cpu->cfg.mimpid = value;
2126 }
2127 
2128 static void prop_mimpid_get(Object *obj, Visitor *v, const char *name,
2129                             void *opaque, Error **errp)
2130 {
2131     uint64_t value = RISCV_CPU(obj)->cfg.mimpid;
2132 
2133     visit_type_uint64(v, name, &value, errp);
2134 }
2135 
2136 static const PropertyInfo prop_mimpid = {
2137     .name = "mimpid",
2138     .get = prop_mimpid_get,
2139     .set = prop_mimpid_set,
2140 };
2141 
2142 static void prop_marchid_set(Object *obj, Visitor *v, const char *name,
2143                              void *opaque, Error **errp)
2144 {
2145     bool dynamic_cpu = riscv_cpu_is_dynamic(obj);
2146     RISCVCPU *cpu = RISCV_CPU(obj);
2147     uint64_t prev_val = cpu->cfg.marchid;
2148     uint64_t value, invalid_val;
2149     uint32_t mxlen = 0;
2150 
2151     if (!visit_type_uint64(v, name, &value, errp)) {
2152         return;
2153     }
2154 
2155     if (!dynamic_cpu && prev_val != value) {
2156         error_setg(errp, "Unable to change %s marchid (0x%" PRIx64 ")",
2157                    object_get_typename(obj), prev_val);
2158         return;
2159     }
2160 
2161     switch (riscv_cpu_mxl(&cpu->env)) {
2162     case MXL_RV32:
2163         mxlen = 32;
2164         break;
2165     case MXL_RV64:
2166     case MXL_RV128:
2167         mxlen = 64;
2168         break;
2169     default:
2170         g_assert_not_reached();
2171     }
2172 
2173     invalid_val = 1ULL << (mxlen - 1);
2174 
2175     if (value == invalid_val) {
2176         error_setg(errp, "Unable to set marchid with MSB (%u) bit set "
2177                          "and the remaining bits zero", mxlen);
2178         return;
2179     }
2180 
2181     cpu->cfg.marchid = value;
2182 }
2183 
2184 static void prop_marchid_get(Object *obj, Visitor *v, const char *name,
2185                              void *opaque, Error **errp)
2186 {
2187     uint64_t value = RISCV_CPU(obj)->cfg.marchid;
2188 
2189     visit_type_uint64(v, name, &value, errp);
2190 }
2191 
2192 static const PropertyInfo prop_marchid = {
2193     .name = "marchid",
2194     .get = prop_marchid_get,
2195     .set = prop_marchid_set,
2196 };
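/*
 * Worked example for the marchid check above: on an RV64 CPU the single
 * reserved value is 1ULL << 63 == 0x8000000000000000 (only the MSB set),
 * so "-cpu rv64,marchid=0x8000000000000000" is rejected while any other
 * value, e.g. marchid=0x8000000000000001, is accepted.
 */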
2197 
2198 /*
2199  * RVA22U64 defines some 'named features' that are cache
2200  * related: Za64rs, Zic64b, Ziccif, Ziccrse, Ziccamoa
2201  * and Zicclsm. They are always implemented in TCG and
2202  * don't need to be manually enabled by the profile.
2203  */
2204 static RISCVCPUProfile RVA22U64 = {
2205     .parent = NULL,
2206     .name = "rva22u64",
2207     .misa_ext = RVI | RVM | RVA | RVF | RVD | RVC | RVU,
2208     .priv_spec = RISCV_PROFILE_ATTR_UNUSED,
2209     .satp_mode = RISCV_PROFILE_ATTR_UNUSED,
2210     .ext_offsets = {
2211         CPU_CFG_OFFSET(ext_zicsr), CPU_CFG_OFFSET(ext_zihintpause),
2212         CPU_CFG_OFFSET(ext_zba), CPU_CFG_OFFSET(ext_zbb),
2213         CPU_CFG_OFFSET(ext_zbs), CPU_CFG_OFFSET(ext_zfhmin),
2214         CPU_CFG_OFFSET(ext_zkt), CPU_CFG_OFFSET(ext_zicntr),
2215         CPU_CFG_OFFSET(ext_zihpm), CPU_CFG_OFFSET(ext_zicbom),
2216         CPU_CFG_OFFSET(ext_zicbop), CPU_CFG_OFFSET(ext_zicboz),
2217 
2218         /* mandatory named features for this profile */
2219         CPU_CFG_OFFSET(ext_zic64b),
2220 
2221         RISCV_PROFILE_EXT_LIST_END
2222     }
2223 };
2224 
2225 /*
2226  * As with RVA22U64, RVA22S64 also defines 'named features'.
2227  *
2228  * Cache-related features that we consider enabled since we don't
2229  * implement caches: Ssccptr
2230  *
2231  * Other named features that we already implement: Sstvecd, Sstvala,
2232  * Sscounterenw
2233  *
2234  * The remaining features/extensions come from RVA22U64.
2235  */
2236 static RISCVCPUProfile RVA22S64 = {
2237     .parent = &RVA22U64,
2238     .name = "rva22s64",
2239     .misa_ext = RVS,
2240     .priv_spec = PRIV_VERSION_1_12_0,
2241     .satp_mode = VM_1_10_SV39,
2242     .ext_offsets = {
2243         /* rva22s64 exts */
2244         CPU_CFG_OFFSET(ext_zifencei), CPU_CFG_OFFSET(ext_svpbmt),
2245         CPU_CFG_OFFSET(ext_svinval), CPU_CFG_OFFSET(ext_svade),
2246 
2247         RISCV_PROFILE_EXT_LIST_END
2248     }
2249 };
2250 
2251 RISCVCPUProfile *riscv_profiles[] = {
2252     &RVA22U64,
2253     &RVA22S64,
2254     NULL,
2255 };
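/*
 * Minimal sketch (assumption, not the actual realize-time code) of how a
 * profile's ext_offsets list is consumed: walk the offsets up to the
 * RISCV_PROFILE_EXT_LIST_END terminator and flip the matching bool inside
 * cpu->cfg. The helper name below is hypothetical.
 *
 *     for (int i = 0;
 *          profile->ext_offsets[i] != RISCV_PROFILE_EXT_LIST_END; i++) {
 *         example_cfg_ext_set(cpu, profile->ext_offsets[i], true);
 *     }
 */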
2256 
2257 static RISCVCPUImpliedExtsRule RVA_IMPLIED = {
2258     .is_misa = true,
2259     .ext = RVA,
2260     .implied_multi_exts = {
2261         CPU_CFG_OFFSET(ext_zalrsc), CPU_CFG_OFFSET(ext_zaamo),
2262 
2263         RISCV_IMPLIED_EXTS_RULE_END
2264     },
2265 };
2266 
2267 static RISCVCPUImpliedExtsRule RVD_IMPLIED = {
2268     .is_misa = true,
2269     .ext = RVD,
2270     .implied_misa_exts = RVF,
2271     .implied_multi_exts = { RISCV_IMPLIED_EXTS_RULE_END },
2272 };
2273 
2274 static RISCVCPUImpliedExtsRule RVF_IMPLIED = {
2275     .is_misa = true,
2276     .ext = RVF,
2277     .implied_multi_exts = {
2278         CPU_CFG_OFFSET(ext_zicsr),
2279 
2280         RISCV_IMPLIED_EXTS_RULE_END
2281     },
2282 };
2283 
2284 static RISCVCPUImpliedExtsRule RVM_IMPLIED = {
2285     .is_misa = true,
2286     .ext = RVM,
2287     .implied_multi_exts = {
2288         CPU_CFG_OFFSET(ext_zmmul),
2289 
2290         RISCV_IMPLIED_EXTS_RULE_END
2291     },
2292 };
2293 
2294 static RISCVCPUImpliedExtsRule RVV_IMPLIED = {
2295     .is_misa = true,
2296     .ext = RVV,
2297     .implied_multi_exts = {
2298         CPU_CFG_OFFSET(ext_zve64d),
2299 
2300         RISCV_IMPLIED_EXTS_RULE_END
2301     },
2302 };
2303 
2304 static RISCVCPUImpliedExtsRule ZCB_IMPLIED = {
2305     .ext = CPU_CFG_OFFSET(ext_zcb),
2306     .implied_multi_exts = {
2307         CPU_CFG_OFFSET(ext_zca),
2308 
2309         RISCV_IMPLIED_EXTS_RULE_END
2310     },
2311 };
2312 
2313 static RISCVCPUImpliedExtsRule ZCD_IMPLIED = {
2314     .ext = CPU_CFG_OFFSET(ext_zcd),
2315     .implied_misa_exts = RVD,
2316     .implied_multi_exts = {
2317         CPU_CFG_OFFSET(ext_zca),
2318 
2319         RISCV_IMPLIED_EXTS_RULE_END
2320     },
2321 };
2322 
2323 static RISCVCPUImpliedExtsRule ZCE_IMPLIED = {
2324     .ext = CPU_CFG_OFFSET(ext_zce),
2325     .implied_multi_exts = {
2326         CPU_CFG_OFFSET(ext_zcb), CPU_CFG_OFFSET(ext_zcmp),
2327         CPU_CFG_OFFSET(ext_zcmt),
2328 
2329         RISCV_IMPLIED_EXTS_RULE_END
2330     },
2331 };
2332 
2333 static RISCVCPUImpliedExtsRule ZCF_IMPLIED = {
2334     .ext = CPU_CFG_OFFSET(ext_zcf),
2335     .implied_misa_exts = RVF,
2336     .implied_multi_exts = {
2337         CPU_CFG_OFFSET(ext_zca),
2338 
2339         RISCV_IMPLIED_EXTS_RULE_END
2340     },
2341 };
2342 
2343 static RISCVCPUImpliedExtsRule ZCMP_IMPLIED = {
2344     .ext = CPU_CFG_OFFSET(ext_zcmp),
2345     .implied_multi_exts = {
2346         CPU_CFG_OFFSET(ext_zca),
2347 
2348         RISCV_IMPLIED_EXTS_RULE_END
2349     },
2350 };
2351 
2352 static RISCVCPUImpliedExtsRule ZCMT_IMPLIED = {
2353     .ext = CPU_CFG_OFFSET(ext_zcmt),
2354     .implied_multi_exts = {
2355         CPU_CFG_OFFSET(ext_zca), CPU_CFG_OFFSET(ext_zicsr),
2356 
2357         RISCV_IMPLIED_EXTS_RULE_END
2358     },
2359 };
2360 
2361 static RISCVCPUImpliedExtsRule ZDINX_IMPLIED = {
2362     .ext = CPU_CFG_OFFSET(ext_zdinx),
2363     .implied_multi_exts = {
2364         CPU_CFG_OFFSET(ext_zfinx),
2365 
2366         RISCV_IMPLIED_EXTS_RULE_END
2367     },
2368 };
2369 
2370 static RISCVCPUImpliedExtsRule ZFA_IMPLIED = {
2371     .ext = CPU_CFG_OFFSET(ext_zfa),
2372     .implied_misa_exts = RVF,
2373     .implied_multi_exts = { RISCV_IMPLIED_EXTS_RULE_END },
2374 };
2375 
2376 static RISCVCPUImpliedExtsRule ZFBFMIN_IMPLIED = {
2377     .ext = CPU_CFG_OFFSET(ext_zfbfmin),
2378     .implied_misa_exts = RVF,
2379     .implied_multi_exts = { RISCV_IMPLIED_EXTS_RULE_END },
2380 };
2381 
2382 static RISCVCPUImpliedExtsRule ZFH_IMPLIED = {
2383     .ext = CPU_CFG_OFFSET(ext_zfh),
2384     .implied_multi_exts = {
2385         CPU_CFG_OFFSET(ext_zfhmin),
2386 
2387         RISCV_IMPLIED_EXTS_RULE_END
2388     },
2389 };
2390 
2391 static RISCVCPUImpliedExtsRule ZFHMIN_IMPLIED = {
2392     .ext = CPU_CFG_OFFSET(ext_zfhmin),
2393     .implied_misa_exts = RVF,
2394     .implied_multi_exts = { RISCV_IMPLIED_EXTS_RULE_END },
2395 };
2396 
2397 static RISCVCPUImpliedExtsRule ZFINX_IMPLIED = {
2398     .ext = CPU_CFG_OFFSET(ext_zfinx),
2399     .implied_multi_exts = {
2400         CPU_CFG_OFFSET(ext_zicsr),
2401 
2402         RISCV_IMPLIED_EXTS_RULE_END
2403     },
2404 };
2405 
2406 static RISCVCPUImpliedExtsRule ZHINX_IMPLIED = {
2407     .ext = CPU_CFG_OFFSET(ext_zhinx),
2408     .implied_multi_exts = {
2409         CPU_CFG_OFFSET(ext_zhinxmin),
2410 
2411         RISCV_IMPLIED_EXTS_RULE_END
2412     },
2413 };
2414 
2415 static RISCVCPUImpliedExtsRule ZHINXMIN_IMPLIED = {
2416     .ext = CPU_CFG_OFFSET(ext_zhinxmin),
2417     .implied_multi_exts = {
2418         CPU_CFG_OFFSET(ext_zfinx),
2419 
2420         RISCV_IMPLIED_EXTS_RULE_END
2421     },
2422 };
2423 
2424 static RISCVCPUImpliedExtsRule ZICNTR_IMPLIED = {
2425     .ext = CPU_CFG_OFFSET(ext_zicntr),
2426     .implied_multi_exts = {
2427         CPU_CFG_OFFSET(ext_zicsr),
2428 
2429         RISCV_IMPLIED_EXTS_RULE_END
2430     },
2431 };
2432 
2433 static RISCVCPUImpliedExtsRule ZIHPM_IMPLIED = {
2434     .ext = CPU_CFG_OFFSET(ext_zihpm),
2435     .implied_multi_exts = {
2436         CPU_CFG_OFFSET(ext_zicsr),
2437 
2438         RISCV_IMPLIED_EXTS_RULE_END
2439     },
2440 };
2441 
2442 static RISCVCPUImpliedExtsRule ZK_IMPLIED = {
2443     .ext = CPU_CFG_OFFSET(ext_zk),
2444     .implied_multi_exts = {
2445         CPU_CFG_OFFSET(ext_zkn), CPU_CFG_OFFSET(ext_zkr),
2446         CPU_CFG_OFFSET(ext_zkt),
2447 
2448         RISCV_IMPLIED_EXTS_RULE_END
2449     },
2450 };
2451 
2452 static RISCVCPUImpliedExtsRule ZKN_IMPLIED = {
2453     .ext = CPU_CFG_OFFSET(ext_zkn),
2454     .implied_multi_exts = {
2455         CPU_CFG_OFFSET(ext_zbkb), CPU_CFG_OFFSET(ext_zbkc),
2456         CPU_CFG_OFFSET(ext_zbkx), CPU_CFG_OFFSET(ext_zkne),
2457         CPU_CFG_OFFSET(ext_zknd), CPU_CFG_OFFSET(ext_zknh),
2458 
2459         RISCV_IMPLIED_EXTS_RULE_END
2460     },
2461 };
2462 
2463 static RISCVCPUImpliedExtsRule ZKS_IMPLIED = {
2464     .ext = CPU_CFG_OFFSET(ext_zks),
2465     .implied_multi_exts = {
2466         CPU_CFG_OFFSET(ext_zbkb), CPU_CFG_OFFSET(ext_zbkc),
2467         CPU_CFG_OFFSET(ext_zbkx), CPU_CFG_OFFSET(ext_zksed),
2468         CPU_CFG_OFFSET(ext_zksh),
2469 
2470         RISCV_IMPLIED_EXTS_RULE_END
2471     },
2472 };
2473 
2474 static RISCVCPUImpliedExtsRule ZVBB_IMPLIED = {
2475     .ext = CPU_CFG_OFFSET(ext_zvbb),
2476     .implied_multi_exts = {
2477         CPU_CFG_OFFSET(ext_zvkb),
2478 
2479         RISCV_IMPLIED_EXTS_RULE_END
2480     },
2481 };
2482 
2483 static RISCVCPUImpliedExtsRule ZVE32F_IMPLIED = {
2484     .ext = CPU_CFG_OFFSET(ext_zve32f),
2485     .implied_misa_exts = RVF,
2486     .implied_multi_exts = {
2487         CPU_CFG_OFFSET(ext_zve32x),
2488 
2489         RISCV_IMPLIED_EXTS_RULE_END
2490     },
2491 };
2492 
2493 static RISCVCPUImpliedExtsRule ZVE32X_IMPLIED = {
2494     .ext = CPU_CFG_OFFSET(ext_zve32x),
2495     .implied_multi_exts = {
2496         CPU_CFG_OFFSET(ext_zicsr),
2497 
2498         RISCV_IMPLIED_EXTS_RULE_END
2499     },
2500 };
2501 
2502 static RISCVCPUImpliedExtsRule ZVE64D_IMPLIED = {
2503     .ext = CPU_CFG_OFFSET(ext_zve64d),
2504     .implied_misa_exts = RVD,
2505     .implied_multi_exts = {
2506         CPU_CFG_OFFSET(ext_zve64f),
2507 
2508         RISCV_IMPLIED_EXTS_RULE_END
2509     },
2510 };
2511 
2512 static RISCVCPUImpliedExtsRule ZVE64F_IMPLIED = {
2513     .ext = CPU_CFG_OFFSET(ext_zve64f),
2514     .implied_misa_exts = RVF,
2515     .implied_multi_exts = {
2516         CPU_CFG_OFFSET(ext_zve32f), CPU_CFG_OFFSET(ext_zve64x),
2517 
2518         RISCV_IMPLIED_EXTS_RULE_END
2519     },
2520 };
2521 
2522 static RISCVCPUImpliedExtsRule ZVE64X_IMPLIED = {
2523     .ext = CPU_CFG_OFFSET(ext_zve64x),
2524     .implied_multi_exts = {
2525         CPU_CFG_OFFSET(ext_zve32x),
2526 
2527         RISCV_IMPLIED_EXTS_RULE_END
2528     },
2529 };
2530 
2531 static RISCVCPUImpliedExtsRule ZVFBFMIN_IMPLIED = {
2532     .ext = CPU_CFG_OFFSET(ext_zvfbfmin),
2533     .implied_multi_exts = {
2534         CPU_CFG_OFFSET(ext_zve32f),
2535 
2536         RISCV_IMPLIED_EXTS_RULE_END
2537     },
2538 };
2539 
2540 static RISCVCPUImpliedExtsRule ZVFBFWMA_IMPLIED = {
2541     .ext = CPU_CFG_OFFSET(ext_zvfbfwma),
2542     .implied_multi_exts = {
2543         CPU_CFG_OFFSET(ext_zvfbfmin), CPU_CFG_OFFSET(ext_zfbfmin),
2544 
2545         RISCV_IMPLIED_EXTS_RULE_END
2546     },
2547 };
2548 
2549 static RISCVCPUImpliedExtsRule ZVFH_IMPLIED = {
2550     .ext = CPU_CFG_OFFSET(ext_zvfh),
2551     .implied_multi_exts = {
2552         CPU_CFG_OFFSET(ext_zvfhmin), CPU_CFG_OFFSET(ext_zfhmin),
2553 
2554         RISCV_IMPLIED_EXTS_RULE_END
2555     },
2556 };
2557 
2558 static RISCVCPUImpliedExtsRule ZVFHMIN_IMPLIED = {
2559     .ext = CPU_CFG_OFFSET(ext_zvfhmin),
2560     .implied_multi_exts = {
2561         CPU_CFG_OFFSET(ext_zve32f),
2562 
2563         RISCV_IMPLIED_EXTS_RULE_END
2564     },
2565 };
2566 
2567 static RISCVCPUImpliedExtsRule ZVKN_IMPLIED = {
2568     .ext = CPU_CFG_OFFSET(ext_zvkn),
2569     .implied_multi_exts = {
2570         CPU_CFG_OFFSET(ext_zvkned), CPU_CFG_OFFSET(ext_zvknhb),
2571         CPU_CFG_OFFSET(ext_zvkb), CPU_CFG_OFFSET(ext_zvkt),
2572 
2573         RISCV_IMPLIED_EXTS_RULE_END
2574     },
2575 };
2576 
2577 static RISCVCPUImpliedExtsRule ZVKNC_IMPLIED = {
2578     .ext = CPU_CFG_OFFSET(ext_zvknc),
2579     .implied_multi_exts = {
2580         CPU_CFG_OFFSET(ext_zvkn), CPU_CFG_OFFSET(ext_zvbc),
2581 
2582         RISCV_IMPLIED_EXTS_RULE_END
2583     },
2584 };
2585 
2586 static RISCVCPUImpliedExtsRule ZVKNG_IMPLIED = {
2587     .ext = CPU_CFG_OFFSET(ext_zvkng),
2588     .implied_multi_exts = {
2589         CPU_CFG_OFFSET(ext_zvkn), CPU_CFG_OFFSET(ext_zvkg),
2590 
2591         RISCV_IMPLIED_EXTS_RULE_END
2592     },
2593 };
2594 
2595 static RISCVCPUImpliedExtsRule ZVKNHB_IMPLIED = {
2596     .ext = CPU_CFG_OFFSET(ext_zvknhb),
2597     .implied_multi_exts = {
2598         CPU_CFG_OFFSET(ext_zve64x),
2599 
2600         RISCV_IMPLIED_EXTS_RULE_END
2601     },
2602 };
2603 
2604 static RISCVCPUImpliedExtsRule ZVKS_IMPLIED = {
2605     .ext = CPU_CFG_OFFSET(ext_zvks),
2606     .implied_multi_exts = {
2607         CPU_CFG_OFFSET(ext_zvksed), CPU_CFG_OFFSET(ext_zvksh),
2608         CPU_CFG_OFFSET(ext_zvkb), CPU_CFG_OFFSET(ext_zvkt),
2609 
2610         RISCV_IMPLIED_EXTS_RULE_END
2611     },
2612 };
2613 
2614 static RISCVCPUImpliedExtsRule ZVKSC_IMPLIED = {
2615     .ext = CPU_CFG_OFFSET(ext_zvksc),
2616     .implied_multi_exts = {
2617         CPU_CFG_OFFSET(ext_zvks), CPU_CFG_OFFSET(ext_zvbc),
2618 
2619         RISCV_IMPLIED_EXTS_RULE_END
2620     },
2621 };
2622 
2623 static RISCVCPUImpliedExtsRule ZVKSG_IMPLIED = {
2624     .ext = CPU_CFG_OFFSET(ext_zvksg),
2625     .implied_multi_exts = {
2626         CPU_CFG_OFFSET(ext_zvks), CPU_CFG_OFFSET(ext_zvkg),
2627 
2628         RISCV_IMPLIED_EXTS_RULE_END
2629     },
2630 };
2631 
2632 RISCVCPUImpliedExtsRule *riscv_misa_ext_implied_rules[] = {
2633     &RVA_IMPLIED, &RVD_IMPLIED, &RVF_IMPLIED,
2634     &RVM_IMPLIED, &RVV_IMPLIED, NULL
2635 };
2636 
2637 RISCVCPUImpliedExtsRule *riscv_multi_ext_implied_rules[] = {
2638     &ZCB_IMPLIED, &ZCD_IMPLIED, &ZCE_IMPLIED,
2639     &ZCF_IMPLIED, &ZCMP_IMPLIED, &ZCMT_IMPLIED,
2640     &ZDINX_IMPLIED, &ZFA_IMPLIED, &ZFBFMIN_IMPLIED,
2641     &ZFH_IMPLIED, &ZFHMIN_IMPLIED, &ZFINX_IMPLIED,
2642     &ZHINX_IMPLIED, &ZHINXMIN_IMPLIED, &ZICNTR_IMPLIED,
2643     &ZIHPM_IMPLIED, &ZK_IMPLIED, &ZKN_IMPLIED,
2644     &ZKS_IMPLIED, &ZVBB_IMPLIED, &ZVE32F_IMPLIED,
2645     &ZVE32X_IMPLIED, &ZVE64D_IMPLIED, &ZVE64F_IMPLIED,
2646     &ZVE64X_IMPLIED, &ZVFBFMIN_IMPLIED, &ZVFBFWMA_IMPLIED,
2647     &ZVFH_IMPLIED, &ZVFHMIN_IMPLIED, &ZVKN_IMPLIED,
2648     &ZVKNC_IMPLIED, &ZVKNG_IMPLIED, &ZVKNHB_IMPLIED,
2649     &ZVKS_IMPLIED,  &ZVKSC_IMPLIED, &ZVKSG_IMPLIED,
2650     NULL
2651 };
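/*
 * Illustrative example of how the rule tables above compose: enabling zce
 * pulls in zcb, zcmp and zcmt (ZCE_IMPLIED); zcb in turn implies zca
 * (ZCB_IMPLIED) and zcmt additionally implies zicsr (ZCMT_IMPLIED). MISA
 * bits participate as well: zcf also sets RVF in misa_ext (ZCF_IMPLIED),
 * and RVD by itself implies RVF (RVD_IMPLIED). The code walking these
 * tables is expected to apply them repeatedly until no new extension is
 * enabled, so such chains resolve fully.
 */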
2652 
2653 static Property riscv_cpu_properties[] = {
2654     DEFINE_PROP_BOOL("debug", RISCVCPU, cfg.debug, true),
2655 
2656     {.name = "pmu-mask", .info = &prop_pmu_mask},
2657     {.name = "pmu-num", .info = &prop_pmu_num}, /* Deprecated */
2658 
2659     {.name = "mmu", .info = &prop_mmu},
2660     {.name = "pmp", .info = &prop_pmp},
2661 
2662     {.name = "priv_spec", .info = &prop_priv_spec},
2663     {.name = "vext_spec", .info = &prop_vext_spec},
2664 
2665     {.name = "vlen", .info = &prop_vlen},
2666     {.name = "elen", .info = &prop_elen},
2667 
2668     {.name = "cbom_blocksize", .info = &prop_cbom_blksize},
2669     {.name = "cbop_blocksize", .info = &prop_cbop_blksize},
2670     {.name = "cboz_blocksize", .info = &prop_cboz_blksize},
2671 
2672     {.name = "mvendorid", .info = &prop_mvendorid},
2673     {.name = "mimpid", .info = &prop_mimpid},
2674     {.name = "marchid", .info = &prop_marchid},
2675 
2676 #ifndef CONFIG_USER_ONLY
2677     DEFINE_PROP_UINT64("resetvec", RISCVCPU, env.resetvec, DEFAULT_RSTVEC),
2678 #endif
2679 
2680     DEFINE_PROP_BOOL("short-isa-string", RISCVCPU, cfg.short_isa_string, false),
2681 
2682     DEFINE_PROP_BOOL("rvv_ta_all_1s", RISCVCPU, cfg.rvv_ta_all_1s, false),
2683     DEFINE_PROP_BOOL("rvv_ma_all_1s", RISCVCPU, cfg.rvv_ma_all_1s, false),
2684 
2685     /*
2686      * write_misa() is marked as experimental for now so mark
2687      * it with 'x-' and default to 'false'.
2688      */
2689     DEFINE_PROP_BOOL("x-misa-w", RISCVCPU, cfg.misa_w, false),
2690     DEFINE_PROP_END_OF_LIST(),
2691 };
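/*
 * Usage example (illustrative) combining several of the properties defined
 * above on a generic CPU:
 *
 *     -cpu rv64,pmu-mask=0x78,mmu=on,pmp=on,priv_spec=v1.12.0,vlen=256
 *
 * Vendor CPUs expose the same properties but, as the setters above show,
 * reject values that differ from what the vendor model already defines.
 */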
2692 
2693 #if defined(TARGET_RISCV64)
2694 static void rva22u64_profile_cpu_init(Object *obj)
2695 {
2696     rv64i_bare_cpu_init(obj);
2697 
2698     RVA22U64.enabled = true;
2699 }
2700 
2701 static void rva22s64_profile_cpu_init(Object *obj)
2702 {
2703     rv64i_bare_cpu_init(obj);
2704 
2705     RVA22S64.enabled = true;
2706 }
2707 #endif
2708 
2709 static const gchar *riscv_gdb_arch_name(CPUState *cs)
2710 {
2711     RISCVCPU *cpu = RISCV_CPU(cs);
2712     CPURISCVState *env = &cpu->env;
2713 
2714     switch (riscv_cpu_mxl(env)) {
2715     case MXL_RV32:
2716         return "riscv:rv32";
2717     case MXL_RV64:
2718     case MXL_RV128:
2719         return "riscv:rv64";
2720     default:
2721         g_assert_not_reached();
2722     }
2723 }
2724 
2725 #ifndef CONFIG_USER_ONLY
2726 static int64_t riscv_get_arch_id(CPUState *cs)
2727 {
2728     RISCVCPU *cpu = RISCV_CPU(cs);
2729 
2730     return cpu->env.mhartid;
2731 }
2732 
2733 #include "hw/core/sysemu-cpu-ops.h"
2734 
2735 static const struct SysemuCPUOps riscv_sysemu_ops = {
2736     .get_phys_page_debug = riscv_cpu_get_phys_page_debug,
2737     .write_elf64_note = riscv_cpu_write_elf64_note,
2738     .write_elf32_note = riscv_cpu_write_elf32_note,
2739     .legacy_vmsd = &vmstate_riscv_cpu,
2740 };
2741 #endif
2742 
2743 static void riscv_cpu_common_class_init(ObjectClass *c, void *data)
2744 {
2745     RISCVCPUClass *mcc = RISCV_CPU_CLASS(c);
2746     CPUClass *cc = CPU_CLASS(c);
2747     DeviceClass *dc = DEVICE_CLASS(c);
2748     ResettableClass *rc = RESETTABLE_CLASS(c);
2749 
2750     device_class_set_parent_realize(dc, riscv_cpu_realize,
2751                                     &mcc->parent_realize);
2752 
2753     resettable_class_set_parent_phases(rc, NULL, riscv_cpu_reset_hold, NULL,
2754                                        &mcc->parent_phases);
2755 
2756     cc->class_by_name = riscv_cpu_class_by_name;
2757     cc->has_work = riscv_cpu_has_work;
2758     cc->mmu_index = riscv_cpu_mmu_index;
2759     cc->dump_state = riscv_cpu_dump_state;
2760     cc->set_pc = riscv_cpu_set_pc;
2761     cc->get_pc = riscv_cpu_get_pc;
2762     cc->gdb_read_register = riscv_cpu_gdb_read_register;
2763     cc->gdb_write_register = riscv_cpu_gdb_write_register;
2764     cc->gdb_stop_before_watchpoint = true;
2765     cc->disas_set_info = riscv_cpu_disas_set_info;
2766 #ifndef CONFIG_USER_ONLY
2767     cc->sysemu_ops = &riscv_sysemu_ops;
2768     cc->get_arch_id = riscv_get_arch_id;
2769 #endif
2770     cc->gdb_arch_name = riscv_gdb_arch_name;
2771 
2772     device_class_set_props(dc, riscv_cpu_properties);
2773 }
2774 
2775 static void riscv_cpu_class_init(ObjectClass *c, void *data)
2776 {
2777     RISCVCPUClass *mcc = RISCV_CPU_CLASS(c);
2778 
2779     mcc->misa_mxl_max = (uint32_t)(uintptr_t)data;
2780     riscv_cpu_validate_misa_mxl(mcc);
2781 }
2782 
2783 static void riscv_isa_string_ext(RISCVCPU *cpu, char **isa_str,
2784                                  int max_str_len)
2785 {
2786     const RISCVIsaExtData *edata;
2787     char *old = *isa_str;
2788     char *new = *isa_str;
2789 
2790     for (edata = isa_edata_arr; edata && edata->name; edata++) {
2791         if (isa_ext_is_enabled(cpu, edata->ext_enable_offset)) {
2792             new = g_strconcat(old, "_", edata->name, NULL);
2793             g_free(old);
2794             old = new;
2795         }
2796     }
2797 
2798     *isa_str = new;
2799 }
2800 
2801 char *riscv_isa_string(RISCVCPU *cpu)
2802 {
2803     RISCVCPUClass *mcc = RISCV_CPU_GET_CLASS(cpu);
2804     int i;
2805     const size_t maxlen = sizeof("rv128") + sizeof(riscv_single_letter_exts);
2806     char *isa_str = g_new(char, maxlen);
2807     int xlen = riscv_cpu_max_xlen(mcc);
2808     char *p = isa_str + snprintf(isa_str, maxlen, "rv%d", xlen);
2809 
2810     for (i = 0; i < sizeof(riscv_single_letter_exts) - 1; i++) {
2811         if (cpu->env.misa_ext & RV(riscv_single_letter_exts[i])) {
2812             *p++ = qemu_tolower(riscv_single_letter_exts[i]);
2813         }
2814     }
2815     *p = '\0';
2816     if (!cpu->cfg.short_isa_string) {
2817         riscv_isa_string_ext(cpu, &isa_str, maxlen);
2818     }
2819     return isa_str;
2820 }
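/*
 * Illustrative output of riscv_isa_string(): a 64-bit CPU with misa bits
 * IMAFDC plus, among the multi-letter extensions, zicsr and zifencei
 * enabled produces something like "rv64imafdc_zicsr_zifencei_...", with
 * the single letters following riscv_single_letter_exts order and every
 * enabled multi-letter extension appended with a '_' separator in
 * isa_edata_arr order. With short-isa-string=true the string stops at
 * "rv64imafdc".
 */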
2821 
2822 #ifndef CONFIG_USER_ONLY
2823 static char **riscv_isa_extensions_list(RISCVCPU *cpu, int *count)
2824 {
2825     int maxlen = ARRAY_SIZE(riscv_single_letter_exts) + ARRAY_SIZE(isa_edata_arr);
2826     char **extensions = g_new(char *, maxlen);
2827 
2828     for (int i = 0; i < sizeof(riscv_single_letter_exts) - 1; i++) {
2829         if (cpu->env.misa_ext & RV(riscv_single_letter_exts[i])) {
2830             extensions[*count] = g_new(char, 2);
2831             snprintf(extensions[*count], 2, "%c",
2832                      qemu_tolower(riscv_single_letter_exts[i]));
2833             (*count)++;
2834         }
2835     }
2836 
2837     for (const RISCVIsaExtData *edata = isa_edata_arr; edata->name; edata++) {
2838         if (isa_ext_is_enabled(cpu, edata->ext_enable_offset)) {
2839             extensions[*count] = g_strdup(edata->name);
2840             (*count)++;
2841         }
2842     }
2843 
2844     return extensions;
2845 }
2846 
2847 void riscv_isa_write_fdt(RISCVCPU *cpu, void *fdt, char *nodename)
2848 {
2849     RISCVCPUClass *mcc = RISCV_CPU_GET_CLASS(cpu);
2850     const size_t maxlen = sizeof("rv128i");
2851     g_autofree char *isa_base = g_new(char, maxlen);
2852     g_autofree char *riscv_isa;
2853     char **isa_extensions;
2854     int count = 0;
2855     int xlen = riscv_cpu_max_xlen(mcc);
2856 
2857     riscv_isa = riscv_isa_string(cpu);
2858     qemu_fdt_setprop_string(fdt, nodename, "riscv,isa", riscv_isa);
2859 
2860     snprintf(isa_base, maxlen, "rv%di", xlen);
2861     qemu_fdt_setprop_string(fdt, nodename, "riscv,isa-base", isa_base);
2862 
2863     isa_extensions = riscv_isa_extensions_list(cpu, &count);
2864     qemu_fdt_setprop_string_array(fdt, nodename, "riscv,isa-extensions",
2865                                   isa_extensions, count);
2866 
2867     for (int i = 0; i < count; i++) {
2868         g_free(isa_extensions[i]);
2869     }
2870 
2871     g_free(isa_extensions);
2872 }
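/*
 * Illustrative device tree output of the function above for an rv64 CPU
 * node: "riscv,isa" carries the full string from riscv_isa_string(),
 * "riscv,isa-base" is just "rv64i", and "riscv,isa-extensions" is a
 * string list such as [ "i", "m", "a", "f", "d", "c", "zicsr",
 * "zifencei", ... ] with one entry per enabled extension.
 */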
2873 #endif
2874 
2875 #define DEFINE_CPU(type_name, misa_mxl_max, initfn)         \
2876     {                                                       \
2877         .name = (type_name),                                \
2878         .parent = TYPE_RISCV_CPU,                           \
2879         .instance_init = (initfn),                          \
2880         .class_init = riscv_cpu_class_init,                 \
2881         .class_data = (void *)(misa_mxl_max)                \
2882     }
2883 
2884 #define DEFINE_DYNAMIC_CPU(type_name, misa_mxl_max, initfn) \
2885     {                                                       \
2886         .name = (type_name),                                \
2887         .parent = TYPE_RISCV_DYNAMIC_CPU,                   \
2888         .instance_init = (initfn),                          \
2889         .class_init = riscv_cpu_class_init,                 \
2890         .class_data = (void *)(misa_mxl_max)                \
2891     }
2892 
2893 #define DEFINE_VENDOR_CPU(type_name, misa_mxl_max, initfn)  \
2894     {                                                       \
2895         .name = (type_name),                                \
2896         .parent = TYPE_RISCV_VENDOR_CPU,                    \
2897         .instance_init = (initfn),                          \
2898         .class_init = riscv_cpu_class_init,                 \
2899         .class_data = (void *)(misa_mxl_max)                \
2900     }
2901 
2902 #define DEFINE_BARE_CPU(type_name, misa_mxl_max, initfn)    \
2903     {                                                       \
2904         .name = (type_name),                                \
2905         .parent = TYPE_RISCV_BARE_CPU,                      \
2906         .instance_init = (initfn),                          \
2907         .class_init = riscv_cpu_class_init,                 \
2908         .class_data = (void *)(misa_mxl_max)                \
2909     }
2910 
2911 #define DEFINE_PROFILE_CPU(type_name, misa_mxl_max, initfn) \
2912     {                                                       \
2913         .name = (type_name),                                \
2914         .parent = TYPE_RISCV_BARE_CPU,                      \
2915         .instance_init = (initfn),                          \
2916         .class_init = riscv_cpu_class_init,                 \
2917         .class_data = (void *)(misa_mxl_max)                \
2918     }
2919 
2920 static const TypeInfo riscv_cpu_type_infos[] = {
2921     {
2922         .name = TYPE_RISCV_CPU,
2923         .parent = TYPE_CPU,
2924         .instance_size = sizeof(RISCVCPU),
2925         .instance_align = __alignof(RISCVCPU),
2926         .instance_init = riscv_cpu_init,
2927         .instance_post_init = riscv_cpu_post_init,
2928         .abstract = true,
2929         .class_size = sizeof(RISCVCPUClass),
2930         .class_init = riscv_cpu_common_class_init,
2931     },
2932     {
2933         .name = TYPE_RISCV_DYNAMIC_CPU,
2934         .parent = TYPE_RISCV_CPU,
2935         .abstract = true,
2936     },
2937     {
2938         .name = TYPE_RISCV_VENDOR_CPU,
2939         .parent = TYPE_RISCV_CPU,
2940         .abstract = true,
2941     },
2942     {
2943         .name = TYPE_RISCV_BARE_CPU,
2944         .parent = TYPE_RISCV_CPU,
2945         .instance_init = riscv_bare_cpu_init,
2946         .abstract = true,
2947     },
2948 #if defined(TARGET_RISCV32)
2949     DEFINE_DYNAMIC_CPU(TYPE_RISCV_CPU_ANY,       MXL_RV32,  riscv_any_cpu_init),
2950     DEFINE_DYNAMIC_CPU(TYPE_RISCV_CPU_MAX,       MXL_RV32,  riscv_max_cpu_init),
2951     DEFINE_DYNAMIC_CPU(TYPE_RISCV_CPU_BASE32,    MXL_RV32,  rv32_base_cpu_init),
2952     DEFINE_VENDOR_CPU(TYPE_RISCV_CPU_IBEX,       MXL_RV32,  rv32_ibex_cpu_init),
2953     DEFINE_VENDOR_CPU(TYPE_RISCV_CPU_SIFIVE_E31, MXL_RV32,  rv32_sifive_e_cpu_init),
2954     DEFINE_VENDOR_CPU(TYPE_RISCV_CPU_SIFIVE_E34, MXL_RV32,  rv32_imafcu_nommu_cpu_init),
2955     DEFINE_VENDOR_CPU(TYPE_RISCV_CPU_SIFIVE_U34, MXL_RV32,  rv32_sifive_u_cpu_init),
2956     DEFINE_BARE_CPU(TYPE_RISCV_CPU_RV32I,        MXL_RV32,  rv32i_bare_cpu_init),
2957     DEFINE_BARE_CPU(TYPE_RISCV_CPU_RV32E,        MXL_RV32,  rv32e_bare_cpu_init),
2958 #elif defined(TARGET_RISCV64)
2959     DEFINE_DYNAMIC_CPU(TYPE_RISCV_CPU_ANY,       MXL_RV64,  riscv_any_cpu_init),
2960     DEFINE_DYNAMIC_CPU(TYPE_RISCV_CPU_MAX,       MXL_RV64,  riscv_max_cpu_init),
2961     DEFINE_DYNAMIC_CPU(TYPE_RISCV_CPU_BASE64,    MXL_RV64,  rv64_base_cpu_init),
2962     DEFINE_VENDOR_CPU(TYPE_RISCV_CPU_SIFIVE_E51, MXL_RV64,  rv64_sifive_e_cpu_init),
2963     DEFINE_VENDOR_CPU(TYPE_RISCV_CPU_SIFIVE_U54, MXL_RV64,  rv64_sifive_u_cpu_init),
2964     DEFINE_VENDOR_CPU(TYPE_RISCV_CPU_SHAKTI_C,   MXL_RV64,  rv64_sifive_u_cpu_init),
2965     DEFINE_VENDOR_CPU(TYPE_RISCV_CPU_THEAD_C906, MXL_RV64,  rv64_thead_c906_cpu_init),
2966     DEFINE_VENDOR_CPU(TYPE_RISCV_CPU_VEYRON_V1,  MXL_RV64,  rv64_veyron_v1_cpu_init),
2967 #ifdef CONFIG_TCG
2968     DEFINE_DYNAMIC_CPU(TYPE_RISCV_CPU_BASE128,   MXL_RV128, rv128_base_cpu_init),
2969 #endif /* CONFIG_TCG */
2970     DEFINE_BARE_CPU(TYPE_RISCV_CPU_RV64I,        MXL_RV64,  rv64i_bare_cpu_init),
2971     DEFINE_BARE_CPU(TYPE_RISCV_CPU_RV64E,        MXL_RV64,  rv64e_bare_cpu_init),
2972     DEFINE_PROFILE_CPU(TYPE_RISCV_CPU_RVA22U64,  MXL_RV64,  rva22u64_profile_cpu_init),
2973     DEFINE_PROFILE_CPU(TYPE_RISCV_CPU_RVA22S64,  MXL_RV64,  rva22s64_profile_cpu_init),
2974 #endif /* TARGET_RISCV64 */
2975 };
2976 
2977 DEFINE_TYPES(riscv_cpu_type_infos)
2978