xref: /openbmc/qemu/target/riscv/cpu.c (revision 41fc1f02947dd7a33b2c1d0e8474744b12f2514e)
1 /*
2  * QEMU RISC-V CPU
3  *
4  * Copyright (c) 2016-2017 Sagar Karandikar, sagark@eecs.berkeley.edu
5  * Copyright (c) 2017-2018 SiFive, Inc.
6  *
7  * This program is free software; you can redistribute it and/or modify it
8  * under the terms and conditions of the GNU General Public License,
9  * version 2 or later, as published by the Free Software Foundation.
10  *
11  * This program is distributed in the hope it will be useful, but WITHOUT
12  * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
13  * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
14  * more details.
15  *
16  * You should have received a copy of the GNU General Public License along with
17  * this program.  If not, see <http://www.gnu.org/licenses/>.
18  */
19 
20 #include "qemu/osdep.h"
21 #include "qemu/qemu-print.h"
22 #include "qemu/ctype.h"
23 #include "qemu/log.h"
24 #include "cpu.h"
25 #include "cpu_vendorid.h"
26 #include "internals.h"
27 #include "exec/exec-all.h"
28 #include "qapi/error.h"
29 #include "qapi/visitor.h"
30 #include "qemu/error-report.h"
31 #include "hw/qdev-properties.h"
32 #include "hw/core/qdev-prop-internal.h"
33 #include "migration/vmstate.h"
34 #include "fpu/softfloat-helpers.h"
35 #include "sysemu/device_tree.h"
36 #include "sysemu/kvm.h"
37 #include "sysemu/tcg.h"
38 #include "kvm/kvm_riscv.h"
39 #include "tcg/tcg-cpu.h"
40 #include "tcg/tcg.h"
41 
42 /* RISC-V CPU definitions */
43 static const char riscv_single_letter_exts[] = "IEMAFDQCBPVH";
44 const uint32_t misa_bits[] = {RVI, RVE, RVM, RVA, RVF, RVD, RVV,
45                               RVC, RVS, RVU, RVH, RVJ, RVG, RVB, 0};
46 
47 /*
48  * From vector_helper.c
49  * Note that vector data is stored in host-endian 64-bit chunks,
50  * so addressing bytes needs a host-endian fixup.
51  */
52 #if HOST_BIG_ENDIAN
53 #define BYTE(x)   ((x) ^ 7)
54 #else
55 #define BYTE(x)   (x)
56 #endif
57 
58 bool riscv_cpu_is_32bit(RISCVCPU *cpu)
59 {
60     return riscv_cpu_mxl(&cpu->env) == MXL_RV32;
61 }
62 
63 /* Hash that stores general user set numeric options */
64 static GHashTable *general_user_opts;
65 
66 static void cpu_option_add_user_setting(const char *optname, uint32_t value)
67 {
68     g_hash_table_insert(general_user_opts, (gpointer)optname,
69                         GUINT_TO_POINTER(value));
70 }
71 
72 bool riscv_cpu_option_set(const char *optname)
73 {
74     return g_hash_table_contains(general_user_opts, optname);
75 }
76 
77 #define ISA_EXT_DATA_ENTRY(_name, _min_ver, _prop) \
78     {#_name, _min_ver, CPU_CFG_OFFSET(_prop)}
79 
80 /*
81  * Here are the ordering rules of extension naming defined by RISC-V
82  * specification :
83  * 1. All extensions should be separated from other multi-letter extensions
84  *    by an underscore.
85  * 2. The first letter following the 'Z' conventionally indicates the most
86  *    closely related alphabetical extension category, IMAFDQLCBKJTPVH.
87  *    If multiple 'Z' extensions are named, they should be ordered first
88  *    by category, then alphabetically within a category.
89  * 3. Standard supervisor-level extensions (starts with 'S') should be
90  *    listed after standard unprivileged extensions.  If multiple
91  *    supervisor-level extensions are listed, they should be ordered
92  *    alphabetically.
93  * 4. Non-standard extensions (starts with 'X') must be listed after all
94  *    standard extensions. They must be separated from other multi-letter
95  *    extensions by an underscore.
96  *
97  * Single letter extensions are checked in riscv_cpu_validate_misa_priv()
98  * instead.
99  */
100 const RISCVIsaExtData isa_edata_arr[] = {
101     ISA_EXT_DATA_ENTRY(zic64b, PRIV_VERSION_1_12_0, ext_zic64b),
102     ISA_EXT_DATA_ENTRY(zicbom, PRIV_VERSION_1_12_0, ext_zicbom),
103     ISA_EXT_DATA_ENTRY(zicbop, PRIV_VERSION_1_12_0, ext_zicbop),
104     ISA_EXT_DATA_ENTRY(zicboz, PRIV_VERSION_1_12_0, ext_zicboz),
105     ISA_EXT_DATA_ENTRY(ziccamoa, PRIV_VERSION_1_11_0, has_priv_1_11),
106     ISA_EXT_DATA_ENTRY(ziccif, PRIV_VERSION_1_11_0, has_priv_1_11),
107     ISA_EXT_DATA_ENTRY(zicclsm, PRIV_VERSION_1_11_0, has_priv_1_11),
108     ISA_EXT_DATA_ENTRY(ziccrse, PRIV_VERSION_1_11_0, has_priv_1_11),
109     ISA_EXT_DATA_ENTRY(zicond, PRIV_VERSION_1_12_0, ext_zicond),
110     ISA_EXT_DATA_ENTRY(zicntr, PRIV_VERSION_1_12_0, ext_zicntr),
111     ISA_EXT_DATA_ENTRY(zicsr, PRIV_VERSION_1_10_0, ext_zicsr),
112     ISA_EXT_DATA_ENTRY(zifencei, PRIV_VERSION_1_10_0, ext_zifencei),
113     ISA_EXT_DATA_ENTRY(zihintntl, PRIV_VERSION_1_10_0, ext_zihintntl),
114     ISA_EXT_DATA_ENTRY(zihintpause, PRIV_VERSION_1_10_0, ext_zihintpause),
115     ISA_EXT_DATA_ENTRY(zihpm, PRIV_VERSION_1_12_0, ext_zihpm),
116     ISA_EXT_DATA_ENTRY(zimop, PRIV_VERSION_1_13_0, ext_zimop),
117     ISA_EXT_DATA_ENTRY(zmmul, PRIV_VERSION_1_12_0, ext_zmmul),
118     ISA_EXT_DATA_ENTRY(za64rs, PRIV_VERSION_1_12_0, has_priv_1_12),
119     ISA_EXT_DATA_ENTRY(zaamo, PRIV_VERSION_1_12_0, ext_zaamo),
120     ISA_EXT_DATA_ENTRY(zabha, PRIV_VERSION_1_13_0, ext_zabha),
121     ISA_EXT_DATA_ENTRY(zacas, PRIV_VERSION_1_12_0, ext_zacas),
122     ISA_EXT_DATA_ENTRY(zama16b, PRIV_VERSION_1_13_0, ext_zama16b),
123     ISA_EXT_DATA_ENTRY(zalrsc, PRIV_VERSION_1_12_0, ext_zalrsc),
124     ISA_EXT_DATA_ENTRY(zawrs, PRIV_VERSION_1_12_0, ext_zawrs),
125     ISA_EXT_DATA_ENTRY(zfa, PRIV_VERSION_1_12_0, ext_zfa),
126     ISA_EXT_DATA_ENTRY(zfbfmin, PRIV_VERSION_1_12_0, ext_zfbfmin),
127     ISA_EXT_DATA_ENTRY(zfh, PRIV_VERSION_1_11_0, ext_zfh),
128     ISA_EXT_DATA_ENTRY(zfhmin, PRIV_VERSION_1_11_0, ext_zfhmin),
129     ISA_EXT_DATA_ENTRY(zfinx, PRIV_VERSION_1_12_0, ext_zfinx),
130     ISA_EXT_DATA_ENTRY(zdinx, PRIV_VERSION_1_12_0, ext_zdinx),
131     ISA_EXT_DATA_ENTRY(zca, PRIV_VERSION_1_12_0, ext_zca),
132     ISA_EXT_DATA_ENTRY(zcb, PRIV_VERSION_1_12_0, ext_zcb),
133     ISA_EXT_DATA_ENTRY(zcf, PRIV_VERSION_1_12_0, ext_zcf),
134     ISA_EXT_DATA_ENTRY(zcd, PRIV_VERSION_1_12_0, ext_zcd),
135     ISA_EXT_DATA_ENTRY(zce, PRIV_VERSION_1_12_0, ext_zce),
136     ISA_EXT_DATA_ENTRY(zcmop, PRIV_VERSION_1_13_0, ext_zcmop),
137     ISA_EXT_DATA_ENTRY(zcmp, PRIV_VERSION_1_12_0, ext_zcmp),
138     ISA_EXT_DATA_ENTRY(zcmt, PRIV_VERSION_1_12_0, ext_zcmt),
139     ISA_EXT_DATA_ENTRY(zba, PRIV_VERSION_1_12_0, ext_zba),
140     ISA_EXT_DATA_ENTRY(zbb, PRIV_VERSION_1_12_0, ext_zbb),
141     ISA_EXT_DATA_ENTRY(zbc, PRIV_VERSION_1_12_0, ext_zbc),
142     ISA_EXT_DATA_ENTRY(zbkb, PRIV_VERSION_1_12_0, ext_zbkb),
143     ISA_EXT_DATA_ENTRY(zbkc, PRIV_VERSION_1_12_0, ext_zbkc),
144     ISA_EXT_DATA_ENTRY(zbkx, PRIV_VERSION_1_12_0, ext_zbkx),
145     ISA_EXT_DATA_ENTRY(zbs, PRIV_VERSION_1_12_0, ext_zbs),
146     ISA_EXT_DATA_ENTRY(zk, PRIV_VERSION_1_12_0, ext_zk),
147     ISA_EXT_DATA_ENTRY(zkn, PRIV_VERSION_1_12_0, ext_zkn),
148     ISA_EXT_DATA_ENTRY(zknd, PRIV_VERSION_1_12_0, ext_zknd),
149     ISA_EXT_DATA_ENTRY(zkne, PRIV_VERSION_1_12_0, ext_zkne),
150     ISA_EXT_DATA_ENTRY(zknh, PRIV_VERSION_1_12_0, ext_zknh),
151     ISA_EXT_DATA_ENTRY(zkr, PRIV_VERSION_1_12_0, ext_zkr),
152     ISA_EXT_DATA_ENTRY(zks, PRIV_VERSION_1_12_0, ext_zks),
153     ISA_EXT_DATA_ENTRY(zksed, PRIV_VERSION_1_12_0, ext_zksed),
154     ISA_EXT_DATA_ENTRY(zksh, PRIV_VERSION_1_12_0, ext_zksh),
155     ISA_EXT_DATA_ENTRY(zkt, PRIV_VERSION_1_12_0, ext_zkt),
156     ISA_EXT_DATA_ENTRY(ztso, PRIV_VERSION_1_12_0, ext_ztso),
157     ISA_EXT_DATA_ENTRY(zvbb, PRIV_VERSION_1_12_0, ext_zvbb),
158     ISA_EXT_DATA_ENTRY(zvbc, PRIV_VERSION_1_12_0, ext_zvbc),
159     ISA_EXT_DATA_ENTRY(zve32f, PRIV_VERSION_1_10_0, ext_zve32f),
160     ISA_EXT_DATA_ENTRY(zve32x, PRIV_VERSION_1_10_0, ext_zve32x),
161     ISA_EXT_DATA_ENTRY(zve64f, PRIV_VERSION_1_10_0, ext_zve64f),
162     ISA_EXT_DATA_ENTRY(zve64d, PRIV_VERSION_1_10_0, ext_zve64d),
163     ISA_EXT_DATA_ENTRY(zve64x, PRIV_VERSION_1_10_0, ext_zve64x),
164     ISA_EXT_DATA_ENTRY(zvfbfmin, PRIV_VERSION_1_12_0, ext_zvfbfmin),
165     ISA_EXT_DATA_ENTRY(zvfbfwma, PRIV_VERSION_1_12_0, ext_zvfbfwma),
166     ISA_EXT_DATA_ENTRY(zvfh, PRIV_VERSION_1_12_0, ext_zvfh),
167     ISA_EXT_DATA_ENTRY(zvfhmin, PRIV_VERSION_1_12_0, ext_zvfhmin),
168     ISA_EXT_DATA_ENTRY(zvkb, PRIV_VERSION_1_12_0, ext_zvkb),
169     ISA_EXT_DATA_ENTRY(zvkg, PRIV_VERSION_1_12_0, ext_zvkg),
170     ISA_EXT_DATA_ENTRY(zvkn, PRIV_VERSION_1_12_0, ext_zvkn),
171     ISA_EXT_DATA_ENTRY(zvknc, PRIV_VERSION_1_12_0, ext_zvknc),
172     ISA_EXT_DATA_ENTRY(zvkned, PRIV_VERSION_1_12_0, ext_zvkned),
173     ISA_EXT_DATA_ENTRY(zvkng, PRIV_VERSION_1_12_0, ext_zvkng),
174     ISA_EXT_DATA_ENTRY(zvknha, PRIV_VERSION_1_12_0, ext_zvknha),
175     ISA_EXT_DATA_ENTRY(zvknhb, PRIV_VERSION_1_12_0, ext_zvknhb),
176     ISA_EXT_DATA_ENTRY(zvks, PRIV_VERSION_1_12_0, ext_zvks),
177     ISA_EXT_DATA_ENTRY(zvksc, PRIV_VERSION_1_12_0, ext_zvksc),
178     ISA_EXT_DATA_ENTRY(zvksed, PRIV_VERSION_1_12_0, ext_zvksed),
179     ISA_EXT_DATA_ENTRY(zvksg, PRIV_VERSION_1_12_0, ext_zvksg),
180     ISA_EXT_DATA_ENTRY(zvksh, PRIV_VERSION_1_12_0, ext_zvksh),
181     ISA_EXT_DATA_ENTRY(zvkt, PRIV_VERSION_1_12_0, ext_zvkt),
182     ISA_EXT_DATA_ENTRY(zhinx, PRIV_VERSION_1_12_0, ext_zhinx),
183     ISA_EXT_DATA_ENTRY(zhinxmin, PRIV_VERSION_1_12_0, ext_zhinxmin),
184     ISA_EXT_DATA_ENTRY(smaia, PRIV_VERSION_1_12_0, ext_smaia),
185     ISA_EXT_DATA_ENTRY(smcntrpmf, PRIV_VERSION_1_12_0, ext_smcntrpmf),
186     ISA_EXT_DATA_ENTRY(smepmp, PRIV_VERSION_1_12_0, ext_smepmp),
187     ISA_EXT_DATA_ENTRY(smstateen, PRIV_VERSION_1_12_0, ext_smstateen),
188     ISA_EXT_DATA_ENTRY(ssaia, PRIV_VERSION_1_12_0, ext_ssaia),
189     ISA_EXT_DATA_ENTRY(ssccptr, PRIV_VERSION_1_11_0, has_priv_1_11),
190     ISA_EXT_DATA_ENTRY(sscofpmf, PRIV_VERSION_1_12_0, ext_sscofpmf),
191     ISA_EXT_DATA_ENTRY(sscounterenw, PRIV_VERSION_1_12_0, has_priv_1_12),
192     ISA_EXT_DATA_ENTRY(sstc, PRIV_VERSION_1_12_0, ext_sstc),
193     ISA_EXT_DATA_ENTRY(sstvala, PRIV_VERSION_1_12_0, has_priv_1_12),
194     ISA_EXT_DATA_ENTRY(sstvecd, PRIV_VERSION_1_12_0, has_priv_1_12),
195     ISA_EXT_DATA_ENTRY(svade, PRIV_VERSION_1_11_0, ext_svade),
196     ISA_EXT_DATA_ENTRY(svadu, PRIV_VERSION_1_12_0, ext_svadu),
197     ISA_EXT_DATA_ENTRY(svinval, PRIV_VERSION_1_12_0, ext_svinval),
198     ISA_EXT_DATA_ENTRY(svnapot, PRIV_VERSION_1_12_0, ext_svnapot),
199     ISA_EXT_DATA_ENTRY(svpbmt, PRIV_VERSION_1_12_0, ext_svpbmt),
200     ISA_EXT_DATA_ENTRY(svvptc, PRIV_VERSION_1_13_0, ext_svvptc),
201     ISA_EXT_DATA_ENTRY(xtheadba, PRIV_VERSION_1_11_0, ext_xtheadba),
202     ISA_EXT_DATA_ENTRY(xtheadbb, PRIV_VERSION_1_11_0, ext_xtheadbb),
203     ISA_EXT_DATA_ENTRY(xtheadbs, PRIV_VERSION_1_11_0, ext_xtheadbs),
204     ISA_EXT_DATA_ENTRY(xtheadcmo, PRIV_VERSION_1_11_0, ext_xtheadcmo),
205     ISA_EXT_DATA_ENTRY(xtheadcondmov, PRIV_VERSION_1_11_0, ext_xtheadcondmov),
206     ISA_EXT_DATA_ENTRY(xtheadfmemidx, PRIV_VERSION_1_11_0, ext_xtheadfmemidx),
207     ISA_EXT_DATA_ENTRY(xtheadfmv, PRIV_VERSION_1_11_0, ext_xtheadfmv),
208     ISA_EXT_DATA_ENTRY(xtheadmac, PRIV_VERSION_1_11_0, ext_xtheadmac),
209     ISA_EXT_DATA_ENTRY(xtheadmemidx, PRIV_VERSION_1_11_0, ext_xtheadmemidx),
210     ISA_EXT_DATA_ENTRY(xtheadmempair, PRIV_VERSION_1_11_0, ext_xtheadmempair),
211     ISA_EXT_DATA_ENTRY(xtheadsync, PRIV_VERSION_1_11_0, ext_xtheadsync),
212     ISA_EXT_DATA_ENTRY(xventanacondops, PRIV_VERSION_1_12_0, ext_XVentanaCondOps),
213 
214     DEFINE_PROP_END_OF_LIST(),
215 };
216 
217 bool isa_ext_is_enabled(RISCVCPU *cpu, uint32_t ext_offset)
218 {
219     bool *ext_enabled = (void *)&cpu->cfg + ext_offset;
220 
221     return *ext_enabled;
222 }
223 
224 void isa_ext_update_enabled(RISCVCPU *cpu, uint32_t ext_offset, bool en)
225 {
226     bool *ext_enabled = (void *)&cpu->cfg + ext_offset;
227 
228     *ext_enabled = en;
229 }
230 
231 bool riscv_cpu_is_vendor(Object *cpu_obj)
232 {
233     return object_dynamic_cast(cpu_obj, TYPE_RISCV_VENDOR_CPU) != NULL;
234 }
235 
236 const char * const riscv_int_regnames[] = {
237     "x0/zero", "x1/ra",  "x2/sp",  "x3/gp",  "x4/tp",  "x5/t0",   "x6/t1",
238     "x7/t2",   "x8/s0",  "x9/s1",  "x10/a0", "x11/a1", "x12/a2",  "x13/a3",
239     "x14/a4",  "x15/a5", "x16/a6", "x17/a7", "x18/s2", "x19/s3",  "x20/s4",
240     "x21/s5",  "x22/s6", "x23/s7", "x24/s8", "x25/s9", "x26/s10", "x27/s11",
241     "x28/t3",  "x29/t4", "x30/t5", "x31/t6"
242 };
243 
244 const char * const riscv_int_regnamesh[] = {
245     "x0h/zeroh", "x1h/rah",  "x2h/sph",   "x3h/gph",   "x4h/tph",  "x5h/t0h",
246     "x6h/t1h",   "x7h/t2h",  "x8h/s0h",   "x9h/s1h",   "x10h/a0h", "x11h/a1h",
247     "x12h/a2h",  "x13h/a3h", "x14h/a4h",  "x15h/a5h",  "x16h/a6h", "x17h/a7h",
248     "x18h/s2h",  "x19h/s3h", "x20h/s4h",  "x21h/s5h",  "x22h/s6h", "x23h/s7h",
249     "x24h/s8h",  "x25h/s9h", "x26h/s10h", "x27h/s11h", "x28h/t3h", "x29h/t4h",
250     "x30h/t5h",  "x31h/t6h"
251 };
252 
253 const char * const riscv_fpr_regnames[] = {
254     "f0/ft0",   "f1/ft1",  "f2/ft2",   "f3/ft3",   "f4/ft4",  "f5/ft5",
255     "f6/ft6",   "f7/ft7",  "f8/fs0",   "f9/fs1",   "f10/fa0", "f11/fa1",
256     "f12/fa2",  "f13/fa3", "f14/fa4",  "f15/fa5",  "f16/fa6", "f17/fa7",
257     "f18/fs2",  "f19/fs3", "f20/fs4",  "f21/fs5",  "f22/fs6", "f23/fs7",
258     "f24/fs8",  "f25/fs9", "f26/fs10", "f27/fs11", "f28/ft8", "f29/ft9",
259     "f30/ft10", "f31/ft11"
260 };
261 
262 const char * const riscv_rvv_regnames[] = {
263   "v0",  "v1",  "v2",  "v3",  "v4",  "v5",  "v6",
264   "v7",  "v8",  "v9",  "v10", "v11", "v12", "v13",
265   "v14", "v15", "v16", "v17", "v18", "v19", "v20",
266   "v21", "v22", "v23", "v24", "v25", "v26", "v27",
267   "v28", "v29", "v30", "v31"
268 };
269 
270 static const char * const riscv_excp_names[] = {
271     "misaligned_fetch",
272     "fault_fetch",
273     "illegal_instruction",
274     "breakpoint",
275     "misaligned_load",
276     "fault_load",
277     "misaligned_store",
278     "fault_store",
279     "user_ecall",
280     "supervisor_ecall",
281     "hypervisor_ecall",
282     "machine_ecall",
283     "exec_page_fault",
284     "load_page_fault",
285     "reserved",
286     "store_page_fault",
287     "reserved",
288     "reserved",
289     "reserved",
290     "reserved",
291     "guest_exec_page_fault",
292     "guest_load_page_fault",
293     "reserved",
294     "guest_store_page_fault",
295 };
296 
297 static const char * const riscv_intr_names[] = {
298     "u_software",
299     "s_software",
300     "vs_software",
301     "m_software",
302     "u_timer",
303     "s_timer",
304     "vs_timer",
305     "m_timer",
306     "u_external",
307     "s_external",
308     "vs_external",
309     "m_external",
310     "reserved",
311     "reserved",
312     "reserved",
313     "reserved"
314 };
315 
316 const char *riscv_cpu_get_trap_name(target_ulong cause, bool async)
317 {
318     if (async) {
319         return (cause < ARRAY_SIZE(riscv_intr_names)) ?
320                riscv_intr_names[cause] : "(unknown)";
321     } else {
322         return (cause < ARRAY_SIZE(riscv_excp_names)) ?
323                riscv_excp_names[cause] : "(unknown)";
324     }
325 }
326 
327 void riscv_cpu_set_misa_ext(CPURISCVState *env, uint32_t ext)
328 {
329     env->misa_ext_mask = env->misa_ext = ext;
330 }
331 
332 int riscv_cpu_max_xlen(RISCVCPUClass *mcc)
333 {
334     return 16 << mcc->misa_mxl_max;
335 }
336 
337 #ifndef CONFIG_USER_ONLY
338 static uint8_t satp_mode_from_str(const char *satp_mode_str)
339 {
340     if (!strncmp(satp_mode_str, "mbare", 5)) {
341         return VM_1_10_MBARE;
342     }
343 
344     if (!strncmp(satp_mode_str, "sv32", 4)) {
345         return VM_1_10_SV32;
346     }
347 
348     if (!strncmp(satp_mode_str, "sv39", 4)) {
349         return VM_1_10_SV39;
350     }
351 
352     if (!strncmp(satp_mode_str, "sv48", 4)) {
353         return VM_1_10_SV48;
354     }
355 
356     if (!strncmp(satp_mode_str, "sv57", 4)) {
357         return VM_1_10_SV57;
358     }
359 
360     if (!strncmp(satp_mode_str, "sv64", 4)) {
361         return VM_1_10_SV64;
362     }
363 
364     g_assert_not_reached();
365 }
366 
367 uint8_t satp_mode_max_from_map(uint32_t map)
368 {
369     /*
370      * 'map = 0' will make us return (31 - 32), which C will
371      * happily overflow to UINT_MAX. There's no good result to
372      * return if 'map = 0' (e.g. returning 0 will be ambiguous
373      * with the result for 'map = 1').
374      *
375      * Assert out if map = 0. Callers will have to deal with
376      * it outside of this function.
377      */
378     g_assert(map > 0);
379 
380     /* map here has at least one bit set, so no problem with clz */
381     return 31 - __builtin_clz(map);
382 }
383 
384 const char *satp_mode_str(uint8_t satp_mode, bool is_32_bit)
385 {
386     if (is_32_bit) {
387         switch (satp_mode) {
388         case VM_1_10_SV32:
389             return "sv32";
390         case VM_1_10_MBARE:
391             return "none";
392         }
393     } else {
394         switch (satp_mode) {
395         case VM_1_10_SV64:
396             return "sv64";
397         case VM_1_10_SV57:
398             return "sv57";
399         case VM_1_10_SV48:
400             return "sv48";
401         case VM_1_10_SV39:
402             return "sv39";
403         case VM_1_10_MBARE:
404             return "none";
405         }
406     }
407 
408     g_assert_not_reached();
409 }
410 
411 static void set_satp_mode_max_supported(RISCVCPU *cpu,
412                                         uint8_t satp_mode)
413 {
414     bool rv32 = riscv_cpu_mxl(&cpu->env) == MXL_RV32;
415     const bool *valid_vm = rv32 ? valid_vm_1_10_32 : valid_vm_1_10_64;
416 
417     for (int i = 0; i <= satp_mode; ++i) {
418         if (valid_vm[i]) {
419             cpu->cfg.satp_mode.supported |= (1 << i);
420         }
421     }
422 }
423 
424 /* Set the satp mode to the max supported */
425 static void set_satp_mode_default_map(RISCVCPU *cpu)
426 {
427     /*
428      * Bare CPUs do not default to the max available.
429      * Users must set a valid satp_mode in the command
430      * line.
431      */
432     if (object_dynamic_cast(OBJECT(cpu), TYPE_RISCV_BARE_CPU) != NULL) {
433         warn_report("No satp mode set. Defaulting to 'bare'");
434         cpu->cfg.satp_mode.map = (1 << VM_1_10_MBARE);
435         return;
436     }
437 
438     cpu->cfg.satp_mode.map = cpu->cfg.satp_mode.supported;
439 }
440 #endif
441 
442 static void riscv_max_cpu_init(Object *obj)
443 {
444     RISCVCPU *cpu = RISCV_CPU(obj);
445     CPURISCVState *env = &cpu->env;
446 
447     cpu->cfg.mmu = true;
448     cpu->cfg.pmp = true;
449 
450     env->priv_ver = PRIV_VERSION_LATEST;
451 #ifndef CONFIG_USER_ONLY
452     set_satp_mode_max_supported(RISCV_CPU(obj),
453         riscv_cpu_mxl(&RISCV_CPU(obj)->env) == MXL_RV32 ?
454         VM_1_10_SV32 : VM_1_10_SV57);
455 #endif
456 }
457 
458 #if defined(TARGET_RISCV64)
459 static void rv64_base_cpu_init(Object *obj)
460 {
461     RISCVCPU *cpu = RISCV_CPU(obj);
462     CPURISCVState *env = &cpu->env;
463 
464     cpu->cfg.mmu = true;
465     cpu->cfg.pmp = true;
466 
467     /* Set latest version of privileged specification */
468     env->priv_ver = PRIV_VERSION_LATEST;
469 #ifndef CONFIG_USER_ONLY
470     set_satp_mode_max_supported(RISCV_CPU(obj), VM_1_10_SV57);
471 #endif
472 }
473 
474 static void rv64_sifive_u_cpu_init(Object *obj)
475 {
476     RISCVCPU *cpu = RISCV_CPU(obj);
477     CPURISCVState *env = &cpu->env;
478     riscv_cpu_set_misa_ext(env, RVI | RVM | RVA | RVF | RVD | RVC | RVS | RVU);
479     env->priv_ver = PRIV_VERSION_1_10_0;
480 #ifndef CONFIG_USER_ONLY
481     set_satp_mode_max_supported(RISCV_CPU(obj), VM_1_10_SV39);
482 #endif
483 
484     /* inherited from parent obj via riscv_cpu_init() */
485     cpu->cfg.ext_zifencei = true;
486     cpu->cfg.ext_zicsr = true;
487     cpu->cfg.mmu = true;
488     cpu->cfg.pmp = true;
489 }
490 
491 static void rv64_sifive_e_cpu_init(Object *obj)
492 {
493     CPURISCVState *env = &RISCV_CPU(obj)->env;
494     RISCVCPU *cpu = RISCV_CPU(obj);
495 
496     riscv_cpu_set_misa_ext(env, RVI | RVM | RVA | RVC | RVU);
497     env->priv_ver = PRIV_VERSION_1_10_0;
498 #ifndef CONFIG_USER_ONLY
499     set_satp_mode_max_supported(cpu, VM_1_10_MBARE);
500 #endif
501 
502     /* inherited from parent obj via riscv_cpu_init() */
503     cpu->cfg.ext_zifencei = true;
504     cpu->cfg.ext_zicsr = true;
505     cpu->cfg.pmp = true;
506 }
507 
508 static void rv64_thead_c906_cpu_init(Object *obj)
509 {
510     CPURISCVState *env = &RISCV_CPU(obj)->env;
511     RISCVCPU *cpu = RISCV_CPU(obj);
512 
513     riscv_cpu_set_misa_ext(env, RVG | RVC | RVS | RVU);
514     env->priv_ver = PRIV_VERSION_1_11_0;
515 
516     cpu->cfg.ext_zfa = true;
517     cpu->cfg.ext_zfh = true;
518     cpu->cfg.mmu = true;
519     cpu->cfg.ext_xtheadba = true;
520     cpu->cfg.ext_xtheadbb = true;
521     cpu->cfg.ext_xtheadbs = true;
522     cpu->cfg.ext_xtheadcmo = true;
523     cpu->cfg.ext_xtheadcondmov = true;
524     cpu->cfg.ext_xtheadfmemidx = true;
525     cpu->cfg.ext_xtheadmac = true;
526     cpu->cfg.ext_xtheadmemidx = true;
527     cpu->cfg.ext_xtheadmempair = true;
528     cpu->cfg.ext_xtheadsync = true;
529 
530     cpu->cfg.mvendorid = THEAD_VENDOR_ID;
531 #ifndef CONFIG_USER_ONLY
532     set_satp_mode_max_supported(cpu, VM_1_10_SV39);
533     th_register_custom_csrs(cpu);
534 #endif
535 
536     /* inherited from parent obj via riscv_cpu_init() */
537     cpu->cfg.pmp = true;
538 }
539 
540 static void rv64_veyron_v1_cpu_init(Object *obj)
541 {
542     CPURISCVState *env = &RISCV_CPU(obj)->env;
543     RISCVCPU *cpu = RISCV_CPU(obj);
544 
545     riscv_cpu_set_misa_ext(env, RVG | RVC | RVS | RVU | RVH);
546     env->priv_ver = PRIV_VERSION_1_12_0;
547 
548     /* Enable ISA extensions */
549     cpu->cfg.mmu = true;
550     cpu->cfg.ext_zifencei = true;
551     cpu->cfg.ext_zicsr = true;
552     cpu->cfg.pmp = true;
553     cpu->cfg.ext_zicbom = true;
554     cpu->cfg.cbom_blocksize = 64;
555     cpu->cfg.cboz_blocksize = 64;
556     cpu->cfg.ext_zicboz = true;
557     cpu->cfg.ext_smaia = true;
558     cpu->cfg.ext_ssaia = true;
559     cpu->cfg.ext_sscofpmf = true;
560     cpu->cfg.ext_sstc = true;
561     cpu->cfg.ext_svinval = true;
562     cpu->cfg.ext_svnapot = true;
563     cpu->cfg.ext_svpbmt = true;
564     cpu->cfg.ext_smstateen = true;
565     cpu->cfg.ext_zba = true;
566     cpu->cfg.ext_zbb = true;
567     cpu->cfg.ext_zbc = true;
568     cpu->cfg.ext_zbs = true;
569     cpu->cfg.ext_XVentanaCondOps = true;
570 
571     cpu->cfg.mvendorid = VEYRON_V1_MVENDORID;
572     cpu->cfg.marchid = VEYRON_V1_MARCHID;
573     cpu->cfg.mimpid = VEYRON_V1_MIMPID;
574 
575 #ifndef CONFIG_USER_ONLY
576     set_satp_mode_max_supported(cpu, VM_1_10_SV48);
577 #endif
578 }
579 
580 #ifdef CONFIG_TCG
581 static void rv128_base_cpu_init(Object *obj)
582 {
583     RISCVCPU *cpu = RISCV_CPU(obj);
584     CPURISCVState *env = &cpu->env;
585 
586     if (qemu_tcg_mttcg_enabled()) {
587         /* Missing 128-bit aligned atomics */
588         error_report("128-bit RISC-V currently does not work with Multi "
589                      "Threaded TCG. Please use: -accel tcg,thread=single");
590         exit(EXIT_FAILURE);
591     }
592 
593     cpu->cfg.mmu = true;
594     cpu->cfg.pmp = true;
595 
596     /* Set latest version of privileged specification */
597     env->priv_ver = PRIV_VERSION_LATEST;
598 #ifndef CONFIG_USER_ONLY
599     set_satp_mode_max_supported(RISCV_CPU(obj), VM_1_10_SV57);
600 #endif
601 }
602 #endif /* CONFIG_TCG */
603 
604 static void rv64i_bare_cpu_init(Object *obj)
605 {
606     CPURISCVState *env = &RISCV_CPU(obj)->env;
607     riscv_cpu_set_misa_ext(env, RVI);
608 }
609 
610 static void rv64e_bare_cpu_init(Object *obj)
611 {
612     CPURISCVState *env = &RISCV_CPU(obj)->env;
613     riscv_cpu_set_misa_ext(env, RVE);
614 }
615 
616 #endif /* !TARGET_RISCV64 */
617 
618 #if defined(TARGET_RISCV32) || \
619     (defined(TARGET_RISCV64) && !defined(CONFIG_USER_ONLY))
620 
621 static void rv32_base_cpu_init(Object *obj)
622 {
623     RISCVCPU *cpu = RISCV_CPU(obj);
624     CPURISCVState *env = &cpu->env;
625 
626     cpu->cfg.mmu = true;
627     cpu->cfg.pmp = true;
628 
629     /* Set latest version of privileged specification */
630     env->priv_ver = PRIV_VERSION_LATEST;
631 #ifndef CONFIG_USER_ONLY
632     set_satp_mode_max_supported(RISCV_CPU(obj), VM_1_10_SV32);
633 #endif
634 }
635 
636 static void rv32_sifive_u_cpu_init(Object *obj)
637 {
638     RISCVCPU *cpu = RISCV_CPU(obj);
639     CPURISCVState *env = &cpu->env;
640     riscv_cpu_set_misa_ext(env, RVI | RVM | RVA | RVF | RVD | RVC | RVS | RVU);
641     env->priv_ver = PRIV_VERSION_1_10_0;
642 #ifndef CONFIG_USER_ONLY
643     set_satp_mode_max_supported(RISCV_CPU(obj), VM_1_10_SV32);
644 #endif
645 
646     /* inherited from parent obj via riscv_cpu_init() */
647     cpu->cfg.ext_zifencei = true;
648     cpu->cfg.ext_zicsr = true;
649     cpu->cfg.mmu = true;
650     cpu->cfg.pmp = true;
651 }
652 
653 static void rv32_sifive_e_cpu_init(Object *obj)
654 {
655     CPURISCVState *env = &RISCV_CPU(obj)->env;
656     RISCVCPU *cpu = RISCV_CPU(obj);
657 
658     riscv_cpu_set_misa_ext(env, RVI | RVM | RVA | RVC | RVU);
659     env->priv_ver = PRIV_VERSION_1_10_0;
660 #ifndef CONFIG_USER_ONLY
661     set_satp_mode_max_supported(cpu, VM_1_10_MBARE);
662 #endif
663 
664     /* inherited from parent obj via riscv_cpu_init() */
665     cpu->cfg.ext_zifencei = true;
666     cpu->cfg.ext_zicsr = true;
667     cpu->cfg.pmp = true;
668 }
669 
670 static void rv32_ibex_cpu_init(Object *obj)
671 {
672     CPURISCVState *env = &RISCV_CPU(obj)->env;
673     RISCVCPU *cpu = RISCV_CPU(obj);
674 
675     riscv_cpu_set_misa_ext(env, RVI | RVM | RVC | RVU);
676     env->priv_ver = PRIV_VERSION_1_12_0;
677 #ifndef CONFIG_USER_ONLY
678     set_satp_mode_max_supported(cpu, VM_1_10_MBARE);
679 #endif
680     /* inherited from parent obj via riscv_cpu_init() */
681     cpu->cfg.ext_zifencei = true;
682     cpu->cfg.ext_zicsr = true;
683     cpu->cfg.pmp = true;
684     cpu->cfg.ext_smepmp = true;
685 
686     cpu->cfg.ext_zba = true;
687     cpu->cfg.ext_zbb = true;
688     cpu->cfg.ext_zbc = true;
689     cpu->cfg.ext_zbs = true;
690 }
691 
692 static void rv32_imafcu_nommu_cpu_init(Object *obj)
693 {
694     CPURISCVState *env = &RISCV_CPU(obj)->env;
695     RISCVCPU *cpu = RISCV_CPU(obj);
696 
697     riscv_cpu_set_misa_ext(env, RVI | RVM | RVA | RVF | RVC | RVU);
698     env->priv_ver = PRIV_VERSION_1_10_0;
699 #ifndef CONFIG_USER_ONLY
700     set_satp_mode_max_supported(cpu, VM_1_10_MBARE);
701 #endif
702 
703     /* inherited from parent obj via riscv_cpu_init() */
704     cpu->cfg.ext_zifencei = true;
705     cpu->cfg.ext_zicsr = true;
706     cpu->cfg.pmp = true;
707 }
708 
709 static void rv32i_bare_cpu_init(Object *obj)
710 {
711     CPURISCVState *env = &RISCV_CPU(obj)->env;
712     riscv_cpu_set_misa_ext(env, RVI);
713 }
714 
715 static void rv32e_bare_cpu_init(Object *obj)
716 {
717     CPURISCVState *env = &RISCV_CPU(obj)->env;
718     riscv_cpu_set_misa_ext(env, RVE);
719 }
720 #endif
721 
722 static ObjectClass *riscv_cpu_class_by_name(const char *cpu_model)
723 {
724     ObjectClass *oc;
725     char *typename;
726     char **cpuname;
727 
728     cpuname = g_strsplit(cpu_model, ",", 1);
729     typename = g_strdup_printf(RISCV_CPU_TYPE_NAME("%s"), cpuname[0]);
730     oc = object_class_by_name(typename);
731     g_strfreev(cpuname);
732     g_free(typename);
733 
734     return oc;
735 }
736 
737 char *riscv_cpu_get_name(RISCVCPU *cpu)
738 {
739     RISCVCPUClass *rcc = RISCV_CPU_GET_CLASS(cpu);
740     const char *typename = object_class_get_name(OBJECT_CLASS(rcc));
741 
742     g_assert(g_str_has_suffix(typename, RISCV_CPU_TYPE_SUFFIX));
743 
744     return cpu_model_from_type(typename);
745 }
746 
747 static void riscv_cpu_dump_state(CPUState *cs, FILE *f, int flags)
748 {
749     RISCVCPU *cpu = RISCV_CPU(cs);
750     CPURISCVState *env = &cpu->env;
751     int i, j;
752     uint8_t *p;
753 
754 #if !defined(CONFIG_USER_ONLY)
755     if (riscv_has_ext(env, RVH)) {
756         qemu_fprintf(f, " %s %d\n", "V      =  ", env->virt_enabled);
757     }
758 #endif
759     qemu_fprintf(f, " %s " TARGET_FMT_lx "\n", "pc      ", env->pc);
760 #ifndef CONFIG_USER_ONLY
761     {
762         static const int dump_csrs[] = {
763             CSR_MHARTID,
764             CSR_MSTATUS,
765             CSR_MSTATUSH,
766             /*
767              * CSR_SSTATUS is intentionally omitted here as its value
768              * can be figured out by looking at CSR_MSTATUS
769              */
770             CSR_HSTATUS,
771             CSR_VSSTATUS,
772             CSR_MIP,
773             CSR_MIE,
774             CSR_MIDELEG,
775             CSR_HIDELEG,
776             CSR_MEDELEG,
777             CSR_HEDELEG,
778             CSR_MTVEC,
779             CSR_STVEC,
780             CSR_VSTVEC,
781             CSR_MEPC,
782             CSR_SEPC,
783             CSR_VSEPC,
784             CSR_MCAUSE,
785             CSR_SCAUSE,
786             CSR_VSCAUSE,
787             CSR_MTVAL,
788             CSR_STVAL,
789             CSR_HTVAL,
790             CSR_MTVAL2,
791             CSR_MSCRATCH,
792             CSR_SSCRATCH,
793             CSR_SATP,
794             CSR_MMTE,
795             CSR_UPMBASE,
796             CSR_UPMMASK,
797             CSR_SPMBASE,
798             CSR_SPMMASK,
799             CSR_MPMBASE,
800             CSR_MPMMASK,
801         };
802 
803         for (i = 0; i < ARRAY_SIZE(dump_csrs); ++i) {
804             int csrno = dump_csrs[i];
805             target_ulong val = 0;
806             RISCVException res = riscv_csrrw_debug(env, csrno, &val, 0, 0);
807 
808             /*
809              * Rely on the smode, hmode, etc, predicates within csr.c
810              * to do the filtering of the registers that are present.
811              */
812             if (res == RISCV_EXCP_NONE) {
813                 qemu_fprintf(f, " %-8s " TARGET_FMT_lx "\n",
814                              csr_ops[csrno].name, val);
815             }
816         }
817     }
818 #endif
819 
820     for (i = 0; i < 32; i++) {
821         qemu_fprintf(f, " %-8s " TARGET_FMT_lx,
822                      riscv_int_regnames[i], env->gpr[i]);
823         if ((i & 3) == 3) {
824             qemu_fprintf(f, "\n");
825         }
826     }
827     if (flags & CPU_DUMP_FPU) {
828         target_ulong val = 0;
829         RISCVException res = riscv_csrrw_debug(env, CSR_FCSR, &val, 0, 0);
830         if (res == RISCV_EXCP_NONE) {
831             qemu_fprintf(f, " %-8s " TARGET_FMT_lx "\n",
832                     csr_ops[CSR_FCSR].name, val);
833         }
834         for (i = 0; i < 32; i++) {
835             qemu_fprintf(f, " %-8s %016" PRIx64,
836                          riscv_fpr_regnames[i], env->fpr[i]);
837             if ((i & 3) == 3) {
838                 qemu_fprintf(f, "\n");
839             }
840         }
841     }
842     if (riscv_has_ext(env, RVV) && (flags & CPU_DUMP_VPU)) {
843         static const int dump_rvv_csrs[] = {
844                     CSR_VSTART,
845                     CSR_VXSAT,
846                     CSR_VXRM,
847                     CSR_VCSR,
848                     CSR_VL,
849                     CSR_VTYPE,
850                     CSR_VLENB,
851                 };
852         for (i = 0; i < ARRAY_SIZE(dump_rvv_csrs); ++i) {
853             int csrno = dump_rvv_csrs[i];
854             target_ulong val = 0;
855             RISCVException res = riscv_csrrw_debug(env, csrno, &val, 0, 0);
856 
857             /*
858              * Rely on the smode, hmode, etc, predicates within csr.c
859              * to do the filtering of the registers that are present.
860              */
861             if (res == RISCV_EXCP_NONE) {
862                 qemu_fprintf(f, " %-8s " TARGET_FMT_lx "\n",
863                              csr_ops[csrno].name, val);
864             }
865         }
866         uint16_t vlenb = cpu->cfg.vlenb;
867 
868         for (i = 0; i < 32; i++) {
869             qemu_fprintf(f, " %-8s ", riscv_rvv_regnames[i]);
870             p = (uint8_t *)env->vreg;
871             for (j = vlenb - 1 ; j >= 0; j--) {
872                 qemu_fprintf(f, "%02x", *(p + i * vlenb + BYTE(j)));
873             }
874             qemu_fprintf(f, "\n");
875         }
876     }
877 }
878 
879 static void riscv_cpu_set_pc(CPUState *cs, vaddr value)
880 {
881     RISCVCPU *cpu = RISCV_CPU(cs);
882     CPURISCVState *env = &cpu->env;
883 
884     if (env->xl == MXL_RV32) {
885         env->pc = (int32_t)value;
886     } else {
887         env->pc = value;
888     }
889 }
890 
891 static vaddr riscv_cpu_get_pc(CPUState *cs)
892 {
893     RISCVCPU *cpu = RISCV_CPU(cs);
894     CPURISCVState *env = &cpu->env;
895 
896     /* Match cpu_get_tb_cpu_state. */
897     if (env->xl == MXL_RV32) {
898         return env->pc & UINT32_MAX;
899     }
900     return env->pc;
901 }
902 
903 bool riscv_cpu_has_work(CPUState *cs)
904 {
905 #ifndef CONFIG_USER_ONLY
906     RISCVCPU *cpu = RISCV_CPU(cs);
907     CPURISCVState *env = &cpu->env;
908     /*
909      * Definition of the WFI instruction requires it to ignore the privilege
910      * mode and delegation registers, but respect individual enables
911      */
912     return riscv_cpu_all_pending(env) != 0 ||
913         riscv_cpu_sirq_pending(env) != RISCV_EXCP_NONE ||
914         riscv_cpu_vsirq_pending(env) != RISCV_EXCP_NONE;
915 #else
916     return true;
917 #endif
918 }
919 
920 static int riscv_cpu_mmu_index(CPUState *cs, bool ifetch)
921 {
922     return riscv_env_mmu_index(cpu_env(cs), ifetch);
923 }
924 
925 static void riscv_cpu_reset_hold(Object *obj, ResetType type)
926 {
927 #ifndef CONFIG_USER_ONLY
928     uint8_t iprio;
929     int i, irq, rdzero;
930 #endif
931     CPUState *cs = CPU(obj);
932     RISCVCPU *cpu = RISCV_CPU(cs);
933     RISCVCPUClass *mcc = RISCV_CPU_GET_CLASS(obj);
934     CPURISCVState *env = &cpu->env;
935 
936     if (mcc->parent_phases.hold) {
937         mcc->parent_phases.hold(obj, type);
938     }
939 #ifndef CONFIG_USER_ONLY
940     env->misa_mxl = mcc->misa_mxl_max;
941     env->priv = PRV_M;
942     env->mstatus &= ~(MSTATUS_MIE | MSTATUS_MPRV);
943     if (env->misa_mxl > MXL_RV32) {
944         /*
945          * The reset status of SXL/UXL is undefined, but mstatus is WARL
946          * and we must ensure that the value after init is valid for read.
947          */
948         env->mstatus = set_field(env->mstatus, MSTATUS64_SXL, env->misa_mxl);
949         env->mstatus = set_field(env->mstatus, MSTATUS64_UXL, env->misa_mxl);
950         if (riscv_has_ext(env, RVH)) {
951             env->vsstatus = set_field(env->vsstatus,
952                                       MSTATUS64_SXL, env->misa_mxl);
953             env->vsstatus = set_field(env->vsstatus,
954                                       MSTATUS64_UXL, env->misa_mxl);
955             env->mstatus_hs = set_field(env->mstatus_hs,
956                                         MSTATUS64_SXL, env->misa_mxl);
957             env->mstatus_hs = set_field(env->mstatus_hs,
958                                         MSTATUS64_UXL, env->misa_mxl);
959         }
960     }
961     env->mcause = 0;
962     env->miclaim = MIP_SGEIP;
963     env->pc = env->resetvec;
964     env->bins = 0;
965     env->two_stage_lookup = false;
966 
967     env->menvcfg = (cpu->cfg.ext_svpbmt ? MENVCFG_PBMTE : 0) |
968                    (!cpu->cfg.ext_svade && cpu->cfg.ext_svadu ?
969                     MENVCFG_ADUE : 0);
970     env->henvcfg = 0;
971 
972     /* Initialized default priorities of local interrupts. */
973     for (i = 0; i < ARRAY_SIZE(env->miprio); i++) {
974         iprio = riscv_cpu_default_priority(i);
975         env->miprio[i] = (i == IRQ_M_EXT) ? 0 : iprio;
976         env->siprio[i] = (i == IRQ_S_EXT) ? 0 : iprio;
977         env->hviprio[i] = 0;
978     }
979     i = 0;
980     while (!riscv_cpu_hviprio_index2irq(i, &irq, &rdzero)) {
981         if (!rdzero) {
982             env->hviprio[irq] = env->miprio[irq];
983         }
984         i++;
985     }
986     /* mmte is supposed to have pm.current hardwired to 1 */
987     env->mmte |= (EXT_STATUS_INITIAL | MMTE_M_PM_CURRENT);
988 
989     /*
990      * Bits 10, 6, 2 and 12 of mideleg are read only 1 when the Hypervisor
991      * extension is enabled.
992      */
993     if (riscv_has_ext(env, RVH)) {
994         env->mideleg |= HS_MODE_INTERRUPTS;
995     }
996 
997     /*
998      * Clear mseccfg and unlock all the PMP entries upon reset.
999      * This is allowed as per the priv and smepmp specifications
1000      * and is needed to clear stale entries across reboots.
1001      */
1002     if (riscv_cpu_cfg(env)->ext_smepmp) {
1003         env->mseccfg = 0;
1004     }
1005 
1006     pmp_unlock_entries(env);
1007 #endif
1008     env->xl = riscv_cpu_mxl(env);
1009     riscv_cpu_update_mask(env);
1010     cs->exception_index = RISCV_EXCP_NONE;
1011     env->load_res = -1;
1012     set_default_nan_mode(1, &env->fp_status);
1013 
1014 #ifndef CONFIG_USER_ONLY
1015     if (cpu->cfg.debug) {
1016         riscv_trigger_reset_hold(env);
1017     }
1018 
1019     if (kvm_enabled()) {
1020         kvm_riscv_reset_vcpu(cpu);
1021     }
1022 #endif
1023 }
1024 
1025 static void riscv_cpu_disas_set_info(CPUState *s, disassemble_info *info)
1026 {
1027     RISCVCPU *cpu = RISCV_CPU(s);
1028     CPURISCVState *env = &cpu->env;
1029     info->target_info = &cpu->cfg;
1030 
1031     switch (env->xl) {
1032     case MXL_RV32:
1033         info->print_insn = print_insn_riscv32;
1034         break;
1035     case MXL_RV64:
1036         info->print_insn = print_insn_riscv64;
1037         break;
1038     case MXL_RV128:
1039         info->print_insn = print_insn_riscv128;
1040         break;
1041     default:
1042         g_assert_not_reached();
1043     }
1044 }
1045 
1046 #ifndef CONFIG_USER_ONLY
1047 static void riscv_cpu_satp_mode_finalize(RISCVCPU *cpu, Error **errp)
1048 {
1049     bool rv32 = riscv_cpu_is_32bit(cpu);
1050     uint8_t satp_mode_map_max, satp_mode_supported_max;
1051 
1052     /* The CPU wants the OS to decide which satp mode to use */
1053     if (cpu->cfg.satp_mode.supported == 0) {
1054         return;
1055     }
1056 
1057     satp_mode_supported_max =
1058                     satp_mode_max_from_map(cpu->cfg.satp_mode.supported);
1059 
1060     if (cpu->cfg.satp_mode.map == 0) {
1061         if (cpu->cfg.satp_mode.init == 0) {
1062             /* If unset by the user, we fallback to the default satp mode. */
1063             set_satp_mode_default_map(cpu);
1064         } else {
1065             /*
1066              * Find the lowest level that was disabled and then enable the
1067              * first valid level below which can be found in
1068              * valid_vm_1_10_32/64.
1069              */
1070             for (int i = 1; i < 16; ++i) {
1071                 if ((cpu->cfg.satp_mode.init & (1 << i)) &&
1072                     (cpu->cfg.satp_mode.supported & (1 << i))) {
1073                     for (int j = i - 1; j >= 0; --j) {
1074                         if (cpu->cfg.satp_mode.supported & (1 << j)) {
1075                             cpu->cfg.satp_mode.map |= (1 << j);
1076                             break;
1077                         }
1078                     }
1079                     break;
1080                 }
1081             }
1082         }
1083     }
1084 
1085     satp_mode_map_max = satp_mode_max_from_map(cpu->cfg.satp_mode.map);
1086 
1087     /* Make sure the user asked for a supported configuration (HW and qemu) */
1088     if (satp_mode_map_max > satp_mode_supported_max) {
1089         error_setg(errp, "satp_mode %s is higher than hw max capability %s",
1090                    satp_mode_str(satp_mode_map_max, rv32),
1091                    satp_mode_str(satp_mode_supported_max, rv32));
1092         return;
1093     }
1094 
1095     /*
1096      * Make sure the user did not ask for an invalid configuration as per
1097      * the specification.
1098      */
1099     if (!rv32) {
1100         for (int i = satp_mode_map_max - 1; i >= 0; --i) {
1101             if (!(cpu->cfg.satp_mode.map & (1 << i)) &&
1102                 (cpu->cfg.satp_mode.init & (1 << i)) &&
1103                 (cpu->cfg.satp_mode.supported & (1 << i))) {
1104                 error_setg(errp, "cannot disable %s satp mode if %s "
1105                            "is enabled", satp_mode_str(i, false),
1106                            satp_mode_str(satp_mode_map_max, false));
1107                 return;
1108             }
1109         }
1110     }
1111 
1112     /* Finally expand the map so that all valid modes are set */
1113     for (int i = satp_mode_map_max - 1; i >= 0; --i) {
1114         if (cpu->cfg.satp_mode.supported & (1 << i)) {
1115             cpu->cfg.satp_mode.map |= (1 << i);
1116         }
1117     }
1118 }
1119 #endif
1120 
1121 void riscv_cpu_finalize_features(RISCVCPU *cpu, Error **errp)
1122 {
1123     Error *local_err = NULL;
1124 
1125 #ifndef CONFIG_USER_ONLY
1126     riscv_cpu_satp_mode_finalize(cpu, &local_err);
1127     if (local_err != NULL) {
1128         error_propagate(errp, local_err);
1129         return;
1130     }
1131 #endif
1132 
1133     if (tcg_enabled()) {
1134         riscv_tcg_cpu_finalize_features(cpu, &local_err);
1135         if (local_err != NULL) {
1136             error_propagate(errp, local_err);
1137             return;
1138         }
1139         riscv_tcg_cpu_finalize_dynamic_decoder(cpu);
1140     } else if (kvm_enabled()) {
1141         riscv_kvm_cpu_finalize_features(cpu, &local_err);
1142         if (local_err != NULL) {
1143             error_propagate(errp, local_err);
1144             return;
1145         }
1146     }
1147 }
1148 
1149 static void riscv_cpu_realize(DeviceState *dev, Error **errp)
1150 {
1151     CPUState *cs = CPU(dev);
1152     RISCVCPU *cpu = RISCV_CPU(dev);
1153     RISCVCPUClass *mcc = RISCV_CPU_GET_CLASS(dev);
1154     Error *local_err = NULL;
1155 
1156     cpu_exec_realizefn(cs, &local_err);
1157     if (local_err != NULL) {
1158         error_propagate(errp, local_err);
1159         return;
1160     }
1161 
1162     riscv_cpu_finalize_features(cpu, &local_err);
1163     if (local_err != NULL) {
1164         error_propagate(errp, local_err);
1165         return;
1166     }
1167 
1168     riscv_cpu_register_gdb_regs_for_features(cs);
1169 
1170 #ifndef CONFIG_USER_ONLY
1171     if (cpu->cfg.debug) {
1172         riscv_trigger_realize(&cpu->env);
1173     }
1174 #endif
1175 
1176     qemu_init_vcpu(cs);
1177     cpu_reset(cs);
1178 
1179     mcc->parent_realize(dev, errp);
1180 }
1181 
1182 bool riscv_cpu_accelerator_compatible(RISCVCPU *cpu)
1183 {
1184     if (tcg_enabled()) {
1185         return riscv_cpu_tcg_compatible(cpu);
1186     }
1187 
1188     return true;
1189 }
1190 
1191 #ifndef CONFIG_USER_ONLY
1192 static void cpu_riscv_get_satp(Object *obj, Visitor *v, const char *name,
1193                                void *opaque, Error **errp)
1194 {
1195     RISCVSATPMap *satp_map = opaque;
1196     uint8_t satp = satp_mode_from_str(name);
1197     bool value;
1198 
1199     value = satp_map->map & (1 << satp);
1200 
1201     visit_type_bool(v, name, &value, errp);
1202 }
1203 
1204 static void cpu_riscv_set_satp(Object *obj, Visitor *v, const char *name,
1205                                void *opaque, Error **errp)
1206 {
1207     RISCVSATPMap *satp_map = opaque;
1208     uint8_t satp = satp_mode_from_str(name);
1209     bool value;
1210 
1211     if (!visit_type_bool(v, name, &value, errp)) {
1212         return;
1213     }
1214 
1215     satp_map->map = deposit32(satp_map->map, satp, 1, value);
1216     satp_map->init |= 1 << satp;
1217 }
1218 
1219 void riscv_add_satp_mode_properties(Object *obj)
1220 {
1221     RISCVCPU *cpu = RISCV_CPU(obj);
1222 
1223     if (cpu->env.misa_mxl == MXL_RV32) {
1224         object_property_add(obj, "sv32", "bool", cpu_riscv_get_satp,
1225                             cpu_riscv_set_satp, NULL, &cpu->cfg.satp_mode);
1226     } else {
1227         object_property_add(obj, "sv39", "bool", cpu_riscv_get_satp,
1228                             cpu_riscv_set_satp, NULL, &cpu->cfg.satp_mode);
1229         object_property_add(obj, "sv48", "bool", cpu_riscv_get_satp,
1230                             cpu_riscv_set_satp, NULL, &cpu->cfg.satp_mode);
1231         object_property_add(obj, "sv57", "bool", cpu_riscv_get_satp,
1232                             cpu_riscv_set_satp, NULL, &cpu->cfg.satp_mode);
1233         object_property_add(obj, "sv64", "bool", cpu_riscv_get_satp,
1234                             cpu_riscv_set_satp, NULL, &cpu->cfg.satp_mode);
1235     }
1236 }
1237 
1238 static void riscv_cpu_set_irq(void *opaque, int irq, int level)
1239 {
1240     RISCVCPU *cpu = RISCV_CPU(opaque);
1241     CPURISCVState *env = &cpu->env;
1242 
1243     if (irq < IRQ_LOCAL_MAX) {
1244         switch (irq) {
1245         case IRQ_U_SOFT:
1246         case IRQ_S_SOFT:
1247         case IRQ_VS_SOFT:
1248         case IRQ_M_SOFT:
1249         case IRQ_U_TIMER:
1250         case IRQ_S_TIMER:
1251         case IRQ_VS_TIMER:
1252         case IRQ_M_TIMER:
1253         case IRQ_U_EXT:
1254         case IRQ_VS_EXT:
1255         case IRQ_M_EXT:
1256             if (kvm_enabled()) {
1257                 kvm_riscv_set_irq(cpu, irq, level);
1258             } else {
1259                 riscv_cpu_update_mip(env, 1 << irq, BOOL_TO_MASK(level));
1260             }
1261              break;
1262         case IRQ_S_EXT:
1263             if (kvm_enabled()) {
1264                 kvm_riscv_set_irq(cpu, irq, level);
1265             } else {
1266                 env->external_seip = level;
1267                 riscv_cpu_update_mip(env, 1 << irq,
1268                                      BOOL_TO_MASK(level | env->software_seip));
1269             }
1270             break;
1271         default:
1272             g_assert_not_reached();
1273         }
1274     } else if (irq < (IRQ_LOCAL_MAX + IRQ_LOCAL_GUEST_MAX)) {
1275         /* Require H-extension for handling guest local interrupts */
1276         if (!riscv_has_ext(env, RVH)) {
1277             g_assert_not_reached();
1278         }
1279 
1280         /* Compute bit position in HGEIP CSR */
1281         irq = irq - IRQ_LOCAL_MAX + 1;
1282         if (env->geilen < irq) {
1283             g_assert_not_reached();
1284         }
1285 
1286         /* Update HGEIP CSR */
1287         env->hgeip &= ~((target_ulong)1 << irq);
1288         if (level) {
1289             env->hgeip |= (target_ulong)1 << irq;
1290         }
1291 
1292         /* Update mip.SGEIP bit */
1293         riscv_cpu_update_mip(env, MIP_SGEIP,
1294                              BOOL_TO_MASK(!!(env->hgeie & env->hgeip)));
1295     } else {
1296         g_assert_not_reached();
1297     }
1298 }
1299 #endif /* CONFIG_USER_ONLY */
1300 
1301 static bool riscv_cpu_is_dynamic(Object *cpu_obj)
1302 {
1303     return object_dynamic_cast(cpu_obj, TYPE_RISCV_DYNAMIC_CPU) != NULL;
1304 }
1305 
1306 static void riscv_cpu_post_init(Object *obj)
1307 {
1308     accel_cpu_instance_init(CPU(obj));
1309 }
1310 
1311 static void riscv_cpu_init(Object *obj)
1312 {
1313     RISCVCPUClass *mcc = RISCV_CPU_GET_CLASS(obj);
1314     RISCVCPU *cpu = RISCV_CPU(obj);
1315     CPURISCVState *env = &cpu->env;
1316 
1317     env->misa_mxl = mcc->misa_mxl_max;
1318 
1319 #ifndef CONFIG_USER_ONLY
1320     qdev_init_gpio_in(DEVICE(obj), riscv_cpu_set_irq,
1321                       IRQ_LOCAL_MAX + IRQ_LOCAL_GUEST_MAX);
1322 #endif /* CONFIG_USER_ONLY */
1323 
1324     general_user_opts = g_hash_table_new(g_str_hash, g_str_equal);
1325 
1326     /*
1327      * The timer and performance counters extensions were supported
1328      * in QEMU before they were added as discrete extensions in the
1329      * ISA. To keep compatibility we'll always default them to 'true'
1330      * for all CPUs. Each accelerator will decide what to do when
1331      * users disable them.
1332      */
1333     RISCV_CPU(obj)->cfg.ext_zicntr = true;
1334     RISCV_CPU(obj)->cfg.ext_zihpm = true;
1335 
1336     /* Default values for non-bool cpu properties */
1337     cpu->cfg.pmu_mask = MAKE_64BIT_MASK(3, 16);
1338     cpu->cfg.vlenb = 128 >> 3;
1339     cpu->cfg.elen = 64;
1340     cpu->cfg.cbom_blocksize = 64;
1341     cpu->cfg.cbop_blocksize = 64;
1342     cpu->cfg.cboz_blocksize = 64;
1343     cpu->env.vext_ver = VEXT_VERSION_1_00_0;
1344 }
1345 
1346 static void riscv_bare_cpu_init(Object *obj)
1347 {
1348     RISCVCPU *cpu = RISCV_CPU(obj);
1349 
1350     /*
1351      * Bare CPUs do not inherit the timer and performance
1352      * counters from the parent class (see riscv_cpu_init()
1353      * for info on why the parent enables them).
1354      *
1355      * Users have to explicitly enable these counters for
1356      * bare CPUs.
1357      */
1358     cpu->cfg.ext_zicntr = false;
1359     cpu->cfg.ext_zihpm = false;
1360 
1361     /* Set to QEMU's first supported priv version */
1362     cpu->env.priv_ver = PRIV_VERSION_1_10_0;
1363 
1364     /*
1365      * Support all available satp_mode settings. The default
1366      * value will be set to MBARE if the user doesn't set
1367      * satp_mode manually (see set_satp_mode_default()).
1368      */
1369 #ifndef CONFIG_USER_ONLY
1370     set_satp_mode_max_supported(cpu, VM_1_10_SV64);
1371 #endif
1372 }
1373 
1374 typedef struct misa_ext_info {
1375     const char *name;
1376     const char *description;
1377 } MISAExtInfo;
1378 
1379 #define MISA_INFO_IDX(_bit) \
1380     __builtin_ctz(_bit)
1381 
1382 #define MISA_EXT_INFO(_bit, _propname, _descr) \
1383     [MISA_INFO_IDX(_bit)] = {.name = _propname, .description = _descr}
1384 
1385 static const MISAExtInfo misa_ext_info_arr[] = {
1386     MISA_EXT_INFO(RVA, "a", "Atomic instructions"),
1387     MISA_EXT_INFO(RVC, "c", "Compressed instructions"),
1388     MISA_EXT_INFO(RVD, "d", "Double-precision float point"),
1389     MISA_EXT_INFO(RVF, "f", "Single-precision float point"),
1390     MISA_EXT_INFO(RVI, "i", "Base integer instruction set"),
1391     MISA_EXT_INFO(RVE, "e", "Base integer instruction set (embedded)"),
1392     MISA_EXT_INFO(RVM, "m", "Integer multiplication and division"),
1393     MISA_EXT_INFO(RVS, "s", "Supervisor-level instructions"),
1394     MISA_EXT_INFO(RVU, "u", "User-level instructions"),
1395     MISA_EXT_INFO(RVH, "h", "Hypervisor"),
1396     MISA_EXT_INFO(RVJ, "x-j", "Dynamic translated languages"),
1397     MISA_EXT_INFO(RVV, "v", "Vector operations"),
1398     MISA_EXT_INFO(RVG, "g", "General purpose (IMAFD_Zicsr_Zifencei)"),
1399     MISA_EXT_INFO(RVB, "b", "Bit manipulation (Zba_Zbb_Zbs)")
1400 };
1401 
1402 static void riscv_cpu_validate_misa_mxl(RISCVCPUClass *mcc)
1403 {
1404     CPUClass *cc = CPU_CLASS(mcc);
1405 
1406     /* Validate that MISA_MXL is set properly. */
1407     switch (mcc->misa_mxl_max) {
1408 #ifdef TARGET_RISCV64
1409     case MXL_RV64:
1410     case MXL_RV128:
1411         cc->gdb_core_xml_file = "riscv-64bit-cpu.xml";
1412         break;
1413 #endif
1414     case MXL_RV32:
1415         cc->gdb_core_xml_file = "riscv-32bit-cpu.xml";
1416         break;
1417     default:
1418         g_assert_not_reached();
1419     }
1420 }
1421 
1422 static int riscv_validate_misa_info_idx(uint32_t bit)
1423 {
1424     int idx;
1425 
1426     /*
1427      * Our lowest valid input (RVA) is 1 and
1428      * __builtin_ctz() is UB with zero.
1429      */
1430     g_assert(bit != 0);
1431     idx = MISA_INFO_IDX(bit);
1432 
1433     g_assert(idx < ARRAY_SIZE(misa_ext_info_arr));
1434     return idx;
1435 }
1436 
1437 const char *riscv_get_misa_ext_name(uint32_t bit)
1438 {
1439     int idx = riscv_validate_misa_info_idx(bit);
1440     const char *val = misa_ext_info_arr[idx].name;
1441 
1442     g_assert(val != NULL);
1443     return val;
1444 }
1445 
1446 const char *riscv_get_misa_ext_description(uint32_t bit)
1447 {
1448     int idx = riscv_validate_misa_info_idx(bit);
1449     const char *val = misa_ext_info_arr[idx].description;
1450 
1451     g_assert(val != NULL);
1452     return val;
1453 }
1454 
1455 #define MULTI_EXT_CFG_BOOL(_name, _prop, _defval) \
1456     {.name = _name, .offset = CPU_CFG_OFFSET(_prop), \
1457      .enabled = _defval}
1458 
1459 const RISCVCPUMultiExtConfig riscv_cpu_extensions[] = {
1460     /* Defaults for standard extensions */
1461     MULTI_EXT_CFG_BOOL("sscofpmf", ext_sscofpmf, false),
1462     MULTI_EXT_CFG_BOOL("smcntrpmf", ext_smcntrpmf, false),
1463     MULTI_EXT_CFG_BOOL("zifencei", ext_zifencei, true),
1464     MULTI_EXT_CFG_BOOL("zicsr", ext_zicsr, true),
1465     MULTI_EXT_CFG_BOOL("zihintntl", ext_zihintntl, true),
1466     MULTI_EXT_CFG_BOOL("zihintpause", ext_zihintpause, true),
1467     MULTI_EXT_CFG_BOOL("zimop", ext_zimop, false),
1468     MULTI_EXT_CFG_BOOL("zcmop", ext_zcmop, false),
1469     MULTI_EXT_CFG_BOOL("zacas", ext_zacas, false),
1470     MULTI_EXT_CFG_BOOL("zama16b", ext_zama16b, false),
1471     MULTI_EXT_CFG_BOOL("zabha", ext_zabha, false),
1472     MULTI_EXT_CFG_BOOL("zaamo", ext_zaamo, false),
1473     MULTI_EXT_CFG_BOOL("zalrsc", ext_zalrsc, false),
1474     MULTI_EXT_CFG_BOOL("zawrs", ext_zawrs, true),
1475     MULTI_EXT_CFG_BOOL("zfa", ext_zfa, true),
1476     MULTI_EXT_CFG_BOOL("zfbfmin", ext_zfbfmin, false),
1477     MULTI_EXT_CFG_BOOL("zfh", ext_zfh, false),
1478     MULTI_EXT_CFG_BOOL("zfhmin", ext_zfhmin, false),
1479     MULTI_EXT_CFG_BOOL("zve32f", ext_zve32f, false),
1480     MULTI_EXT_CFG_BOOL("zve32x", ext_zve32x, false),
1481     MULTI_EXT_CFG_BOOL("zve64f", ext_zve64f, false),
1482     MULTI_EXT_CFG_BOOL("zve64d", ext_zve64d, false),
1483     MULTI_EXT_CFG_BOOL("zve64x", ext_zve64x, false),
1484     MULTI_EXT_CFG_BOOL("zvfbfmin", ext_zvfbfmin, false),
1485     MULTI_EXT_CFG_BOOL("zvfbfwma", ext_zvfbfwma, false),
1486     MULTI_EXT_CFG_BOOL("zvfh", ext_zvfh, false),
1487     MULTI_EXT_CFG_BOOL("zvfhmin", ext_zvfhmin, false),
1488     MULTI_EXT_CFG_BOOL("sstc", ext_sstc, true),
1489 
1490     MULTI_EXT_CFG_BOOL("smaia", ext_smaia, false),
1491     MULTI_EXT_CFG_BOOL("smepmp", ext_smepmp, false),
1492     MULTI_EXT_CFG_BOOL("smstateen", ext_smstateen, false),
1493     MULTI_EXT_CFG_BOOL("ssaia", ext_ssaia, false),
1494     MULTI_EXT_CFG_BOOL("svade", ext_svade, false),
1495     MULTI_EXT_CFG_BOOL("svadu", ext_svadu, true),
1496     MULTI_EXT_CFG_BOOL("svinval", ext_svinval, false),
1497     MULTI_EXT_CFG_BOOL("svnapot", ext_svnapot, false),
1498     MULTI_EXT_CFG_BOOL("svpbmt", ext_svpbmt, false),
1499     MULTI_EXT_CFG_BOOL("svvptc", ext_svvptc, true),
1500 
1501     MULTI_EXT_CFG_BOOL("zicntr", ext_zicntr, true),
1502     MULTI_EXT_CFG_BOOL("zihpm", ext_zihpm, true),
1503 
1504     MULTI_EXT_CFG_BOOL("zba", ext_zba, true),
1505     MULTI_EXT_CFG_BOOL("zbb", ext_zbb, true),
1506     MULTI_EXT_CFG_BOOL("zbc", ext_zbc, true),
1507     MULTI_EXT_CFG_BOOL("zbkb", ext_zbkb, false),
1508     MULTI_EXT_CFG_BOOL("zbkc", ext_zbkc, false),
1509     MULTI_EXT_CFG_BOOL("zbkx", ext_zbkx, false),
1510     MULTI_EXT_CFG_BOOL("zbs", ext_zbs, true),
1511     MULTI_EXT_CFG_BOOL("zk", ext_zk, false),
1512     MULTI_EXT_CFG_BOOL("zkn", ext_zkn, false),
1513     MULTI_EXT_CFG_BOOL("zknd", ext_zknd, false),
1514     MULTI_EXT_CFG_BOOL("zkne", ext_zkne, false),
1515     MULTI_EXT_CFG_BOOL("zknh", ext_zknh, false),
1516     MULTI_EXT_CFG_BOOL("zkr", ext_zkr, false),
1517     MULTI_EXT_CFG_BOOL("zks", ext_zks, false),
1518     MULTI_EXT_CFG_BOOL("zksed", ext_zksed, false),
1519     MULTI_EXT_CFG_BOOL("zksh", ext_zksh, false),
1520     MULTI_EXT_CFG_BOOL("zkt", ext_zkt, false),
1521     MULTI_EXT_CFG_BOOL("ztso", ext_ztso, false),
1522 
1523     MULTI_EXT_CFG_BOOL("zdinx", ext_zdinx, false),
1524     MULTI_EXT_CFG_BOOL("zfinx", ext_zfinx, false),
1525     MULTI_EXT_CFG_BOOL("zhinx", ext_zhinx, false),
1526     MULTI_EXT_CFG_BOOL("zhinxmin", ext_zhinxmin, false),
1527 
1528     MULTI_EXT_CFG_BOOL("zicbom", ext_zicbom, true),
1529     MULTI_EXT_CFG_BOOL("zicbop", ext_zicbop, true),
1530     MULTI_EXT_CFG_BOOL("zicboz", ext_zicboz, true),
1531 
1532     MULTI_EXT_CFG_BOOL("zmmul", ext_zmmul, false),
1533 
1534     MULTI_EXT_CFG_BOOL("zca", ext_zca, false),
1535     MULTI_EXT_CFG_BOOL("zcb", ext_zcb, false),
1536     MULTI_EXT_CFG_BOOL("zcd", ext_zcd, false),
1537     MULTI_EXT_CFG_BOOL("zce", ext_zce, false),
1538     MULTI_EXT_CFG_BOOL("zcf", ext_zcf, false),
1539     MULTI_EXT_CFG_BOOL("zcmp", ext_zcmp, false),
1540     MULTI_EXT_CFG_BOOL("zcmt", ext_zcmt, false),
1541     MULTI_EXT_CFG_BOOL("zicond", ext_zicond, false),
1542 
1543     /* Vector cryptography extensions */
1544     MULTI_EXT_CFG_BOOL("zvbb", ext_zvbb, false),
1545     MULTI_EXT_CFG_BOOL("zvbc", ext_zvbc, false),
1546     MULTI_EXT_CFG_BOOL("zvkb", ext_zvkb, false),
1547     MULTI_EXT_CFG_BOOL("zvkg", ext_zvkg, false),
1548     MULTI_EXT_CFG_BOOL("zvkned", ext_zvkned, false),
1549     MULTI_EXT_CFG_BOOL("zvknha", ext_zvknha, false),
1550     MULTI_EXT_CFG_BOOL("zvknhb", ext_zvknhb, false),
1551     MULTI_EXT_CFG_BOOL("zvksed", ext_zvksed, false),
1552     MULTI_EXT_CFG_BOOL("zvksh", ext_zvksh, false),
1553     MULTI_EXT_CFG_BOOL("zvkt", ext_zvkt, false),
1554     MULTI_EXT_CFG_BOOL("zvkn", ext_zvkn, false),
1555     MULTI_EXT_CFG_BOOL("zvknc", ext_zvknc, false),
1556     MULTI_EXT_CFG_BOOL("zvkng", ext_zvkng, false),
1557     MULTI_EXT_CFG_BOOL("zvks", ext_zvks, false),
1558     MULTI_EXT_CFG_BOOL("zvksc", ext_zvksc, false),
1559     MULTI_EXT_CFG_BOOL("zvksg", ext_zvksg, false),
1560 
1561     DEFINE_PROP_END_OF_LIST(),
1562 };
1563 
1564 const RISCVCPUMultiExtConfig riscv_cpu_vendor_exts[] = {
1565     MULTI_EXT_CFG_BOOL("xtheadba", ext_xtheadba, false),
1566     MULTI_EXT_CFG_BOOL("xtheadbb", ext_xtheadbb, false),
1567     MULTI_EXT_CFG_BOOL("xtheadbs", ext_xtheadbs, false),
1568     MULTI_EXT_CFG_BOOL("xtheadcmo", ext_xtheadcmo, false),
1569     MULTI_EXT_CFG_BOOL("xtheadcondmov", ext_xtheadcondmov, false),
1570     MULTI_EXT_CFG_BOOL("xtheadfmemidx", ext_xtheadfmemidx, false),
1571     MULTI_EXT_CFG_BOOL("xtheadfmv", ext_xtheadfmv, false),
1572     MULTI_EXT_CFG_BOOL("xtheadmac", ext_xtheadmac, false),
1573     MULTI_EXT_CFG_BOOL("xtheadmemidx", ext_xtheadmemidx, false),
1574     MULTI_EXT_CFG_BOOL("xtheadmempair", ext_xtheadmempair, false),
1575     MULTI_EXT_CFG_BOOL("xtheadsync", ext_xtheadsync, false),
1576     MULTI_EXT_CFG_BOOL("xventanacondops", ext_XVentanaCondOps, false),
1577 
1578     DEFINE_PROP_END_OF_LIST(),
1579 };
1580 
1581 /* These are experimental, so they are marked with an 'x-' prefix */
1582 const RISCVCPUMultiExtConfig riscv_cpu_experimental_exts[] = {
1583     DEFINE_PROP_END_OF_LIST(),
1584 };
1585 
1586 /*
1587  * 'Named features' is the name we give to extensions that we
1588  * don't want to expose to users. They are either immutable
1589  * (always enabled/disabled) or they vary depending on
1590  * the resulting CPU state. They have riscv,isa strings
1591  * and priv_ver like regular extensions.
1592  */
1593 const RISCVCPUMultiExtConfig riscv_cpu_named_features[] = {
1594     MULTI_EXT_CFG_BOOL("zic64b", ext_zic64b, true),
1595 
1596     DEFINE_PROP_END_OF_LIST(),
1597 };
1598 
1599 /* Deprecated entries marked for future removal */
1600 const RISCVCPUMultiExtConfig riscv_cpu_deprecated_exts[] = {
1601     MULTI_EXT_CFG_BOOL("Zifencei", ext_zifencei, true),
1602     MULTI_EXT_CFG_BOOL("Zicsr", ext_zicsr, true),
1603     MULTI_EXT_CFG_BOOL("Zihintntl", ext_zihintntl, true),
1604     MULTI_EXT_CFG_BOOL("Zihintpause", ext_zihintpause, true),
1605     MULTI_EXT_CFG_BOOL("Zawrs", ext_zawrs, true),
1606     MULTI_EXT_CFG_BOOL("Zfa", ext_zfa, true),
1607     MULTI_EXT_CFG_BOOL("Zfh", ext_zfh, false),
1608     MULTI_EXT_CFG_BOOL("Zfhmin", ext_zfhmin, false),
1609     MULTI_EXT_CFG_BOOL("Zve32f", ext_zve32f, false),
1610     MULTI_EXT_CFG_BOOL("Zve64f", ext_zve64f, false),
1611     MULTI_EXT_CFG_BOOL("Zve64d", ext_zve64d, false),
1612 
1613     DEFINE_PROP_END_OF_LIST(),
1614 };
1615 
1616 static void cpu_set_prop_err(RISCVCPU *cpu, const char *propname,
1617                              Error **errp)
1618 {
1619     g_autofree char *cpuname = riscv_cpu_get_name(cpu);
1620     error_setg(errp, "CPU '%s' does not allow changing the value of '%s'",
1621                cpuname, propname);
1622 }
1623 
1624 static void prop_pmu_num_set(Object *obj, Visitor *v, const char *name,
1625                              void *opaque, Error **errp)
1626 {
1627     RISCVCPU *cpu = RISCV_CPU(obj);
1628     uint8_t pmu_num, curr_pmu_num;
1629     uint32_t pmu_mask;
1630 
1631     visit_type_uint8(v, name, &pmu_num, errp);
1632 
1633     curr_pmu_num = ctpop32(cpu->cfg.pmu_mask);
1634 
1635     if (pmu_num != curr_pmu_num && riscv_cpu_is_vendor(obj)) {
1636         cpu_set_prop_err(cpu, name, errp);
1637         error_append_hint(errp, "Current '%s' val: %u\n",
1638                           name, curr_pmu_num);
1639         return;
1640     }
1641 
1642     if (pmu_num > (RV_MAX_MHPMCOUNTERS - 3)) {
1643         error_setg(errp, "Number of counters exceeds maximum available");
1644         return;
1645     }
1646 
1647     if (pmu_num == 0) {
1648         pmu_mask = 0;
1649     } else {
1650         pmu_mask = MAKE_64BIT_MASK(3, pmu_num);
1651     }
1652 
1653     warn_report("\"pmu-num\" property is deprecated; use \"pmu-mask\"");
1654     cpu->cfg.pmu_mask = pmu_mask;
1655     cpu_option_add_user_setting("pmu-mask", pmu_mask);
1656 }
1657 
1658 static void prop_pmu_num_get(Object *obj, Visitor *v, const char *name,
1659                              void *opaque, Error **errp)
1660 {
1661     RISCVCPU *cpu = RISCV_CPU(obj);
1662     uint8_t pmu_num = ctpop32(cpu->cfg.pmu_mask);
1663 
1664     visit_type_uint8(v, name, &pmu_num, errp);
1665 }
1666 
1667 static const PropertyInfo prop_pmu_num = {
1668     .name = "pmu-num",
1669     .get = prop_pmu_num_get,
1670     .set = prop_pmu_num_set,
1671 };
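/*
 * Illustrative mapping between the deprecated "pmu-num" property and
 * "pmu-mask" (a sketch derived from prop_pmu_num_set() above, not an
 * authoritative reference): counters 0-2 are cycle, time and instret,
 * so the programmable counters start at bit 3.  "pmu-num=4" therefore
 * becomes MAKE_64BIT_MASK(3, 4) == 0x78, i.e. mhpmcounter3..mhpmcounter6,
 * and an assumed equivalent command line is "-cpu rv64,pmu-mask=0x78".
 */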
1672 
1673 static void prop_pmu_mask_set(Object *obj, Visitor *v, const char *name,
1674                              void *opaque, Error **errp)
1675 {
1676     RISCVCPU *cpu = RISCV_CPU(obj);
1677     uint32_t value;
1678     uint8_t pmu_num;
1679 
1680     visit_type_uint32(v, name, &value, errp);
1681 
1682     if (value != cpu->cfg.pmu_mask && riscv_cpu_is_vendor(obj)) {
1683         cpu_set_prop_err(cpu, name, errp);
1684         error_append_hint(errp, "Current '%s' val: %x\n",
1685                           name, cpu->cfg.pmu_mask);
1686         return;
1687     }
1688 
1689     pmu_num = ctpop32(value);
1690 
1691     if (pmu_num > (RV_MAX_MHPMCOUNTERS - 3)) {
1692         error_setg(errp, "Number of counters exceeds maximum available");
1693         return;
1694     }
1695 
1696     cpu_option_add_user_setting(name, value);
1697     cpu->cfg.pmu_mask = value;
1698 }
1699 
1700 static void prop_pmu_mask_get(Object *obj, Visitor *v, const char *name,
1701                              void *opaque, Error **errp)
1702 {
1703     uint32_t pmu_mask = RISCV_CPU(obj)->cfg.pmu_mask;
1704 
1705     visit_type_uint32(v, name, &pmu_mask, errp);
1706 }
1707 
1708 static const PropertyInfo prop_pmu_mask = {
1709     .name = "pmu-mask",
1710     .get = prop_pmu_mask_get,
1711     .set = prop_pmu_mask_set,
1712 };
1713 
1714 static void prop_mmu_set(Object *obj, Visitor *v, const char *name,
1715                          void *opaque, Error **errp)
1716 {
1717     RISCVCPU *cpu = RISCV_CPU(obj);
1718     bool value;
1719 
1720     visit_type_bool(v, name, &value, errp);
1721 
1722     if (cpu->cfg.mmu != value && riscv_cpu_is_vendor(obj)) {
1723         cpu_set_prop_err(cpu, "mmu", errp);
1724         return;
1725     }
1726 
1727     cpu_option_add_user_setting(name, value);
1728     cpu->cfg.mmu = value;
1729 }
1730 
1731 static void prop_mmu_get(Object *obj, Visitor *v, const char *name,
1732                          void *opaque, Error **errp)
1733 {
1734     bool value = RISCV_CPU(obj)->cfg.mmu;
1735 
1736     visit_type_bool(v, name, &value, errp);
1737 }
1738 
1739 static const PropertyInfo prop_mmu = {
1740     .name = "mmu",
1741     .get = prop_mmu_get,
1742     .set = prop_mmu_set,
1743 };
1744 
1745 static void prop_pmp_set(Object *obj, Visitor *v, const char *name,
1746                          void *opaque, Error **errp)
1747 {
1748     RISCVCPU *cpu = RISCV_CPU(obj);
1749     bool value;
1750 
1751     visit_type_bool(v, name, &value, errp);
1752 
1753     if (cpu->cfg.pmp != value && riscv_cpu_is_vendor(obj)) {
1754         cpu_set_prop_err(cpu, name, errp);
1755         return;
1756     }
1757 
1758     cpu_option_add_user_setting(name, value);
1759     cpu->cfg.pmp = value;
1760 }
1761 
1762 static void prop_pmp_get(Object *obj, Visitor *v, const char *name,
1763                          void *opaque, Error **errp)
1764 {
1765     bool value = RISCV_CPU(obj)->cfg.pmp;
1766 
1767     visit_type_bool(v, name, &value, errp);
1768 }
1769 
1770 static const PropertyInfo prop_pmp = {
1771     .name = "pmp",
1772     .get = prop_pmp_get,
1773     .set = prop_pmp_set,
1774 };
1775 
1776 static int priv_spec_from_str(const char *priv_spec_str)
1777 {
1778     int priv_version = -1;
1779 
1780     if (!g_strcmp0(priv_spec_str, PRIV_VER_1_13_0_STR)) {
1781         priv_version = PRIV_VERSION_1_13_0;
1782     } else if (!g_strcmp0(priv_spec_str, PRIV_VER_1_12_0_STR)) {
1783         priv_version = PRIV_VERSION_1_12_0;
1784     } else if (!g_strcmp0(priv_spec_str, PRIV_VER_1_11_0_STR)) {
1785         priv_version = PRIV_VERSION_1_11_0;
1786     } else if (!g_strcmp0(priv_spec_str, PRIV_VER_1_10_0_STR)) {
1787         priv_version = PRIV_VERSION_1_10_0;
1788     }
1789 
1790     return priv_version;
1791 }
1792 
1793 const char *priv_spec_to_str(int priv_version)
1794 {
1795     switch (priv_version) {
1796     case PRIV_VERSION_1_10_0:
1797         return PRIV_VER_1_10_0_STR;
1798     case PRIV_VERSION_1_11_0:
1799         return PRIV_VER_1_11_0_STR;
1800     case PRIV_VERSION_1_12_0:
1801         return PRIV_VER_1_12_0_STR;
1802     case PRIV_VERSION_1_13_0:
1803         return PRIV_VER_1_13_0_STR;
1804     default:
1805         return NULL;
1806     }
1807 }
1808 
1809 static void prop_priv_spec_set(Object *obj, Visitor *v, const char *name,
1810                                void *opaque, Error **errp)
1811 {
1812     RISCVCPU *cpu = RISCV_CPU(obj);
1813     g_autofree char *value = NULL;
1814     int priv_version = -1;
1815 
1816     visit_type_str(v, name, &value, errp);
1817 
1818     priv_version = priv_spec_from_str(value);
1819     if (priv_version < 0) {
1820         error_setg(errp, "Unsupported privilege spec version '%s'", value);
1821         return;
1822     }
1823 
1824     if (priv_version != cpu->env.priv_ver && riscv_cpu_is_vendor(obj)) {
1825         cpu_set_prop_err(cpu, name, errp);
1826         error_append_hint(errp, "Current '%s' val: %s\n", name,
1827                           object_property_get_str(obj, name, NULL));
1828         return;
1829     }
1830 
1831     cpu_option_add_user_setting(name, priv_version);
1832     cpu->env.priv_ver = priv_version;
1833 }
1834 
1835 static void prop_priv_spec_get(Object *obj, Visitor *v, const char *name,
1836                                void *opaque, Error **errp)
1837 {
1838     RISCVCPU *cpu = RISCV_CPU(obj);
1839     const char *value = priv_spec_to_str(cpu->env.priv_ver);
1840 
1841     visit_type_str(v, name, (char **)&value, errp);
1842 }
1843 
1844 static const PropertyInfo prop_priv_spec = {
1845     .name = "priv_spec",
1846     .get = prop_priv_spec_get,
1847     .set = prop_priv_spec_set,
1848 };
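/*
 * Usage sketch (assuming the PRIV_VER_*_STR constants use the usual
 * "vX.Y.Z" spelling): "-cpu rv64,priv_spec=v1.12.0" goes through
 * priv_spec_from_str() and stores PRIV_VERSION_1_12_0 in env.priv_ver,
 * while reading the property back converts it again with
 * priv_spec_to_str().  Vendor CPUs refuse any value that differs from
 * their preset priv_ver.
 */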
1849 
1850 static void prop_vext_spec_set(Object *obj, Visitor *v, const char *name,
1851                                void *opaque, Error **errp)
1852 {
1853     RISCVCPU *cpu = RISCV_CPU(obj);
1854     g_autofree char *value = NULL;
1855 
1856     visit_type_str(v, name, &value, errp);
1857 
1858     if (g_strcmp0(value, VEXT_VER_1_00_0_STR) != 0) {
1859         error_setg(errp, "Unsupported vector spec version '%s'", value);
1860         return;
1861     }
1862 
1863     cpu_option_add_user_setting(name, VEXT_VERSION_1_00_0);
1864     cpu->env.vext_ver = VEXT_VERSION_1_00_0;
1865 }
1866 
1867 static void prop_vext_spec_get(Object *obj, Visitor *v, const char *name,
1868                                void *opaque, Error **errp)
1869 {
1870     const char *value = VEXT_VER_1_00_0_STR;
1871 
1872     visit_type_str(v, name, (char **)&value, errp);
1873 }
1874 
1875 static const PropertyInfo prop_vext_spec = {
1876     .name = "vext_spec",
1877     .get = prop_vext_spec_get,
1878     .set = prop_vext_spec_set,
1879 };
1880 
1881 static void prop_vlen_set(Object *obj, Visitor *v, const char *name,
1882                          void *opaque, Error **errp)
1883 {
1884     RISCVCPU *cpu = RISCV_CPU(obj);
1885     uint16_t value;
1886 
1887     if (!visit_type_uint16(v, name, &value, errp)) {
1888         return;
1889     }
1890 
1891     if (!is_power_of_2(value)) {
1892         error_setg(errp, "Vector extension VLEN must be power of 2");
1893         return;
1894     }
1895 
1896     if (value != cpu->cfg.vlenb && riscv_cpu_is_vendor(obj)) {
1897         cpu_set_prop_err(cpu, name, errp);
1898         error_append_hint(errp, "Current '%s' val: %u\n",
1899                           name, cpu->cfg.vlenb << 3);
1900         return;
1901     }
1902 
1903     cpu_option_add_user_setting(name, value);
1904     cpu->cfg.vlenb = value >> 3;
1905 }
1906 
1907 static void prop_vlen_get(Object *obj, Visitor *v, const char *name,
1908                          void *opaque, Error **errp)
1909 {
1910     uint16_t value = RISCV_CPU(obj)->cfg.vlenb << 3;
1911 
1912     visit_type_uint16(v, name, &value, errp);
1913 }
1914 
1915 static const PropertyInfo prop_vlen = {
1916     .name = "vlen",
1917     .get = prop_vlen_get,
1918     .set = prop_vlen_set,
1919 };
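/*
 * Unit note: "vlen" is expressed in bits on the command line but stored
 * as bytes in cfg.vlenb (value >> 3).  As an illustration (assuming the
 * "v" misa property registered elsewhere), "-cpu rv64,v=on,vlen=256"
 * leaves cfg.vlenb == 32, and the getter reports 256 again via
 * vlenb << 3.
 */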
1920 
1921 static void prop_elen_set(Object *obj, Visitor *v, const char *name,
1922                          void *opaque, Error **errp)
1923 {
1924     RISCVCPU *cpu = RISCV_CPU(obj);
1925     uint16_t value;
1926 
1927     if (!visit_type_uint16(v, name, &value, errp)) {
1928         return;
1929     }
1930 
1931     if (!is_power_of_2(value)) {
1932         error_setg(errp, "Vector extension ELEN must be power of 2");
1933         return;
1934     }
1935 
1936     if (value != cpu->cfg.elen && riscv_cpu_is_vendor(obj)) {
1937         cpu_set_prop_err(cpu, name, errp);
1938         error_append_hint(errp, "Current '%s' val: %u\n",
1939                           name, cpu->cfg.elen);
1940         return;
1941     }
1942 
1943     cpu_option_add_user_setting(name, value);
1944     cpu->cfg.elen = value;
1945 }
1946 
1947 static void prop_elen_get(Object *obj, Visitor *v, const char *name,
1948                          void *opaque, Error **errp)
1949 {
1950     uint16_t value = RISCV_CPU(obj)->cfg.elen;
1951 
1952     visit_type_uint16(v, name, &value, errp);
1953 }
1954 
1955 static const PropertyInfo prop_elen = {
1956     .name = "elen",
1957     .get = prop_elen_get,
1958     .set = prop_elen_set,
1959 };
1960 
1961 static void prop_cbom_blksize_set(Object *obj, Visitor *v, const char *name,
1962                                   void *opaque, Error **errp)
1963 {
1964     RISCVCPU *cpu = RISCV_CPU(obj);
1965     uint16_t value;
1966 
1967     if (!visit_type_uint16(v, name, &value, errp)) {
1968         return;
1969     }
1970 
1971     if (value != cpu->cfg.cbom_blocksize && riscv_cpu_is_vendor(obj)) {
1972         cpu_set_prop_err(cpu, name, errp);
1973         error_append_hint(errp, "Current '%s' val: %u\n",
1974                           name, cpu->cfg.cbom_blocksize);
1975         return;
1976     }
1977 
1978     cpu_option_add_user_setting(name, value);
1979     cpu->cfg.cbom_blocksize = value;
1980 }
1981 
1982 static void prop_cbom_blksize_get(Object *obj, Visitor *v, const char *name,
1983                          void *opaque, Error **errp)
1984 {
1985     uint16_t value = RISCV_CPU(obj)->cfg.cbom_blocksize;
1986 
1987     visit_type_uint16(v, name, &value, errp);
1988 }
1989 
1990 static const PropertyInfo prop_cbom_blksize = {
1991     .name = "cbom_blocksize",
1992     .get = prop_cbom_blksize_get,
1993     .set = prop_cbom_blksize_set,
1994 };
1995 
1996 static void prop_cbop_blksize_set(Object *obj, Visitor *v, const char *name,
1997                                   void *opaque, Error **errp)
1998 {
1999     RISCVCPU *cpu = RISCV_CPU(obj);
2000     uint16_t value;
2001 
2002     if (!visit_type_uint16(v, name, &value, errp)) {
2003         return;
2004     }
2005 
2006     if (value != cpu->cfg.cbop_blocksize && riscv_cpu_is_vendor(obj)) {
2007         cpu_set_prop_err(cpu, name, errp);
2008         error_append_hint(errp, "Current '%s' val: %u\n",
2009                           name, cpu->cfg.cbop_blocksize);
2010         return;
2011     }
2012 
2013     cpu_option_add_user_setting(name, value);
2014     cpu->cfg.cbop_blocksize = value;
2015 }
2016 
2017 static void prop_cbop_blksize_get(Object *obj, Visitor *v, const char *name,
2018                          void *opaque, Error **errp)
2019 {
2020     uint16_t value = RISCV_CPU(obj)->cfg.cbop_blocksize;
2021 
2022     visit_type_uint16(v, name, &value, errp);
2023 }
2024 
2025 static const PropertyInfo prop_cbop_blksize = {
2026     .name = "cbop_blocksize",
2027     .get = prop_cbop_blksize_get,
2028     .set = prop_cbop_blksize_set,
2029 };
2030 
2031 static void prop_cboz_blksize_set(Object *obj, Visitor *v, const char *name,
2032                                   void *opaque, Error **errp)
2033 {
2034     RISCVCPU *cpu = RISCV_CPU(obj);
2035     uint16_t value;
2036 
2037     if (!visit_type_uint16(v, name, &value, errp)) {
2038         return;
2039     }
2040 
2041     if (value != cpu->cfg.cboz_blocksize && riscv_cpu_is_vendor(obj)) {
2042         cpu_set_prop_err(cpu, name, errp);
2043         error_append_hint(errp, "Current '%s' val: %u\n",
2044                           name, cpu->cfg.cboz_blocksize);
2045         return;
2046     }
2047 
2048     cpu_option_add_user_setting(name, value);
2049     cpu->cfg.cboz_blocksize = value;
2050 }
2051 
2052 static void prop_cboz_blksize_get(Object *obj, Visitor *v, const char *name,
2053                          void *opaque, Error **errp)
2054 {
2055     uint16_t value = RISCV_CPU(obj)->cfg.cboz_blocksize;
2056 
2057     visit_type_uint16(v, name, &value, errp);
2058 }
2059 
2060 static const PropertyInfo prop_cboz_blksize = {
2061     .name = "cboz_blocksize",
2062     .get = prop_cboz_blksize_get,
2063     .set = prop_cboz_blksize_set,
2064 };
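/*
 * The three *_blocksize properties above share the same shape: the value
 * is taken as a byte count, vendor CPUs only accept their preset value,
 * and no further validation happens in the setters themselves.  A
 * hypothetical "-cpu rv64,zicboz=true,cboz_blocksize=64" would therefore
 * store 64 in cfg.cboz_blocksize as-is.
 */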
2065 
2066 static void prop_mvendorid_set(Object *obj, Visitor *v, const char *name,
2067                                void *opaque, Error **errp)
2068 {
2069     bool dynamic_cpu = riscv_cpu_is_dynamic(obj);
2070     RISCVCPU *cpu = RISCV_CPU(obj);
2071     uint32_t prev_val = cpu->cfg.mvendorid;
2072     uint32_t value;
2073 
2074     if (!visit_type_uint32(v, name, &value, errp)) {
2075         return;
2076     }
2077 
2078     if (!dynamic_cpu && prev_val != value) {
2079         error_setg(errp, "Unable to change %s mvendorid (0x%x)",
2080                    object_get_typename(obj), prev_val);
2081         return;
2082     }
2083 
2084     cpu->cfg.mvendorid = value;
2085 }
2086 
2087 static void prop_mvendorid_get(Object *obj, Visitor *v, const char *name,
2088                                void *opaque, Error **errp)
2089 {
2090     uint32_t value = RISCV_CPU(obj)->cfg.mvendorid;
2091 
2092     visit_type_uint32(v, name, &value, errp);
2093 }
2094 
2095 static const PropertyInfo prop_mvendorid = {
2096     .name = "mvendorid",
2097     .get = prop_mvendorid_get,
2098     .set = prop_mvendorid_set,
2099 };
2100 
2101 static void prop_mimpid_set(Object *obj, Visitor *v, const char *name,
2102                             void *opaque, Error **errp)
2103 {
2104     bool dynamic_cpu = riscv_cpu_is_dynamic(obj);
2105     RISCVCPU *cpu = RISCV_CPU(obj);
2106     uint64_t prev_val = cpu->cfg.mimpid;
2107     uint64_t value;
2108 
2109     if (!visit_type_uint64(v, name, &value, errp)) {
2110         return;
2111     }
2112 
2113     if (!dynamic_cpu && prev_val != value) {
2114         error_setg(errp, "Unable to change %s mimpid (0x%" PRIu64 ")",
2115                    object_get_typename(obj), prev_val);
2116         return;
2117     }
2118 
2119     cpu->cfg.mimpid = value;
2120 }
2121 
2122 static void prop_mimpid_get(Object *obj, Visitor *v, const char *name,
2123                             void *opaque, Error **errp)
2124 {
2125     uint64_t value = RISCV_CPU(obj)->cfg.mimpid;
2126 
2127     visit_type_uint64(v, name, &value, errp);
2128 }
2129 
2130 static const PropertyInfo prop_mimpid = {
2131     .name = "mimpid",
2132     .get = prop_mimpid_get,
2133     .set = prop_mimpid_set,
2134 };
2135 
2136 static void prop_marchid_set(Object *obj, Visitor *v, const char *name,
2137                              void *opaque, Error **errp)
2138 {
2139     bool dynamic_cpu = riscv_cpu_is_dynamic(obj);
2140     RISCVCPU *cpu = RISCV_CPU(obj);
2141     uint64_t prev_val = cpu->cfg.marchid;
2142     uint64_t value, invalid_val;
2143     uint32_t mxlen = 0;
2144 
2145     if (!visit_type_uint64(v, name, &value, errp)) {
2146         return;
2147     }
2148 
2149     if (!dynamic_cpu && prev_val != value) {
2150         error_setg(errp, "Unable to change %s marchid (0x%" PRIu64 ")",
2151                    object_get_typename(obj), prev_val);
2152         return;
2153     }
2154 
2155     switch (riscv_cpu_mxl(&cpu->env)) {
2156     case MXL_RV32:
2157         mxlen = 32;
2158         break;
2159     case MXL_RV64:
2160     case MXL_RV128:
2161         mxlen = 64;
2162         break;
2163     default:
2164         g_assert_not_reached();
2165     }
2166 
2167     invalid_val = 1ULL << (mxlen - 1);
2168 
2169     if (value == invalid_val) {
2170         error_setg(errp, "Unable to set marchid with MSB (%u) bit set "
2171                          "and the remaining bits zero", mxlen);
2172         return;
2173     }
2174 
2175     cpu->cfg.marchid = value;
2176 }
2177 
2178 static void prop_marchid_get(Object *obj, Visitor *v, const char *name,
2179                              void *opaque, Error **errp)
2180 {
2181     uint64_t value = RISCV_CPU(obj)->cfg.marchid;
2182 
2183     visit_type_uint64(v, name, &value, errp);
2184 }
2185 
2186 static const PropertyInfo prop_marchid = {
2187     .name = "marchid",
2188     .get = prop_marchid_get,
2189     .set = prop_marchid_set,
2190 };
2191 
2192 /*
2193  * RVA22U64 defines some 'named features' that are cache
2194  * related: Za64rs, Zic64b, Ziccif, Ziccrse, Ziccamoa
2195  * and Zicclsm. They are always implemented in TCG and
2196  * don't need to be manually enabled by the profile.
2197  */
2198 static RISCVCPUProfile RVA22U64 = {
2199     .parent = NULL,
2200     .name = "rva22u64",
2201     .misa_ext = RVI | RVM | RVA | RVF | RVD | RVC | RVU,
2202     .priv_spec = RISCV_PROFILE_ATTR_UNUSED,
2203     .satp_mode = RISCV_PROFILE_ATTR_UNUSED,
2204     .ext_offsets = {
2205         CPU_CFG_OFFSET(ext_zicsr), CPU_CFG_OFFSET(ext_zihintpause),
2206         CPU_CFG_OFFSET(ext_zba), CPU_CFG_OFFSET(ext_zbb),
2207         CPU_CFG_OFFSET(ext_zbs), CPU_CFG_OFFSET(ext_zfhmin),
2208         CPU_CFG_OFFSET(ext_zkt), CPU_CFG_OFFSET(ext_zicntr),
2209         CPU_CFG_OFFSET(ext_zihpm), CPU_CFG_OFFSET(ext_zicbom),
2210         CPU_CFG_OFFSET(ext_zicbop), CPU_CFG_OFFSET(ext_zicboz),
2211 
2212         /* mandatory named features for this profile */
2213         CPU_CFG_OFFSET(ext_zic64b),
2214 
2215         RISCV_PROFILE_EXT_LIST_END
2216     }
2217 };
2218 
2219 /*
2220  * As with RVA22U64, RVA22S64 also defines 'named features'.
2221  *
2222  * Cache related features that we consider enabled since we don't
2223  * implement cache: Ssccptr
2224  *
2225  * Other named features that we already implement: Sstvecd, Sstvala,
2226  * Sscounterenw
2227  *
2228  * The remaining features/extensions come from RVA22U64.
2229  */
2230 static RISCVCPUProfile RVA22S64 = {
2231     .parent = &RVA22U64,
2232     .name = "rva22s64",
2233     .misa_ext = RVS,
2234     .priv_spec = PRIV_VERSION_1_12_0,
2235     .satp_mode = VM_1_10_SV39,
2236     .ext_offsets = {
2237         /* rva22s64 exts */
2238         CPU_CFG_OFFSET(ext_zifencei), CPU_CFG_OFFSET(ext_svpbmt),
2239         CPU_CFG_OFFSET(ext_svinval), CPU_CFG_OFFSET(ext_svade),
2240 
2241         RISCV_PROFILE_EXT_LIST_END
2242     }
2243 };
2244 
2245 RISCVCPUProfile *riscv_profiles[] = {
2246     &RVA22U64,
2247     &RVA22S64,
2248     NULL,
2249 };
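/*
 * Profiles are consumed in two ways: accel-specific code is expected to
 * expose each entry as a boolean CPU property named after the profile,
 * and the rva22u64/rva22s64 bare CPU types defined at the end of this
 * file set the corresponding .enabled flag in their instance_init.  An
 * assumed example would be "-cpu rv64,rva22u64=true" or simply
 * "-cpu rva22u64".
 */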
2250 
2251 static RISCVCPUImpliedExtsRule RVA_IMPLIED = {
2252     .is_misa = true,
2253     .ext = RVA,
2254     .implied_multi_exts = {
2255         CPU_CFG_OFFSET(ext_zalrsc), CPU_CFG_OFFSET(ext_zaamo),
2256 
2257         RISCV_IMPLIED_EXTS_RULE_END
2258     },
2259 };
2260 
2261 static RISCVCPUImpliedExtsRule RVD_IMPLIED = {
2262     .is_misa = true,
2263     .ext = RVD,
2264     .implied_misa_exts = RVF,
2265     .implied_multi_exts = { RISCV_IMPLIED_EXTS_RULE_END },
2266 };
2267 
2268 static RISCVCPUImpliedExtsRule RVF_IMPLIED = {
2269     .is_misa = true,
2270     .ext = RVF,
2271     .implied_multi_exts = {
2272         CPU_CFG_OFFSET(ext_zicsr),
2273 
2274         RISCV_IMPLIED_EXTS_RULE_END
2275     },
2276 };
2277 
2278 static RISCVCPUImpliedExtsRule RVM_IMPLIED = {
2279     .is_misa = true,
2280     .ext = RVM,
2281     .implied_multi_exts = {
2282         CPU_CFG_OFFSET(ext_zmmul),
2283 
2284         RISCV_IMPLIED_EXTS_RULE_END
2285     },
2286 };
2287 
2288 static RISCVCPUImpliedExtsRule RVV_IMPLIED = {
2289     .is_misa = true,
2290     .ext = RVV,
2291     .implied_multi_exts = {
2292         CPU_CFG_OFFSET(ext_zve64d),
2293 
2294         RISCV_IMPLIED_EXTS_RULE_END
2295     },
2296 };
2297 
2298 static RISCVCPUImpliedExtsRule ZCB_IMPLIED = {
2299     .ext = CPU_CFG_OFFSET(ext_zcb),
2300     .implied_multi_exts = {
2301         CPU_CFG_OFFSET(ext_zca),
2302 
2303         RISCV_IMPLIED_EXTS_RULE_END
2304     },
2305 };
2306 
2307 static RISCVCPUImpliedExtsRule ZCD_IMPLIED = {
2308     .ext = CPU_CFG_OFFSET(ext_zcd),
2309     .implied_misa_exts = RVD,
2310     .implied_multi_exts = {
2311         CPU_CFG_OFFSET(ext_zca),
2312 
2313         RISCV_IMPLIED_EXTS_RULE_END
2314     },
2315 };
2316 
2317 static RISCVCPUImpliedExtsRule ZCE_IMPLIED = {
2318     .ext = CPU_CFG_OFFSET(ext_zce),
2319     .implied_multi_exts = {
2320         CPU_CFG_OFFSET(ext_zcb), CPU_CFG_OFFSET(ext_zcmp),
2321         CPU_CFG_OFFSET(ext_zcmt),
2322 
2323         RISCV_IMPLIED_EXTS_RULE_END
2324     },
2325 };
2326 
2327 static RISCVCPUImpliedExtsRule ZCF_IMPLIED = {
2328     .ext = CPU_CFG_OFFSET(ext_zcf),
2329     .implied_misa_exts = RVF,
2330     .implied_multi_exts = {
2331         CPU_CFG_OFFSET(ext_zca),
2332 
2333         RISCV_IMPLIED_EXTS_RULE_END
2334     },
2335 };
2336 
2337 static RISCVCPUImpliedExtsRule ZCMP_IMPLIED = {
2338     .ext = CPU_CFG_OFFSET(ext_zcmp),
2339     .implied_multi_exts = {
2340         CPU_CFG_OFFSET(ext_zca),
2341 
2342         RISCV_IMPLIED_EXTS_RULE_END
2343     },
2344 };
2345 
2346 static RISCVCPUImpliedExtsRule ZCMT_IMPLIED = {
2347     .ext = CPU_CFG_OFFSET(ext_zcmt),
2348     .implied_multi_exts = {
2349         CPU_CFG_OFFSET(ext_zca), CPU_CFG_OFFSET(ext_zicsr),
2350 
2351         RISCV_IMPLIED_EXTS_RULE_END
2352     },
2353 };
2354 
2355 static RISCVCPUImpliedExtsRule ZDINX_IMPLIED = {
2356     .ext = CPU_CFG_OFFSET(ext_zdinx),
2357     .implied_multi_exts = {
2358         CPU_CFG_OFFSET(ext_zfinx),
2359 
2360         RISCV_IMPLIED_EXTS_RULE_END
2361     },
2362 };
2363 
2364 static RISCVCPUImpliedExtsRule ZFA_IMPLIED = {
2365     .ext = CPU_CFG_OFFSET(ext_zfa),
2366     .implied_misa_exts = RVF,
2367     .implied_multi_exts = { RISCV_IMPLIED_EXTS_RULE_END },
2368 };
2369 
2370 static RISCVCPUImpliedExtsRule ZFBFMIN_IMPLIED = {
2371     .ext = CPU_CFG_OFFSET(ext_zfbfmin),
2372     .implied_misa_exts = RVF,
2373     .implied_multi_exts = { RISCV_IMPLIED_EXTS_RULE_END },
2374 };
2375 
2376 static RISCVCPUImpliedExtsRule ZFH_IMPLIED = {
2377     .ext = CPU_CFG_OFFSET(ext_zfh),
2378     .implied_multi_exts = {
2379         CPU_CFG_OFFSET(ext_zfhmin),
2380 
2381         RISCV_IMPLIED_EXTS_RULE_END
2382     },
2383 };
2384 
2385 static RISCVCPUImpliedExtsRule ZFHMIN_IMPLIED = {
2386     .ext = CPU_CFG_OFFSET(ext_zfhmin),
2387     .implied_misa_exts = RVF,
2388     .implied_multi_exts = { RISCV_IMPLIED_EXTS_RULE_END },
2389 };
2390 
2391 static RISCVCPUImpliedExtsRule ZFINX_IMPLIED = {
2392     .ext = CPU_CFG_OFFSET(ext_zfinx),
2393     .implied_multi_exts = {
2394         CPU_CFG_OFFSET(ext_zicsr),
2395 
2396         RISCV_IMPLIED_EXTS_RULE_END
2397     },
2398 };
2399 
2400 static RISCVCPUImpliedExtsRule ZHINX_IMPLIED = {
2401     .ext = CPU_CFG_OFFSET(ext_zhinx),
2402     .implied_multi_exts = {
2403         CPU_CFG_OFFSET(ext_zhinxmin),
2404 
2405         RISCV_IMPLIED_EXTS_RULE_END
2406     },
2407 };
2408 
2409 static RISCVCPUImpliedExtsRule ZHINXMIN_IMPLIED = {
2410     .ext = CPU_CFG_OFFSET(ext_zhinxmin),
2411     .implied_multi_exts = {
2412         CPU_CFG_OFFSET(ext_zfinx),
2413 
2414         RISCV_IMPLIED_EXTS_RULE_END
2415     },
2416 };
2417 
2418 static RISCVCPUImpliedExtsRule ZICNTR_IMPLIED = {
2419     .ext = CPU_CFG_OFFSET(ext_zicntr),
2420     .implied_multi_exts = {
2421         CPU_CFG_OFFSET(ext_zicsr),
2422 
2423         RISCV_IMPLIED_EXTS_RULE_END
2424     },
2425 };
2426 
2427 static RISCVCPUImpliedExtsRule ZIHPM_IMPLIED = {
2428     .ext = CPU_CFG_OFFSET(ext_zihpm),
2429     .implied_multi_exts = {
2430         CPU_CFG_OFFSET(ext_zicsr),
2431 
2432         RISCV_IMPLIED_EXTS_RULE_END
2433     },
2434 };
2435 
2436 static RISCVCPUImpliedExtsRule ZK_IMPLIED = {
2437     .ext = CPU_CFG_OFFSET(ext_zk),
2438     .implied_multi_exts = {
2439         CPU_CFG_OFFSET(ext_zkn), CPU_CFG_OFFSET(ext_zkr),
2440         CPU_CFG_OFFSET(ext_zkt),
2441 
2442         RISCV_IMPLIED_EXTS_RULE_END
2443     },
2444 };
2445 
2446 static RISCVCPUImpliedExtsRule ZKN_IMPLIED = {
2447     .ext = CPU_CFG_OFFSET(ext_zkn),
2448     .implied_multi_exts = {
2449         CPU_CFG_OFFSET(ext_zbkb), CPU_CFG_OFFSET(ext_zbkc),
2450         CPU_CFG_OFFSET(ext_zbkx), CPU_CFG_OFFSET(ext_zkne),
2451         CPU_CFG_OFFSET(ext_zknd), CPU_CFG_OFFSET(ext_zknh),
2452 
2453         RISCV_IMPLIED_EXTS_RULE_END
2454     },
2455 };
2456 
2457 static RISCVCPUImpliedExtsRule ZKS_IMPLIED = {
2458     .ext = CPU_CFG_OFFSET(ext_zks),
2459     .implied_multi_exts = {
2460         CPU_CFG_OFFSET(ext_zbkb), CPU_CFG_OFFSET(ext_zbkc),
2461         CPU_CFG_OFFSET(ext_zbkx), CPU_CFG_OFFSET(ext_zksed),
2462         CPU_CFG_OFFSET(ext_zksh),
2463 
2464         RISCV_IMPLIED_EXTS_RULE_END
2465     },
2466 };
2467 
2468 static RISCVCPUImpliedExtsRule ZVBB_IMPLIED = {
2469     .ext = CPU_CFG_OFFSET(ext_zvbb),
2470     .implied_multi_exts = {
2471         CPU_CFG_OFFSET(ext_zvkb),
2472 
2473         RISCV_IMPLIED_EXTS_RULE_END
2474     },
2475 };
2476 
2477 static RISCVCPUImpliedExtsRule ZVE32F_IMPLIED = {
2478     .ext = CPU_CFG_OFFSET(ext_zve32f),
2479     .implied_misa_exts = RVF,
2480     .implied_multi_exts = {
2481         CPU_CFG_OFFSET(ext_zve32x),
2482 
2483         RISCV_IMPLIED_EXTS_RULE_END
2484     },
2485 };
2486 
2487 static RISCVCPUImpliedExtsRule ZVE32X_IMPLIED = {
2488     .ext = CPU_CFG_OFFSET(ext_zve32x),
2489     .implied_multi_exts = {
2490         CPU_CFG_OFFSET(ext_zicsr),
2491 
2492         RISCV_IMPLIED_EXTS_RULE_END
2493     },
2494 };
2495 
2496 static RISCVCPUImpliedExtsRule ZVE64D_IMPLIED = {
2497     .ext = CPU_CFG_OFFSET(ext_zve64d),
2498     .implied_misa_exts = RVD,
2499     .implied_multi_exts = {
2500         CPU_CFG_OFFSET(ext_zve64f),
2501 
2502         RISCV_IMPLIED_EXTS_RULE_END
2503     },
2504 };
2505 
2506 static RISCVCPUImpliedExtsRule ZVE64F_IMPLIED = {
2507     .ext = CPU_CFG_OFFSET(ext_zve64f),
2508     .implied_misa_exts = RVF,
2509     .implied_multi_exts = {
2510         CPU_CFG_OFFSET(ext_zve32f), CPU_CFG_OFFSET(ext_zve64x),
2511 
2512         RISCV_IMPLIED_EXTS_RULE_END
2513     },
2514 };
2515 
2516 static RISCVCPUImpliedExtsRule ZVE64X_IMPLIED = {
2517     .ext = CPU_CFG_OFFSET(ext_zve64x),
2518     .implied_multi_exts = {
2519         CPU_CFG_OFFSET(ext_zve32x),
2520 
2521         RISCV_IMPLIED_EXTS_RULE_END
2522     },
2523 };
2524 
2525 static RISCVCPUImpliedExtsRule ZVFBFMIN_IMPLIED = {
2526     .ext = CPU_CFG_OFFSET(ext_zvfbfmin),
2527     .implied_multi_exts = {
2528         CPU_CFG_OFFSET(ext_zve32f),
2529 
2530         RISCV_IMPLIED_EXTS_RULE_END
2531     },
2532 };
2533 
2534 static RISCVCPUImpliedExtsRule ZVFBFWMA_IMPLIED = {
2535     .ext = CPU_CFG_OFFSET(ext_zvfbfwma),
2536     .implied_multi_exts = {
2537         CPU_CFG_OFFSET(ext_zvfbfmin), CPU_CFG_OFFSET(ext_zfbfmin),
2538 
2539         RISCV_IMPLIED_EXTS_RULE_END
2540     },
2541 };
2542 
2543 static RISCVCPUImpliedExtsRule ZVFH_IMPLIED = {
2544     .ext = CPU_CFG_OFFSET(ext_zvfh),
2545     .implied_multi_exts = {
2546         CPU_CFG_OFFSET(ext_zvfhmin), CPU_CFG_OFFSET(ext_zfhmin),
2547 
2548         RISCV_IMPLIED_EXTS_RULE_END
2549     },
2550 };
2551 
2552 static RISCVCPUImpliedExtsRule ZVFHMIN_IMPLIED = {
2553     .ext = CPU_CFG_OFFSET(ext_zvfhmin),
2554     .implied_multi_exts = {
2555         CPU_CFG_OFFSET(ext_zve32f),
2556 
2557         RISCV_IMPLIED_EXTS_RULE_END
2558     },
2559 };
2560 
2561 static RISCVCPUImpliedExtsRule ZVKN_IMPLIED = {
2562     .ext = CPU_CFG_OFFSET(ext_zvkn),
2563     .implied_multi_exts = {
2564         CPU_CFG_OFFSET(ext_zvkned), CPU_CFG_OFFSET(ext_zvknhb),
2565         CPU_CFG_OFFSET(ext_zvkb), CPU_CFG_OFFSET(ext_zvkt),
2566 
2567         RISCV_IMPLIED_EXTS_RULE_END
2568     },
2569 };
2570 
2571 static RISCVCPUImpliedExtsRule ZVKNC_IMPLIED = {
2572     .ext = CPU_CFG_OFFSET(ext_zvknc),
2573     .implied_multi_exts = {
2574         CPU_CFG_OFFSET(ext_zvkn), CPU_CFG_OFFSET(ext_zvbc),
2575 
2576         RISCV_IMPLIED_EXTS_RULE_END
2577     },
2578 };
2579 
2580 static RISCVCPUImpliedExtsRule ZVKNG_IMPLIED = {
2581     .ext = CPU_CFG_OFFSET(ext_zvkng),
2582     .implied_multi_exts = {
2583         CPU_CFG_OFFSET(ext_zvkn), CPU_CFG_OFFSET(ext_zvkg),
2584 
2585         RISCV_IMPLIED_EXTS_RULE_END
2586     },
2587 };
2588 
2589 static RISCVCPUImpliedExtsRule ZVKNHB_IMPLIED = {
2590     .ext = CPU_CFG_OFFSET(ext_zvknhb),
2591     .implied_multi_exts = {
2592         CPU_CFG_OFFSET(ext_zve64x),
2593 
2594         RISCV_IMPLIED_EXTS_RULE_END
2595     },
2596 };
2597 
2598 static RISCVCPUImpliedExtsRule ZVKS_IMPLIED = {
2599     .ext = CPU_CFG_OFFSET(ext_zvks),
2600     .implied_multi_exts = {
2601         CPU_CFG_OFFSET(ext_zvksed), CPU_CFG_OFFSET(ext_zvksh),
2602         CPU_CFG_OFFSET(ext_zvkb), CPU_CFG_OFFSET(ext_zvkt),
2603 
2604         RISCV_IMPLIED_EXTS_RULE_END
2605     },
2606 };
2607 
2608 static RISCVCPUImpliedExtsRule ZVKSC_IMPLIED = {
2609     .ext = CPU_CFG_OFFSET(ext_zvksc),
2610     .implied_multi_exts = {
2611         CPU_CFG_OFFSET(ext_zvks), CPU_CFG_OFFSET(ext_zvbc),
2612 
2613         RISCV_IMPLIED_EXTS_RULE_END
2614     },
2615 };
2616 
2617 static RISCVCPUImpliedExtsRule ZVKSG_IMPLIED = {
2618     .ext = CPU_CFG_OFFSET(ext_zvksg),
2619     .implied_multi_exts = {
2620         CPU_CFG_OFFSET(ext_zvks), CPU_CFG_OFFSET(ext_zvkg),
2621 
2622         RISCV_IMPLIED_EXTS_RULE_END
2623     },
2624 };
2625 
2626 RISCVCPUImpliedExtsRule *riscv_misa_ext_implied_rules[] = {
2627     &RVA_IMPLIED, &RVD_IMPLIED, &RVF_IMPLIED,
2628     &RVM_IMPLIED, &RVV_IMPLIED, NULL
2629 };
2630 
2631 RISCVCPUImpliedExtsRule *riscv_multi_ext_implied_rules[] = {
2632     &ZCB_IMPLIED, &ZCD_IMPLIED, &ZCE_IMPLIED,
2633     &ZCF_IMPLIED, &ZCMP_IMPLIED, &ZCMT_IMPLIED,
2634     &ZDINX_IMPLIED, &ZFA_IMPLIED, &ZFBFMIN_IMPLIED,
2635     &ZFH_IMPLIED, &ZFHMIN_IMPLIED, &ZFINX_IMPLIED,
2636     &ZHINX_IMPLIED, &ZHINXMIN_IMPLIED, &ZICNTR_IMPLIED,
2637     &ZIHPM_IMPLIED, &ZK_IMPLIED, &ZKN_IMPLIED,
2638     &ZKS_IMPLIED, &ZVBB_IMPLIED, &ZVE32F_IMPLIED,
2639     &ZVE32X_IMPLIED, &ZVE64D_IMPLIED, &ZVE64F_IMPLIED,
2640     &ZVE64X_IMPLIED, &ZVFBFMIN_IMPLIED, &ZVFBFWMA_IMPLIED,
2641     &ZVFH_IMPLIED, &ZVFHMIN_IMPLIED, &ZVKN_IMPLIED,
2642     &ZVKNC_IMPLIED, &ZVKNG_IMPLIED, &ZVKNHB_IMPLIED,
2643     &ZVKS_IMPLIED,  &ZVKSC_IMPLIED, &ZVKSG_IMPLIED,
2644     NULL
2645 };
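/*
 * Example of how these rules chain (derived from the entries above):
 * enabling zvknc implies zvkn and zvbc; zvkn implies zvkned, zvknhb,
 * zvkb and zvkt; zvknhb implies zve64x; zve64x implies zve32x; and
 * zve32x implies zicsr.  The tables are walked elsewhere when the CPU
 * configuration is finalized.
 */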
2646 
2647 static Property riscv_cpu_properties[] = {
2648     DEFINE_PROP_BOOL("debug", RISCVCPU, cfg.debug, true),
2649 
2650     {.name = "pmu-mask", .info = &prop_pmu_mask},
2651     {.name = "pmu-num", .info = &prop_pmu_num}, /* Deprecated */
2652 
2653     {.name = "mmu", .info = &prop_mmu},
2654     {.name = "pmp", .info = &prop_pmp},
2655 
2656     {.name = "priv_spec", .info = &prop_priv_spec},
2657     {.name = "vext_spec", .info = &prop_vext_spec},
2658 
2659     {.name = "vlen", .info = &prop_vlen},
2660     {.name = "elen", .info = &prop_elen},
2661 
2662     {.name = "cbom_blocksize", .info = &prop_cbom_blksize},
2663     {.name = "cbop_blocksize", .info = &prop_cbop_blksize},
2664     {.name = "cboz_blocksize", .info = &prop_cboz_blksize},
2665 
2666      {.name = "mvendorid", .info = &prop_mvendorid},
2667      {.name = "mimpid", .info = &prop_mimpid},
2668      {.name = "marchid", .info = &prop_marchid},
2669 
2670 #ifndef CONFIG_USER_ONLY
2671     DEFINE_PROP_UINT64("resetvec", RISCVCPU, env.resetvec, DEFAULT_RSTVEC),
2672 #endif
2673 
2674     DEFINE_PROP_BOOL("short-isa-string", RISCVCPU, cfg.short_isa_string, false),
2675 
2676     DEFINE_PROP_BOOL("rvv_ta_all_1s", RISCVCPU, cfg.rvv_ta_all_1s, false),
2677     DEFINE_PROP_BOOL("rvv_ma_all_1s", RISCVCPU, cfg.rvv_ma_all_1s, false),
2678     DEFINE_PROP_BOOL("rvv_vl_half_avl", RISCVCPU, cfg.rvv_vl_half_avl, false),
2679 
2680     /*
2681      * write_misa() is marked as experimental for now so mark
2682      * it with an 'x-' prefix and default to 'false'.
2683      */
2684     DEFINE_PROP_BOOL("x-misa-w", RISCVCPU, cfg.misa_w, false),
2685     DEFINE_PROP_END_OF_LIST(),
2686 };
2687 
2688 #if defined(TARGET_RISCV64)
2689 static void rva22u64_profile_cpu_init(Object *obj)
2690 {
2691     rv64i_bare_cpu_init(obj);
2692 
2693     RVA22U64.enabled = true;
2694 }
2695 
2696 static void rva22s64_profile_cpu_init(Object *obj)
2697 {
2698     rv64i_bare_cpu_init(obj);
2699 
2700     RVA22S64.enabled = true;
2701 }
2702 #endif
2703 
2704 static const gchar *riscv_gdb_arch_name(CPUState *cs)
2705 {
2706     RISCVCPU *cpu = RISCV_CPU(cs);
2707     CPURISCVState *env = &cpu->env;
2708 
2709     switch (riscv_cpu_mxl(env)) {
2710     case MXL_RV32:
2711         return "riscv:rv32";
2712     case MXL_RV64:
2713     case MXL_RV128:
2714         return "riscv:rv64";
2715     default:
2716         g_assert_not_reached();
2717     }
2718 }
2719 
2720 #ifndef CONFIG_USER_ONLY
2721 static int64_t riscv_get_arch_id(CPUState *cs)
2722 {
2723     RISCVCPU *cpu = RISCV_CPU(cs);
2724 
2725     return cpu->env.mhartid;
2726 }
2727 
2728 #include "hw/core/sysemu-cpu-ops.h"
2729 
2730 static const struct SysemuCPUOps riscv_sysemu_ops = {
2731     .get_phys_page_debug = riscv_cpu_get_phys_page_debug,
2732     .write_elf64_note = riscv_cpu_write_elf64_note,
2733     .write_elf32_note = riscv_cpu_write_elf32_note,
2734     .legacy_vmsd = &vmstate_riscv_cpu,
2735 };
2736 #endif
2737 
2738 static void riscv_cpu_common_class_init(ObjectClass *c, void *data)
2739 {
2740     RISCVCPUClass *mcc = RISCV_CPU_CLASS(c);
2741     CPUClass *cc = CPU_CLASS(c);
2742     DeviceClass *dc = DEVICE_CLASS(c);
2743     ResettableClass *rc = RESETTABLE_CLASS(c);
2744 
2745     device_class_set_parent_realize(dc, riscv_cpu_realize,
2746                                     &mcc->parent_realize);
2747 
2748     resettable_class_set_parent_phases(rc, NULL, riscv_cpu_reset_hold, NULL,
2749                                        &mcc->parent_phases);
2750 
2751     cc->class_by_name = riscv_cpu_class_by_name;
2752     cc->has_work = riscv_cpu_has_work;
2753     cc->mmu_index = riscv_cpu_mmu_index;
2754     cc->dump_state = riscv_cpu_dump_state;
2755     cc->set_pc = riscv_cpu_set_pc;
2756     cc->get_pc = riscv_cpu_get_pc;
2757     cc->gdb_read_register = riscv_cpu_gdb_read_register;
2758     cc->gdb_write_register = riscv_cpu_gdb_write_register;
2759     cc->gdb_stop_before_watchpoint = true;
2760     cc->disas_set_info = riscv_cpu_disas_set_info;
2761 #ifndef CONFIG_USER_ONLY
2762     cc->sysemu_ops = &riscv_sysemu_ops;
2763     cc->get_arch_id = riscv_get_arch_id;
2764 #endif
2765     cc->gdb_arch_name = riscv_gdb_arch_name;
2766 
2767     device_class_set_props(dc, riscv_cpu_properties);
2768 }
2769 
2770 static void riscv_cpu_class_init(ObjectClass *c, void *data)
2771 {
2772     RISCVCPUClass *mcc = RISCV_CPU_CLASS(c);
2773 
2774     mcc->misa_mxl_max = (uint32_t)(uintptr_t)data;
2775     riscv_cpu_validate_misa_mxl(mcc);
2776 }
2777 
2778 static void riscv_isa_string_ext(RISCVCPU *cpu, char **isa_str,
2779                                  int max_str_len)
2780 {
2781     const RISCVIsaExtData *edata;
2782     char *old = *isa_str;
2783     char *new = *isa_str;
2784 
2785     for (edata = isa_edata_arr; edata && edata->name; edata++) {
2786         if (isa_ext_is_enabled(cpu, edata->ext_enable_offset)) {
2787             new = g_strconcat(old, "_", edata->name, NULL);
2788             g_free(old);
2789             old = new;
2790         }
2791     }
2792 
2793     *isa_str = new;
2794 }
2795 
2796 char *riscv_isa_string(RISCVCPU *cpu)
2797 {
2798     RISCVCPUClass *mcc = RISCV_CPU_GET_CLASS(cpu);
2799     int i;
2800     const size_t maxlen = sizeof("rv128") + sizeof(riscv_single_letter_exts);
2801     char *isa_str = g_new(char, maxlen);
2802     int xlen = riscv_cpu_max_xlen(mcc);
2803     char *p = isa_str + snprintf(isa_str, maxlen, "rv%d", xlen);
2804 
2805     for (i = 0; i < sizeof(riscv_single_letter_exts) - 1; i++) {
2806         if (cpu->env.misa_ext & RV(riscv_single_letter_exts[i])) {
2807             *p++ = qemu_tolower(riscv_single_letter_exts[i]);
2808         }
2809     }
2810     *p = '\0';
2811     if (!cpu->cfg.short_isa_string) {
2812         riscv_isa_string_ext(cpu, &isa_str, maxlen);
2813     }
2814     return isa_str;
2815 }
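/*
 * Illustrative output (the exact list depends on the enabled extensions
 * and on isa_edata_arr ordering): an rv64 CPU with I, M, A, F, D, C plus
 * a few Z-extensions would produce something like
 * "rv64imafdc_zicsr_zifencei_zba_zbb", while cfg.short_isa_string limits
 * the result to the single-letter part, e.g. "rv64imafdc".
 */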
2816 
2817 #ifndef CONFIG_USER_ONLY
2818 static char **riscv_isa_extensions_list(RISCVCPU *cpu, int *count)
2819 {
2820     int maxlen = ARRAY_SIZE(riscv_single_letter_exts) + ARRAY_SIZE(isa_edata_arr);
2821     char **extensions = g_new(char *, maxlen);
2822 
2823     for (int i = 0; i < sizeof(riscv_single_letter_exts) - 1; i++) {
2824         if (cpu->env.misa_ext & RV(riscv_single_letter_exts[i])) {
2825             extensions[*count] = g_new(char, 2);
2826             snprintf(extensions[*count], 2, "%c",
2827                      qemu_tolower(riscv_single_letter_exts[i]));
2828             (*count)++;
2829         }
2830     }
2831 
2832     for (const RISCVIsaExtData *edata = isa_edata_arr; edata->name; edata++) {
2833         if (isa_ext_is_enabled(cpu, edata->ext_enable_offset)) {
2834             extensions[*count] = g_strdup(edata->name);
2835             (*count)++;
2836         }
2837     }
2838 
2839     return extensions;
2840 }
2841 
2842 void riscv_isa_write_fdt(RISCVCPU *cpu, void *fdt, char *nodename)
2843 {
2844     RISCVCPUClass *mcc = RISCV_CPU_GET_CLASS(cpu);
2845     const size_t maxlen = sizeof("rv128i");
2846     g_autofree char *isa_base = g_new(char, maxlen);
2847     g_autofree char *riscv_isa = NULL;
2848     char **isa_extensions;
2849     int count = 0;
2850     int xlen = riscv_cpu_max_xlen(mcc);
2851 
2852     riscv_isa = riscv_isa_string(cpu);
2853     qemu_fdt_setprop_string(fdt, nodename, "riscv,isa", riscv_isa);
2854 
2855     snprintf(isa_base, maxlen, "rv%di", xlen);
2856     qemu_fdt_setprop_string(fdt, nodename, "riscv,isa-base", isa_base);
2857 
2858     isa_extensions = riscv_isa_extensions_list(cpu, &count);
2859     qemu_fdt_setprop_string_array(fdt, nodename, "riscv,isa-extensions",
2860                                   isa_extensions, count);
2861 
2862     for (int i = 0; i < count; i++) {
2863         g_free(isa_extensions[i]);
2864     }
2865 
2866     g_free(isa_extensions);
2867 }
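/*
 * Sketch of the resulting device tree fragment for an rv64 hart (values
 * are illustrative, not an exact dump):
 *
 *   riscv,isa            = "rv64imafdc_zicsr_zifencei...";
 *   riscv,isa-base       = "rv64i";
 *   riscv,isa-extensions = "i", "m", "a", "f", "d", "c", "zicsr", ...;
 */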
2868 #endif
2869 
2870 #define DEFINE_CPU(type_name, misa_mxl_max, initfn)         \
2871     {                                                       \
2872         .name = (type_name),                                \
2873         .parent = TYPE_RISCV_CPU,                           \
2874         .instance_init = (initfn),                          \
2875         .class_init = riscv_cpu_class_init,                 \
2876         .class_data = (void *)(misa_mxl_max)                \
2877     }
2878 
2879 #define DEFINE_DYNAMIC_CPU(type_name, misa_mxl_max, initfn) \
2880     {                                                       \
2881         .name = (type_name),                                \
2882         .parent = TYPE_RISCV_DYNAMIC_CPU,                   \
2883         .instance_init = (initfn),                          \
2884         .class_init = riscv_cpu_class_init,                 \
2885         .class_data = (void *)(misa_mxl_max)                \
2886     }
2887 
2888 #define DEFINE_VENDOR_CPU(type_name, misa_mxl_max, initfn)  \
2889     {                                                       \
2890         .name = (type_name),                                \
2891         .parent = TYPE_RISCV_VENDOR_CPU,                    \
2892         .instance_init = (initfn),                          \
2893         .class_init = riscv_cpu_class_init,                 \
2894         .class_data = (void *)(misa_mxl_max)                \
2895     }
2896 
2897 #define DEFINE_BARE_CPU(type_name, misa_mxl_max, initfn)    \
2898     {                                                       \
2899         .name = (type_name),                                \
2900         .parent = TYPE_RISCV_BARE_CPU,                      \
2901         .instance_init = (initfn),                          \
2902         .class_init = riscv_cpu_class_init,                 \
2903         .class_data = (void *)(misa_mxl_max)                \
2904     }
2905 
2906 #define DEFINE_PROFILE_CPU(type_name, misa_mxl_max, initfn) \
2907     {                                                       \
2908         .name = (type_name),                                \
2909         .parent = TYPE_RISCV_BARE_CPU,                      \
2910         .instance_init = (initfn),                          \
2911         .class_init = riscv_cpu_class_init,                 \
2912         .class_data = (void *)(misa_mxl_max)                \
2913     }
2914 
2915 static const TypeInfo riscv_cpu_type_infos[] = {
2916     {
2917         .name = TYPE_RISCV_CPU,
2918         .parent = TYPE_CPU,
2919         .instance_size = sizeof(RISCVCPU),
2920         .instance_align = __alignof(RISCVCPU),
2921         .instance_init = riscv_cpu_init,
2922         .instance_post_init = riscv_cpu_post_init,
2923         .abstract = true,
2924         .class_size = sizeof(RISCVCPUClass),
2925         .class_init = riscv_cpu_common_class_init,
2926     },
2927     {
2928         .name = TYPE_RISCV_DYNAMIC_CPU,
2929         .parent = TYPE_RISCV_CPU,
2930         .abstract = true,
2931     },
2932     {
2933         .name = TYPE_RISCV_VENDOR_CPU,
2934         .parent = TYPE_RISCV_CPU,
2935         .abstract = true,
2936     },
2937     {
2938         .name = TYPE_RISCV_BARE_CPU,
2939         .parent = TYPE_RISCV_CPU,
2940         .instance_init = riscv_bare_cpu_init,
2941         .abstract = true,
2942     },
2943 #if defined(TARGET_RISCV32)
2944     DEFINE_DYNAMIC_CPU(TYPE_RISCV_CPU_MAX,       MXL_RV32,  riscv_max_cpu_init),
2945 #elif defined(TARGET_RISCV64)
2946     DEFINE_DYNAMIC_CPU(TYPE_RISCV_CPU_MAX,       MXL_RV64,  riscv_max_cpu_init),
2947 #endif
2948 
2949 #if defined(TARGET_RISCV32) || \
2950     (defined(TARGET_RISCV64) && !defined(CONFIG_USER_ONLY))
2951     DEFINE_DYNAMIC_CPU(TYPE_RISCV_CPU_BASE32,    MXL_RV32,  rv32_base_cpu_init),
2952     DEFINE_VENDOR_CPU(TYPE_RISCV_CPU_IBEX,       MXL_RV32,  rv32_ibex_cpu_init),
2953     DEFINE_VENDOR_CPU(TYPE_RISCV_CPU_SIFIVE_E31, MXL_RV32,  rv32_sifive_e_cpu_init),
2954     DEFINE_VENDOR_CPU(TYPE_RISCV_CPU_SIFIVE_E34, MXL_RV32,  rv32_imafcu_nommu_cpu_init),
2955     DEFINE_VENDOR_CPU(TYPE_RISCV_CPU_SIFIVE_U34, MXL_RV32,  rv32_sifive_u_cpu_init),
2956     DEFINE_BARE_CPU(TYPE_RISCV_CPU_RV32I,        MXL_RV32,  rv32i_bare_cpu_init),
2957     DEFINE_BARE_CPU(TYPE_RISCV_CPU_RV32E,        MXL_RV32,  rv32e_bare_cpu_init),
2958 #endif
2959 
2960 #if (defined(TARGET_RISCV64) && !defined(CONFIG_USER_ONLY))
2961     DEFINE_DYNAMIC_CPU(TYPE_RISCV_CPU_MAX32,     MXL_RV32,  riscv_max_cpu_init),
2962 #endif
2963 
2964 #if defined(TARGET_RISCV64)
2965     DEFINE_DYNAMIC_CPU(TYPE_RISCV_CPU_BASE64,    MXL_RV64,  rv64_base_cpu_init),
2966     DEFINE_VENDOR_CPU(TYPE_RISCV_CPU_SIFIVE_E51, MXL_RV64,  rv64_sifive_e_cpu_init),
2967     DEFINE_VENDOR_CPU(TYPE_RISCV_CPU_SIFIVE_U54, MXL_RV64,  rv64_sifive_u_cpu_init),
2968     DEFINE_VENDOR_CPU(TYPE_RISCV_CPU_SHAKTI_C,   MXL_RV64,  rv64_sifive_u_cpu_init),
2969     DEFINE_VENDOR_CPU(TYPE_RISCV_CPU_THEAD_C906, MXL_RV64,  rv64_thead_c906_cpu_init),
2970     DEFINE_VENDOR_CPU(TYPE_RISCV_CPU_VEYRON_V1,  MXL_RV64,  rv64_veyron_v1_cpu_init),
2971 #ifdef CONFIG_TCG
2972     DEFINE_DYNAMIC_CPU(TYPE_RISCV_CPU_BASE128,   MXL_RV128, rv128_base_cpu_init),
2973 #endif /* CONFIG_TCG */
2974     DEFINE_BARE_CPU(TYPE_RISCV_CPU_RV64I,        MXL_RV64,  rv64i_bare_cpu_init),
2975     DEFINE_BARE_CPU(TYPE_RISCV_CPU_RV64E,        MXL_RV64,  rv64e_bare_cpu_init),
2976     DEFINE_PROFILE_CPU(TYPE_RISCV_CPU_RVA22U64,  MXL_RV64,  rva22u64_profile_cpu_init),
2977     DEFINE_PROFILE_CPU(TYPE_RISCV_CPU_RVA22S64,  MXL_RV64,  rva22s64_profile_cpu_init),
2978 #endif /* TARGET_RISCV64 */
2979 };
2980 
2981 DEFINE_TYPES(riscv_cpu_type_infos)
2982