xref: /openbmc/qemu/target/riscv/cpu.c (revision 77dd098a)
1 /*
2  * QEMU RISC-V CPU
3  *
4  * Copyright (c) 2016-2017 Sagar Karandikar, sagark@eecs.berkeley.edu
5  * Copyright (c) 2017-2018 SiFive, Inc.
6  *
7  * This program is free software; you can redistribute it and/or modify it
8  * under the terms and conditions of the GNU General Public License,
9  * version 2 or later, as published by the Free Software Foundation.
10  *
11  * This program is distributed in the hope it will be useful, but WITHOUT
12  * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
13  * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
14  * more details.
15  *
16  * You should have received a copy of the GNU General Public License along with
17  * this program.  If not, see <http://www.gnu.org/licenses/>.
18  */
19 
20 #include "qemu/osdep.h"
21 #include "qemu/qemu-print.h"
22 #include "qemu/ctype.h"
23 #include "qemu/log.h"
24 #include "cpu.h"
25 #include "cpu_vendorid.h"
26 #include "internals.h"
27 #include "exec/exec-all.h"
28 #include "qapi/error.h"
29 #include "qapi/visitor.h"
30 #include "qemu/error-report.h"
31 #include "hw/qdev-properties.h"
32 #include "hw/core/qdev-prop-internal.h"
33 #include "migration/vmstate.h"
34 #include "fpu/softfloat-helpers.h"
35 #include "sysemu/device_tree.h"
36 #include "sysemu/kvm.h"
37 #include "sysemu/tcg.h"
38 #include "kvm/kvm_riscv.h"
39 #include "tcg/tcg-cpu.h"
40 #include "tcg/tcg.h"
41 
42 /* RISC-V CPU definitions */
43 static const char riscv_single_letter_exts[] = "IEMAFDQCBPVH";
44 const uint32_t misa_bits[] = {RVI, RVE, RVM, RVA, RVF, RVD, RVV,
45                               RVC, RVS, RVU, RVH, RVJ, RVG, RVB, 0};
46 
47 /*
48  * From vector_helper.c
49  * Note that vector data is stored in host-endian 64-bit chunks,
50  * so addressing bytes needs a host-endian fixup.
51  */
52 #if HOST_BIG_ENDIAN
53 #define BYTE(x)   ((x) ^ 7)
54 #else
55 #define BYTE(x)   (x)
56 #endif
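/*
 * Illustrative example: on a big-endian host, BYTE(0) == 7 and
 * BYTE(7) == 0, so the byte a guest addresses first within a 64-bit
 * chunk is read from host offset 7; riscv_cpu_dump_state() below relies
 * on this when printing vreg bytes as p[i * vlenb + BYTE(j)].
 */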
57 
58 bool riscv_cpu_is_32bit(RISCVCPU *cpu)
59 {
60     return riscv_cpu_mxl(&cpu->env) == MXL_RV32;
61 }
62 
63 /* Hash that stores general user-set numeric options */
64 static GHashTable *general_user_opts;
65 
66 static void cpu_option_add_user_setting(const char *optname, uint32_t value)
67 {
68     g_hash_table_insert(general_user_opts, (gpointer)optname,
69                         GUINT_TO_POINTER(value));
70 }
71 
72 bool riscv_cpu_option_set(const char *optname)
73 {
74     return g_hash_table_contains(general_user_opts, optname);
75 }
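/*
 * Illustrative flow (hypothetical option): a user setting such as
 * "cbom_blocksize=128" would be recorded with
 * cpu_option_add_user_setting("cbom_blocksize", 128), and
 * riscv_cpu_option_set("cbom_blocksize") then reports that the user
 * explicitly touched that option.
 */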
76 
77 #define ISA_EXT_DATA_ENTRY(_name, _min_ver, _prop) \
78     {#_name, _min_ver, CPU_CFG_OFFSET(_prop)}
79 
80 /*
80  * Here are the ordering rules for extension naming defined by the RISC-V
81  * specification:
83  * 1. All extensions should be separated from other multi-letter extensions
84  *    by an underscore.
85  * 2. The first letter following the 'Z' conventionally indicates the most
86  *    closely related alphabetical extension category, IMAFDQLCBKJTPVH.
87  *    If multiple 'Z' extensions are named, they should be ordered first
88  *    by category, then alphabetically within a category.
89  * 3. Standard supervisor-level extensions (starting with 'S') should be
90  *    listed after standard unprivileged extensions.  If multiple
91  *    supervisor-level extensions are listed, they should be ordered
92  *    alphabetically.
93  * 4. Non-standard extensions (starting with 'X') must be listed after all
94  *    standard extensions. They must be separated from other multi-letter
95  *    extensions by an underscore.
96  *
97  * Single-letter extensions are checked in riscv_cpu_validate_misa_priv()
98  * instead.
99  */
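/*
 * Illustrative example of the ordering rules above (not an exhaustive
 * string): "rv64imafdc_zicsr_zifencei_zba_zbb_svinval_xtheadba", i.e.
 * single-letter extensions first, 'Z' extensions grouped by category and
 * then alphabetically, 'S' extensions next, and 'X' vendor extensions
 * last, with each multi-letter extension separated by an underscore.
 */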
100 const RISCVIsaExtData isa_edata_arr[] = {
101     ISA_EXT_DATA_ENTRY(zic64b, PRIV_VERSION_1_12_0, ext_zic64b),
102     ISA_EXT_DATA_ENTRY(zicbom, PRIV_VERSION_1_12_0, ext_zicbom),
103     ISA_EXT_DATA_ENTRY(zicbop, PRIV_VERSION_1_12_0, ext_zicbop),
104     ISA_EXT_DATA_ENTRY(zicboz, PRIV_VERSION_1_12_0, ext_zicboz),
105     ISA_EXT_DATA_ENTRY(ziccamoa, PRIV_VERSION_1_11_0, has_priv_1_11),
106     ISA_EXT_DATA_ENTRY(ziccif, PRIV_VERSION_1_11_0, has_priv_1_11),
107     ISA_EXT_DATA_ENTRY(zicclsm, PRIV_VERSION_1_11_0, has_priv_1_11),
108     ISA_EXT_DATA_ENTRY(ziccrse, PRIV_VERSION_1_11_0, has_priv_1_11),
109     ISA_EXT_DATA_ENTRY(zicond, PRIV_VERSION_1_12_0, ext_zicond),
110     ISA_EXT_DATA_ENTRY(zicntr, PRIV_VERSION_1_12_0, ext_zicntr),
111     ISA_EXT_DATA_ENTRY(zicsr, PRIV_VERSION_1_10_0, ext_zicsr),
112     ISA_EXT_DATA_ENTRY(zifencei, PRIV_VERSION_1_10_0, ext_zifencei),
113     ISA_EXT_DATA_ENTRY(zihintntl, PRIV_VERSION_1_10_0, ext_zihintntl),
114     ISA_EXT_DATA_ENTRY(zihintpause, PRIV_VERSION_1_10_0, ext_zihintpause),
115     ISA_EXT_DATA_ENTRY(zihpm, PRIV_VERSION_1_12_0, ext_zihpm),
116     ISA_EXT_DATA_ENTRY(zimop, PRIV_VERSION_1_13_0, ext_zimop),
117     ISA_EXT_DATA_ENTRY(zmmul, PRIV_VERSION_1_12_0, ext_zmmul),
118     ISA_EXT_DATA_ENTRY(za64rs, PRIV_VERSION_1_12_0, has_priv_1_12),
119     ISA_EXT_DATA_ENTRY(zaamo, PRIV_VERSION_1_12_0, ext_zaamo),
120     ISA_EXT_DATA_ENTRY(zabha, PRIV_VERSION_1_13_0, ext_zabha),
121     ISA_EXT_DATA_ENTRY(zacas, PRIV_VERSION_1_12_0, ext_zacas),
122     ISA_EXT_DATA_ENTRY(zama16b, PRIV_VERSION_1_13_0, ext_zama16b),
123     ISA_EXT_DATA_ENTRY(zalrsc, PRIV_VERSION_1_12_0, ext_zalrsc),
124     ISA_EXT_DATA_ENTRY(zawrs, PRIV_VERSION_1_12_0, ext_zawrs),
125     ISA_EXT_DATA_ENTRY(zfa, PRIV_VERSION_1_12_0, ext_zfa),
126     ISA_EXT_DATA_ENTRY(zfbfmin, PRIV_VERSION_1_12_0, ext_zfbfmin),
127     ISA_EXT_DATA_ENTRY(zfh, PRIV_VERSION_1_11_0, ext_zfh),
128     ISA_EXT_DATA_ENTRY(zfhmin, PRIV_VERSION_1_11_0, ext_zfhmin),
129     ISA_EXT_DATA_ENTRY(zfinx, PRIV_VERSION_1_12_0, ext_zfinx),
130     ISA_EXT_DATA_ENTRY(zdinx, PRIV_VERSION_1_12_0, ext_zdinx),
131     ISA_EXT_DATA_ENTRY(zca, PRIV_VERSION_1_12_0, ext_zca),
132     ISA_EXT_DATA_ENTRY(zcb, PRIV_VERSION_1_12_0, ext_zcb),
133     ISA_EXT_DATA_ENTRY(zcf, PRIV_VERSION_1_12_0, ext_zcf),
134     ISA_EXT_DATA_ENTRY(zcd, PRIV_VERSION_1_12_0, ext_zcd),
135     ISA_EXT_DATA_ENTRY(zce, PRIV_VERSION_1_12_0, ext_zce),
136     ISA_EXT_DATA_ENTRY(zcmop, PRIV_VERSION_1_13_0, ext_zcmop),
137     ISA_EXT_DATA_ENTRY(zcmp, PRIV_VERSION_1_12_0, ext_zcmp),
138     ISA_EXT_DATA_ENTRY(zcmt, PRIV_VERSION_1_12_0, ext_zcmt),
139     ISA_EXT_DATA_ENTRY(zba, PRIV_VERSION_1_12_0, ext_zba),
140     ISA_EXT_DATA_ENTRY(zbb, PRIV_VERSION_1_12_0, ext_zbb),
141     ISA_EXT_DATA_ENTRY(zbc, PRIV_VERSION_1_12_0, ext_zbc),
142     ISA_EXT_DATA_ENTRY(zbkb, PRIV_VERSION_1_12_0, ext_zbkb),
143     ISA_EXT_DATA_ENTRY(zbkc, PRIV_VERSION_1_12_0, ext_zbkc),
144     ISA_EXT_DATA_ENTRY(zbkx, PRIV_VERSION_1_12_0, ext_zbkx),
145     ISA_EXT_DATA_ENTRY(zbs, PRIV_VERSION_1_12_0, ext_zbs),
146     ISA_EXT_DATA_ENTRY(zk, PRIV_VERSION_1_12_0, ext_zk),
147     ISA_EXT_DATA_ENTRY(zkn, PRIV_VERSION_1_12_0, ext_zkn),
148     ISA_EXT_DATA_ENTRY(zknd, PRIV_VERSION_1_12_0, ext_zknd),
149     ISA_EXT_DATA_ENTRY(zkne, PRIV_VERSION_1_12_0, ext_zkne),
150     ISA_EXT_DATA_ENTRY(zknh, PRIV_VERSION_1_12_0, ext_zknh),
151     ISA_EXT_DATA_ENTRY(zkr, PRIV_VERSION_1_12_0, ext_zkr),
152     ISA_EXT_DATA_ENTRY(zks, PRIV_VERSION_1_12_0, ext_zks),
153     ISA_EXT_DATA_ENTRY(zksed, PRIV_VERSION_1_12_0, ext_zksed),
154     ISA_EXT_DATA_ENTRY(zksh, PRIV_VERSION_1_12_0, ext_zksh),
155     ISA_EXT_DATA_ENTRY(zkt, PRIV_VERSION_1_12_0, ext_zkt),
156     ISA_EXT_DATA_ENTRY(ztso, PRIV_VERSION_1_12_0, ext_ztso),
157     ISA_EXT_DATA_ENTRY(zvbb, PRIV_VERSION_1_12_0, ext_zvbb),
158     ISA_EXT_DATA_ENTRY(zvbc, PRIV_VERSION_1_12_0, ext_zvbc),
159     ISA_EXT_DATA_ENTRY(zve32f, PRIV_VERSION_1_10_0, ext_zve32f),
160     ISA_EXT_DATA_ENTRY(zve32x, PRIV_VERSION_1_10_0, ext_zve32x),
161     ISA_EXT_DATA_ENTRY(zve64f, PRIV_VERSION_1_10_0, ext_zve64f),
162     ISA_EXT_DATA_ENTRY(zve64d, PRIV_VERSION_1_10_0, ext_zve64d),
163     ISA_EXT_DATA_ENTRY(zve64x, PRIV_VERSION_1_10_0, ext_zve64x),
164     ISA_EXT_DATA_ENTRY(zvfbfmin, PRIV_VERSION_1_12_0, ext_zvfbfmin),
165     ISA_EXT_DATA_ENTRY(zvfbfwma, PRIV_VERSION_1_12_0, ext_zvfbfwma),
166     ISA_EXT_DATA_ENTRY(zvfh, PRIV_VERSION_1_12_0, ext_zvfh),
167     ISA_EXT_DATA_ENTRY(zvfhmin, PRIV_VERSION_1_12_0, ext_zvfhmin),
168     ISA_EXT_DATA_ENTRY(zvkb, PRIV_VERSION_1_12_0, ext_zvkb),
169     ISA_EXT_DATA_ENTRY(zvkg, PRIV_VERSION_1_12_0, ext_zvkg),
170     ISA_EXT_DATA_ENTRY(zvkn, PRIV_VERSION_1_12_0, ext_zvkn),
171     ISA_EXT_DATA_ENTRY(zvknc, PRIV_VERSION_1_12_0, ext_zvknc),
172     ISA_EXT_DATA_ENTRY(zvkned, PRIV_VERSION_1_12_0, ext_zvkned),
173     ISA_EXT_DATA_ENTRY(zvkng, PRIV_VERSION_1_12_0, ext_zvkng),
174     ISA_EXT_DATA_ENTRY(zvknha, PRIV_VERSION_1_12_0, ext_zvknha),
175     ISA_EXT_DATA_ENTRY(zvknhb, PRIV_VERSION_1_12_0, ext_zvknhb),
176     ISA_EXT_DATA_ENTRY(zvks, PRIV_VERSION_1_12_0, ext_zvks),
177     ISA_EXT_DATA_ENTRY(zvksc, PRIV_VERSION_1_12_0, ext_zvksc),
178     ISA_EXT_DATA_ENTRY(zvksed, PRIV_VERSION_1_12_0, ext_zvksed),
179     ISA_EXT_DATA_ENTRY(zvksg, PRIV_VERSION_1_12_0, ext_zvksg),
180     ISA_EXT_DATA_ENTRY(zvksh, PRIV_VERSION_1_12_0, ext_zvksh),
181     ISA_EXT_DATA_ENTRY(zvkt, PRIV_VERSION_1_12_0, ext_zvkt),
182     ISA_EXT_DATA_ENTRY(zhinx, PRIV_VERSION_1_12_0, ext_zhinx),
183     ISA_EXT_DATA_ENTRY(zhinxmin, PRIV_VERSION_1_12_0, ext_zhinxmin),
184     ISA_EXT_DATA_ENTRY(smaia, PRIV_VERSION_1_12_0, ext_smaia),
185     ISA_EXT_DATA_ENTRY(smcntrpmf, PRIV_VERSION_1_12_0, ext_smcntrpmf),
186     ISA_EXT_DATA_ENTRY(smepmp, PRIV_VERSION_1_12_0, ext_smepmp),
187     ISA_EXT_DATA_ENTRY(smstateen, PRIV_VERSION_1_12_0, ext_smstateen),
188     ISA_EXT_DATA_ENTRY(ssaia, PRIV_VERSION_1_12_0, ext_ssaia),
189     ISA_EXT_DATA_ENTRY(ssccptr, PRIV_VERSION_1_11_0, has_priv_1_11),
190     ISA_EXT_DATA_ENTRY(sscofpmf, PRIV_VERSION_1_12_0, ext_sscofpmf),
191     ISA_EXT_DATA_ENTRY(sscounterenw, PRIV_VERSION_1_12_0, has_priv_1_12),
192     ISA_EXT_DATA_ENTRY(sstc, PRIV_VERSION_1_12_0, ext_sstc),
193     ISA_EXT_DATA_ENTRY(sstvala, PRIV_VERSION_1_12_0, has_priv_1_12),
194     ISA_EXT_DATA_ENTRY(sstvecd, PRIV_VERSION_1_12_0, has_priv_1_12),
195     ISA_EXT_DATA_ENTRY(svade, PRIV_VERSION_1_11_0, ext_svade),
196     ISA_EXT_DATA_ENTRY(svadu, PRIV_VERSION_1_12_0, ext_svadu),
197     ISA_EXT_DATA_ENTRY(svinval, PRIV_VERSION_1_12_0, ext_svinval),
198     ISA_EXT_DATA_ENTRY(svnapot, PRIV_VERSION_1_12_0, ext_svnapot),
199     ISA_EXT_DATA_ENTRY(svpbmt, PRIV_VERSION_1_12_0, ext_svpbmt),
200     ISA_EXT_DATA_ENTRY(svvptc, PRIV_VERSION_1_13_0, ext_svvptc),
201     ISA_EXT_DATA_ENTRY(xtheadba, PRIV_VERSION_1_11_0, ext_xtheadba),
202     ISA_EXT_DATA_ENTRY(xtheadbb, PRIV_VERSION_1_11_0, ext_xtheadbb),
203     ISA_EXT_DATA_ENTRY(xtheadbs, PRIV_VERSION_1_11_0, ext_xtheadbs),
204     ISA_EXT_DATA_ENTRY(xtheadcmo, PRIV_VERSION_1_11_0, ext_xtheadcmo),
205     ISA_EXT_DATA_ENTRY(xtheadcondmov, PRIV_VERSION_1_11_0, ext_xtheadcondmov),
206     ISA_EXT_DATA_ENTRY(xtheadfmemidx, PRIV_VERSION_1_11_0, ext_xtheadfmemidx),
207     ISA_EXT_DATA_ENTRY(xtheadfmv, PRIV_VERSION_1_11_0, ext_xtheadfmv),
208     ISA_EXT_DATA_ENTRY(xtheadmac, PRIV_VERSION_1_11_0, ext_xtheadmac),
209     ISA_EXT_DATA_ENTRY(xtheadmemidx, PRIV_VERSION_1_11_0, ext_xtheadmemidx),
210     ISA_EXT_DATA_ENTRY(xtheadmempair, PRIV_VERSION_1_11_0, ext_xtheadmempair),
211     ISA_EXT_DATA_ENTRY(xtheadsync, PRIV_VERSION_1_11_0, ext_xtheadsync),
212     ISA_EXT_DATA_ENTRY(xventanacondops, PRIV_VERSION_1_12_0, ext_XVentanaCondOps),
213 
214     DEFINE_PROP_END_OF_LIST(),
215 };
216 
217 bool isa_ext_is_enabled(RISCVCPU *cpu, uint32_t ext_offset)
218 {
219     bool *ext_enabled = (void *)&cpu->cfg + ext_offset;
220 
221     return *ext_enabled;
222 }
223 
224 void isa_ext_update_enabled(RISCVCPU *cpu, uint32_t ext_offset, bool en)
225 {
226     bool *ext_enabled = (void *)&cpu->cfg + ext_offset;
227 
228     *ext_enabled = en;
229 }
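/*
 * Illustrative usage, assuming ext_zba is a bool member of cpu->cfg:
 * isa_ext_update_enabled(cpu, CPU_CFG_OFFSET(ext_zba), true) is
 * equivalent to "cpu->cfg.ext_zba = true", and isa_ext_is_enabled()
 * reads the same flag back through its byte offset into cpu->cfg.
 */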
230 
231 bool riscv_cpu_is_vendor(Object *cpu_obj)
232 {
233     return object_dynamic_cast(cpu_obj, TYPE_RISCV_VENDOR_CPU) != NULL;
234 }
235 
236 const char * const riscv_int_regnames[] = {
237     "x0/zero", "x1/ra",  "x2/sp",  "x3/gp",  "x4/tp",  "x5/t0",   "x6/t1",
238     "x7/t2",   "x8/s0",  "x9/s1",  "x10/a0", "x11/a1", "x12/a2",  "x13/a3",
239     "x14/a4",  "x15/a5", "x16/a6", "x17/a7", "x18/s2", "x19/s3",  "x20/s4",
240     "x21/s5",  "x22/s6", "x23/s7", "x24/s8", "x25/s9", "x26/s10", "x27/s11",
241     "x28/t3",  "x29/t4", "x30/t5", "x31/t6"
242 };
243 
244 const char * const riscv_int_regnamesh[] = {
245     "x0h/zeroh", "x1h/rah",  "x2h/sph",   "x3h/gph",   "x4h/tph",  "x5h/t0h",
246     "x6h/t1h",   "x7h/t2h",  "x8h/s0h",   "x9h/s1h",   "x10h/a0h", "x11h/a1h",
247     "x12h/a2h",  "x13h/a3h", "x14h/a4h",  "x15h/a5h",  "x16h/a6h", "x17h/a7h",
248     "x18h/s2h",  "x19h/s3h", "x20h/s4h",  "x21h/s5h",  "x22h/s6h", "x23h/s7h",
249     "x24h/s8h",  "x25h/s9h", "x26h/s10h", "x27h/s11h", "x28h/t3h", "x29h/t4h",
250     "x30h/t5h",  "x31h/t6h"
251 };
252 
253 const char * const riscv_fpr_regnames[] = {
254     "f0/ft0",   "f1/ft1",  "f2/ft2",   "f3/ft3",   "f4/ft4",  "f5/ft5",
255     "f6/ft6",   "f7/ft7",  "f8/fs0",   "f9/fs1",   "f10/fa0", "f11/fa1",
256     "f12/fa2",  "f13/fa3", "f14/fa4",  "f15/fa5",  "f16/fa6", "f17/fa7",
257     "f18/fs2",  "f19/fs3", "f20/fs4",  "f21/fs5",  "f22/fs6", "f23/fs7",
258     "f24/fs8",  "f25/fs9", "f26/fs10", "f27/fs11", "f28/ft8", "f29/ft9",
259     "f30/ft10", "f31/ft11"
260 };
261 
262 const char * const riscv_rvv_regnames[] = {
263   "v0",  "v1",  "v2",  "v3",  "v4",  "v5",  "v6",
264   "v7",  "v8",  "v9",  "v10", "v11", "v12", "v13",
265   "v14", "v15", "v16", "v17", "v18", "v19", "v20",
266   "v21", "v22", "v23", "v24", "v25", "v26", "v27",
267   "v28", "v29", "v30", "v31"
268 };
269 
270 static const char * const riscv_excp_names[] = {
271     "misaligned_fetch",
272     "fault_fetch",
273     "illegal_instruction",
274     "breakpoint",
275     "misaligned_load",
276     "fault_load",
277     "misaligned_store",
278     "fault_store",
279     "user_ecall",
280     "supervisor_ecall",
281     "hypervisor_ecall",
282     "machine_ecall",
283     "exec_page_fault",
284     "load_page_fault",
285     "reserved",
286     "store_page_fault",
287     "reserved",
288     "reserved",
289     "reserved",
290     "reserved",
291     "guest_exec_page_fault",
292     "guest_load_page_fault",
293     "reserved",
294     "guest_store_page_fault",
295 };
296 
297 static const char * const riscv_intr_names[] = {
298     "u_software",
299     "s_software",
300     "vs_software",
301     "m_software",
302     "u_timer",
303     "s_timer",
304     "vs_timer",
305     "m_timer",
306     "u_external",
307     "s_external",
308     "vs_external",
309     "m_external",
310     "reserved",
311     "reserved",
312     "reserved",
313     "reserved"
314 };
315 
316 const char *riscv_cpu_get_trap_name(target_ulong cause, bool async)
317 {
318     if (async) {
319         return (cause < ARRAY_SIZE(riscv_intr_names)) ?
320                riscv_intr_names[cause] : "(unknown)";
321     } else {
322         return (cause < ARRAY_SIZE(riscv_excp_names)) ?
323                riscv_excp_names[cause] : "(unknown)";
324     }
325 }
326 
327 void riscv_cpu_set_misa_ext(CPURISCVState *env, uint32_t ext)
328 {
329     env->misa_ext_mask = env->misa_ext = ext;
330 }
331 
332 int riscv_cpu_max_xlen(RISCVCPUClass *mcc)
333 {
334     return 16 << mcc->misa_mxl_max;
335 }
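/*
 * misa.MXL encodes XLEN as 1/2/3 for RV32/RV64/RV128, so this returns
 * 16 << 1 == 32, 16 << 2 == 64 or 16 << 3 == 128 respectively.
 */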
336 
337 #ifndef CONFIG_USER_ONLY
338 static uint8_t satp_mode_from_str(const char *satp_mode_str)
339 {
340     if (!strncmp(satp_mode_str, "mbare", 5)) {
341         return VM_1_10_MBARE;
342     }
343 
344     if (!strncmp(satp_mode_str, "sv32", 4)) {
345         return VM_1_10_SV32;
346     }
347 
348     if (!strncmp(satp_mode_str, "sv39", 4)) {
349         return VM_1_10_SV39;
350     }
351 
352     if (!strncmp(satp_mode_str, "sv48", 4)) {
353         return VM_1_10_SV48;
354     }
355 
356     if (!strncmp(satp_mode_str, "sv57", 4)) {
357         return VM_1_10_SV57;
358     }
359 
360     if (!strncmp(satp_mode_str, "sv64", 4)) {
361         return VM_1_10_SV64;
362     }
363 
364     g_assert_not_reached();
365 }
366 
367 uint8_t satp_mode_max_from_map(uint32_t map)
368 {
369     /*
370      * 'map = 0' would make __builtin_clz() undefined behaviour, and
371      * there is no good result to return for it anyway (e.g. returning
372      * 0 would be ambiguous with the result for 'map = 1', which
373      * legitimately returns 0).
374      *
375      * Assert out if map = 0. Callers will have to deal with
376      * it outside of this function.
377      */
378     g_assert(map > 0);
379 
380     /* map here has at least one bit set, so no problem with clz */
381     return 31 - __builtin_clz(map);
382 }
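/*
 * Illustrative example: a map with only the VM_1_10_SV39 and VM_1_10_SV48
 * bits set yields the index of the highest set bit, i.e. VM_1_10_SV48.
 */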
383 
384 const char *satp_mode_str(uint8_t satp_mode, bool is_32_bit)
385 {
386     if (is_32_bit) {
387         switch (satp_mode) {
388         case VM_1_10_SV32:
389             return "sv32";
390         case VM_1_10_MBARE:
391             return "none";
392         }
393     } else {
394         switch (satp_mode) {
395         case VM_1_10_SV64:
396             return "sv64";
397         case VM_1_10_SV57:
398             return "sv57";
399         case VM_1_10_SV48:
400             return "sv48";
401         case VM_1_10_SV39:
402             return "sv39";
403         case VM_1_10_MBARE:
404             return "none";
405         }
406     }
407 
408     g_assert_not_reached();
409 }
410 
411 static void set_satp_mode_max_supported(RISCVCPU *cpu,
412                                         uint8_t satp_mode)
413 {
414     bool rv32 = riscv_cpu_mxl(&cpu->env) == MXL_RV32;
415     const bool *valid_vm = rv32 ? valid_vm_1_10_32 : valid_vm_1_10_64;
416 
417     for (int i = 0; i <= satp_mode; ++i) {
418         if (valid_vm[i]) {
419             cpu->cfg.satp_mode.supported |= (1 << i);
420         }
421     }
422 }
423 
424 /* Set the satp mode to the max supported */
425 static void set_satp_mode_default_map(RISCVCPU *cpu)
426 {
427     /*
428      * Bare CPUs do not default to the max available.
429      * Users must set a valid satp_mode in the command
430      * line.
431      */
432     if (object_dynamic_cast(OBJECT(cpu), TYPE_RISCV_BARE_CPU) != NULL) {
433         warn_report("No satp mode set. Defaulting to 'bare'");
434         cpu->cfg.satp_mode.map = (1 << VM_1_10_MBARE);
435         return;
436     }
437 
438     cpu->cfg.satp_mode.map = cpu->cfg.satp_mode.supported;
439 }
440 #endif
441 
442 static void riscv_max_cpu_init(Object *obj)
443 {
444     RISCVCPU *cpu = RISCV_CPU(obj);
445     CPURISCVState *env = &cpu->env;
446 
447     cpu->cfg.mmu = true;
448     cpu->cfg.pmp = true;
449 
450     env->priv_ver = PRIV_VERSION_LATEST;
451 #ifndef CONFIG_USER_ONLY
452 #ifdef TARGET_RISCV32
453     set_satp_mode_max_supported(cpu, VM_1_10_SV32);
454 #else
455     set_satp_mode_max_supported(cpu, VM_1_10_SV57);
456 #endif
457 #endif
458 }
459 
460 #if defined(TARGET_RISCV64)
461 static void rv64_base_cpu_init(Object *obj)
462 {
463     RISCVCPU *cpu = RISCV_CPU(obj);
464     CPURISCVState *env = &cpu->env;
465 
466     cpu->cfg.mmu = true;
467     cpu->cfg.pmp = true;
468 
469     /* Set latest version of privileged specification */
470     env->priv_ver = PRIV_VERSION_LATEST;
471 #ifndef CONFIG_USER_ONLY
472     set_satp_mode_max_supported(RISCV_CPU(obj), VM_1_10_SV57);
473 #endif
474 }
475 
476 static void rv64_sifive_u_cpu_init(Object *obj)
477 {
478     RISCVCPU *cpu = RISCV_CPU(obj);
479     CPURISCVState *env = &cpu->env;
480     riscv_cpu_set_misa_ext(env, RVI | RVM | RVA | RVF | RVD | RVC | RVS | RVU);
481     env->priv_ver = PRIV_VERSION_1_10_0;
482 #ifndef CONFIG_USER_ONLY
483     set_satp_mode_max_supported(RISCV_CPU(obj), VM_1_10_SV39);
484 #endif
485 
486     /* inherited from parent obj via riscv_cpu_init() */
487     cpu->cfg.ext_zifencei = true;
488     cpu->cfg.ext_zicsr = true;
489     cpu->cfg.mmu = true;
490     cpu->cfg.pmp = true;
491 }
492 
493 static void rv64_sifive_e_cpu_init(Object *obj)
494 {
495     CPURISCVState *env = &RISCV_CPU(obj)->env;
496     RISCVCPU *cpu = RISCV_CPU(obj);
497 
498     riscv_cpu_set_misa_ext(env, RVI | RVM | RVA | RVC | RVU);
499     env->priv_ver = PRIV_VERSION_1_10_0;
500 #ifndef CONFIG_USER_ONLY
501     set_satp_mode_max_supported(cpu, VM_1_10_MBARE);
502 #endif
503 
504     /* inherited from parent obj via riscv_cpu_init() */
505     cpu->cfg.ext_zifencei = true;
506     cpu->cfg.ext_zicsr = true;
507     cpu->cfg.pmp = true;
508 }
509 
510 static void rv64_thead_c906_cpu_init(Object *obj)
511 {
512     CPURISCVState *env = &RISCV_CPU(obj)->env;
513     RISCVCPU *cpu = RISCV_CPU(obj);
514 
515     riscv_cpu_set_misa_ext(env, RVG | RVC | RVS | RVU);
516     env->priv_ver = PRIV_VERSION_1_11_0;
517 
518     cpu->cfg.ext_zfa = true;
519     cpu->cfg.ext_zfh = true;
520     cpu->cfg.mmu = true;
521     cpu->cfg.ext_xtheadba = true;
522     cpu->cfg.ext_xtheadbb = true;
523     cpu->cfg.ext_xtheadbs = true;
524     cpu->cfg.ext_xtheadcmo = true;
525     cpu->cfg.ext_xtheadcondmov = true;
526     cpu->cfg.ext_xtheadfmemidx = true;
527     cpu->cfg.ext_xtheadmac = true;
528     cpu->cfg.ext_xtheadmemidx = true;
529     cpu->cfg.ext_xtheadmempair = true;
530     cpu->cfg.ext_xtheadsync = true;
531 
532     cpu->cfg.mvendorid = THEAD_VENDOR_ID;
533 #ifndef CONFIG_USER_ONLY
534     set_satp_mode_max_supported(cpu, VM_1_10_SV39);
535     th_register_custom_csrs(cpu);
536 #endif
537 
538     /* inherited from parent obj via riscv_cpu_init() */
539     cpu->cfg.pmp = true;
540 }
541 
542 static void rv64_veyron_v1_cpu_init(Object *obj)
543 {
544     CPURISCVState *env = &RISCV_CPU(obj)->env;
545     RISCVCPU *cpu = RISCV_CPU(obj);
546 
547     riscv_cpu_set_misa_ext(env, RVG | RVC | RVS | RVU | RVH);
548     env->priv_ver = PRIV_VERSION_1_12_0;
549 
550     /* Enable ISA extensions */
551     cpu->cfg.mmu = true;
552     cpu->cfg.ext_zifencei = true;
553     cpu->cfg.ext_zicsr = true;
554     cpu->cfg.pmp = true;
555     cpu->cfg.ext_zicbom = true;
556     cpu->cfg.cbom_blocksize = 64;
557     cpu->cfg.cboz_blocksize = 64;
558     cpu->cfg.ext_zicboz = true;
559     cpu->cfg.ext_smaia = true;
560     cpu->cfg.ext_ssaia = true;
561     cpu->cfg.ext_sscofpmf = true;
562     cpu->cfg.ext_sstc = true;
563     cpu->cfg.ext_svinval = true;
564     cpu->cfg.ext_svnapot = true;
565     cpu->cfg.ext_svpbmt = true;
566     cpu->cfg.ext_smstateen = true;
567     cpu->cfg.ext_zba = true;
568     cpu->cfg.ext_zbb = true;
569     cpu->cfg.ext_zbc = true;
570     cpu->cfg.ext_zbs = true;
571     cpu->cfg.ext_XVentanaCondOps = true;
572 
573     cpu->cfg.mvendorid = VEYRON_V1_MVENDORID;
574     cpu->cfg.marchid = VEYRON_V1_MARCHID;
575     cpu->cfg.mimpid = VEYRON_V1_MIMPID;
576 
577 #ifndef CONFIG_USER_ONLY
578     set_satp_mode_max_supported(cpu, VM_1_10_SV48);
579 #endif
580 }
581 
582 #ifdef CONFIG_TCG
583 static void rv128_base_cpu_init(Object *obj)
584 {
585     RISCVCPU *cpu = RISCV_CPU(obj);
586     CPURISCVState *env = &cpu->env;
587 
588     if (qemu_tcg_mttcg_enabled()) {
589         /* Missing 128-bit aligned atomics */
590         error_report("128-bit RISC-V currently does not work with Multi "
591                      "Threaded TCG. Please use: -accel tcg,thread=single");
592         exit(EXIT_FAILURE);
593     }
594 
595     cpu->cfg.mmu = true;
596     cpu->cfg.pmp = true;
597 
598     /* Set latest version of privileged specification */
599     env->priv_ver = PRIV_VERSION_LATEST;
600 #ifndef CONFIG_USER_ONLY
601     set_satp_mode_max_supported(RISCV_CPU(obj), VM_1_10_SV57);
602 #endif
603 }
604 #endif /* CONFIG_TCG */
605 
606 static void rv64i_bare_cpu_init(Object *obj)
607 {
608     CPURISCVState *env = &RISCV_CPU(obj)->env;
609     riscv_cpu_set_misa_ext(env, RVI);
610 }
611 
612 static void rv64e_bare_cpu_init(Object *obj)
613 {
614     CPURISCVState *env = &RISCV_CPU(obj)->env;
615     riscv_cpu_set_misa_ext(env, RVE);
616 }
617 
618 #else /* !TARGET_RISCV64 */
619 
620 static void rv32_base_cpu_init(Object *obj)
621 {
622     RISCVCPU *cpu = RISCV_CPU(obj);
623     CPURISCVState *env = &cpu->env;
624 
625     cpu->cfg.mmu = true;
626     cpu->cfg.pmp = true;
627 
628     /* Set latest version of privileged specification */
629     env->priv_ver = PRIV_VERSION_LATEST;
630 #ifndef CONFIG_USER_ONLY
631     set_satp_mode_max_supported(RISCV_CPU(obj), VM_1_10_SV32);
632 #endif
633 }
634 
635 static void rv32_sifive_u_cpu_init(Object *obj)
636 {
637     RISCVCPU *cpu = RISCV_CPU(obj);
638     CPURISCVState *env = &cpu->env;
639     riscv_cpu_set_misa_ext(env, RVI | RVM | RVA | RVF | RVD | RVC | RVS | RVU);
640     env->priv_ver = PRIV_VERSION_1_10_0;
641 #ifndef CONFIG_USER_ONLY
642     set_satp_mode_max_supported(RISCV_CPU(obj), VM_1_10_SV32);
643 #endif
644 
645     /* inherited from parent obj via riscv_cpu_init() */
646     cpu->cfg.ext_zifencei = true;
647     cpu->cfg.ext_zicsr = true;
648     cpu->cfg.mmu = true;
649     cpu->cfg.pmp = true;
650 }
651 
652 static void rv32_sifive_e_cpu_init(Object *obj)
653 {
654     CPURISCVState *env = &RISCV_CPU(obj)->env;
655     RISCVCPU *cpu = RISCV_CPU(obj);
656 
657     riscv_cpu_set_misa_ext(env, RVI | RVM | RVA | RVC | RVU);
658     env->priv_ver = PRIV_VERSION_1_10_0;
659 #ifndef CONFIG_USER_ONLY
660     set_satp_mode_max_supported(cpu, VM_1_10_MBARE);
661 #endif
662 
663     /* inherited from parent obj via riscv_cpu_init() */
664     cpu->cfg.ext_zifencei = true;
665     cpu->cfg.ext_zicsr = true;
666     cpu->cfg.pmp = true;
667 }
668 
669 static void rv32_ibex_cpu_init(Object *obj)
670 {
671     CPURISCVState *env = &RISCV_CPU(obj)->env;
672     RISCVCPU *cpu = RISCV_CPU(obj);
673 
674     riscv_cpu_set_misa_ext(env, RVI | RVM | RVC | RVU);
675     env->priv_ver = PRIV_VERSION_1_12_0;
676 #ifndef CONFIG_USER_ONLY
677     set_satp_mode_max_supported(cpu, VM_1_10_MBARE);
678 #endif
679     /* inherited from parent obj via riscv_cpu_init() */
680     cpu->cfg.ext_zifencei = true;
681     cpu->cfg.ext_zicsr = true;
682     cpu->cfg.pmp = true;
683     cpu->cfg.ext_smepmp = true;
684 
685     cpu->cfg.ext_zba = true;
686     cpu->cfg.ext_zbb = true;
687     cpu->cfg.ext_zbc = true;
688     cpu->cfg.ext_zbs = true;
689 }
690 
691 static void rv32_imafcu_nommu_cpu_init(Object *obj)
692 {
693     CPURISCVState *env = &RISCV_CPU(obj)->env;
694     RISCVCPU *cpu = RISCV_CPU(obj);
695 
696     riscv_cpu_set_misa_ext(env, RVI | RVM | RVA | RVF | RVC | RVU);
697     env->priv_ver = PRIV_VERSION_1_10_0;
698 #ifndef CONFIG_USER_ONLY
699     set_satp_mode_max_supported(cpu, VM_1_10_MBARE);
700 #endif
701 
702     /* inherited from parent obj via riscv_cpu_init() */
703     cpu->cfg.ext_zifencei = true;
704     cpu->cfg.ext_zicsr = true;
705     cpu->cfg.pmp = true;
706 }
707 
708 static void rv32i_bare_cpu_init(Object *obj)
709 {
710     CPURISCVState *env = &RISCV_CPU(obj)->env;
711     riscv_cpu_set_misa_ext(env, RVI);
712 }
713 
714 static void rv32e_bare_cpu_init(Object *obj)
715 {
716     CPURISCVState *env = &RISCV_CPU(obj)->env;
717     riscv_cpu_set_misa_ext(env, RVE);
718 }
719 #endif
720 
721 static ObjectClass *riscv_cpu_class_by_name(const char *cpu_model)
722 {
723     ObjectClass *oc;
724     char *typename;
725     char **cpuname;
726 
727     cpuname = g_strsplit(cpu_model, ",", 1);
728     typename = g_strdup_printf(RISCV_CPU_TYPE_NAME("%s"), cpuname[0]);
729     oc = object_class_by_name(typename);
730     g_strfreev(cpuname);
731     g_free(typename);
732 
733     return oc;
734 }
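/*
 * Illustrative example: a model name such as "rv64" is expanded by
 * RISCV_CPU_TYPE_NAME() into its QOM typename (e.g. "rv64-riscv-cpu",
 * assuming the usual "-riscv-cpu" type suffix) before the class lookup.
 */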
735 
736 char *riscv_cpu_get_name(RISCVCPU *cpu)
737 {
738     RISCVCPUClass *rcc = RISCV_CPU_GET_CLASS(cpu);
739     const char *typename = object_class_get_name(OBJECT_CLASS(rcc));
740 
741     g_assert(g_str_has_suffix(typename, RISCV_CPU_TYPE_SUFFIX));
742 
743     return cpu_model_from_type(typename);
744 }
745 
746 static void riscv_cpu_dump_state(CPUState *cs, FILE *f, int flags)
747 {
748     RISCVCPU *cpu = RISCV_CPU(cs);
749     CPURISCVState *env = &cpu->env;
750     int i, j;
751     uint8_t *p;
752 
753 #if !defined(CONFIG_USER_ONLY)
754     if (riscv_has_ext(env, RVH)) {
755         qemu_fprintf(f, " %s %d\n", "V      =  ", env->virt_enabled);
756     }
757 #endif
758     qemu_fprintf(f, " %s " TARGET_FMT_lx "\n", "pc      ", env->pc);
759 #ifndef CONFIG_USER_ONLY
760     {
761         static const int dump_csrs[] = {
762             CSR_MHARTID,
763             CSR_MSTATUS,
764             CSR_MSTATUSH,
765             /*
766              * CSR_SSTATUS is intentionally omitted here as its value
767              * can be figured out by looking at CSR_MSTATUS
768              */
769             CSR_HSTATUS,
770             CSR_VSSTATUS,
771             CSR_MIP,
772             CSR_MIE,
773             CSR_MIDELEG,
774             CSR_HIDELEG,
775             CSR_MEDELEG,
776             CSR_HEDELEG,
777             CSR_MTVEC,
778             CSR_STVEC,
779             CSR_VSTVEC,
780             CSR_MEPC,
781             CSR_SEPC,
782             CSR_VSEPC,
783             CSR_MCAUSE,
784             CSR_SCAUSE,
785             CSR_VSCAUSE,
786             CSR_MTVAL,
787             CSR_STVAL,
788             CSR_HTVAL,
789             CSR_MTVAL2,
790             CSR_MSCRATCH,
791             CSR_SSCRATCH,
792             CSR_SATP,
793             CSR_MMTE,
794             CSR_UPMBASE,
795             CSR_UPMMASK,
796             CSR_SPMBASE,
797             CSR_SPMMASK,
798             CSR_MPMBASE,
799             CSR_MPMMASK,
800         };
801 
802         for (i = 0; i < ARRAY_SIZE(dump_csrs); ++i) {
803             int csrno = dump_csrs[i];
804             target_ulong val = 0;
805             RISCVException res = riscv_csrrw_debug(env, csrno, &val, 0, 0);
806 
807             /*
808              * Rely on the smode, hmode, etc., predicates within csr.c
809              * to do the filtering of the registers that are present.
810              */
811             if (res == RISCV_EXCP_NONE) {
812                 qemu_fprintf(f, " %-8s " TARGET_FMT_lx "\n",
813                              csr_ops[csrno].name, val);
814             }
815         }
816     }
817 #endif
818 
819     for (i = 0; i < 32; i++) {
820         qemu_fprintf(f, " %-8s " TARGET_FMT_lx,
821                      riscv_int_regnames[i], env->gpr[i]);
822         if ((i & 3) == 3) {
823             qemu_fprintf(f, "\n");
824         }
825     }
826     if (flags & CPU_DUMP_FPU) {
827         target_ulong val = 0;
828         RISCVException res = riscv_csrrw_debug(env, CSR_FCSR, &val, 0, 0);
829         if (res == RISCV_EXCP_NONE) {
830             qemu_fprintf(f, " %-8s " TARGET_FMT_lx "\n",
831                     csr_ops[CSR_FCSR].name, val);
832         }
833         for (i = 0; i < 32; i++) {
834             qemu_fprintf(f, " %-8s %016" PRIx64,
835                          riscv_fpr_regnames[i], env->fpr[i]);
836             if ((i & 3) == 3) {
837                 qemu_fprintf(f, "\n");
838             }
839         }
840     }
841     if (riscv_has_ext(env, RVV) && (flags & CPU_DUMP_VPU)) {
842         static const int dump_rvv_csrs[] = {
843                     CSR_VSTART,
844                     CSR_VXSAT,
845                     CSR_VXRM,
846                     CSR_VCSR,
847                     CSR_VL,
848                     CSR_VTYPE,
849                     CSR_VLENB,
850                 };
851         for (i = 0; i < ARRAY_SIZE(dump_rvv_csrs); ++i) {
852             int csrno = dump_rvv_csrs[i];
853             target_ulong val = 0;
854             RISCVException res = riscv_csrrw_debug(env, csrno, &val, 0, 0);
855 
856             /*
857              * Rely on the smode, hmode, etc., predicates within csr.c
858              * to do the filtering of the registers that are present.
859              */
860             if (res == RISCV_EXCP_NONE) {
861                 qemu_fprintf(f, " %-8s " TARGET_FMT_lx "\n",
862                              csr_ops[csrno].name, val);
863             }
864         }
865         uint16_t vlenb = cpu->cfg.vlenb;
866 
867         for (i = 0; i < 32; i++) {
868             qemu_fprintf(f, " %-8s ", riscv_rvv_regnames[i]);
869             p = (uint8_t *)env->vreg;
870             for (j = vlenb - 1 ; j >= 0; j--) {
871                 qemu_fprintf(f, "%02x", *(p + i * vlenb + BYTE(j)));
872             }
873             qemu_fprintf(f, "\n");
874         }
875     }
876 }
877 
878 static void riscv_cpu_set_pc(CPUState *cs, vaddr value)
879 {
880     RISCVCPU *cpu = RISCV_CPU(cs);
881     CPURISCVState *env = &cpu->env;
882 
883     if (env->xl == MXL_RV32) {
884         env->pc = (int32_t)value;
885     } else {
886         env->pc = value;
887     }
888 }
889 
890 static vaddr riscv_cpu_get_pc(CPUState *cs)
891 {
892     RISCVCPU *cpu = RISCV_CPU(cs);
893     CPURISCVState *env = &cpu->env;
894 
895     /* Match cpu_get_tb_cpu_state. */
896     if (env->xl == MXL_RV32) {
897         return env->pc & UINT32_MAX;
898     }
899     return env->pc;
900 }
901 
902 bool riscv_cpu_has_work(CPUState *cs)
903 {
904 #ifndef CONFIG_USER_ONLY
905     RISCVCPU *cpu = RISCV_CPU(cs);
906     CPURISCVState *env = &cpu->env;
907     /*
908      * Definition of the WFI instruction requires it to ignore the privilege
909      * mode and delegation registers, but respect individual enables
910      */
911     return riscv_cpu_all_pending(env) != 0 ||
912         riscv_cpu_sirq_pending(env) != RISCV_EXCP_NONE ||
913         riscv_cpu_vsirq_pending(env) != RISCV_EXCP_NONE;
914 #else
915     return true;
916 #endif
917 }
918 
919 static int riscv_cpu_mmu_index(CPUState *cs, bool ifetch)
920 {
921     return riscv_env_mmu_index(cpu_env(cs), ifetch);
922 }
923 
924 static void riscv_cpu_reset_hold(Object *obj, ResetType type)
925 {
926 #ifndef CONFIG_USER_ONLY
927     uint8_t iprio;
928     int i, irq, rdzero;
929 #endif
930     CPUState *cs = CPU(obj);
931     RISCVCPU *cpu = RISCV_CPU(cs);
932     RISCVCPUClass *mcc = RISCV_CPU_GET_CLASS(obj);
933     CPURISCVState *env = &cpu->env;
934 
935     if (mcc->parent_phases.hold) {
936         mcc->parent_phases.hold(obj, type);
937     }
938 #ifndef CONFIG_USER_ONLY
939     env->misa_mxl = mcc->misa_mxl_max;
940     env->priv = PRV_M;
941     env->mstatus &= ~(MSTATUS_MIE | MSTATUS_MPRV);
942     if (env->misa_mxl > MXL_RV32) {
943         /*
944          * The reset status of SXL/UXL is undefined, but mstatus is WARL
945          * and we must ensure that the value after init is valid for read.
946          */
947         env->mstatus = set_field(env->mstatus, MSTATUS64_SXL, env->misa_mxl);
948         env->mstatus = set_field(env->mstatus, MSTATUS64_UXL, env->misa_mxl);
949         if (riscv_has_ext(env, RVH)) {
950             env->vsstatus = set_field(env->vsstatus,
951                                       MSTATUS64_SXL, env->misa_mxl);
952             env->vsstatus = set_field(env->vsstatus,
953                                       MSTATUS64_UXL, env->misa_mxl);
954             env->mstatus_hs = set_field(env->mstatus_hs,
955                                         MSTATUS64_SXL, env->misa_mxl);
956             env->mstatus_hs = set_field(env->mstatus_hs,
957                                         MSTATUS64_UXL, env->misa_mxl);
958         }
959     }
960     env->mcause = 0;
961     env->miclaim = MIP_SGEIP;
962     env->pc = env->resetvec;
963     env->bins = 0;
964     env->two_stage_lookup = false;
965 
966     env->menvcfg = (cpu->cfg.ext_svpbmt ? MENVCFG_PBMTE : 0) |
967                    (!cpu->cfg.ext_svade && cpu->cfg.ext_svadu ?
968                     MENVCFG_ADUE : 0);
969     env->henvcfg = 0;
970 
971     /* Initialize default priorities of local interrupts. */
972     for (i = 0; i < ARRAY_SIZE(env->miprio); i++) {
973         iprio = riscv_cpu_default_priority(i);
974         env->miprio[i] = (i == IRQ_M_EXT) ? 0 : iprio;
975         env->siprio[i] = (i == IRQ_S_EXT) ? 0 : iprio;
976         env->hviprio[i] = 0;
977     }
978     i = 0;
979     while (!riscv_cpu_hviprio_index2irq(i, &irq, &rdzero)) {
980         if (!rdzero) {
981             env->hviprio[irq] = env->miprio[irq];
982         }
983         i++;
984     }
985     /* mmte is supposed to have pm.current hardwired to 1 */
986     env->mmte |= (EXT_STATUS_INITIAL | MMTE_M_PM_CURRENT);
987 
988     /*
989      * Bits 10, 6, 2 and 12 of mideleg are read-only 1 when the Hypervisor
990      * extension is enabled.
991      */
992     if (riscv_has_ext(env, RVH)) {
993         env->mideleg |= HS_MODE_INTERRUPTS;
994     }
995 
996     /*
997      * Clear mseccfg and unlock all the PMP entries upon reset.
998      * This is allowed as per the priv and smepmp specifications
999      * and is needed to clear stale entries across reboots.
1000      */
1001     if (riscv_cpu_cfg(env)->ext_smepmp) {
1002         env->mseccfg = 0;
1003     }
1004 
1005     pmp_unlock_entries(env);
1006 #endif
1007     env->xl = riscv_cpu_mxl(env);
1008     riscv_cpu_update_mask(env);
1009     cs->exception_index = RISCV_EXCP_NONE;
1010     env->load_res = -1;
1011     set_default_nan_mode(1, &env->fp_status);
1012 
1013 #ifndef CONFIG_USER_ONLY
1014     if (cpu->cfg.debug) {
1015         riscv_trigger_reset_hold(env);
1016     }
1017 
1018     if (kvm_enabled()) {
1019         kvm_riscv_reset_vcpu(cpu);
1020     }
1021 #endif
1022 }
1023 
1024 static void riscv_cpu_disas_set_info(CPUState *s, disassemble_info *info)
1025 {
1026     RISCVCPU *cpu = RISCV_CPU(s);
1027     CPURISCVState *env = &cpu->env;
1028     info->target_info = &cpu->cfg;
1029 
1030     switch (env->xl) {
1031     case MXL_RV32:
1032         info->print_insn = print_insn_riscv32;
1033         break;
1034     case MXL_RV64:
1035         info->print_insn = print_insn_riscv64;
1036         break;
1037     case MXL_RV128:
1038         info->print_insn = print_insn_riscv128;
1039         break;
1040     default:
1041         g_assert_not_reached();
1042     }
1043 }
1044 
1045 #ifndef CONFIG_USER_ONLY
1046 static void riscv_cpu_satp_mode_finalize(RISCVCPU *cpu, Error **errp)
1047 {
1048     bool rv32 = riscv_cpu_is_32bit(cpu);
1049     uint8_t satp_mode_map_max, satp_mode_supported_max;
1050 
1051     /* The CPU wants the OS to decide which satp mode to use */
1052     if (cpu->cfg.satp_mode.supported == 0) {
1053         return;
1054     }
1055 
1056     satp_mode_supported_max =
1057                     satp_mode_max_from_map(cpu->cfg.satp_mode.supported);
1058 
1059     if (cpu->cfg.satp_mode.map == 0) {
1060         if (cpu->cfg.satp_mode.init == 0) {
1061             /* If unset by the user, we fall back to the default satp mode. */
1062             set_satp_mode_default_map(cpu);
1063         } else {
1064             /*
1065              * Find the lowest level that was disabled and then enable the
1066              * first valid level below it that can be found in
1067              * valid_vm_1_10_32/64.
1068              */
1069             for (int i = 1; i < 16; ++i) {
1070                 if ((cpu->cfg.satp_mode.init & (1 << i)) &&
1071                     (cpu->cfg.satp_mode.supported & (1 << i))) {
1072                     for (int j = i - 1; j >= 0; --j) {
1073                         if (cpu->cfg.satp_mode.supported & (1 << j)) {
1074                             cpu->cfg.satp_mode.map |= (1 << j);
1075                             break;
1076                         }
1077                     }
1078                     break;
1079                 }
1080             }
1081         }
1082     }
1083 
1084     satp_mode_map_max = satp_mode_max_from_map(cpu->cfg.satp_mode.map);
1085 
1086     /* Make sure the user asked for a supported configuration (HW and QEMU) */
1087     if (satp_mode_map_max > satp_mode_supported_max) {
1088         error_setg(errp, "satp_mode %s is higher than hw max capability %s",
1089                    satp_mode_str(satp_mode_map_max, rv32),
1090                    satp_mode_str(satp_mode_supported_max, rv32));
1091         return;
1092     }
1093 
1094     /*
1095      * Make sure the user did not ask for an invalid configuration as per
1096      * the specification.
1097      */
1098     if (!rv32) {
1099         for (int i = satp_mode_map_max - 1; i >= 0; --i) {
1100             if (!(cpu->cfg.satp_mode.map & (1 << i)) &&
1101                 (cpu->cfg.satp_mode.init & (1 << i)) &&
1102                 (cpu->cfg.satp_mode.supported & (1 << i))) {
1103                 error_setg(errp, "cannot disable %s satp mode if %s "
1104                            "is enabled", satp_mode_str(i, false),
1105                            satp_mode_str(satp_mode_map_max, false));
1106                 return;
1107             }
1108         }
1109     }
1110 
1111     /* Finally expand the map so that all valid modes are set */
1112     for (int i = satp_mode_map_max - 1; i >= 0; --i) {
1113         if (cpu->cfg.satp_mode.supported & (1 << i)) {
1114             cpu->cfg.satp_mode.map |= (1 << i);
1115         }
1116     }
1117 }
1118 #endif
1119 
1120 void riscv_cpu_finalize_features(RISCVCPU *cpu, Error **errp)
1121 {
1122     Error *local_err = NULL;
1123 
1124 #ifndef CONFIG_USER_ONLY
1125     riscv_cpu_satp_mode_finalize(cpu, &local_err);
1126     if (local_err != NULL) {
1127         error_propagate(errp, local_err);
1128         return;
1129     }
1130 #endif
1131 
1132     if (tcg_enabled()) {
1133         riscv_tcg_cpu_finalize_features(cpu, &local_err);
1134         if (local_err != NULL) {
1135             error_propagate(errp, local_err);
1136             return;
1137         }
1138         riscv_tcg_cpu_finalize_dynamic_decoder(cpu);
1139     } else if (kvm_enabled()) {
1140         riscv_kvm_cpu_finalize_features(cpu, &local_err);
1141         if (local_err != NULL) {
1142             error_propagate(errp, local_err);
1143             return;
1144         }
1145     }
1146 }
1147 
1148 static void riscv_cpu_realize(DeviceState *dev, Error **errp)
1149 {
1150     CPUState *cs = CPU(dev);
1151     RISCVCPU *cpu = RISCV_CPU(dev);
1152     RISCVCPUClass *mcc = RISCV_CPU_GET_CLASS(dev);
1153     Error *local_err = NULL;
1154 
1155     cpu_exec_realizefn(cs, &local_err);
1156     if (local_err != NULL) {
1157         error_propagate(errp, local_err);
1158         return;
1159     }
1160 
1161     riscv_cpu_finalize_features(cpu, &local_err);
1162     if (local_err != NULL) {
1163         error_propagate(errp, local_err);
1164         return;
1165     }
1166 
1167     riscv_cpu_register_gdb_regs_for_features(cs);
1168 
1169 #ifndef CONFIG_USER_ONLY
1170     if (cpu->cfg.debug) {
1171         riscv_trigger_realize(&cpu->env);
1172     }
1173 #endif
1174 
1175     qemu_init_vcpu(cs);
1176     cpu_reset(cs);
1177 
1178     mcc->parent_realize(dev, errp);
1179 }
1180 
1181 bool riscv_cpu_accelerator_compatible(RISCVCPU *cpu)
1182 {
1183     if (tcg_enabled()) {
1184         return riscv_cpu_tcg_compatible(cpu);
1185     }
1186 
1187     return true;
1188 }
1189 
1190 #ifndef CONFIG_USER_ONLY
1191 static void cpu_riscv_get_satp(Object *obj, Visitor *v, const char *name,
1192                                void *opaque, Error **errp)
1193 {
1194     RISCVSATPMap *satp_map = opaque;
1195     uint8_t satp = satp_mode_from_str(name);
1196     bool value;
1197 
1198     value = satp_map->map & (1 << satp);
1199 
1200     visit_type_bool(v, name, &value, errp);
1201 }
1202 
1203 static void cpu_riscv_set_satp(Object *obj, Visitor *v, const char *name,
1204                                void *opaque, Error **errp)
1205 {
1206     RISCVSATPMap *satp_map = opaque;
1207     uint8_t satp = satp_mode_from_str(name);
1208     bool value;
1209 
1210     if (!visit_type_bool(v, name, &value, errp)) {
1211         return;
1212     }
1213 
1214     satp_map->map = deposit32(satp_map->map, satp, 1, value);
1215     satp_map->init |= 1 << satp;
1216 }
1217 
1218 void riscv_add_satp_mode_properties(Object *obj)
1219 {
1220     RISCVCPU *cpu = RISCV_CPU(obj);
1221 
1222     if (cpu->env.misa_mxl == MXL_RV32) {
1223         object_property_add(obj, "sv32", "bool", cpu_riscv_get_satp,
1224                             cpu_riscv_set_satp, NULL, &cpu->cfg.satp_mode);
1225     } else {
1226         object_property_add(obj, "sv39", "bool", cpu_riscv_get_satp,
1227                             cpu_riscv_set_satp, NULL, &cpu->cfg.satp_mode);
1228         object_property_add(obj, "sv48", "bool", cpu_riscv_get_satp,
1229                             cpu_riscv_set_satp, NULL, &cpu->cfg.satp_mode);
1230         object_property_add(obj, "sv57", "bool", cpu_riscv_get_satp,
1231                             cpu_riscv_set_satp, NULL, &cpu->cfg.satp_mode);
1232         object_property_add(obj, "sv64", "bool", cpu_riscv_get_satp,
1233                             cpu_riscv_set_satp, NULL, &cpu->cfg.satp_mode);
1234     }
1235 }
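/*
 * Illustrative usage (hypothetical command line): "-cpu rv64,sv57=off"
 * records SV57 as user-disabled in satp_mode.map/init, letting
 * riscv_cpu_satp_mode_finalize() fall back to the next supported mode.
 */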
1236 
1237 static void riscv_cpu_set_irq(void *opaque, int irq, int level)
1238 {
1239     RISCVCPU *cpu = RISCV_CPU(opaque);
1240     CPURISCVState *env = &cpu->env;
1241 
1242     if (irq < IRQ_LOCAL_MAX) {
1243         switch (irq) {
1244         case IRQ_U_SOFT:
1245         case IRQ_S_SOFT:
1246         case IRQ_VS_SOFT:
1247         case IRQ_M_SOFT:
1248         case IRQ_U_TIMER:
1249         case IRQ_S_TIMER:
1250         case IRQ_VS_TIMER:
1251         case IRQ_M_TIMER:
1252         case IRQ_U_EXT:
1253         case IRQ_VS_EXT:
1254         case IRQ_M_EXT:
1255             if (kvm_enabled()) {
1256                 kvm_riscv_set_irq(cpu, irq, level);
1257             } else {
1258                 riscv_cpu_update_mip(env, 1 << irq, BOOL_TO_MASK(level));
1259             }
1260             break;
1261         case IRQ_S_EXT:
1262             if (kvm_enabled()) {
1263                 kvm_riscv_set_irq(cpu, irq, level);
1264             } else {
1265                 env->external_seip = level;
1266                 riscv_cpu_update_mip(env, 1 << irq,
1267                                      BOOL_TO_MASK(level | env->software_seip));
1268             }
1269             break;
1270         default:
1271             g_assert_not_reached();
1272         }
1273     } else if (irq < (IRQ_LOCAL_MAX + IRQ_LOCAL_GUEST_MAX)) {
1274         /* Require H-extension for handling guest local interrupts */
1275         if (!riscv_has_ext(env, RVH)) {
1276             g_assert_not_reached();
1277         }
1278 
1279         /* Compute bit position in HGEIP CSR */
1280         irq = irq - IRQ_LOCAL_MAX + 1;
1281         if (env->geilen < irq) {
1282             g_assert_not_reached();
1283         }
1284 
1285         /* Update HGEIP CSR */
1286         env->hgeip &= ~((target_ulong)1 << irq);
1287         if (level) {
1288             env->hgeip |= (target_ulong)1 << irq;
1289         }
1290 
1291         /* Update mip.SGEIP bit */
1292         riscv_cpu_update_mip(env, MIP_SGEIP,
1293                              BOOL_TO_MASK(!!(env->hgeie & env->hgeip)));
1294     } else {
1295         g_assert_not_reached();
1296     }
1297 }
1298 #endif /* CONFIG_USER_ONLY */
1299 
1300 static bool riscv_cpu_is_dynamic(Object *cpu_obj)
1301 {
1302     return object_dynamic_cast(cpu_obj, TYPE_RISCV_DYNAMIC_CPU) != NULL;
1303 }
1304 
1305 static void riscv_cpu_post_init(Object *obj)
1306 {
1307     accel_cpu_instance_init(CPU(obj));
1308 }
1309 
1310 static void riscv_cpu_init(Object *obj)
1311 {
1312     RISCVCPUClass *mcc = RISCV_CPU_GET_CLASS(obj);
1313     RISCVCPU *cpu = RISCV_CPU(obj);
1314     CPURISCVState *env = &cpu->env;
1315 
1316     env->misa_mxl = mcc->misa_mxl_max;
1317 
1318 #ifndef CONFIG_USER_ONLY
1319     qdev_init_gpio_in(DEVICE(obj), riscv_cpu_set_irq,
1320                       IRQ_LOCAL_MAX + IRQ_LOCAL_GUEST_MAX);
1321 #endif /* CONFIG_USER_ONLY */
1322 
1323     general_user_opts = g_hash_table_new(g_str_hash, g_str_equal);
1324 
1325     /*
1326      * The timer and performance counter extensions were supported
1327      * in QEMU before they were added as discrete extensions in the
1328      * ISA. To keep compatibility we'll always default them to 'true'
1329      * for all CPUs. Each accelerator will decide what to do when
1330      * users disable them.
1331      */
1332     RISCV_CPU(obj)->cfg.ext_zicntr = true;
1333     RISCV_CPU(obj)->cfg.ext_zihpm = true;
1334 
1335     /* Default values for non-bool cpu properties */
1336     cpu->cfg.pmu_mask = MAKE_64BIT_MASK(3, 16);
1337     cpu->cfg.vlenb = 128 >> 3;
1338     cpu->cfg.elen = 64;
1339     cpu->cfg.cbom_blocksize = 64;
1340     cpu->cfg.cbop_blocksize = 64;
1341     cpu->cfg.cboz_blocksize = 64;
1342     cpu->env.vext_ver = VEXT_VERSION_1_00_0;
1343 }
1344 
1345 static void riscv_bare_cpu_init(Object *obj)
1346 {
1347     RISCVCPU *cpu = RISCV_CPU(obj);
1348 
1349     /*
1350      * Bare CPUs do not inherit the timer and performance
1351      * counters from the parent class (see riscv_cpu_init()
1352      * for info on why the parent enables them).
1353      *
1354      * Users have to explicitly enable these counters for
1355      * bare CPUs.
1356      */
1357     cpu->cfg.ext_zicntr = false;
1358     cpu->cfg.ext_zihpm = false;
1359 
1360     /* Set to QEMU's first supported priv version */
1361     cpu->env.priv_ver = PRIV_VERSION_1_10_0;
1362 
1363     /*
1364      * Support all available satp_mode settings. The default
1365      * value will be set to MBARE if the user doesn't set
1366      * satp_mode manually (see set_satp_mode_default_map()).
1367      */
1368 #ifndef CONFIG_USER_ONLY
1369     set_satp_mode_max_supported(cpu, VM_1_10_SV64);
1370 #endif
1371 }
1372 
1373 typedef struct misa_ext_info {
1374     const char *name;
1375     const char *description;
1376 } MISAExtInfo;
1377 
1378 #define MISA_INFO_IDX(_bit) \
1379     __builtin_ctz(_bit)
1380 
1381 #define MISA_EXT_INFO(_bit, _propname, _descr) \
1382     [MISA_INFO_IDX(_bit)] = {.name = _propname, .description = _descr}
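/*
 * Each entry lands at the bit index of its misa bit: assuming the usual
 * single-bit RV* encoding, RVA is bit 0 ('A' - 'A'), so
 * MISA_EXT_INFO(RVA, ...) occupies misa_ext_info_arr[0].
 */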
1383 
1384 static const MISAExtInfo misa_ext_info_arr[] = {
1385     MISA_EXT_INFO(RVA, "a", "Atomic instructions"),
1386     MISA_EXT_INFO(RVC, "c", "Compressed instructions"),
1387     MISA_EXT_INFO(RVD, "d", "Double-precision floating point"),
1388     MISA_EXT_INFO(RVF, "f", "Single-precision floating point"),
1389     MISA_EXT_INFO(RVI, "i", "Base integer instruction set"),
1390     MISA_EXT_INFO(RVE, "e", "Base integer instruction set (embedded)"),
1391     MISA_EXT_INFO(RVM, "m", "Integer multiplication and division"),
1392     MISA_EXT_INFO(RVS, "s", "Supervisor-level instructions"),
1393     MISA_EXT_INFO(RVU, "u", "User-level instructions"),
1394     MISA_EXT_INFO(RVH, "h", "Hypervisor"),
1395     MISA_EXT_INFO(RVJ, "x-j", "Dynamically translated languages"),
1396     MISA_EXT_INFO(RVV, "v", "Vector operations"),
1397     MISA_EXT_INFO(RVG, "g", "General purpose (IMAFD_Zicsr_Zifencei)"),
1398     MISA_EXT_INFO(RVB, "b", "Bit manipulation (Zba_Zbb_Zbs)")
1399 };
1400 
1401 static void riscv_cpu_validate_misa_mxl(RISCVCPUClass *mcc)
1402 {
1403     CPUClass *cc = CPU_CLASS(mcc);
1404 
1405     /* Validate that MISA_MXL is set properly. */
1406     switch (mcc->misa_mxl_max) {
1407 #ifdef TARGET_RISCV64
1408     case MXL_RV64:
1409     case MXL_RV128:
1410         cc->gdb_core_xml_file = "riscv-64bit-cpu.xml";
1411         break;
1412 #endif
1413     case MXL_RV32:
1414         cc->gdb_core_xml_file = "riscv-32bit-cpu.xml";
1415         break;
1416     default:
1417         g_assert_not_reached();
1418     }
1419 }
1420 
1421 static int riscv_validate_misa_info_idx(uint32_t bit)
1422 {
1423     int idx;
1424 
1425     /*
1426      * Our lowest valid input (RVA) is 1 and
1427      * __builtin_ctz() is UB with zero.
1428      */
1429     g_assert(bit != 0);
1430     idx = MISA_INFO_IDX(bit);
1431 
1432     g_assert(idx < ARRAY_SIZE(misa_ext_info_arr));
1433     return idx;
1434 }
1435 
1436 const char *riscv_get_misa_ext_name(uint32_t bit)
1437 {
1438     int idx = riscv_validate_misa_info_idx(bit);
1439     const char *val = misa_ext_info_arr[idx].name;
1440 
1441     g_assert(val != NULL);
1442     return val;
1443 }
1444 
1445 const char *riscv_get_misa_ext_description(uint32_t bit)
1446 {
1447     int idx = riscv_validate_misa_info_idx(bit);
1448     const char *val = misa_ext_info_arr[idx].description;
1449 
1450     g_assert(val != NULL);
1451     return val;
1452 }
1453 
1454 #define MULTI_EXT_CFG_BOOL(_name, _prop, _defval) \
1455     {.name = _name, .offset = CPU_CFG_OFFSET(_prop), \
1456      .enabled = _defval}
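/*
 * For example, MULTI_EXT_CFG_BOOL("zba", ext_zba, true) expands to
 * {.name = "zba", .offset = CPU_CFG_OFFSET(ext_zba), .enabled = true}:
 * a property name, the offset of the flag inside the CPU config struct,
 * and its default value.
 */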
1457 
1458 const RISCVCPUMultiExtConfig riscv_cpu_extensions[] = {
1459     /* Defaults for standard extensions */
1460     MULTI_EXT_CFG_BOOL("sscofpmf", ext_sscofpmf, false),
1461     MULTI_EXT_CFG_BOOL("smcntrpmf", ext_smcntrpmf, false),
1462     MULTI_EXT_CFG_BOOL("zifencei", ext_zifencei, true),
1463     MULTI_EXT_CFG_BOOL("zicsr", ext_zicsr, true),
1464     MULTI_EXT_CFG_BOOL("zihintntl", ext_zihintntl, true),
1465     MULTI_EXT_CFG_BOOL("zihintpause", ext_zihintpause, true),
1466     MULTI_EXT_CFG_BOOL("zimop", ext_zimop, false),
1467     MULTI_EXT_CFG_BOOL("zcmop", ext_zcmop, false),
1468     MULTI_EXT_CFG_BOOL("zacas", ext_zacas, false),
1469     MULTI_EXT_CFG_BOOL("zama16b", ext_zama16b, false),
1470     MULTI_EXT_CFG_BOOL("zabha", ext_zabha, false),
1471     MULTI_EXT_CFG_BOOL("zaamo", ext_zaamo, false),
1472     MULTI_EXT_CFG_BOOL("zalrsc", ext_zalrsc, false),
1473     MULTI_EXT_CFG_BOOL("zawrs", ext_zawrs, true),
1474     MULTI_EXT_CFG_BOOL("zfa", ext_zfa, true),
1475     MULTI_EXT_CFG_BOOL("zfbfmin", ext_zfbfmin, false),
1476     MULTI_EXT_CFG_BOOL("zfh", ext_zfh, false),
1477     MULTI_EXT_CFG_BOOL("zfhmin", ext_zfhmin, false),
1478     MULTI_EXT_CFG_BOOL("zve32f", ext_zve32f, false),
1479     MULTI_EXT_CFG_BOOL("zve32x", ext_zve32x, false),
1480     MULTI_EXT_CFG_BOOL("zve64f", ext_zve64f, false),
1481     MULTI_EXT_CFG_BOOL("zve64d", ext_zve64d, false),
1482     MULTI_EXT_CFG_BOOL("zve64x", ext_zve64x, false),
1483     MULTI_EXT_CFG_BOOL("zvfbfmin", ext_zvfbfmin, false),
1484     MULTI_EXT_CFG_BOOL("zvfbfwma", ext_zvfbfwma, false),
1485     MULTI_EXT_CFG_BOOL("zvfh", ext_zvfh, false),
1486     MULTI_EXT_CFG_BOOL("zvfhmin", ext_zvfhmin, false),
1487     MULTI_EXT_CFG_BOOL("sstc", ext_sstc, true),
1488 
1489     MULTI_EXT_CFG_BOOL("smaia", ext_smaia, false),
1490     MULTI_EXT_CFG_BOOL("smepmp", ext_smepmp, false),
1491     MULTI_EXT_CFG_BOOL("smstateen", ext_smstateen, false),
1492     MULTI_EXT_CFG_BOOL("ssaia", ext_ssaia, false),
1493     MULTI_EXT_CFG_BOOL("svade", ext_svade, false),
1494     MULTI_EXT_CFG_BOOL("svadu", ext_svadu, true),
1495     MULTI_EXT_CFG_BOOL("svinval", ext_svinval, false),
1496     MULTI_EXT_CFG_BOOL("svnapot", ext_svnapot, false),
1497     MULTI_EXT_CFG_BOOL("svpbmt", ext_svpbmt, false),
1498     MULTI_EXT_CFG_BOOL("svvptc", ext_svvptc, true),
1499 
1500     MULTI_EXT_CFG_BOOL("zicntr", ext_zicntr, true),
1501     MULTI_EXT_CFG_BOOL("zihpm", ext_zihpm, true),
1502 
1503     MULTI_EXT_CFG_BOOL("zba", ext_zba, true),
1504     MULTI_EXT_CFG_BOOL("zbb", ext_zbb, true),
1505     MULTI_EXT_CFG_BOOL("zbc", ext_zbc, true),
1506     MULTI_EXT_CFG_BOOL("zbkb", ext_zbkb, false),
1507     MULTI_EXT_CFG_BOOL("zbkc", ext_zbkc, false),
1508     MULTI_EXT_CFG_BOOL("zbkx", ext_zbkx, false),
1509     MULTI_EXT_CFG_BOOL("zbs", ext_zbs, true),
1510     MULTI_EXT_CFG_BOOL("zk", ext_zk, false),
1511     MULTI_EXT_CFG_BOOL("zkn", ext_zkn, false),
1512     MULTI_EXT_CFG_BOOL("zknd", ext_zknd, false),
1513     MULTI_EXT_CFG_BOOL("zkne", ext_zkne, false),
1514     MULTI_EXT_CFG_BOOL("zknh", ext_zknh, false),
1515     MULTI_EXT_CFG_BOOL("zkr", ext_zkr, false),
1516     MULTI_EXT_CFG_BOOL("zks", ext_zks, false),
1517     MULTI_EXT_CFG_BOOL("zksed", ext_zksed, false),
1518     MULTI_EXT_CFG_BOOL("zksh", ext_zksh, false),
1519     MULTI_EXT_CFG_BOOL("zkt", ext_zkt, false),
1520     MULTI_EXT_CFG_BOOL("ztso", ext_ztso, false),
1521 
1522     MULTI_EXT_CFG_BOOL("zdinx", ext_zdinx, false),
1523     MULTI_EXT_CFG_BOOL("zfinx", ext_zfinx, false),
1524     MULTI_EXT_CFG_BOOL("zhinx", ext_zhinx, false),
1525     MULTI_EXT_CFG_BOOL("zhinxmin", ext_zhinxmin, false),
1526 
1527     MULTI_EXT_CFG_BOOL("zicbom", ext_zicbom, true),
1528     MULTI_EXT_CFG_BOOL("zicbop", ext_zicbop, true),
1529     MULTI_EXT_CFG_BOOL("zicboz", ext_zicboz, true),
1530 
1531     MULTI_EXT_CFG_BOOL("zmmul", ext_zmmul, false),
1532 
1533     MULTI_EXT_CFG_BOOL("zca", ext_zca, false),
1534     MULTI_EXT_CFG_BOOL("zcb", ext_zcb, false),
1535     MULTI_EXT_CFG_BOOL("zcd", ext_zcd, false),
1536     MULTI_EXT_CFG_BOOL("zce", ext_zce, false),
1537     MULTI_EXT_CFG_BOOL("zcf", ext_zcf, false),
1538     MULTI_EXT_CFG_BOOL("zcmp", ext_zcmp, false),
1539     MULTI_EXT_CFG_BOOL("zcmt", ext_zcmt, false),
1540     MULTI_EXT_CFG_BOOL("zicond", ext_zicond, false),
1541 
1542     /* Vector cryptography extensions */
1543     MULTI_EXT_CFG_BOOL("zvbb", ext_zvbb, false),
1544     MULTI_EXT_CFG_BOOL("zvbc", ext_zvbc, false),
1545     MULTI_EXT_CFG_BOOL("zvkb", ext_zvkb, false),
1546     MULTI_EXT_CFG_BOOL("zvkg", ext_zvkg, false),
1547     MULTI_EXT_CFG_BOOL("zvkned", ext_zvkned, false),
1548     MULTI_EXT_CFG_BOOL("zvknha", ext_zvknha, false),
1549     MULTI_EXT_CFG_BOOL("zvknhb", ext_zvknhb, false),
1550     MULTI_EXT_CFG_BOOL("zvksed", ext_zvksed, false),
1551     MULTI_EXT_CFG_BOOL("zvksh", ext_zvksh, false),
1552     MULTI_EXT_CFG_BOOL("zvkt", ext_zvkt, false),
1553     MULTI_EXT_CFG_BOOL("zvkn", ext_zvkn, false),
1554     MULTI_EXT_CFG_BOOL("zvknc", ext_zvknc, false),
1555     MULTI_EXT_CFG_BOOL("zvkng", ext_zvkng, false),
1556     MULTI_EXT_CFG_BOOL("zvks", ext_zvks, false),
1557     MULTI_EXT_CFG_BOOL("zvksc", ext_zvksc, false),
1558     MULTI_EXT_CFG_BOOL("zvksg", ext_zvksg, false),
1559 
1560     DEFINE_PROP_END_OF_LIST(),
1561 };
1562 
1563 const RISCVCPUMultiExtConfig riscv_cpu_vendor_exts[] = {
1564     MULTI_EXT_CFG_BOOL("xtheadba", ext_xtheadba, false),
1565     MULTI_EXT_CFG_BOOL("xtheadbb", ext_xtheadbb, false),
1566     MULTI_EXT_CFG_BOOL("xtheadbs", ext_xtheadbs, false),
1567     MULTI_EXT_CFG_BOOL("xtheadcmo", ext_xtheadcmo, false),
1568     MULTI_EXT_CFG_BOOL("xtheadcondmov", ext_xtheadcondmov, false),
1569     MULTI_EXT_CFG_BOOL("xtheadfmemidx", ext_xtheadfmemidx, false),
1570     MULTI_EXT_CFG_BOOL("xtheadfmv", ext_xtheadfmv, false),
1571     MULTI_EXT_CFG_BOOL("xtheadmac", ext_xtheadmac, false),
1572     MULTI_EXT_CFG_BOOL("xtheadmemidx", ext_xtheadmemidx, false),
1573     MULTI_EXT_CFG_BOOL("xtheadmempair", ext_xtheadmempair, false),
1574     MULTI_EXT_CFG_BOOL("xtheadsync", ext_xtheadsync, false),
1575     MULTI_EXT_CFG_BOOL("xventanacondops", ext_XVentanaCondOps, false),
1576 
1577     DEFINE_PROP_END_OF_LIST(),
1578 };
1579 
1580 /* These are experimental, so their property names carry the 'x-' prefix */
1581 const RISCVCPUMultiExtConfig riscv_cpu_experimental_exts[] = {
1582     DEFINE_PROP_END_OF_LIST(),
1583 };
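
/*
 * Illustrative sketch, not part of the upstream list: an experimental
 * extension would be added here with the 'x-' prefix in its property
 * name, e.g.
 *
 *     MULTI_EXT_CFG_BOOL("x-foo", ext_foo, false),
 *
 * where 'ext_foo' is a hypothetical RISCVCPUConfig flag. The prefix
 * signals that the option may change or disappear without notice.
 */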
1584 
1585 /*
1586  * 'Named features' is the name we give to extensions that we
1587  * don't want to expose to users. They are either immutable
1588  * (always enabled/disabled) or their value varies depending on
1589  * the resulting CPU state. They have riscv,isa strings
1590  * and priv_ver like regular extensions.
1591  */
1592 const RISCVCPUMultiExtConfig riscv_cpu_named_features[] = {
1593     MULTI_EXT_CFG_BOOL("zic64b", ext_zic64b, true),
1594 
1595     DEFINE_PROP_END_OF_LIST(),
1596 };
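
/*
 * For context: Zic64b is the RVA22 named feature stating that cache
 * blocks are 64 bytes. It defaults to enabled here and is expected to
 * be cleared elsewhere if the user selects a different cbom/cbop/cboz
 * block size.
 */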
1597 
1598 /* Deprecated entries marked for future removal */
1599 const RISCVCPUMultiExtConfig riscv_cpu_deprecated_exts[] = {
1600     MULTI_EXT_CFG_BOOL("Zifencei", ext_zifencei, true),
1601     MULTI_EXT_CFG_BOOL("Zicsr", ext_zicsr, true),
1602     MULTI_EXT_CFG_BOOL("Zihintntl", ext_zihintntl, true),
1603     MULTI_EXT_CFG_BOOL("Zihintpause", ext_zihintpause, true),
1604     MULTI_EXT_CFG_BOOL("Zawrs", ext_zawrs, true),
1605     MULTI_EXT_CFG_BOOL("Zfa", ext_zfa, true),
1606     MULTI_EXT_CFG_BOOL("Zfh", ext_zfh, false),
1607     MULTI_EXT_CFG_BOOL("Zfhmin", ext_zfhmin, false),
1608     MULTI_EXT_CFG_BOOL("Zve32f", ext_zve32f, false),
1609     MULTI_EXT_CFG_BOOL("Zve64f", ext_zve64f, false),
1610     MULTI_EXT_CFG_BOOL("Zve64d", ext_zve64d, false),
1611 
1612     DEFINE_PROP_END_OF_LIST(),
1613 };
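
/*
 * The capitalised spellings above are legacy aliases for the lower-case
 * properties defined earlier in this file; for example, an illustrative
 * "-cpu rv64,Zifencei=true" sets the same ext_zifencei flag as
 * "zifencei=true" and is expected to trigger a deprecation warning.
 */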
1614 
1615 static void cpu_set_prop_err(RISCVCPU *cpu, const char *propname,
1616                              Error **errp)
1617 {
1618     g_autofree char *cpuname = riscv_cpu_get_name(cpu);
1619     error_setg(errp, "CPU '%s' does not allow changing the value of '%s'",
1620                cpuname, propname);
1621 }
1622 
1623 static void prop_pmu_num_set(Object *obj, Visitor *v, const char *name,
1624                              void *opaque, Error **errp)
1625 {
1626     RISCVCPU *cpu = RISCV_CPU(obj);
1627     uint8_t pmu_num, curr_pmu_num;
1628     uint32_t pmu_mask;
1629 
1630     if (!visit_type_uint8(v, name, &pmu_num, errp)) {
             return;
         }
1631 
1632     curr_pmu_num = ctpop32(cpu->cfg.pmu_mask);
1633 
1634     if (pmu_num != curr_pmu_num && riscv_cpu_is_vendor(obj)) {
1635         cpu_set_prop_err(cpu, name, errp);
1636         error_append_hint(errp, "Current '%s' val: %u\n",
1637                           name, curr_pmu_num);
1638         return;
1639     }
1640 
1641     if (pmu_num > (RV_MAX_MHPMCOUNTERS - 3)) {
1642         error_setg(errp, "Number of counters exceeds maximum available");
1643         return;
1644     }
1645 
1646     if (pmu_num == 0) {
1647         pmu_mask = 0;
1648     } else {
1649         pmu_mask = MAKE_64BIT_MASK(3, pmu_num);
1650     }
1651 
1652     warn_report("\"pmu-num\" property is deprecated; use \"pmu-mask\"");
1653     cpu->cfg.pmu_mask = pmu_mask;
1654     cpu_option_add_user_setting("pmu-mask", pmu_mask);
1655 }
1656 
1657 static void prop_pmu_num_get(Object *obj, Visitor *v, const char *name,
1658                              void *opaque, Error **errp)
1659 {
1660     RISCVCPU *cpu = RISCV_CPU(obj);
1661     uint8_t pmu_num = ctpop32(cpu->cfg.pmu_mask);
1662 
1663     visit_type_uint8(v, name, &pmu_num, errp);
1664 }
1665 
1666 static const PropertyInfo prop_pmu_num = {
1667     .name = "pmu-num",
1668     .get = prop_pmu_num_get,
1669     .set = prop_pmu_num_set,
1670 };
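
/*
 * Worked example of the mapping above: "pmu-num=8" yields
 * MAKE_64BIT_MASK(3, 8) == 0x7f8, i.e. programmable counters
 * mhpmcounter3..mhpmcounter10, which is then stored as "pmu-mask".
 */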
1671 
1672 static void prop_pmu_mask_set(Object *obj, Visitor *v, const char *name,
1673                              void *opaque, Error **errp)
1674 {
1675     RISCVCPU *cpu = RISCV_CPU(obj);
1676     uint32_t value;
1677     uint8_t pmu_num;
1678 
1679     visit_type_uint32(v, name, &value, errp);
1680 
1681     if (value != cpu->cfg.pmu_mask && riscv_cpu_is_vendor(obj)) {
1682         cpu_set_prop_err(cpu, name, errp);
1683         error_append_hint(errp, "Current '%s' val: %x\n",
1684                           name, cpu->cfg.pmu_mask);
1685         return;
1686     }
1687 
1688     pmu_num = ctpop32(value);
1689 
1690     if (pmu_num > (RV_MAX_MHPMCOUNTERS - 3)) {
1691         error_setg(errp, "Number of counters exceeds maximum available");
1692         return;
1693     }
1694 
1695     cpu_option_add_user_setting(name, value);
1696     cpu->cfg.pmu_mask = value;
1697 }
1698 
1699 static void prop_pmu_mask_get(Object *obj, Visitor *v, const char *name,
1700                              void *opaque, Error **errp)
1701 {
1702     uint8_t pmu_mask = RISCV_CPU(obj)->cfg.pmu_mask;
1703 
1704     visit_type_uint8(v, name, &pmu_mask, errp);
1705 }
1706 
1707 static const PropertyInfo prop_pmu_mask = {
1708     .name = "pmu-mask",
1709     .get = prop_pmu_mask_get,
1710     .set = prop_pmu_mask_set,
1711 };
1712 
1713 static void prop_mmu_set(Object *obj, Visitor *v, const char *name,
1714                          void *opaque, Error **errp)
1715 {
1716     RISCVCPU *cpu = RISCV_CPU(obj);
1717     bool value;
1718 
1719     visit_type_bool(v, name, &value, errp);
1720 
1721     if (cpu->cfg.mmu != value && riscv_cpu_is_vendor(obj)) {
1722         cpu_set_prop_err(cpu, "mmu", errp);
1723         return;
1724     }
1725 
1726     cpu_option_add_user_setting(name, value);
1727     cpu->cfg.mmu = value;
1728 }
1729 
1730 static void prop_mmu_get(Object *obj, Visitor *v, const char *name,
1731                          void *opaque, Error **errp)
1732 {
1733     bool value = RISCV_CPU(obj)->cfg.mmu;
1734 
1735     visit_type_bool(v, name, &value, errp);
1736 }
1737 
1738 static const PropertyInfo prop_mmu = {
1739     .name = "mmu",
1740     .get = prop_mmu_get,
1741     .set = prop_mmu_set,
1742 };
1743 
1744 static void prop_pmp_set(Object *obj, Visitor *v, const char *name,
1745                          void *opaque, Error **errp)
1746 {
1747     RISCVCPU *cpu = RISCV_CPU(obj);
1748     bool value;
1749 
1750     visit_type_bool(v, name, &value, errp);
1751 
1752     if (cpu->cfg.pmp != value && riscv_cpu_is_vendor(obj)) {
1753         cpu_set_prop_err(cpu, name, errp);
1754         return;
1755     }
1756 
1757     cpu_option_add_user_setting(name, value);
1758     cpu->cfg.pmp = value;
1759 }
1760 
1761 static void prop_pmp_get(Object *obj, Visitor *v, const char *name,
1762                          void *opaque, Error **errp)
1763 {
1764     bool value = RISCV_CPU(obj)->cfg.pmp;
1765 
1766     visit_type_bool(v, name, &value, errp);
1767 }
1768 
1769 static const PropertyInfo prop_pmp = {
1770     .name = "pmp",
1771     .get = prop_pmp_get,
1772     .set = prop_pmp_set,
1773 };
1774 
1775 static int priv_spec_from_str(const char *priv_spec_str)
1776 {
1777     int priv_version = -1;
1778 
1779     if (!g_strcmp0(priv_spec_str, PRIV_VER_1_13_0_STR)) {
1780         priv_version = PRIV_VERSION_1_13_0;
1781     } else if (!g_strcmp0(priv_spec_str, PRIV_VER_1_12_0_STR)) {
1782         priv_version = PRIV_VERSION_1_12_0;
1783     } else if (!g_strcmp0(priv_spec_str, PRIV_VER_1_11_0_STR)) {
1784         priv_version = PRIV_VERSION_1_11_0;
1785     } else if (!g_strcmp0(priv_spec_str, PRIV_VER_1_10_0_STR)) {
1786         priv_version = PRIV_VERSION_1_10_0;
1787     }
1788 
1789     return priv_version;
1790 }
1791 
1792 const char *priv_spec_to_str(int priv_version)
1793 {
1794     switch (priv_version) {
1795     case PRIV_VERSION_1_10_0:
1796         return PRIV_VER_1_10_0_STR;
1797     case PRIV_VERSION_1_11_0:
1798         return PRIV_VER_1_11_0_STR;
1799     case PRIV_VERSION_1_12_0:
1800         return PRIV_VER_1_12_0_STR;
1801     case PRIV_VERSION_1_13_0:
1802         return PRIV_VER_1_13_0_STR;
1803     default:
1804         return NULL;
1805     }
1806 }
1807 
1808 static void prop_priv_spec_set(Object *obj, Visitor *v, const char *name,
1809                                void *opaque, Error **errp)
1810 {
1811     RISCVCPU *cpu = RISCV_CPU(obj);
1812     g_autofree char *value = NULL;
1813     int priv_version = -1;
1814 
1815     visit_type_str(v, name, &value, errp);
1816 
1817     priv_version = priv_spec_from_str(value);
1818     if (priv_version < 0) {
1819         error_setg(errp, "Unsupported privilege spec version '%s'", value);
1820         return;
1821     }
1822 
1823     if (priv_version != cpu->env.priv_ver && riscv_cpu_is_vendor(obj)) {
1824         cpu_set_prop_err(cpu, name, errp);
1825         error_append_hint(errp, "Current '%s' val: %s\n", name,
1826                           object_property_get_str(obj, name, NULL));
1827         return;
1828     }
1829 
1830     cpu_option_add_user_setting(name, priv_version);
1831     cpu->env.priv_ver = priv_version;
1832 }
1833 
1834 static void prop_priv_spec_get(Object *obj, Visitor *v, const char *name,
1835                                void *opaque, Error **errp)
1836 {
1837     RISCVCPU *cpu = RISCV_CPU(obj);
1838     const char *value = priv_spec_to_str(cpu->env.priv_ver);
1839 
1840     visit_type_str(v, name, (char **)&value, errp);
1841 }
1842 
1843 static const PropertyInfo prop_priv_spec = {
1844     .name = "priv_spec",
1845     .get = prop_priv_spec_get,
1846     .set = prop_priv_spec_set,
1847 };
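
/*
 * Usage sketch (illustrative): "-cpu rv64,priv_spec=v1.12.0" selects
 * PRIV_VERSION_1_12_0, assuming PRIV_VER_1_12_0_STR expands to "v1.12.0".
 * Any string not recognised by priv_spec_from_str() is rejected.
 */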
1848 
1849 static void prop_vext_spec_set(Object *obj, Visitor *v, const char *name,
1850                                void *opaque, Error **errp)
1851 {
1852     RISCVCPU *cpu = RISCV_CPU(obj);
1853     g_autofree char *value = NULL;
1854 
1855     visit_type_str(v, name, &value, errp);
1856 
1857     if (g_strcmp0(value, VEXT_VER_1_00_0_STR) != 0) {
1858         error_setg(errp, "Unsupported vector spec version '%s'", value);
1859         return;
1860     }
1861 
1862     cpu_option_add_user_setting(name, VEXT_VERSION_1_00_0);
1863     cpu->env.vext_ver = VEXT_VERSION_1_00_0;
1864 }
1865 
1866 static void prop_vext_spec_get(Object *obj, Visitor *v, const char *name,
1867                                void *opaque, Error **errp)
1868 {
1869     const char *value = VEXT_VER_1_00_0_STR;
1870 
1871     visit_type_str(v, name, (char **)&value, errp);
1872 }
1873 
1874 static const PropertyInfo prop_vext_spec = {
1875     .name = "vext_spec",
1876     .get = prop_vext_spec_get,
1877     .set = prop_vext_spec_set,
1878 };
1879 
1880 static void prop_vlen_set(Object *obj, Visitor *v, const char *name,
1881                          void *opaque, Error **errp)
1882 {
1883     RISCVCPU *cpu = RISCV_CPU(obj);
1884     uint16_t value;
1885 
1886     if (!visit_type_uint16(v, name, &value, errp)) {
1887         return;
1888     }
1889 
1890     if (!is_power_of_2(value)) {
1891         error_setg(errp, "Vector extension VLEN must be a power of 2");
1892         return;
1893     }
1894 
1895     if (value != cpu->cfg.vlenb && riscv_cpu_is_vendor(obj)) {
1896         cpu_set_prop_err(cpu, name, errp);
1897         error_append_hint(errp, "Current '%s' val: %u\n",
1898                           name, cpu->cfg.vlenb << 3);
1899         return;
1900     }
1901 
1902     cpu_option_add_user_setting(name, value);
1903     cpu->cfg.vlenb = value >> 3;
1904 }
1905 
1906 static void prop_vlen_get(Object *obj, Visitor *v, const char *name,
1907                          void *opaque, Error **errp)
1908 {
1909     uint16_t value = RISCV_CPU(obj)->cfg.vlenb << 3;
1910 
1911     visit_type_uint16(v, name, &value, errp);
1912 }
1913 
1914 static const PropertyInfo prop_vlen = {
1915     .name = "vlen",
1916     .get = prop_vlen_get,
1917     .set = prop_vlen_set,
1918 };
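
/*
 * Note on the setter above: VLEN is given in bits but stored in bytes
 * (cfg.vlenb). For example, "vlen=256" stores vlenb = 256 >> 3 = 32,
 * and the getter shifts it back (32 << 3 == 256).
 */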
1919 
1920 static void prop_elen_set(Object *obj, Visitor *v, const char *name,
1921                          void *opaque, Error **errp)
1922 {
1923     RISCVCPU *cpu = RISCV_CPU(obj);
1924     uint16_t value;
1925 
1926     if (!visit_type_uint16(v, name, &value, errp)) {
1927         return;
1928     }
1929 
1930     if (!is_power_of_2(value)) {
1931         error_setg(errp, "Vector extension ELEN must be a power of 2");
1932         return;
1933     }
1934 
1935     if (value != cpu->cfg.elen && riscv_cpu_is_vendor(obj)) {
1936         cpu_set_prop_err(cpu, name, errp);
1937         error_append_hint(errp, "Current '%s' val: %u\n",
1938                           name, cpu->cfg.elen);
1939         return;
1940     }
1941 
1942     cpu_option_add_user_setting(name, value);
1943     cpu->cfg.elen = value;
1944 }
1945 
1946 static void prop_elen_get(Object *obj, Visitor *v, const char *name,
1947                          void *opaque, Error **errp)
1948 {
1949     uint16_t value = RISCV_CPU(obj)->cfg.elen;
1950 
1951     visit_type_uint16(v, name, &value, errp);
1952 }
1953 
1954 static const PropertyInfo prop_elen = {
1955     .name = "elen",
1956     .get = prop_elen_get,
1957     .set = prop_elen_set,
1958 };
1959 
1960 static void prop_cbom_blksize_set(Object *obj, Visitor *v, const char *name,
1961                                   void *opaque, Error **errp)
1962 {
1963     RISCVCPU *cpu = RISCV_CPU(obj);
1964     uint16_t value;
1965 
1966     if (!visit_type_uint16(v, name, &value, errp)) {
1967         return;
1968     }
1969 
1970     if (value != cpu->cfg.cbom_blocksize && riscv_cpu_is_vendor(obj)) {
1971         cpu_set_prop_err(cpu, name, errp);
1972         error_append_hint(errp, "Current '%s' val: %u\n",
1973                           name, cpu->cfg.cbom_blocksize);
1974         return;
1975     }
1976 
1977     cpu_option_add_user_setting(name, value);
1978     cpu->cfg.cbom_blocksize = value;
1979 }
1980 
1981 static void prop_cbom_blksize_get(Object *obj, Visitor *v, const char *name,
1982                          void *opaque, Error **errp)
1983 {
1984     uint16_t value = RISCV_CPU(obj)->cfg.cbom_blocksize;
1985 
1986     visit_type_uint16(v, name, &value, errp);
1987 }
1988 
1989 static const PropertyInfo prop_cbom_blksize = {
1990     .name = "cbom_blocksize",
1991     .get = prop_cbom_blksize_get,
1992     .set = prop_cbom_blksize_set,
1993 };
1994 
1995 static void prop_cbop_blksize_set(Object *obj, Visitor *v, const char *name,
1996                                   void *opaque, Error **errp)
1997 {
1998     RISCVCPU *cpu = RISCV_CPU(obj);
1999     uint16_t value;
2000 
2001     if (!visit_type_uint16(v, name, &value, errp)) {
2002         return;
2003     }
2004 
2005     if (value != cpu->cfg.cbop_blocksize && riscv_cpu_is_vendor(obj)) {
2006         cpu_set_prop_err(cpu, name, errp);
2007         error_append_hint(errp, "Current '%s' val: %u\n",
2008                           name, cpu->cfg.cbop_blocksize);
2009         return;
2010     }
2011 
2012     cpu_option_add_user_setting(name, value);
2013     cpu->cfg.cbop_blocksize = value;
2014 }
2015 
2016 static void prop_cbop_blksize_get(Object *obj, Visitor *v, const char *name,
2017                          void *opaque, Error **errp)
2018 {
2019     uint16_t value = RISCV_CPU(obj)->cfg.cbop_blocksize;
2020 
2021     visit_type_uint16(v, name, &value, errp);
2022 }
2023 
2024 static const PropertyInfo prop_cbop_blksize = {
2025     .name = "cbop_blocksize",
2026     .get = prop_cbop_blksize_get,
2027     .set = prop_cbop_blksize_set,
2028 };
2029 
2030 static void prop_cboz_blksize_set(Object *obj, Visitor *v, const char *name,
2031                                   void *opaque, Error **errp)
2032 {
2033     RISCVCPU *cpu = RISCV_CPU(obj);
2034     uint16_t value;
2035 
2036     if (!visit_type_uint16(v, name, &value, errp)) {
2037         return;
2038     }
2039 
2040     if (value != cpu->cfg.cboz_blocksize && riscv_cpu_is_vendor(obj)) {
2041         cpu_set_prop_err(cpu, name, errp);
2042         error_append_hint(errp, "Current '%s' val: %u\n",
2043                           name, cpu->cfg.cboz_blocksize);
2044         return;
2045     }
2046 
2047     cpu_option_add_user_setting(name, value);
2048     cpu->cfg.cboz_blocksize = value;
2049 }
2050 
2051 static void prop_cboz_blksize_get(Object *obj, Visitor *v, const char *name,
2052                          void *opaque, Error **errp)
2053 {
2054     uint16_t value = RISCV_CPU(obj)->cfg.cboz_blocksize;
2055 
2056     visit_type_uint16(v, name, &value, errp);
2057 }
2058 
2059 static const PropertyInfo prop_cboz_blksize = {
2060     .name = "cboz_blocksize",
2061     .get = prop_cboz_blksize_get,
2062     .set = prop_cboz_blksize_set,
2063 };
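
/*
 * The three cache-block size properties follow the same pattern, e.g. an
 * illustrative "-cpu rv64,cbom_blocksize=64,cboz_blocksize=64" keeps the
 * 64-byte blocks that the Zic64b named feature expects.
 */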
2064 
2065 static void prop_mvendorid_set(Object *obj, Visitor *v, const char *name,
2066                                void *opaque, Error **errp)
2067 {
2068     bool dynamic_cpu = riscv_cpu_is_dynamic(obj);
2069     RISCVCPU *cpu = RISCV_CPU(obj);
2070     uint32_t prev_val = cpu->cfg.mvendorid;
2071     uint32_t value;
2072 
2073     if (!visit_type_uint32(v, name, &value, errp)) {
2074         return;
2075     }
2076 
2077     if (!dynamic_cpu && prev_val != value) {
2078         error_setg(errp, "Unable to change %s mvendorid (0x%x)",
2079                    object_get_typename(obj), prev_val);
2080         return;
2081     }
2082 
2083     cpu->cfg.mvendorid = value;
2084 }
2085 
2086 static void prop_mvendorid_get(Object *obj, Visitor *v, const char *name,
2087                                void *opaque, Error **errp)
2088 {
2089     uint32_t value = RISCV_CPU(obj)->cfg.mvendorid;
2090 
2091     visit_type_uint32(v, name, &value, errp);
2092 }
2093 
2094 static const PropertyInfo prop_mvendorid = {
2095     .name = "mvendorid",
2096     .get = prop_mvendorid_get,
2097     .set = prop_mvendorid_set,
2098 };
2099 
2100 static void prop_mimpid_set(Object *obj, Visitor *v, const char *name,
2101                             void *opaque, Error **errp)
2102 {
2103     bool dynamic_cpu = riscv_cpu_is_dynamic(obj);
2104     RISCVCPU *cpu = RISCV_CPU(obj);
2105     uint64_t prev_val = cpu->cfg.mimpid;
2106     uint64_t value;
2107 
2108     if (!visit_type_uint64(v, name, &value, errp)) {
2109         return;
2110     }
2111 
2112     if (!dynamic_cpu && prev_val != value) {
2113         error_setg(errp, "Unable to change %s mimpid (0x%" PRIx64 ")",
2114                    object_get_typename(obj), prev_val);
2115         return;
2116     }
2117 
2118     cpu->cfg.mimpid = value;
2119 }
2120 
2121 static void prop_mimpid_get(Object *obj, Visitor *v, const char *name,
2122                             void *opaque, Error **errp)
2123 {
2124     uint64_t value = RISCV_CPU(obj)->cfg.mimpid;
2125 
2126     visit_type_uint64(v, name, &value, errp);
2127 }
2128 
2129 static const PropertyInfo prop_mimpid = {
2130     .name = "mimpid",
2131     .get = prop_mimpid_get,
2132     .set = prop_mimpid_set,
2133 };
2134 
2135 static void prop_marchid_set(Object *obj, Visitor *v, const char *name,
2136                              void *opaque, Error **errp)
2137 {
2138     bool dynamic_cpu = riscv_cpu_is_dynamic(obj);
2139     RISCVCPU *cpu = RISCV_CPU(obj);
2140     uint64_t prev_val = cpu->cfg.marchid;
2141     uint64_t value, invalid_val;
2142     uint32_t mxlen = 0;
2143 
2144     if (!visit_type_uint64(v, name, &value, errp)) {
2145         return;
2146     }
2147 
2148     if (!dynamic_cpu && prev_val != value) {
2149         error_setg(errp, "Unable to change %s marchid (0x%" PRIx64 ")",
2150                    object_get_typename(obj), prev_val);
2151         return;
2152     }
2153 
2154     switch (riscv_cpu_mxl(&cpu->env)) {
2155     case MXL_RV32:
2156         mxlen = 32;
2157         break;
2158     case MXL_RV64:
2159     case MXL_RV128:
2160         mxlen = 64;
2161         break;
2162     default:
2163         g_assert_not_reached();
2164     }
2165 
2166     invalid_val = 1ULL << (mxlen - 1);
2167 
2168     if (value == invalid_val) {
2169         error_setg(errp, "Unable to set marchid with only the MSB "
2170                          "(bit %u) set", mxlen - 1);
2171         return;
2172     }
2173 
2174     cpu->cfg.marchid = value;
2175 }
2176 
2177 static void prop_marchid_get(Object *obj, Visitor *v, const char *name,
2178                              void *opaque, Error **errp)
2179 {
2180     uint64_t value = RISCV_CPU(obj)->cfg.marchid;
2181 
2182     visit_type_uint64(v, name, &value, errp);
2183 }
2184 
2185 static const PropertyInfo prop_marchid = {
2186     .name = "marchid",
2187     .get = prop_marchid_get,
2188     .set = prop_marchid_set,
2189 };
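
/*
 * Example of the reserved-value check above: on an RV64 CPU the single
 * rejected marchid is 0x8000000000000000 (only bit 63 set); on RV32 it
 * is 0x80000000. Any other value, including 0, passes this setter.
 */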
2190 
2191 /*
2192  * RVA22U64 defines some cache-related 'named features': Za64rs,
2193  * Zic64b, Ziccif, Ziccrse, Ziccamoa and Zicclsm. They are always
2194  * implemented in TCG and don't need to be manually enabled by
2195  * the profile.
2196  */
2197 static RISCVCPUProfile RVA22U64 = {
2198     .parent = NULL,
2199     .name = "rva22u64",
2200     .misa_ext = RVI | RVM | RVA | RVF | RVD | RVC | RVU,
2201     .priv_spec = RISCV_PROFILE_ATTR_UNUSED,
2202     .satp_mode = RISCV_PROFILE_ATTR_UNUSED,
2203     .ext_offsets = {
2204         CPU_CFG_OFFSET(ext_zicsr), CPU_CFG_OFFSET(ext_zihintpause),
2205         CPU_CFG_OFFSET(ext_zba), CPU_CFG_OFFSET(ext_zbb),
2206         CPU_CFG_OFFSET(ext_zbs), CPU_CFG_OFFSET(ext_zfhmin),
2207         CPU_CFG_OFFSET(ext_zkt), CPU_CFG_OFFSET(ext_zicntr),
2208         CPU_CFG_OFFSET(ext_zihpm), CPU_CFG_OFFSET(ext_zicbom),
2209         CPU_CFG_OFFSET(ext_zicbop), CPU_CFG_OFFSET(ext_zicboz),
2210 
2211         /* mandatory named features for this profile */
2212         CPU_CFG_OFFSET(ext_zic64b),
2213 
2214         RISCV_PROFILE_EXT_LIST_END
2215     }
2216 };
2217 
2218 /*
2219  * As with RVA22U64, RVA22S64 also defines 'named features'.
2220  *
2221  * Cache related features that we consider enabled since we don't
2222  * implement cache: Ssccptr
2223  *
2224  * Other named features that we already implement: Sstvecd, Sstvala,
2225  * Sscounterenw
2226  *
2227  * The remaining features/extensions come from RVA22U64.
2228  */
2229 static RISCVCPUProfile RVA22S64 = {
2230     .parent = &RVA22U64,
2231     .name = "rva22s64",
2232     .misa_ext = RVS,
2233     .priv_spec = PRIV_VERSION_1_12_0,
2234     .satp_mode = VM_1_10_SV39,
2235     .ext_offsets = {
2236         /* rva22s64 exts */
2237         CPU_CFG_OFFSET(ext_zifencei), CPU_CFG_OFFSET(ext_svpbmt),
2238         CPU_CFG_OFFSET(ext_svinval), CPU_CFG_OFFSET(ext_svade),
2239 
2240         RISCV_PROFILE_EXT_LIST_END
2241     }
2242 };
2243 
2244 RISCVCPUProfile *riscv_profiles[] = {
2245     &RVA22U64,
2246     &RVA22S64,
2247     NULL,
2248 };
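
/*
 * Profiles are exposed as CPU models of their own (see the
 * DEFINE_PROFILE_CPU() entries at the end of this file). An illustrative
 * "-cpu rva22s64" starts from the bare rv64i CPU and marks the profile
 * as enabled, so later setup can turn on everything RVA22S64 mandates,
 * including what it inherits from its RVA22U64 parent.
 */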
2249 
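/*
 * The tables below describe implied-extension rules: when the extension
 * in '.ext' is enabled, every MISA bit in '.implied_misa_exts' and every
 * multi-letter extension in '.implied_multi_exts' is enabled as well
 * (the rules are applied elsewhere, during CPU realization). For example,
 * enabling 'zk' pulls in zkn, zkr and zkt, and zkn in turn pulls in
 * zbkb/zbkc/zbkx/zkne/zknd/zknh via its own rule.
 */
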
2250 static RISCVCPUImpliedExtsRule RVA_IMPLIED = {
2251     .is_misa = true,
2252     .ext = RVA,
2253     .implied_multi_exts = {
2254         CPU_CFG_OFFSET(ext_zalrsc), CPU_CFG_OFFSET(ext_zaamo),
2255 
2256         RISCV_IMPLIED_EXTS_RULE_END
2257     },
2258 };
2259 
2260 static RISCVCPUImpliedExtsRule RVD_IMPLIED = {
2261     .is_misa = true,
2262     .ext = RVD,
2263     .implied_misa_exts = RVF,
2264     .implied_multi_exts = { RISCV_IMPLIED_EXTS_RULE_END },
2265 };
2266 
2267 static RISCVCPUImpliedExtsRule RVF_IMPLIED = {
2268     .is_misa = true,
2269     .ext = RVF,
2270     .implied_multi_exts = {
2271         CPU_CFG_OFFSET(ext_zicsr),
2272 
2273         RISCV_IMPLIED_EXTS_RULE_END
2274     },
2275 };
2276 
2277 static RISCVCPUImpliedExtsRule RVM_IMPLIED = {
2278     .is_misa = true,
2279     .ext = RVM,
2280     .implied_multi_exts = {
2281         CPU_CFG_OFFSET(ext_zmmul),
2282 
2283         RISCV_IMPLIED_EXTS_RULE_END
2284     },
2285 };
2286 
2287 static RISCVCPUImpliedExtsRule RVV_IMPLIED = {
2288     .is_misa = true,
2289     .ext = RVV,
2290     .implied_multi_exts = {
2291         CPU_CFG_OFFSET(ext_zve64d),
2292 
2293         RISCV_IMPLIED_EXTS_RULE_END
2294     },
2295 };
2296 
2297 static RISCVCPUImpliedExtsRule ZCB_IMPLIED = {
2298     .ext = CPU_CFG_OFFSET(ext_zcb),
2299     .implied_multi_exts = {
2300         CPU_CFG_OFFSET(ext_zca),
2301 
2302         RISCV_IMPLIED_EXTS_RULE_END
2303     },
2304 };
2305 
2306 static RISCVCPUImpliedExtsRule ZCD_IMPLIED = {
2307     .ext = CPU_CFG_OFFSET(ext_zcd),
2308     .implied_misa_exts = RVD,
2309     .implied_multi_exts = {
2310         CPU_CFG_OFFSET(ext_zca),
2311 
2312         RISCV_IMPLIED_EXTS_RULE_END
2313     },
2314 };
2315 
2316 static RISCVCPUImpliedExtsRule ZCE_IMPLIED = {
2317     .ext = CPU_CFG_OFFSET(ext_zce),
2318     .implied_multi_exts = {
2319         CPU_CFG_OFFSET(ext_zcb), CPU_CFG_OFFSET(ext_zcmp),
2320         CPU_CFG_OFFSET(ext_zcmt),
2321 
2322         RISCV_IMPLIED_EXTS_RULE_END
2323     },
2324 };
2325 
2326 static RISCVCPUImpliedExtsRule ZCF_IMPLIED = {
2327     .ext = CPU_CFG_OFFSET(ext_zcf),
2328     .implied_misa_exts = RVF,
2329     .implied_multi_exts = {
2330         CPU_CFG_OFFSET(ext_zca),
2331 
2332         RISCV_IMPLIED_EXTS_RULE_END
2333     },
2334 };
2335 
2336 static RISCVCPUImpliedExtsRule ZCMP_IMPLIED = {
2337     .ext = CPU_CFG_OFFSET(ext_zcmp),
2338     .implied_multi_exts = {
2339         CPU_CFG_OFFSET(ext_zca),
2340 
2341         RISCV_IMPLIED_EXTS_RULE_END
2342     },
2343 };
2344 
2345 static RISCVCPUImpliedExtsRule ZCMT_IMPLIED = {
2346     .ext = CPU_CFG_OFFSET(ext_zcmt),
2347     .implied_multi_exts = {
2348         CPU_CFG_OFFSET(ext_zca), CPU_CFG_OFFSET(ext_zicsr),
2349 
2350         RISCV_IMPLIED_EXTS_RULE_END
2351     },
2352 };
2353 
2354 static RISCVCPUImpliedExtsRule ZDINX_IMPLIED = {
2355     .ext = CPU_CFG_OFFSET(ext_zdinx),
2356     .implied_multi_exts = {
2357         CPU_CFG_OFFSET(ext_zfinx),
2358 
2359         RISCV_IMPLIED_EXTS_RULE_END
2360     },
2361 };
2362 
2363 static RISCVCPUImpliedExtsRule ZFA_IMPLIED = {
2364     .ext = CPU_CFG_OFFSET(ext_zfa),
2365     .implied_misa_exts = RVF,
2366     .implied_multi_exts = { RISCV_IMPLIED_EXTS_RULE_END },
2367 };
2368 
2369 static RISCVCPUImpliedExtsRule ZFBFMIN_IMPLIED = {
2370     .ext = CPU_CFG_OFFSET(ext_zfbfmin),
2371     .implied_misa_exts = RVF,
2372     .implied_multi_exts = { RISCV_IMPLIED_EXTS_RULE_END },
2373 };
2374 
2375 static RISCVCPUImpliedExtsRule ZFH_IMPLIED = {
2376     .ext = CPU_CFG_OFFSET(ext_zfh),
2377     .implied_multi_exts = {
2378         CPU_CFG_OFFSET(ext_zfhmin),
2379 
2380         RISCV_IMPLIED_EXTS_RULE_END
2381     },
2382 };
2383 
2384 static RISCVCPUImpliedExtsRule ZFHMIN_IMPLIED = {
2385     .ext = CPU_CFG_OFFSET(ext_zfhmin),
2386     .implied_misa_exts = RVF,
2387     .implied_multi_exts = { RISCV_IMPLIED_EXTS_RULE_END },
2388 };
2389 
2390 static RISCVCPUImpliedExtsRule ZFINX_IMPLIED = {
2391     .ext = CPU_CFG_OFFSET(ext_zfinx),
2392     .implied_multi_exts = {
2393         CPU_CFG_OFFSET(ext_zicsr),
2394 
2395         RISCV_IMPLIED_EXTS_RULE_END
2396     },
2397 };
2398 
2399 static RISCVCPUImpliedExtsRule ZHINX_IMPLIED = {
2400     .ext = CPU_CFG_OFFSET(ext_zhinx),
2401     .implied_multi_exts = {
2402         CPU_CFG_OFFSET(ext_zhinxmin),
2403 
2404         RISCV_IMPLIED_EXTS_RULE_END
2405     },
2406 };
2407 
2408 static RISCVCPUImpliedExtsRule ZHINXMIN_IMPLIED = {
2409     .ext = CPU_CFG_OFFSET(ext_zhinxmin),
2410     .implied_multi_exts = {
2411         CPU_CFG_OFFSET(ext_zfinx),
2412 
2413         RISCV_IMPLIED_EXTS_RULE_END
2414     },
2415 };
2416 
2417 static RISCVCPUImpliedExtsRule ZICNTR_IMPLIED = {
2418     .ext = CPU_CFG_OFFSET(ext_zicntr),
2419     .implied_multi_exts = {
2420         CPU_CFG_OFFSET(ext_zicsr),
2421 
2422         RISCV_IMPLIED_EXTS_RULE_END
2423     },
2424 };
2425 
2426 static RISCVCPUImpliedExtsRule ZIHPM_IMPLIED = {
2427     .ext = CPU_CFG_OFFSET(ext_zihpm),
2428     .implied_multi_exts = {
2429         CPU_CFG_OFFSET(ext_zicsr),
2430 
2431         RISCV_IMPLIED_EXTS_RULE_END
2432     },
2433 };
2434 
2435 static RISCVCPUImpliedExtsRule ZK_IMPLIED = {
2436     .ext = CPU_CFG_OFFSET(ext_zk),
2437     .implied_multi_exts = {
2438         CPU_CFG_OFFSET(ext_zkn), CPU_CFG_OFFSET(ext_zkr),
2439         CPU_CFG_OFFSET(ext_zkt),
2440 
2441         RISCV_IMPLIED_EXTS_RULE_END
2442     },
2443 };
2444 
2445 static RISCVCPUImpliedExtsRule ZKN_IMPLIED = {
2446     .ext = CPU_CFG_OFFSET(ext_zkn),
2447     .implied_multi_exts = {
2448         CPU_CFG_OFFSET(ext_zbkb), CPU_CFG_OFFSET(ext_zbkc),
2449         CPU_CFG_OFFSET(ext_zbkx), CPU_CFG_OFFSET(ext_zkne),
2450         CPU_CFG_OFFSET(ext_zknd), CPU_CFG_OFFSET(ext_zknh),
2451 
2452         RISCV_IMPLIED_EXTS_RULE_END
2453     },
2454 };
2455 
2456 static RISCVCPUImpliedExtsRule ZKS_IMPLIED = {
2457     .ext = CPU_CFG_OFFSET(ext_zks),
2458     .implied_multi_exts = {
2459         CPU_CFG_OFFSET(ext_zbkb), CPU_CFG_OFFSET(ext_zbkc),
2460         CPU_CFG_OFFSET(ext_zbkx), CPU_CFG_OFFSET(ext_zksed),
2461         CPU_CFG_OFFSET(ext_zksh),
2462 
2463         RISCV_IMPLIED_EXTS_RULE_END
2464     },
2465 };
2466 
2467 static RISCVCPUImpliedExtsRule ZVBB_IMPLIED = {
2468     .ext = CPU_CFG_OFFSET(ext_zvbb),
2469     .implied_multi_exts = {
2470         CPU_CFG_OFFSET(ext_zvkb),
2471 
2472         RISCV_IMPLIED_EXTS_RULE_END
2473     },
2474 };
2475 
2476 static RISCVCPUImpliedExtsRule ZVE32F_IMPLIED = {
2477     .ext = CPU_CFG_OFFSET(ext_zve32f),
2478     .implied_misa_exts = RVF,
2479     .implied_multi_exts = {
2480         CPU_CFG_OFFSET(ext_zve32x),
2481 
2482         RISCV_IMPLIED_EXTS_RULE_END
2483     },
2484 };
2485 
2486 static RISCVCPUImpliedExtsRule ZVE32X_IMPLIED = {
2487     .ext = CPU_CFG_OFFSET(ext_zve32x),
2488     .implied_multi_exts = {
2489         CPU_CFG_OFFSET(ext_zicsr),
2490 
2491         RISCV_IMPLIED_EXTS_RULE_END
2492     },
2493 };
2494 
2495 static RISCVCPUImpliedExtsRule ZVE64D_IMPLIED = {
2496     .ext = CPU_CFG_OFFSET(ext_zve64d),
2497     .implied_misa_exts = RVD,
2498     .implied_multi_exts = {
2499         CPU_CFG_OFFSET(ext_zve64f),
2500 
2501         RISCV_IMPLIED_EXTS_RULE_END
2502     },
2503 };
2504 
2505 static RISCVCPUImpliedExtsRule ZVE64F_IMPLIED = {
2506     .ext = CPU_CFG_OFFSET(ext_zve64f),
2507     .implied_misa_exts = RVF,
2508     .implied_multi_exts = {
2509         CPU_CFG_OFFSET(ext_zve32f), CPU_CFG_OFFSET(ext_zve64x),
2510 
2511         RISCV_IMPLIED_EXTS_RULE_END
2512     },
2513 };
2514 
2515 static RISCVCPUImpliedExtsRule ZVE64X_IMPLIED = {
2516     .ext = CPU_CFG_OFFSET(ext_zve64x),
2517     .implied_multi_exts = {
2518         CPU_CFG_OFFSET(ext_zve32x),
2519 
2520         RISCV_IMPLIED_EXTS_RULE_END
2521     },
2522 };
2523 
2524 static RISCVCPUImpliedExtsRule ZVFBFMIN_IMPLIED = {
2525     .ext = CPU_CFG_OFFSET(ext_zvfbfmin),
2526     .implied_multi_exts = {
2527         CPU_CFG_OFFSET(ext_zve32f),
2528 
2529         RISCV_IMPLIED_EXTS_RULE_END
2530     },
2531 };
2532 
2533 static RISCVCPUImpliedExtsRule ZVFBFWMA_IMPLIED = {
2534     .ext = CPU_CFG_OFFSET(ext_zvfbfwma),
2535     .implied_multi_exts = {
2536         CPU_CFG_OFFSET(ext_zvfbfmin), CPU_CFG_OFFSET(ext_zfbfmin),
2537 
2538         RISCV_IMPLIED_EXTS_RULE_END
2539     },
2540 };
2541 
2542 static RISCVCPUImpliedExtsRule ZVFH_IMPLIED = {
2543     .ext = CPU_CFG_OFFSET(ext_zvfh),
2544     .implied_multi_exts = {
2545         CPU_CFG_OFFSET(ext_zvfhmin), CPU_CFG_OFFSET(ext_zfhmin),
2546 
2547         RISCV_IMPLIED_EXTS_RULE_END
2548     },
2549 };
2550 
2551 static RISCVCPUImpliedExtsRule ZVFHMIN_IMPLIED = {
2552     .ext = CPU_CFG_OFFSET(ext_zvfhmin),
2553     .implied_multi_exts = {
2554         CPU_CFG_OFFSET(ext_zve32f),
2555 
2556         RISCV_IMPLIED_EXTS_RULE_END
2557     },
2558 };
2559 
2560 static RISCVCPUImpliedExtsRule ZVKN_IMPLIED = {
2561     .ext = CPU_CFG_OFFSET(ext_zvkn),
2562     .implied_multi_exts = {
2563         CPU_CFG_OFFSET(ext_zvkned), CPU_CFG_OFFSET(ext_zvknhb),
2564         CPU_CFG_OFFSET(ext_zvkb), CPU_CFG_OFFSET(ext_zvkt),
2565 
2566         RISCV_IMPLIED_EXTS_RULE_END
2567     },
2568 };
2569 
2570 static RISCVCPUImpliedExtsRule ZVKNC_IMPLIED = {
2571     .ext = CPU_CFG_OFFSET(ext_zvknc),
2572     .implied_multi_exts = {
2573         CPU_CFG_OFFSET(ext_zvkn), CPU_CFG_OFFSET(ext_zvbc),
2574 
2575         RISCV_IMPLIED_EXTS_RULE_END
2576     },
2577 };
2578 
2579 static RISCVCPUImpliedExtsRule ZVKNG_IMPLIED = {
2580     .ext = CPU_CFG_OFFSET(ext_zvkng),
2581     .implied_multi_exts = {
2582         CPU_CFG_OFFSET(ext_zvkn), CPU_CFG_OFFSET(ext_zvkg),
2583 
2584         RISCV_IMPLIED_EXTS_RULE_END
2585     },
2586 };
2587 
2588 static RISCVCPUImpliedExtsRule ZVKNHB_IMPLIED = {
2589     .ext = CPU_CFG_OFFSET(ext_zvknhb),
2590     .implied_multi_exts = {
2591         CPU_CFG_OFFSET(ext_zve64x),
2592 
2593         RISCV_IMPLIED_EXTS_RULE_END
2594     },
2595 };
2596 
2597 static RISCVCPUImpliedExtsRule ZVKS_IMPLIED = {
2598     .ext = CPU_CFG_OFFSET(ext_zvks),
2599     .implied_multi_exts = {
2600         CPU_CFG_OFFSET(ext_zvksed), CPU_CFG_OFFSET(ext_zvksh),
2601         CPU_CFG_OFFSET(ext_zvkb), CPU_CFG_OFFSET(ext_zvkt),
2602 
2603         RISCV_IMPLIED_EXTS_RULE_END
2604     },
2605 };
2606 
2607 static RISCVCPUImpliedExtsRule ZVKSC_IMPLIED = {
2608     .ext = CPU_CFG_OFFSET(ext_zvksc),
2609     .implied_multi_exts = {
2610         CPU_CFG_OFFSET(ext_zvks), CPU_CFG_OFFSET(ext_zvbc),
2611 
2612         RISCV_IMPLIED_EXTS_RULE_END
2613     },
2614 };
2615 
2616 static RISCVCPUImpliedExtsRule ZVKSG_IMPLIED = {
2617     .ext = CPU_CFG_OFFSET(ext_zvksg),
2618     .implied_multi_exts = {
2619         CPU_CFG_OFFSET(ext_zvks), CPU_CFG_OFFSET(ext_zvkg),
2620 
2621         RISCV_IMPLIED_EXTS_RULE_END
2622     },
2623 };
2624 
2625 RISCVCPUImpliedExtsRule *riscv_misa_ext_implied_rules[] = {
2626     &RVA_IMPLIED, &RVD_IMPLIED, &RVF_IMPLIED,
2627     &RVM_IMPLIED, &RVV_IMPLIED, NULL
2628 };
2629 
2630 RISCVCPUImpliedExtsRule *riscv_multi_ext_implied_rules[] = {
2631     &ZCB_IMPLIED, &ZCD_IMPLIED, &ZCE_IMPLIED,
2632     &ZCF_IMPLIED, &ZCMP_IMPLIED, &ZCMT_IMPLIED,
2633     &ZDINX_IMPLIED, &ZFA_IMPLIED, &ZFBFMIN_IMPLIED,
2634     &ZFH_IMPLIED, &ZFHMIN_IMPLIED, &ZFINX_IMPLIED,
2635     &ZHINX_IMPLIED, &ZHINXMIN_IMPLIED, &ZICNTR_IMPLIED,
2636     &ZIHPM_IMPLIED, &ZK_IMPLIED, &ZKN_IMPLIED,
2637     &ZKS_IMPLIED, &ZVBB_IMPLIED, &ZVE32F_IMPLIED,
2638     &ZVE32X_IMPLIED, &ZVE64D_IMPLIED, &ZVE64F_IMPLIED,
2639     &ZVE64X_IMPLIED, &ZVFBFMIN_IMPLIED, &ZVFBFWMA_IMPLIED,
2640     &ZVFH_IMPLIED, &ZVFHMIN_IMPLIED, &ZVKN_IMPLIED,
2641     &ZVKNC_IMPLIED, &ZVKNG_IMPLIED, &ZVKNHB_IMPLIED,
2642     &ZVKS_IMPLIED,  &ZVKSC_IMPLIED, &ZVKSG_IMPLIED,
2643     NULL
2644 };
2645 
2646 static Property riscv_cpu_properties[] = {
2647     DEFINE_PROP_BOOL("debug", RISCVCPU, cfg.debug, true),
2648 
2649     {.name = "pmu-mask", .info = &prop_pmu_mask},
2650     {.name = "pmu-num", .info = &prop_pmu_num}, /* Deprecated */
2651 
2652     {.name = "mmu", .info = &prop_mmu},
2653     {.name = "pmp", .info = &prop_pmp},
2654 
2655     {.name = "priv_spec", .info = &prop_priv_spec},
2656     {.name = "vext_spec", .info = &prop_vext_spec},
2657 
2658     {.name = "vlen", .info = &prop_vlen},
2659     {.name = "elen", .info = &prop_elen},
2660 
2661     {.name = "cbom_blocksize", .info = &prop_cbom_blksize},
2662     {.name = "cbop_blocksize", .info = &prop_cbop_blksize},
2663     {.name = "cboz_blocksize", .info = &prop_cboz_blksize},
2664 
2665     {.name = "mvendorid", .info = &prop_mvendorid},
2666     {.name = "mimpid", .info = &prop_mimpid},
2667     {.name = "marchid", .info = &prop_marchid},
2668 
2669 #ifndef CONFIG_USER_ONLY
2670     DEFINE_PROP_UINT64("resetvec", RISCVCPU, env.resetvec, DEFAULT_RSTVEC),
2671 #endif
2672 
2673     DEFINE_PROP_BOOL("short-isa-string", RISCVCPU, cfg.short_isa_string, false),
2674 
2675     DEFINE_PROP_BOOL("rvv_ta_all_1s", RISCVCPU, cfg.rvv_ta_all_1s, false),
2676     DEFINE_PROP_BOOL("rvv_ma_all_1s", RISCVCPU, cfg.rvv_ma_all_1s, false),
2677     DEFINE_PROP_BOOL("rvv_vl_half_avl", RISCVCPU, cfg.rvv_vl_half_avl, false),
2678 
2679     /*
2680      * write_misa() is marked as experimental for now, so mark
2681      * it with the 'x-' prefix and default to 'false'.
2682      */
2683     DEFINE_PROP_BOOL("x-misa-w", RISCVCPU, cfg.misa_w, false),
2684     DEFINE_PROP_END_OF_LIST(),
2685 };
2686 
2687 #if defined(TARGET_RISCV64)
2688 static void rva22u64_profile_cpu_init(Object *obj)
2689 {
2690     rv64i_bare_cpu_init(obj);
2691 
2692     RVA22U64.enabled = true;
2693 }
2694 
2695 static void rva22s64_profile_cpu_init(Object *obj)
2696 {
2697     rv64i_bare_cpu_init(obj);
2698 
2699     RVA22S64.enabled = true;
2700 }
2701 #endif
2702 
2703 static const gchar *riscv_gdb_arch_name(CPUState *cs)
2704 {
2705     RISCVCPU *cpu = RISCV_CPU(cs);
2706     CPURISCVState *env = &cpu->env;
2707 
2708     switch (riscv_cpu_mxl(env)) {
2709     case MXL_RV32:
2710         return "riscv:rv32";
2711     case MXL_RV64:
2712     case MXL_RV128:
2713         return "riscv:rv64";
2714     default:
2715         g_assert_not_reached();
2716     }
2717 }
2718 
2719 #ifndef CONFIG_USER_ONLY
2720 static int64_t riscv_get_arch_id(CPUState *cs)
2721 {
2722     RISCVCPU *cpu = RISCV_CPU(cs);
2723 
2724     return cpu->env.mhartid;
2725 }
2726 
2727 #include "hw/core/sysemu-cpu-ops.h"
2728 
2729 static const struct SysemuCPUOps riscv_sysemu_ops = {
2730     .get_phys_page_debug = riscv_cpu_get_phys_page_debug,
2731     .write_elf64_note = riscv_cpu_write_elf64_note,
2732     .write_elf32_note = riscv_cpu_write_elf32_note,
2733     .legacy_vmsd = &vmstate_riscv_cpu,
2734 };
2735 #endif
2736 
2737 static void riscv_cpu_common_class_init(ObjectClass *c, void *data)
2738 {
2739     RISCVCPUClass *mcc = RISCV_CPU_CLASS(c);
2740     CPUClass *cc = CPU_CLASS(c);
2741     DeviceClass *dc = DEVICE_CLASS(c);
2742     ResettableClass *rc = RESETTABLE_CLASS(c);
2743 
2744     device_class_set_parent_realize(dc, riscv_cpu_realize,
2745                                     &mcc->parent_realize);
2746 
2747     resettable_class_set_parent_phases(rc, NULL, riscv_cpu_reset_hold, NULL,
2748                                        &mcc->parent_phases);
2749 
2750     cc->class_by_name = riscv_cpu_class_by_name;
2751     cc->has_work = riscv_cpu_has_work;
2752     cc->mmu_index = riscv_cpu_mmu_index;
2753     cc->dump_state = riscv_cpu_dump_state;
2754     cc->set_pc = riscv_cpu_set_pc;
2755     cc->get_pc = riscv_cpu_get_pc;
2756     cc->gdb_read_register = riscv_cpu_gdb_read_register;
2757     cc->gdb_write_register = riscv_cpu_gdb_write_register;
2758     cc->gdb_stop_before_watchpoint = true;
2759     cc->disas_set_info = riscv_cpu_disas_set_info;
2760 #ifndef CONFIG_USER_ONLY
2761     cc->sysemu_ops = &riscv_sysemu_ops;
2762     cc->get_arch_id = riscv_get_arch_id;
2763 #endif
2764     cc->gdb_arch_name = riscv_gdb_arch_name;
2765 
2766     device_class_set_props(dc, riscv_cpu_properties);
2767 }
2768 
2769 static void riscv_cpu_class_init(ObjectClass *c, void *data)
2770 {
2771     RISCVCPUClass *mcc = RISCV_CPU_CLASS(c);
2772 
2773     mcc->misa_mxl_max = (uint32_t)(uintptr_t)data;
2774     riscv_cpu_validate_misa_mxl(mcc);
2775 }
2776 
2777 static void riscv_isa_string_ext(RISCVCPU *cpu, char **isa_str,
2778                                  int max_str_len)
2779 {
2780     const RISCVIsaExtData *edata;
2781     char *old = *isa_str;
2782     char *new = *isa_str;
2783 
2784     for (edata = isa_edata_arr; edata && edata->name; edata++) {
2785         if (isa_ext_is_enabled(cpu, edata->ext_enable_offset)) {
2786             new = g_strconcat(old, "_", edata->name, NULL);
2787             g_free(old);
2788             old = new;
2789         }
2790     }
2791 
2792     *isa_str = new;
2793 }
2794 
2795 char *riscv_isa_string(RISCVCPU *cpu)
2796 {
2797     RISCVCPUClass *mcc = RISCV_CPU_GET_CLASS(cpu);
2798     int i;
2799     const size_t maxlen = sizeof("rv128") + sizeof(riscv_single_letter_exts);
2800     char *isa_str = g_new(char, maxlen);
2801     int xlen = riscv_cpu_max_xlen(mcc);
2802     char *p = isa_str + snprintf(isa_str, maxlen, "rv%d", xlen);
2803 
2804     for (i = 0; i < sizeof(riscv_single_letter_exts) - 1; i++) {
2805         if (cpu->env.misa_ext & RV(riscv_single_letter_exts[i])) {
2806             *p++ = qemu_tolower(riscv_single_letter_exts[i]);
2807         }
2808     }
2809     *p = '\0';
2810     if (!cpu->cfg.short_isa_string) {
2811         riscv_isa_string_ext(cpu, &isa_str, maxlen);
2812     }
2813     return isa_str;
2814 }
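
/*
 * Example output (illustrative): a CPU with only RVI/M/A/F/D/C set in
 * misa and only Zicsr and Zifencei among the multi-letter extensions
 * produces "rv64imafdc_zicsr_zifencei"; with cfg.short_isa_string set,
 * only the single-letter part "rv64imafdc" is returned.
 */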
2815 
2816 #ifndef CONFIG_USER_ONLY
2817 static char **riscv_isa_extensions_list(RISCVCPU *cpu, int *count)
2818 {
2819     int maxlen = ARRAY_SIZE(riscv_single_letter_exts) + ARRAY_SIZE(isa_edata_arr);
2820     char **extensions = g_new(char *, maxlen);
2821 
2822     for (int i = 0; i < sizeof(riscv_single_letter_exts) - 1; i++) {
2823         if (cpu->env.misa_ext & RV(riscv_single_letter_exts[i])) {
2824             extensions[*count] = g_new(char, 2);
2825             snprintf(extensions[*count], 2, "%c",
2826                      qemu_tolower(riscv_single_letter_exts[i]));
2827             (*count)++;
2828         }
2829     }
2830 
2831     for (const RISCVIsaExtData *edata = isa_edata_arr; edata->name; edata++) {
2832         if (isa_ext_is_enabled(cpu, edata->ext_enable_offset)) {
2833             extensions[*count] = g_strdup(edata->name);
2834             (*count)++;
2835         }
2836     }
2837 
2838     return extensions;
2839 }
2840 
2841 void riscv_isa_write_fdt(RISCVCPU *cpu, void *fdt, char *nodename)
2842 {
2843     RISCVCPUClass *mcc = RISCV_CPU_GET_CLASS(cpu);
2844     const size_t maxlen = sizeof("rv128i");
2845     g_autofree char *isa_base = g_new(char, maxlen);
2846     g_autofree char *riscv_isa;
2847     char **isa_extensions;
2848     int count = 0;
2849     int xlen = riscv_cpu_max_xlen(mcc);
2850 
2851     riscv_isa = riscv_isa_string(cpu);
2852     qemu_fdt_setprop_string(fdt, nodename, "riscv,isa", riscv_isa);
2853 
2854     snprintf(isa_base, maxlen, "rv%di", xlen);
2855     qemu_fdt_setprop_string(fdt, nodename, "riscv,isa-base", isa_base);
2856 
2857     isa_extensions = riscv_isa_extensions_list(cpu, &count);
2858     qemu_fdt_setprop_string_array(fdt, nodename, "riscv,isa-extensions",
2859                                   isa_extensions, count);
2860 
2861     for (int i = 0; i < count; i++) {
2862         g_free(isa_extensions[i]);
2863     }
2864 
2865     g_free(isa_extensions);
2866 }
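
/*
 * Resulting device-tree properties (illustrative, for the same
 * rv64imafdc example as above):
 *
 *   riscv,isa            = "rv64imafdc_zicsr_zifencei";
 *   riscv,isa-base       = "rv64i";
 *   riscv,isa-extensions = "i", "m", "a", "f", "d", "c", "zicsr",
 *                          "zifencei";
 */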
2867 #endif
2868 
2869 #define DEFINE_CPU(type_name, misa_mxl_max, initfn)         \
2870     {                                                       \
2871         .name = (type_name),                                \
2872         .parent = TYPE_RISCV_CPU,                           \
2873         .instance_init = (initfn),                          \
2874         .class_init = riscv_cpu_class_init,                 \
2875         .class_data = (void *)(misa_mxl_max)                \
2876     }
2877 
2878 #define DEFINE_DYNAMIC_CPU(type_name, misa_mxl_max, initfn) \
2879     {                                                       \
2880         .name = (type_name),                                \
2881         .parent = TYPE_RISCV_DYNAMIC_CPU,                   \
2882         .instance_init = (initfn),                          \
2883         .class_init = riscv_cpu_class_init,                 \
2884         .class_data = (void *)(misa_mxl_max)                \
2885     }
2886 
2887 #define DEFINE_VENDOR_CPU(type_name, misa_mxl_max, initfn)  \
2888     {                                                       \
2889         .name = (type_name),                                \
2890         .parent = TYPE_RISCV_VENDOR_CPU,                    \
2891         .instance_init = (initfn),                          \
2892         .class_init = riscv_cpu_class_init,                 \
2893         .class_data = (void *)(misa_mxl_max)                \
2894     }
2895 
2896 #define DEFINE_BARE_CPU(type_name, misa_mxl_max, initfn)    \
2897     {                                                       \
2898         .name = (type_name),                                \
2899         .parent = TYPE_RISCV_BARE_CPU,                      \
2900         .instance_init = (initfn),                          \
2901         .class_init = riscv_cpu_class_init,                 \
2902         .class_data = (void *)(misa_mxl_max)                \
2903     }
2904 
2905 #define DEFINE_PROFILE_CPU(type_name, misa_mxl_max, initfn) \
2906     {                                                       \
2907         .name = (type_name),                                \
2908         .parent = TYPE_RISCV_BARE_CPU,                      \
2909         .instance_init = (initfn),                          \
2910         .class_init = riscv_cpu_class_init,                 \
2911         .class_data = (void *)(misa_mxl_max)                \
2912     }
2913 
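/*
 * All of the DEFINE_*_CPU() macros above expand to a TypeInfo entry that
 * shares riscv_cpu_class_init() and passes the maximum MISA MXL value
 * through '.class_data'; the parent type (dynamic, vendor or bare;
 * profile CPUs reuse the bare parent) determines how configurable the
 * resulting CPU model is.
 */
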
2914 static const TypeInfo riscv_cpu_type_infos[] = {
2915     {
2916         .name = TYPE_RISCV_CPU,
2917         .parent = TYPE_CPU,
2918         .instance_size = sizeof(RISCVCPU),
2919         .instance_align = __alignof(RISCVCPU),
2920         .instance_init = riscv_cpu_init,
2921         .instance_post_init = riscv_cpu_post_init,
2922         .abstract = true,
2923         .class_size = sizeof(RISCVCPUClass),
2924         .class_init = riscv_cpu_common_class_init,
2925     },
2926     {
2927         .name = TYPE_RISCV_DYNAMIC_CPU,
2928         .parent = TYPE_RISCV_CPU,
2929         .abstract = true,
2930     },
2931     {
2932         .name = TYPE_RISCV_VENDOR_CPU,
2933         .parent = TYPE_RISCV_CPU,
2934         .abstract = true,
2935     },
2936     {
2937         .name = TYPE_RISCV_BARE_CPU,
2938         .parent = TYPE_RISCV_CPU,
2939         .instance_init = riscv_bare_cpu_init,
2940         .abstract = true,
2941     },
2942 #if defined(TARGET_RISCV32)
2943     DEFINE_DYNAMIC_CPU(TYPE_RISCV_CPU_MAX,       MXL_RV32,  riscv_max_cpu_init),
2944     DEFINE_DYNAMIC_CPU(TYPE_RISCV_CPU_BASE32,    MXL_RV32,  rv32_base_cpu_init),
2945     DEFINE_VENDOR_CPU(TYPE_RISCV_CPU_IBEX,       MXL_RV32,  rv32_ibex_cpu_init),
2946     DEFINE_VENDOR_CPU(TYPE_RISCV_CPU_SIFIVE_E31, MXL_RV32,  rv32_sifive_e_cpu_init),
2947     DEFINE_VENDOR_CPU(TYPE_RISCV_CPU_SIFIVE_E34, MXL_RV32,  rv32_imafcu_nommu_cpu_init),
2948     DEFINE_VENDOR_CPU(TYPE_RISCV_CPU_SIFIVE_U34, MXL_RV32,  rv32_sifive_u_cpu_init),
2949     DEFINE_BARE_CPU(TYPE_RISCV_CPU_RV32I,        MXL_RV32,  rv32i_bare_cpu_init),
2950     DEFINE_BARE_CPU(TYPE_RISCV_CPU_RV32E,        MXL_RV32,  rv32e_bare_cpu_init),
2951 #elif defined(TARGET_RISCV64)
2952     DEFINE_DYNAMIC_CPU(TYPE_RISCV_CPU_MAX,       MXL_RV64,  riscv_max_cpu_init),
2953     DEFINE_DYNAMIC_CPU(TYPE_RISCV_CPU_BASE64,    MXL_RV64,  rv64_base_cpu_init),
2954     DEFINE_VENDOR_CPU(TYPE_RISCV_CPU_SIFIVE_E51, MXL_RV64,  rv64_sifive_e_cpu_init),
2955     DEFINE_VENDOR_CPU(TYPE_RISCV_CPU_SIFIVE_U54, MXL_RV64,  rv64_sifive_u_cpu_init),
2956     DEFINE_VENDOR_CPU(TYPE_RISCV_CPU_SHAKTI_C,   MXL_RV64,  rv64_sifive_u_cpu_init),
2957     DEFINE_VENDOR_CPU(TYPE_RISCV_CPU_THEAD_C906, MXL_RV64,  rv64_thead_c906_cpu_init),
2958     DEFINE_VENDOR_CPU(TYPE_RISCV_CPU_VEYRON_V1,  MXL_RV64,  rv64_veyron_v1_cpu_init),
2959 #ifdef CONFIG_TCG
2960     DEFINE_DYNAMIC_CPU(TYPE_RISCV_CPU_BASE128,   MXL_RV128, rv128_base_cpu_init),
2961 #endif /* CONFIG_TCG */
2962     DEFINE_BARE_CPU(TYPE_RISCV_CPU_RV64I,        MXL_RV64,  rv64i_bare_cpu_init),
2963     DEFINE_BARE_CPU(TYPE_RISCV_CPU_RV64E,        MXL_RV64,  rv64e_bare_cpu_init),
2964     DEFINE_PROFILE_CPU(TYPE_RISCV_CPU_RVA22U64,  MXL_RV64,  rva22u64_profile_cpu_init),
2965     DEFINE_PROFILE_CPU(TYPE_RISCV_CPU_RVA22S64,  MXL_RV64,  rva22s64_profile_cpu_init),
2966 #endif /* TARGET_RISCV64 */
2967 };
2968 
2969 DEFINE_TYPES(riscv_cpu_type_infos)
2970