1 /*
2  * QEMU RISC-V CPU
3  *
4  * Copyright (c) 2016-2017 Sagar Karandikar, sagark@eecs.berkeley.edu
5  * Copyright (c) 2017-2018 SiFive, Inc.
6  *
7  * This program is free software; you can redistribute it and/or modify it
8  * under the terms and conditions of the GNU General Public License,
9  * version 2 or later, as published by the Free Software Foundation.
10  *
11  * This program is distributed in the hope it will be useful, but WITHOUT
12  * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
13  * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
14  * more details.
15  *
16  * You should have received a copy of the GNU General Public License along with
17  * this program.  If not, see <http://www.gnu.org/licenses/>.
18  */
19 
20 #include "qemu/osdep.h"
21 #include "qemu/qemu-print.h"
22 #include "qemu/ctype.h"
23 #include "qemu/log.h"
24 #include "cpu.h"
25 #include "cpu_vendorid.h"
26 #include "internals.h"
27 #include "exec/exec-all.h"
28 #include "qapi/error.h"
29 #include "qapi/visitor.h"
30 #include "qemu/error-report.h"
31 #include "hw/qdev-properties.h"
32 #include "hw/core/qdev-prop-internal.h"
33 #include "migration/vmstate.h"
34 #include "fpu/softfloat-helpers.h"
35 #include "sysemu/kvm.h"
36 #include "sysemu/tcg.h"
37 #include "kvm/kvm_riscv.h"
38 #include "tcg/tcg-cpu.h"
39 #include "tcg/tcg.h"
40 
41 /* RISC-V CPU definitions */
42 static const char riscv_single_letter_exts[] = "IEMAFDQCBPVH";
43 const uint32_t misa_bits[] = {RVI, RVE, RVM, RVA, RVF, RVD, RVV,
44                               RVC, RVS, RVU, RVH, RVJ, RVG, RVB, 0};
45 
46 /*
47  * From vector_helper.c
48  * Note that vector data is stored in host-endian 64-bit chunks,
49  * so addressing bytes needs a host-endian fixup.
50  */
51 #if HOST_BIG_ENDIAN
52 #define BYTE(x)   ((x) ^ 7)
53 #else
54 #define BYTE(x)   (x)
55 #endif
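/*
 * For example, BYTE(0) is 0 on a little-endian host but 7 on a big-endian
 * host: guest byte 0 of a vector element lives at offset 7 within its
 * host uint64_t chunk when the host is big-endian.
 */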
56 
57 bool riscv_cpu_is_32bit(RISCVCPU *cpu)
58 {
59     return riscv_cpu_mxl(&cpu->env) == MXL_RV32;
60 }
61 
62 /* Hash that stores general user set numeric options */
63 static GHashTable *general_user_opts;
64 
65 static void cpu_option_add_user_setting(const char *optname, uint32_t value)
66 {
67     g_hash_table_insert(general_user_opts, (gpointer)optname,
68                         GUINT_TO_POINTER(value));
69 }
70 
71 #define ISA_EXT_DATA_ENTRY(_name, _min_ver, _prop) \
72     {#_name, _min_ver, CPU_CFG_OFFSET(_prop)}
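/*
 * For example, ISA_EXT_DATA_ENTRY(zicsr, PRIV_VERSION_1_10_0, ext_zicsr)
 * expands to {"zicsr", PRIV_VERSION_1_10_0, CPU_CFG_OFFSET(ext_zicsr)}:
 * the extension name, the earliest priv spec version it is tied to, and
 * the offset of its enable flag inside RISCVCPUConfig.
 */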
73 
74 /*
75  * Here are the ordering rules for extension naming defined by the RISC-V
76  * specification:
77  * 1. All extensions should be separated from other multi-letter extensions
78  *    by an underscore.
79  * 2. The first letter following the 'Z' conventionally indicates the most
80  *    closely related alphabetical extension category, IMAFDQLCBKJTPVH.
81  *    If multiple 'Z' extensions are named, they should be ordered first
82  *    by category, then alphabetically within a category.
83  * 3. Standard supervisor-level extensions (starting with 'S') should be
84  *    listed after standard unprivileged extensions.  If multiple
85  *    supervisor-level extensions are listed, they should be ordered
86  *    alphabetically.
87  * 4. Non-standard extensions (starting with 'X') must be listed after all
88  *    standard extensions. They must be separated from other multi-letter
89  *    extensions by an underscore.
90  *
91  * Single letter extensions are checked in riscv_cpu_validate_misa_priv()
92  * instead.
93  */
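/*
 * For example, a (hypothetical) ISA string following the rules above:
 *   rv64imafdc_zicsr_zifencei_zba_zbb_zbs_sstc_svinval_xtheadba
 * where the Zi* entries precede the Zb* entries (category order), the
 * standard S* extensions follow the unprivileged ones, and the vendor
 * X* extension comes last.
 */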
94 const RISCVIsaExtData isa_edata_arr[] = {
95     ISA_EXT_DATA_ENTRY(zicbom, PRIV_VERSION_1_12_0, ext_zicbom),
96     ISA_EXT_DATA_ENTRY(zicbop, PRIV_VERSION_1_12_0, ext_zicbop),
97     ISA_EXT_DATA_ENTRY(zicboz, PRIV_VERSION_1_12_0, ext_zicboz),
98     ISA_EXT_DATA_ENTRY(zicond, PRIV_VERSION_1_12_0, ext_zicond),
99     ISA_EXT_DATA_ENTRY(zicntr, PRIV_VERSION_1_12_0, ext_zicntr),
100     ISA_EXT_DATA_ENTRY(zicsr, PRIV_VERSION_1_10_0, ext_zicsr),
101     ISA_EXT_DATA_ENTRY(zifencei, PRIV_VERSION_1_10_0, ext_zifencei),
102     ISA_EXT_DATA_ENTRY(zihintntl, PRIV_VERSION_1_10_0, ext_zihintntl),
103     ISA_EXT_DATA_ENTRY(zihintpause, PRIV_VERSION_1_10_0, ext_zihintpause),
104     ISA_EXT_DATA_ENTRY(zihpm, PRIV_VERSION_1_12_0, ext_zihpm),
105     ISA_EXT_DATA_ENTRY(zmmul, PRIV_VERSION_1_12_0, ext_zmmul),
106     ISA_EXT_DATA_ENTRY(zacas, PRIV_VERSION_1_12_0, ext_zacas),
107     ISA_EXT_DATA_ENTRY(zawrs, PRIV_VERSION_1_12_0, ext_zawrs),
108     ISA_EXT_DATA_ENTRY(zfa, PRIV_VERSION_1_12_0, ext_zfa),
109     ISA_EXT_DATA_ENTRY(zfbfmin, PRIV_VERSION_1_12_0, ext_zfbfmin),
110     ISA_EXT_DATA_ENTRY(zfh, PRIV_VERSION_1_11_0, ext_zfh),
111     ISA_EXT_DATA_ENTRY(zfhmin, PRIV_VERSION_1_11_0, ext_zfhmin),
112     ISA_EXT_DATA_ENTRY(zfinx, PRIV_VERSION_1_12_0, ext_zfinx),
113     ISA_EXT_DATA_ENTRY(zdinx, PRIV_VERSION_1_12_0, ext_zdinx),
114     ISA_EXT_DATA_ENTRY(zca, PRIV_VERSION_1_12_0, ext_zca),
115     ISA_EXT_DATA_ENTRY(zcb, PRIV_VERSION_1_12_0, ext_zcb),
116     ISA_EXT_DATA_ENTRY(zcf, PRIV_VERSION_1_12_0, ext_zcf),
117     ISA_EXT_DATA_ENTRY(zcd, PRIV_VERSION_1_12_0, ext_zcd),
118     ISA_EXT_DATA_ENTRY(zce, PRIV_VERSION_1_12_0, ext_zce),
119     ISA_EXT_DATA_ENTRY(zcmp, PRIV_VERSION_1_12_0, ext_zcmp),
120     ISA_EXT_DATA_ENTRY(zcmt, PRIV_VERSION_1_12_0, ext_zcmt),
121     ISA_EXT_DATA_ENTRY(zba, PRIV_VERSION_1_12_0, ext_zba),
122     ISA_EXT_DATA_ENTRY(zbb, PRIV_VERSION_1_12_0, ext_zbb),
123     ISA_EXT_DATA_ENTRY(zbc, PRIV_VERSION_1_12_0, ext_zbc),
124     ISA_EXT_DATA_ENTRY(zbkb, PRIV_VERSION_1_12_0, ext_zbkb),
125     ISA_EXT_DATA_ENTRY(zbkc, PRIV_VERSION_1_12_0, ext_zbkc),
126     ISA_EXT_DATA_ENTRY(zbkx, PRIV_VERSION_1_12_0, ext_zbkx),
127     ISA_EXT_DATA_ENTRY(zbs, PRIV_VERSION_1_12_0, ext_zbs),
128     ISA_EXT_DATA_ENTRY(zk, PRIV_VERSION_1_12_0, ext_zk),
129     ISA_EXT_DATA_ENTRY(zkn, PRIV_VERSION_1_12_0, ext_zkn),
130     ISA_EXT_DATA_ENTRY(zknd, PRIV_VERSION_1_12_0, ext_zknd),
131     ISA_EXT_DATA_ENTRY(zkne, PRIV_VERSION_1_12_0, ext_zkne),
132     ISA_EXT_DATA_ENTRY(zknh, PRIV_VERSION_1_12_0, ext_zknh),
133     ISA_EXT_DATA_ENTRY(zkr, PRIV_VERSION_1_12_0, ext_zkr),
134     ISA_EXT_DATA_ENTRY(zks, PRIV_VERSION_1_12_0, ext_zks),
135     ISA_EXT_DATA_ENTRY(zksed, PRIV_VERSION_1_12_0, ext_zksed),
136     ISA_EXT_DATA_ENTRY(zksh, PRIV_VERSION_1_12_0, ext_zksh),
137     ISA_EXT_DATA_ENTRY(zkt, PRIV_VERSION_1_12_0, ext_zkt),
138     ISA_EXT_DATA_ENTRY(zvbb, PRIV_VERSION_1_12_0, ext_zvbb),
139     ISA_EXT_DATA_ENTRY(zvbc, PRIV_VERSION_1_12_0, ext_zvbc),
140     ISA_EXT_DATA_ENTRY(zve32f, PRIV_VERSION_1_10_0, ext_zve32f),
141     ISA_EXT_DATA_ENTRY(zve64f, PRIV_VERSION_1_10_0, ext_zve64f),
142     ISA_EXT_DATA_ENTRY(zve64d, PRIV_VERSION_1_10_0, ext_zve64d),
143     ISA_EXT_DATA_ENTRY(zvfbfmin, PRIV_VERSION_1_12_0, ext_zvfbfmin),
144     ISA_EXT_DATA_ENTRY(zvfbfwma, PRIV_VERSION_1_12_0, ext_zvfbfwma),
145     ISA_EXT_DATA_ENTRY(zvfh, PRIV_VERSION_1_12_0, ext_zvfh),
146     ISA_EXT_DATA_ENTRY(zvfhmin, PRIV_VERSION_1_12_0, ext_zvfhmin),
147     ISA_EXT_DATA_ENTRY(zvkb, PRIV_VERSION_1_12_0, ext_zvkb),
148     ISA_EXT_DATA_ENTRY(zvkg, PRIV_VERSION_1_12_0, ext_zvkg),
149     ISA_EXT_DATA_ENTRY(zvkn, PRIV_VERSION_1_12_0, ext_zvkn),
150     ISA_EXT_DATA_ENTRY(zvknc, PRIV_VERSION_1_12_0, ext_zvknc),
151     ISA_EXT_DATA_ENTRY(zvkned, PRIV_VERSION_1_12_0, ext_zvkned),
152     ISA_EXT_DATA_ENTRY(zvkng, PRIV_VERSION_1_12_0, ext_zvkng),
153     ISA_EXT_DATA_ENTRY(zvknha, PRIV_VERSION_1_12_0, ext_zvknha),
154     ISA_EXT_DATA_ENTRY(zvknhb, PRIV_VERSION_1_12_0, ext_zvknhb),
155     ISA_EXT_DATA_ENTRY(zvks, PRIV_VERSION_1_12_0, ext_zvks),
156     ISA_EXT_DATA_ENTRY(zvksc, PRIV_VERSION_1_12_0, ext_zvksc),
157     ISA_EXT_DATA_ENTRY(zvksed, PRIV_VERSION_1_12_0, ext_zvksed),
158     ISA_EXT_DATA_ENTRY(zvksg, PRIV_VERSION_1_12_0, ext_zvksg),
159     ISA_EXT_DATA_ENTRY(zvksh, PRIV_VERSION_1_12_0, ext_zvksh),
160     ISA_EXT_DATA_ENTRY(zvkt, PRIV_VERSION_1_12_0, ext_zvkt),
161     ISA_EXT_DATA_ENTRY(zhinx, PRIV_VERSION_1_12_0, ext_zhinx),
162     ISA_EXT_DATA_ENTRY(zhinxmin, PRIV_VERSION_1_12_0, ext_zhinxmin),
163     ISA_EXT_DATA_ENTRY(smaia, PRIV_VERSION_1_12_0, ext_smaia),
164     ISA_EXT_DATA_ENTRY(smepmp, PRIV_VERSION_1_12_0, ext_smepmp),
165     ISA_EXT_DATA_ENTRY(smstateen, PRIV_VERSION_1_12_0, ext_smstateen),
166     ISA_EXT_DATA_ENTRY(ssaia, PRIV_VERSION_1_12_0, ext_ssaia),
167     ISA_EXT_DATA_ENTRY(sscofpmf, PRIV_VERSION_1_12_0, ext_sscofpmf),
168     ISA_EXT_DATA_ENTRY(sstc, PRIV_VERSION_1_12_0, ext_sstc),
169     ISA_EXT_DATA_ENTRY(svadu, PRIV_VERSION_1_12_0, ext_svadu),
170     ISA_EXT_DATA_ENTRY(svinval, PRIV_VERSION_1_12_0, ext_svinval),
171     ISA_EXT_DATA_ENTRY(svnapot, PRIV_VERSION_1_12_0, ext_svnapot),
172     ISA_EXT_DATA_ENTRY(svpbmt, PRIV_VERSION_1_12_0, ext_svpbmt),
173     ISA_EXT_DATA_ENTRY(xtheadba, PRIV_VERSION_1_11_0, ext_xtheadba),
174     ISA_EXT_DATA_ENTRY(xtheadbb, PRIV_VERSION_1_11_0, ext_xtheadbb),
175     ISA_EXT_DATA_ENTRY(xtheadbs, PRIV_VERSION_1_11_0, ext_xtheadbs),
176     ISA_EXT_DATA_ENTRY(xtheadcmo, PRIV_VERSION_1_11_0, ext_xtheadcmo),
177     ISA_EXT_DATA_ENTRY(xtheadcondmov, PRIV_VERSION_1_11_0, ext_xtheadcondmov),
178     ISA_EXT_DATA_ENTRY(xtheadfmemidx, PRIV_VERSION_1_11_0, ext_xtheadfmemidx),
179     ISA_EXT_DATA_ENTRY(xtheadfmv, PRIV_VERSION_1_11_0, ext_xtheadfmv),
180     ISA_EXT_DATA_ENTRY(xtheadmac, PRIV_VERSION_1_11_0, ext_xtheadmac),
181     ISA_EXT_DATA_ENTRY(xtheadmemidx, PRIV_VERSION_1_11_0, ext_xtheadmemidx),
182     ISA_EXT_DATA_ENTRY(xtheadmempair, PRIV_VERSION_1_11_0, ext_xtheadmempair),
183     ISA_EXT_DATA_ENTRY(xtheadsync, PRIV_VERSION_1_11_0, ext_xtheadsync),
184     ISA_EXT_DATA_ENTRY(xventanacondops, PRIV_VERSION_1_12_0, ext_XVentanaCondOps),
185 
186     DEFINE_PROP_END_OF_LIST(),
187 };
188 
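/*
 * The ext_offset arguments below are byte offsets into RISCVCPUConfig,
 * so, as a sketch, isa_ext_is_enabled(cpu, CPU_CFG_OFFSET(ext_zba))
 * reads cpu->cfg.ext_zba.
 */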
189 bool isa_ext_is_enabled(RISCVCPU *cpu, uint32_t ext_offset)
190 {
191     bool *ext_enabled = (void *)&cpu->cfg + ext_offset;
192 
193     return *ext_enabled;
194 }
195 
196 void isa_ext_update_enabled(RISCVCPU *cpu, uint32_t ext_offset, bool en)
197 {
198     bool *ext_enabled = (void *)&cpu->cfg + ext_offset;
199 
200     *ext_enabled = en;
201 }
202 
203 bool riscv_cpu_is_vendor(Object *cpu_obj)
204 {
205     return object_dynamic_cast(cpu_obj, TYPE_RISCV_VENDOR_CPU) != NULL;
206 }
207 
208 const char * const riscv_int_regnames[] = {
209     "x0/zero", "x1/ra",  "x2/sp",  "x3/gp",  "x4/tp",  "x5/t0",   "x6/t1",
210     "x7/t2",   "x8/s0",  "x9/s1",  "x10/a0", "x11/a1", "x12/a2",  "x13/a3",
211     "x14/a4",  "x15/a5", "x16/a6", "x17/a7", "x18/s2", "x19/s3",  "x20/s4",
212     "x21/s5",  "x22/s6", "x23/s7", "x24/s8", "x25/s9", "x26/s10", "x27/s11",
213     "x28/t3",  "x29/t4", "x30/t5", "x31/t6"
214 };
215 
216 const char * const riscv_int_regnamesh[] = {
217     "x0h/zeroh", "x1h/rah",  "x2h/sph",   "x3h/gph",   "x4h/tph",  "x5h/t0h",
218     "x6h/t1h",   "x7h/t2h",  "x8h/s0h",   "x9h/s1h",   "x10h/a0h", "x11h/a1h",
219     "x12h/a2h",  "x13h/a3h", "x14h/a4h",  "x15h/a5h",  "x16h/a6h", "x17h/a7h",
220     "x18h/s2h",  "x19h/s3h", "x20h/s4h",  "x21h/s5h",  "x22h/s6h", "x23h/s7h",
221     "x24h/s8h",  "x25h/s9h", "x26h/s10h", "x27h/s11h", "x28h/t3h", "x29h/t4h",
222     "x30h/t5h",  "x31h/t6h"
223 };
224 
225 const char * const riscv_fpr_regnames[] = {
226     "f0/ft0",   "f1/ft1",  "f2/ft2",   "f3/ft3",   "f4/ft4",  "f5/ft5",
227     "f6/ft6",   "f7/ft7",  "f8/fs0",   "f9/fs1",   "f10/fa0", "f11/fa1",
228     "f12/fa2",  "f13/fa3", "f14/fa4",  "f15/fa5",  "f16/fa6", "f17/fa7",
229     "f18/fs2",  "f19/fs3", "f20/fs4",  "f21/fs5",  "f22/fs6", "f23/fs7",
230     "f24/fs8",  "f25/fs9", "f26/fs10", "f27/fs11", "f28/ft8", "f29/ft9",
231     "f30/ft10", "f31/ft11"
232 };
233 
234 const char * const riscv_rvv_regnames[] = {
235   "v0",  "v1",  "v2",  "v3",  "v4",  "v5",  "v6",
236   "v7",  "v8",  "v9",  "v10", "v11", "v12", "v13",
237   "v14", "v15", "v16", "v17", "v18", "v19", "v20",
238   "v21", "v22", "v23", "v24", "v25", "v26", "v27",
239   "v28", "v29", "v30", "v31"
240 };
241 
242 static const char * const riscv_excp_names[] = {
243     "misaligned_fetch",
244     "fault_fetch",
245     "illegal_instruction",
246     "breakpoint",
247     "misaligned_load",
248     "fault_load",
249     "misaligned_store",
250     "fault_store",
251     "user_ecall",
252     "supervisor_ecall",
253     "hypervisor_ecall",
254     "machine_ecall",
255     "exec_page_fault",
256     "load_page_fault",
257     "reserved",
258     "store_page_fault",
259     "reserved",
260     "reserved",
261     "reserved",
262     "reserved",
263     "guest_exec_page_fault",
264     "guest_load_page_fault",
265     "reserved",
266     "guest_store_page_fault",
267 };
268 
269 static const char * const riscv_intr_names[] = {
270     "u_software",
271     "s_software",
272     "vs_software",
273     "m_software",
274     "u_timer",
275     "s_timer",
276     "vs_timer",
277     "m_timer",
278     "u_external",
279     "s_external",
280     "vs_external",
281     "m_external",
282     "reserved",
283     "reserved",
284     "reserved",
285     "reserved"
286 };
287 
288 const char *riscv_cpu_get_trap_name(target_ulong cause, bool async)
289 {
290     if (async) {
291         return (cause < ARRAY_SIZE(riscv_intr_names)) ?
292                riscv_intr_names[cause] : "(unknown)";
293     } else {
294         return (cause < ARRAY_SIZE(riscv_excp_names)) ?
295                riscv_excp_names[cause] : "(unknown)";
296     }
297 }
298 
299 void riscv_cpu_set_misa(CPURISCVState *env, RISCVMXL mxl, uint32_t ext)
300 {
301     env->misa_mxl_max = env->misa_mxl = mxl;
302     env->misa_ext_mask = env->misa_ext = ext;
303 }
304 
305 #ifndef CONFIG_USER_ONLY
306 static uint8_t satp_mode_from_str(const char *satp_mode_str)
307 {
308     if (!strncmp(satp_mode_str, "mbare", 5)) {
309         return VM_1_10_MBARE;
310     }
311 
312     if (!strncmp(satp_mode_str, "sv32", 4)) {
313         return VM_1_10_SV32;
314     }
315 
316     if (!strncmp(satp_mode_str, "sv39", 4)) {
317         return VM_1_10_SV39;
318     }
319 
320     if (!strncmp(satp_mode_str, "sv48", 4)) {
321         return VM_1_10_SV48;
322     }
323 
324     if (!strncmp(satp_mode_str, "sv57", 4)) {
325         return VM_1_10_SV57;
326     }
327 
328     if (!strncmp(satp_mode_str, "sv64", 4)) {
329         return VM_1_10_SV64;
330     }
331 
332     g_assert_not_reached();
333 }
334 
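/*
 * Returns the index of the highest bit set in 'map'; for example, a map
 * with both SV39 and SV48 set yields VM_1_10_SV48.
 */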
335 uint8_t satp_mode_max_from_map(uint32_t map)
336 {
337     /*
338      * 'map = 0' makes __builtin_clz() undefined behavior; even if it
339      * returned 32, (31 - 32) would wrap around in the uint8_t return
340      * type. There's no good result to return if 'map = 0' (e.g.
341      * returning 0 will be ambiguous with the result for 'map = 1').
342      *
343      * Assert out if map = 0. Callers will have to deal with
344      * it outside of this function.
345      */
346     g_assert(map > 0);
347 
348     /* map here has at least one bit set, so no problem with clz */
349     return 31 - __builtin_clz(map);
350 }
351 
352 const char *satp_mode_str(uint8_t satp_mode, bool is_32_bit)
353 {
354     if (is_32_bit) {
355         switch (satp_mode) {
356         case VM_1_10_SV32:
357             return "sv32";
358         case VM_1_10_MBARE:
359             return "none";
360         }
361     } else {
362         switch (satp_mode) {
363         case VM_1_10_SV64:
364             return "sv64";
365         case VM_1_10_SV57:
366             return "sv57";
367         case VM_1_10_SV48:
368             return "sv48";
369         case VM_1_10_SV39:
370             return "sv39";
371         case VM_1_10_MBARE:
372             return "none";
373         }
374     }
375 
376     g_assert_not_reached();
377 }
378 
379 static void set_satp_mode_max_supported(RISCVCPU *cpu,
380                                         uint8_t satp_mode)
381 {
382     bool rv32 = riscv_cpu_mxl(&cpu->env) == MXL_RV32;
383     const bool *valid_vm = rv32 ? valid_vm_1_10_32 : valid_vm_1_10_64;
384 
385     for (int i = 0; i <= satp_mode; ++i) {
386         if (valid_vm[i]) {
387             cpu->cfg.satp_mode.supported |= (1 << i);
388         }
389     }
390 }
391 
392 /* Set the satp mode map to its default */
393 static void set_satp_mode_default_map(RISCVCPU *cpu)
394 {
395     /*
396      * Bare CPUs do not default to the max available.
397      * Users must set a valid satp_mode in the command
398      * line.
399      */
400     if (object_dynamic_cast(OBJECT(cpu), TYPE_RISCV_BARE_CPU) != NULL) {
401         warn_report("No satp mode set. Defaulting to 'bare'");
402         cpu->cfg.satp_mode.map = (1 << VM_1_10_MBARE);
403         return;
404     }
405 
406     cpu->cfg.satp_mode.map = cpu->cfg.satp_mode.supported;
407 }
408 #endif
409 
410 static void riscv_any_cpu_init(Object *obj)
411 {
412     RISCVCPU *cpu = RISCV_CPU(obj);
413     CPURISCVState *env = &cpu->env;
414 #if defined(TARGET_RISCV32)
415     riscv_cpu_set_misa(env, MXL_RV32, RVI | RVM | RVA | RVF | RVD | RVC | RVU);
416 #elif defined(TARGET_RISCV64)
417     riscv_cpu_set_misa(env, MXL_RV64, RVI | RVM | RVA | RVF | RVD | RVC | RVU);
418 #endif
419 
420 #ifndef CONFIG_USER_ONLY
421     set_satp_mode_max_supported(RISCV_CPU(obj),
422         riscv_cpu_mxl(&RISCV_CPU(obj)->env) == MXL_RV32 ?
423         VM_1_10_SV32 : VM_1_10_SV57);
424 #endif
425 
426     env->priv_ver = PRIV_VERSION_LATEST;
427 
428     /* inherited from parent obj via riscv_cpu_init() */
429     cpu->cfg.ext_zifencei = true;
430     cpu->cfg.ext_zicsr = true;
431     cpu->cfg.mmu = true;
432     cpu->cfg.pmp = true;
433 }
434 
435 static void riscv_max_cpu_init(Object *obj)
436 {
437     RISCVCPU *cpu = RISCV_CPU(obj);
438     CPURISCVState *env = &cpu->env;
439     RISCVMXL mxl = MXL_RV64;
440 
441     cpu->cfg.mmu = true;
442     cpu->cfg.pmp = true;
443 
444 #ifdef TARGET_RISCV32
445     mxl = MXL_RV32;
446 #endif
447     riscv_cpu_set_misa(env, mxl, 0);
448     env->priv_ver = PRIV_VERSION_LATEST;
449 #ifndef CONFIG_USER_ONLY
450     set_satp_mode_max_supported(RISCV_CPU(obj), mxl == MXL_RV32 ?
451                                 VM_1_10_SV32 : VM_1_10_SV57);
452 #endif
453 }
454 
455 #if defined(TARGET_RISCV64)
456 static void rv64_base_cpu_init(Object *obj)
457 {
458     RISCVCPU *cpu = RISCV_CPU(obj);
459     CPURISCVState *env = &cpu->env;
460 
461     cpu->cfg.mmu = true;
462     cpu->cfg.pmp = true;
463 
464     /* We set this in the realize function */
465     riscv_cpu_set_misa(env, MXL_RV64, 0);
466     /* Set latest version of privileged specification */
467     env->priv_ver = PRIV_VERSION_LATEST;
468 #ifndef CONFIG_USER_ONLY
469     set_satp_mode_max_supported(RISCV_CPU(obj), VM_1_10_SV57);
470 #endif
471 }
472 
473 static void rv64_sifive_u_cpu_init(Object *obj)
474 {
475     RISCVCPU *cpu = RISCV_CPU(obj);
476     CPURISCVState *env = &cpu->env;
477     riscv_cpu_set_misa(env, MXL_RV64,
478                        RVI | RVM | RVA | RVF | RVD | RVC | RVS | RVU);
479     env->priv_ver = PRIV_VERSION_1_10_0;
480 #ifndef CONFIG_USER_ONLY
481     set_satp_mode_max_supported(RISCV_CPU(obj), VM_1_10_SV39);
482 #endif
483 
484     /* inherited from parent obj via riscv_cpu_init() */
485     cpu->cfg.ext_zifencei = true;
486     cpu->cfg.ext_zicsr = true;
487     cpu->cfg.mmu = true;
488     cpu->cfg.pmp = true;
489 }
490 
491 static void rv64_sifive_e_cpu_init(Object *obj)
492 {
493     CPURISCVState *env = &RISCV_CPU(obj)->env;
494     RISCVCPU *cpu = RISCV_CPU(obj);
495 
496     riscv_cpu_set_misa(env, MXL_RV64, RVI | RVM | RVA | RVC | RVU);
497     env->priv_ver = PRIV_VERSION_1_10_0;
498 #ifndef CONFIG_USER_ONLY
499     set_satp_mode_max_supported(cpu, VM_1_10_MBARE);
500 #endif
501 
502     /* inherited from parent obj via riscv_cpu_init() */
503     cpu->cfg.ext_zifencei = true;
504     cpu->cfg.ext_zicsr = true;
505     cpu->cfg.pmp = true;
506 }
507 
508 static void rv64_thead_c906_cpu_init(Object *obj)
509 {
510     CPURISCVState *env = &RISCV_CPU(obj)->env;
511     RISCVCPU *cpu = RISCV_CPU(obj);
512 
513     riscv_cpu_set_misa(env, MXL_RV64, RVG | RVC | RVS | RVU);
514     env->priv_ver = PRIV_VERSION_1_11_0;
515 
516     cpu->cfg.ext_zfa = true;
517     cpu->cfg.ext_zfh = true;
518     cpu->cfg.mmu = true;
519     cpu->cfg.ext_xtheadba = true;
520     cpu->cfg.ext_xtheadbb = true;
521     cpu->cfg.ext_xtheadbs = true;
522     cpu->cfg.ext_xtheadcmo = true;
523     cpu->cfg.ext_xtheadcondmov = true;
524     cpu->cfg.ext_xtheadfmemidx = true;
525     cpu->cfg.ext_xtheadmac = true;
526     cpu->cfg.ext_xtheadmemidx = true;
527     cpu->cfg.ext_xtheadmempair = true;
528     cpu->cfg.ext_xtheadsync = true;
529 
530     cpu->cfg.mvendorid = THEAD_VENDOR_ID;
531 #ifndef CONFIG_USER_ONLY
532     set_satp_mode_max_supported(cpu, VM_1_10_SV39);
533 #endif
534 
535     /* inherited from parent obj via riscv_cpu_init() */
536     cpu->cfg.pmp = true;
537 }
538 
539 static void rv64_veyron_v1_cpu_init(Object *obj)
540 {
541     CPURISCVState *env = &RISCV_CPU(obj)->env;
542     RISCVCPU *cpu = RISCV_CPU(obj);
543 
544     riscv_cpu_set_misa(env, MXL_RV64, RVG | RVC | RVS | RVU | RVH);
545     env->priv_ver = PRIV_VERSION_1_12_0;
546 
547     /* Enable ISA extensions */
548     cpu->cfg.mmu = true;
549     cpu->cfg.ext_zifencei = true;
550     cpu->cfg.ext_zicsr = true;
551     cpu->cfg.pmp = true;
552     cpu->cfg.ext_zicbom = true;
553     cpu->cfg.cbom_blocksize = 64;
554     cpu->cfg.cboz_blocksize = 64;
555     cpu->cfg.ext_zicboz = true;
556     cpu->cfg.ext_smaia = true;
557     cpu->cfg.ext_ssaia = true;
558     cpu->cfg.ext_sscofpmf = true;
559     cpu->cfg.ext_sstc = true;
560     cpu->cfg.ext_svinval = true;
561     cpu->cfg.ext_svnapot = true;
562     cpu->cfg.ext_svpbmt = true;
563     cpu->cfg.ext_smstateen = true;
564     cpu->cfg.ext_zba = true;
565     cpu->cfg.ext_zbb = true;
566     cpu->cfg.ext_zbc = true;
567     cpu->cfg.ext_zbs = true;
568     cpu->cfg.ext_XVentanaCondOps = true;
569 
570     cpu->cfg.mvendorid = VEYRON_V1_MVENDORID;
571     cpu->cfg.marchid = VEYRON_V1_MARCHID;
572     cpu->cfg.mimpid = VEYRON_V1_MIMPID;
573 
574 #ifndef CONFIG_USER_ONLY
575     set_satp_mode_max_supported(cpu, VM_1_10_SV48);
576 #endif
577 }
578 
579 static void rv128_base_cpu_init(Object *obj)
580 {
581     RISCVCPU *cpu = RISCV_CPU(obj);
582     CPURISCVState *env = &cpu->env;
583 
584     if (qemu_tcg_mttcg_enabled()) {
585         /* Missing 128-bit aligned atomics */
586         error_report("128-bit RISC-V currently does not work with Multi "
587                      "Threaded TCG. Please use: -accel tcg,thread=single");
588         exit(EXIT_FAILURE);
589     }
590 
591     cpu->cfg.mmu = true;
592     cpu->cfg.pmp = true;
593 
594     /* We set this in the realize function */
595     riscv_cpu_set_misa(env, MXL_RV128, 0);
596     /* Set latest version of privileged specification */
597     env->priv_ver = PRIV_VERSION_LATEST;
598 #ifndef CONFIG_USER_ONLY
599     set_satp_mode_max_supported(RISCV_CPU(obj), VM_1_10_SV57);
600 #endif
601 }
602 
603 static void rv64i_bare_cpu_init(Object *obj)
604 {
605     CPURISCVState *env = &RISCV_CPU(obj)->env;
606     riscv_cpu_set_misa(env, MXL_RV64, RVI);
607 
608     /* Remove the defaults from the parent class */
609     RISCV_CPU(obj)->cfg.ext_zicntr = false;
610     RISCV_CPU(obj)->cfg.ext_zihpm = false;
611 
612     /* Set to QEMU's first supported priv version */
613     env->priv_ver = PRIV_VERSION_1_10_0;
614 
615     /*
616      * Support all available satp_mode settings. The default
617      * value will be set to MBARE if the user doesn't set
618      * satp_mode manually (see set_satp_mode_default_map()).
619      */
620 #ifndef CONFIG_USER_ONLY
621     set_satp_mode_max_supported(RISCV_CPU(obj), VM_1_10_SV64);
622 #endif
623 }
624 #else
625 static void rv32_base_cpu_init(Object *obj)
626 {
627     RISCVCPU *cpu = RISCV_CPU(obj);
628     CPURISCVState *env = &cpu->env;
629 
630     cpu->cfg.mmu = true;
631     cpu->cfg.pmp = true;
632 
633     /* We set this in the realize function */
634     riscv_cpu_set_misa(env, MXL_RV32, 0);
635     /* Set latest version of privileged specification */
636     env->priv_ver = PRIV_VERSION_LATEST;
637 #ifndef CONFIG_USER_ONLY
638     set_satp_mode_max_supported(RISCV_CPU(obj), VM_1_10_SV32);
639 #endif
640 }
641 
642 static void rv32_sifive_u_cpu_init(Object *obj)
643 {
644     RISCVCPU *cpu = RISCV_CPU(obj);
645     CPURISCVState *env = &cpu->env;
646     riscv_cpu_set_misa(env, MXL_RV32,
647                        RVI | RVM | RVA | RVF | RVD | RVC | RVS | RVU);
648     env->priv_ver = PRIV_VERSION_1_10_0;
649 #ifndef CONFIG_USER_ONLY
650     set_satp_mode_max_supported(RISCV_CPU(obj), VM_1_10_SV32);
651 #endif
652 
653     /* inherited from parent obj via riscv_cpu_init() */
654     cpu->cfg.ext_zifencei = true;
655     cpu->cfg.ext_zicsr = true;
656     cpu->cfg.mmu = true;
657     cpu->cfg.pmp = true;
658 }
659 
660 static void rv32_sifive_e_cpu_init(Object *obj)
661 {
662     CPURISCVState *env = &RISCV_CPU(obj)->env;
663     RISCVCPU *cpu = RISCV_CPU(obj);
664 
665     riscv_cpu_set_misa(env, MXL_RV32, RVI | RVM | RVA | RVC | RVU);
666     env->priv_ver = PRIV_VERSION_1_10_0;
667 #ifndef CONFIG_USER_ONLY
668     set_satp_mode_max_supported(cpu, VM_1_10_MBARE);
669 #endif
670 
671     /* inherited from parent obj via riscv_cpu_init() */
672     cpu->cfg.ext_zifencei = true;
673     cpu->cfg.ext_zicsr = true;
674     cpu->cfg.pmp = true;
675 }
676 
677 static void rv32_ibex_cpu_init(Object *obj)
678 {
679     CPURISCVState *env = &RISCV_CPU(obj)->env;
680     RISCVCPU *cpu = RISCV_CPU(obj);
681 
682     riscv_cpu_set_misa(env, MXL_RV32, RVI | RVM | RVC | RVU);
683     env->priv_ver = PRIV_VERSION_1_12_0;
684 #ifndef CONFIG_USER_ONLY
685     set_satp_mode_max_supported(cpu, VM_1_10_MBARE);
686 #endif
687     /* inherited from parent obj via riscv_cpu_init() */
688     cpu->cfg.ext_zifencei = true;
689     cpu->cfg.ext_zicsr = true;
690     cpu->cfg.pmp = true;
691     cpu->cfg.ext_smepmp = true;
692 }
693 
694 static void rv32_imafcu_nommu_cpu_init(Object *obj)
695 {
696     CPURISCVState *env = &RISCV_CPU(obj)->env;
697     RISCVCPU *cpu = RISCV_CPU(obj);
698 
699     riscv_cpu_set_misa(env, MXL_RV32, RVI | RVM | RVA | RVF | RVC | RVU);
700     env->priv_ver = PRIV_VERSION_1_10_0;
701 #ifndef CONFIG_USER_ONLY
702     set_satp_mode_max_supported(cpu, VM_1_10_MBARE);
703 #endif
704 
705     /* inherited from parent obj via riscv_cpu_init() */
706     cpu->cfg.ext_zifencei = true;
707     cpu->cfg.ext_zicsr = true;
708     cpu->cfg.pmp = true;
709 }
710 #endif
711 
712 static ObjectClass *riscv_cpu_class_by_name(const char *cpu_model)
713 {
714     ObjectClass *oc;
715     char *typename;
716     char **cpuname;
717 
718     cpuname = g_strsplit(cpu_model, ",", 1);
719     typename = g_strdup_printf(RISCV_CPU_TYPE_NAME("%s"), cpuname[0]);
720     oc = object_class_by_name(typename);
721     g_strfreev(cpuname);
722     g_free(typename);
723 
724     return oc;
725 }
726 
727 char *riscv_cpu_get_name(RISCVCPU *cpu)
728 {
729     RISCVCPUClass *rcc = RISCV_CPU_GET_CLASS(cpu);
730     const char *typename = object_class_get_name(OBJECT_CLASS(rcc));
731 
732     g_assert(g_str_has_suffix(typename, RISCV_CPU_TYPE_SUFFIX));
733 
734     return cpu_model_from_type(typename);
735 }
736 
737 static void riscv_cpu_dump_state(CPUState *cs, FILE *f, int flags)
738 {
739     RISCVCPU *cpu = RISCV_CPU(cs);
740     CPURISCVState *env = &cpu->env;
741     int i, j;
742     uint8_t *p;
743 
744 #if !defined(CONFIG_USER_ONLY)
745     if (riscv_has_ext(env, RVH)) {
746         qemu_fprintf(f, " %s %d\n", "V      =  ", env->virt_enabled);
747     }
748 #endif
749     qemu_fprintf(f, " %s " TARGET_FMT_lx "\n", "pc      ", env->pc);
750 #ifndef CONFIG_USER_ONLY
751     {
752         static const int dump_csrs[] = {
753             CSR_MHARTID,
754             CSR_MSTATUS,
755             CSR_MSTATUSH,
756             /*
757              * CSR_SSTATUS is intentionally omitted here as its value
758              * can be figured out by looking at CSR_MSTATUS
759              */
760             CSR_HSTATUS,
761             CSR_VSSTATUS,
762             CSR_MIP,
763             CSR_MIE,
764             CSR_MIDELEG,
765             CSR_HIDELEG,
766             CSR_MEDELEG,
767             CSR_HEDELEG,
768             CSR_MTVEC,
769             CSR_STVEC,
770             CSR_VSTVEC,
771             CSR_MEPC,
772             CSR_SEPC,
773             CSR_VSEPC,
774             CSR_MCAUSE,
775             CSR_SCAUSE,
776             CSR_VSCAUSE,
777             CSR_MTVAL,
778             CSR_STVAL,
779             CSR_HTVAL,
780             CSR_MTVAL2,
781             CSR_MSCRATCH,
782             CSR_SSCRATCH,
783             CSR_SATP,
784             CSR_MMTE,
785             CSR_UPMBASE,
786             CSR_UPMMASK,
787             CSR_SPMBASE,
788             CSR_SPMMASK,
789             CSR_MPMBASE,
790             CSR_MPMMASK,
791         };
792 
793         for (i = 0; i < ARRAY_SIZE(dump_csrs); ++i) {
794             int csrno = dump_csrs[i];
795             target_ulong val = 0;
796             RISCVException res = riscv_csrrw_debug(env, csrno, &val, 0, 0);
797 
798             /*
799              * Rely on the smode, hmode, etc, predicates within csr.c
800              * to do the filtering of the registers that are present.
801              */
802             if (res == RISCV_EXCP_NONE) {
803                 qemu_fprintf(f, " %-8s " TARGET_FMT_lx "\n",
804                              csr_ops[csrno].name, val);
805             }
806         }
807     }
808 #endif
809 
810     for (i = 0; i < 32; i++) {
811         qemu_fprintf(f, " %-8s " TARGET_FMT_lx,
812                      riscv_int_regnames[i], env->gpr[i]);
813         if ((i & 3) == 3) {
814             qemu_fprintf(f, "\n");
815         }
816     }
817     if (flags & CPU_DUMP_FPU) {
818         for (i = 0; i < 32; i++) {
819             qemu_fprintf(f, " %-8s %016" PRIx64,
820                          riscv_fpr_regnames[i], env->fpr[i]);
821             if ((i & 3) == 3) {
822                 qemu_fprintf(f, "\n");
823             }
824         }
825     }
826     if (riscv_has_ext(env, RVV) && (flags & CPU_DUMP_VPU)) {
827         static const int dump_rvv_csrs[] = {
828                     CSR_VSTART,
829                     CSR_VXSAT,
830                     CSR_VXRM,
831                     CSR_VCSR,
832                     CSR_VL,
833                     CSR_VTYPE,
834                     CSR_VLENB,
835                 };
836         for (i = 0; i < ARRAY_SIZE(dump_rvv_csrs); ++i) {
837             int csrno = dump_rvv_csrs[i];
838             target_ulong val = 0;
839             RISCVException res = riscv_csrrw_debug(env, csrno, &val, 0, 0);
840 
841             /*
842              * Rely on the smode, hmode, etc, predicates within csr.c
843              * to do the filtering of the registers that are present.
844              */
845             if (res == RISCV_EXCP_NONE) {
846                 qemu_fprintf(f, " %-8s " TARGET_FMT_lx "\n",
847                              csr_ops[csrno].name, val);
848             }
849         }
850         uint16_t vlenb = cpu->cfg.vlen >> 3;
851 
852         for (i = 0; i < 32; i++) {
853             qemu_fprintf(f, " %-8s ", riscv_rvv_regnames[i]);
854             p = (uint8_t *)env->vreg;
855             for (j = vlenb - 1 ; j >= 0; j--) {
856                 qemu_fprintf(f, "%02x", *(p + i * vlenb + BYTE(j)));
857             }
858             qemu_fprintf(f, "\n");
859         }
860     }
861 }
862 
863 static void riscv_cpu_set_pc(CPUState *cs, vaddr value)
864 {
865     RISCVCPU *cpu = RISCV_CPU(cs);
866     CPURISCVState *env = &cpu->env;
867 
868     if (env->xl == MXL_RV32) {
869         env->pc = (int32_t)value;
870     } else {
871         env->pc = value;
872     }
873 }
874 
875 static vaddr riscv_cpu_get_pc(CPUState *cs)
876 {
877     RISCVCPU *cpu = RISCV_CPU(cs);
878     CPURISCVState *env = &cpu->env;
879 
880     /* Match cpu_get_tb_cpu_state. */
881     if (env->xl == MXL_RV32) {
882         return env->pc & UINT32_MAX;
883     }
884     return env->pc;
885 }
886 
887 static bool riscv_cpu_has_work(CPUState *cs)
888 {
889 #ifndef CONFIG_USER_ONLY
890     RISCVCPU *cpu = RISCV_CPU(cs);
891     CPURISCVState *env = &cpu->env;
892     /*
893      * Definition of the WFI instruction requires it to ignore the privilege
894      * mode and delegation registers, but respect individual enables
895      */
896     return riscv_cpu_all_pending(env) != 0 ||
897         riscv_cpu_sirq_pending(env) != RISCV_EXCP_NONE ||
898         riscv_cpu_vsirq_pending(env) != RISCV_EXCP_NONE;
899 #else
900     return true;
901 #endif
902 }
903 
904 static int riscv_cpu_mmu_index(CPUState *cs, bool ifetch)
905 {
906     return riscv_env_mmu_index(cpu_env(cs), ifetch);
907 }
908 
909 static void riscv_cpu_reset_hold(Object *obj)
910 {
911 #ifndef CONFIG_USER_ONLY
912     uint8_t iprio;
913     int i, irq, rdzero;
914 #endif
915     CPUState *cs = CPU(obj);
916     RISCVCPU *cpu = RISCV_CPU(cs);
917     RISCVCPUClass *mcc = RISCV_CPU_GET_CLASS(cpu);
918     CPURISCVState *env = &cpu->env;
919 
920     if (mcc->parent_phases.hold) {
921         mcc->parent_phases.hold(obj);
922     }
923 #ifndef CONFIG_USER_ONLY
924     env->misa_mxl = env->misa_mxl_max;
925     env->priv = PRV_M;
926     env->mstatus &= ~(MSTATUS_MIE | MSTATUS_MPRV);
927     if (env->misa_mxl > MXL_RV32) {
928         /*
929          * The reset status of SXL/UXL is undefined, but mstatus is WARL
930          * and we must ensure that the value after init is valid for read.
931          */
932         env->mstatus = set_field(env->mstatus, MSTATUS64_SXL, env->misa_mxl);
933         env->mstatus = set_field(env->mstatus, MSTATUS64_UXL, env->misa_mxl);
934         if (riscv_has_ext(env, RVH)) {
935             env->vsstatus = set_field(env->vsstatus,
936                                       MSTATUS64_SXL, env->misa_mxl);
937             env->vsstatus = set_field(env->vsstatus,
938                                       MSTATUS64_UXL, env->misa_mxl);
939             env->mstatus_hs = set_field(env->mstatus_hs,
940                                         MSTATUS64_SXL, env->misa_mxl);
941             env->mstatus_hs = set_field(env->mstatus_hs,
942                                         MSTATUS64_UXL, env->misa_mxl);
943         }
944     }
945     env->mcause = 0;
946     env->miclaim = MIP_SGEIP;
947     env->pc = env->resetvec;
948     env->bins = 0;
949     env->two_stage_lookup = false;
950 
951     env->menvcfg = (cpu->cfg.ext_svpbmt ? MENVCFG_PBMTE : 0) |
952                    (cpu->cfg.ext_svadu ? MENVCFG_ADUE : 0);
953     env->henvcfg = (cpu->cfg.ext_svpbmt ? HENVCFG_PBMTE : 0) |
954                    (cpu->cfg.ext_svadu ? HENVCFG_ADUE : 0);
955 
956     /* Initialize default priorities of local interrupts. */
957     for (i = 0; i < ARRAY_SIZE(env->miprio); i++) {
958         iprio = riscv_cpu_default_priority(i);
959         env->miprio[i] = (i == IRQ_M_EXT) ? 0 : iprio;
960         env->siprio[i] = (i == IRQ_S_EXT) ? 0 : iprio;
961         env->hviprio[i] = 0;
962     }
963     i = 0;
964     while (!riscv_cpu_hviprio_index2irq(i, &irq, &rdzero)) {
965         if (!rdzero) {
966             env->hviprio[irq] = env->miprio[irq];
967         }
968         i++;
969     }
970     /* mmte is supposed to have pm.current hardwired to 1 */
971     env->mmte |= (EXT_STATUS_INITIAL | MMTE_M_PM_CURRENT);
972 
973     /*
974      * Bits 10, 6, 2 and 12 of mideleg are read-only one when the Hypervisor
975      * extension is enabled.
976      */
977     if (riscv_has_ext(env, RVH)) {
978         env->mideleg |= HS_MODE_INTERRUPTS;
979     }
980 
981     /*
982      * Clear mseccfg and unlock all the PMP entries upon reset.
983      * This is allowed as per the priv and smepmp specifications
984      * and is needed to clear stale entries across reboots.
985      */
986     if (riscv_cpu_cfg(env)->ext_smepmp) {
987         env->mseccfg = 0;
988     }
989 
990     pmp_unlock_entries(env);
991 #endif
992     env->xl = riscv_cpu_mxl(env);
993     riscv_cpu_update_mask(env);
994     cs->exception_index = RISCV_EXCP_NONE;
995     env->load_res = -1;
996     set_default_nan_mode(1, &env->fp_status);
997 
998 #ifndef CONFIG_USER_ONLY
999     if (cpu->cfg.debug) {
1000         riscv_trigger_reset_hold(env);
1001     }
1002 
1003     if (kvm_enabled()) {
1004         kvm_riscv_reset_vcpu(cpu);
1005     }
1006 #endif
1007 }
1008 
1009 static void riscv_cpu_disas_set_info(CPUState *s, disassemble_info *info)
1010 {
1011     RISCVCPU *cpu = RISCV_CPU(s);
1012     CPURISCVState *env = &cpu->env;
1013     info->target_info = &cpu->cfg;
1014 
1015     switch (env->xl) {
1016     case MXL_RV32:
1017         info->print_insn = print_insn_riscv32;
1018         break;
1019     case MXL_RV64:
1020         info->print_insn = print_insn_riscv64;
1021         break;
1022     case MXL_RV128:
1023         info->print_insn = print_insn_riscv128;
1024         break;
1025     default:
1026         g_assert_not_reached();
1027     }
1028 }
1029 
1030 #ifndef CONFIG_USER_ONLY
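/*
 * Worked example (illustrative values): on an rv64 CPU supporting
 * mbare/sv39/sv48/sv57, a user passing only 'sv48=on' ends up with a
 * satp_mode.map containing sv48, sv39 and mbare after the expansion at
 * the end of this function, with sv48 as the maximum mode.
 */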
1031 static void riscv_cpu_satp_mode_finalize(RISCVCPU *cpu, Error **errp)
1032 {
1033     bool rv32 = riscv_cpu_is_32bit(cpu);
1034     uint8_t satp_mode_map_max, satp_mode_supported_max;
1035 
1036     /* The CPU wants the OS to decide which satp mode to use */
1037     if (cpu->cfg.satp_mode.supported == 0) {
1038         return;
1039     }
1040 
1041     satp_mode_supported_max =
1042                     satp_mode_max_from_map(cpu->cfg.satp_mode.supported);
1043 
1044     if (cpu->cfg.satp_mode.map == 0) {
1045         if (cpu->cfg.satp_mode.init == 0) {
1046             /* If unset by the user, we fall back to the default satp mode. */
1047             set_satp_mode_default_map(cpu);
1048         } else {
1049             /*
1050              * Find the lowest level that the user disabled and then enable
1051              * the first valid level below it, as found in
1052              * valid_vm_1_10_32/64.
1053              */
1054             for (int i = 1; i < 16; ++i) {
1055                 if ((cpu->cfg.satp_mode.init & (1 << i)) &&
1056                     (cpu->cfg.satp_mode.supported & (1 << i))) {
1057                     for (int j = i - 1; j >= 0; --j) {
1058                         if (cpu->cfg.satp_mode.supported & (1 << j)) {
1059                             cpu->cfg.satp_mode.map |= (1 << j);
1060                             break;
1061                         }
1062                     }
1063                     break;
1064                 }
1065             }
1066         }
1067     }
1068 
1069     satp_mode_map_max = satp_mode_max_from_map(cpu->cfg.satp_mode.map);
1070 
1071     /* Make sure the user asked for a supported configuration (HW and QEMU) */
1072     if (satp_mode_map_max > satp_mode_supported_max) {
1073         error_setg(errp, "satp_mode %s is higher than hw max capability %s",
1074                    satp_mode_str(satp_mode_map_max, rv32),
1075                    satp_mode_str(satp_mode_supported_max, rv32));
1076         return;
1077     }
1078 
1079     /*
1080      * Make sure the user did not ask for an invalid configuration as per
1081      * the specification.
1082      */
1083     if (!rv32) {
1084         for (int i = satp_mode_map_max - 1; i >= 0; --i) {
1085             if (!(cpu->cfg.satp_mode.map & (1 << i)) &&
1086                 (cpu->cfg.satp_mode.init & (1 << i)) &&
1087                 (cpu->cfg.satp_mode.supported & (1 << i))) {
1088                 error_setg(errp, "cannot disable %s satp mode if %s "
1089                            "is enabled", satp_mode_str(i, false),
1090                            satp_mode_str(satp_mode_map_max, false));
1091                 return;
1092             }
1093         }
1094     }
1095 
1096     /* Finally expand the map so that all valid modes are set */
1097     for (int i = satp_mode_map_max - 1; i >= 0; --i) {
1098         if (cpu->cfg.satp_mode.supported & (1 << i)) {
1099             cpu->cfg.satp_mode.map |= (1 << i);
1100         }
1101     }
1102 }
1103 #endif
1104 
1105 void riscv_cpu_finalize_features(RISCVCPU *cpu, Error **errp)
1106 {
1107     Error *local_err = NULL;
1108 
1109 #ifndef CONFIG_USER_ONLY
1110     riscv_cpu_satp_mode_finalize(cpu, &local_err);
1111     if (local_err != NULL) {
1112         error_propagate(errp, local_err);
1113         return;
1114     }
1115 #endif
1116 
1117     /*
1118      * KVM accel does not have a specialized finalize()
1119      * callback because its extensions are validated
1120      * in the get()/set() callbacks of each property.
1121      */
1122     if (tcg_enabled()) {
1123         riscv_tcg_cpu_finalize_features(cpu, &local_err);
1124         if (local_err != NULL) {
1125             error_propagate(errp, local_err);
1126             return;
1127         }
1128     }
1129 }
1130 
1131 static void riscv_cpu_realize(DeviceState *dev, Error **errp)
1132 {
1133     CPUState *cs = CPU(dev);
1134     RISCVCPU *cpu = RISCV_CPU(dev);
1135     RISCVCPUClass *mcc = RISCV_CPU_GET_CLASS(dev);
1136     Error *local_err = NULL;
1137 
1138     if (object_dynamic_cast(OBJECT(dev), TYPE_RISCV_CPU_ANY) != NULL) {
1139         warn_report("The 'any' CPU is deprecated and will be "
1140                     "removed in the future.");
1141     }
1142 
1143     cpu_exec_realizefn(cs, &local_err);
1144     if (local_err != NULL) {
1145         error_propagate(errp, local_err);
1146         return;
1147     }
1148 
1149     riscv_cpu_finalize_features(cpu, &local_err);
1150     if (local_err != NULL) {
1151         error_propagate(errp, local_err);
1152         return;
1153     }
1154 
1155     riscv_cpu_register_gdb_regs_for_features(cs);
1156 
1157 #ifndef CONFIG_USER_ONLY
1158     if (cpu->cfg.debug) {
1159         riscv_trigger_realize(&cpu->env);
1160     }
1161 #endif
1162 
1163     qemu_init_vcpu(cs);
1164     cpu_reset(cs);
1165 
1166     mcc->parent_realize(dev, errp);
1167 }
1168 
1169 bool riscv_cpu_accelerator_compatible(RISCVCPU *cpu)
1170 {
1171     if (tcg_enabled()) {
1172         return riscv_cpu_tcg_compatible(cpu);
1173     }
1174 
1175     return true;
1176 }
1177 
1178 #ifndef CONFIG_USER_ONLY
1179 static void cpu_riscv_get_satp(Object *obj, Visitor *v, const char *name,
1180                                void *opaque, Error **errp)
1181 {
1182     RISCVSATPMap *satp_map = opaque;
1183     uint8_t satp = satp_mode_from_str(name);
1184     bool value;
1185 
1186     value = satp_map->map & (1 << satp);
1187 
1188     visit_type_bool(v, name, &value, errp);
1189 }
1190 
1191 static void cpu_riscv_set_satp(Object *obj, Visitor *v, const char *name,
1192                                void *opaque, Error **errp)
1193 {
1194     RISCVSATPMap *satp_map = opaque;
1195     uint8_t satp = satp_mode_from_str(name);
1196     bool value;
1197 
1198     if (!visit_type_bool(v, name, &value, errp)) {
1199         return;
1200     }
1201 
1202     satp_map->map = deposit32(satp_map->map, satp, 1, value);
1203     satp_map->init |= 1 << satp;
1204 }
1205 
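/*
 * These properties let users pick the maximum translation mode from the
 * command line, e.g. (illustrative) '-cpu rv64,sv48=on'; the finalize
 * code in riscv_cpu_satp_mode_finalize() then fills in all valid lower
 * modes.
 */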
1206 void riscv_add_satp_mode_properties(Object *obj)
1207 {
1208     RISCVCPU *cpu = RISCV_CPU(obj);
1209 
1210     if (cpu->env.misa_mxl == MXL_RV32) {
1211         object_property_add(obj, "sv32", "bool", cpu_riscv_get_satp,
1212                             cpu_riscv_set_satp, NULL, &cpu->cfg.satp_mode);
1213     } else {
1214         object_property_add(obj, "sv39", "bool", cpu_riscv_get_satp,
1215                             cpu_riscv_set_satp, NULL, &cpu->cfg.satp_mode);
1216         object_property_add(obj, "sv48", "bool", cpu_riscv_get_satp,
1217                             cpu_riscv_set_satp, NULL, &cpu->cfg.satp_mode);
1218         object_property_add(obj, "sv57", "bool", cpu_riscv_get_satp,
1219                             cpu_riscv_set_satp, NULL, &cpu->cfg.satp_mode);
1220         object_property_add(obj, "sv64", "bool", cpu_riscv_get_satp,
1221                             cpu_riscv_set_satp, NULL, &cpu->cfg.satp_mode);
1222     }
1223 }
1224 
1225 static void riscv_cpu_set_irq(void *opaque, int irq, int level)
1226 {
1227     RISCVCPU *cpu = RISCV_CPU(opaque);
1228     CPURISCVState *env = &cpu->env;
1229 
1230     if (irq < IRQ_LOCAL_MAX) {
1231         switch (irq) {
1232         case IRQ_U_SOFT:
1233         case IRQ_S_SOFT:
1234         case IRQ_VS_SOFT:
1235         case IRQ_M_SOFT:
1236         case IRQ_U_TIMER:
1237         case IRQ_S_TIMER:
1238         case IRQ_VS_TIMER:
1239         case IRQ_M_TIMER:
1240         case IRQ_U_EXT:
1241         case IRQ_VS_EXT:
1242         case IRQ_M_EXT:
1243             if (kvm_enabled()) {
1244                 kvm_riscv_set_irq(cpu, irq, level);
1245             } else {
1246                 riscv_cpu_update_mip(env, 1 << irq, BOOL_TO_MASK(level));
1247             }
1248             break;
1249         case IRQ_S_EXT:
1250             if (kvm_enabled()) {
1251                 kvm_riscv_set_irq(cpu, irq, level);
1252             } else {
1253                 env->external_seip = level;
1254                 riscv_cpu_update_mip(env, 1 << irq,
1255                                      BOOL_TO_MASK(level | env->software_seip));
1256             }
1257             break;
1258         default:
1259             g_assert_not_reached();
1260         }
1261     } else if (irq < (IRQ_LOCAL_MAX + IRQ_LOCAL_GUEST_MAX)) {
1262         /* Require H-extension for handling guest local interrupts */
1263         if (!riscv_has_ext(env, RVH)) {
1264             g_assert_not_reached();
1265         }
1266 
1267         /* Compute bit position in HGEIP CSR */
1268         irq = irq - IRQ_LOCAL_MAX + 1;
1269         if (env->geilen < irq) {
1270             g_assert_not_reached();
1271         }
1272 
1273         /* Update HGEIP CSR */
1274         env->hgeip &= ~((target_ulong)1 << irq);
1275         if (level) {
1276             env->hgeip |= (target_ulong)1 << irq;
1277         }
1278 
1279         /* Update mip.SGEIP bit */
1280         riscv_cpu_update_mip(env, MIP_SGEIP,
1281                              BOOL_TO_MASK(!!(env->hgeie & env->hgeip)));
1282     } else {
1283         g_assert_not_reached();
1284     }
1285 }
1286 #endif /* CONFIG_USER_ONLY */
1287 
1288 static bool riscv_cpu_is_dynamic(Object *cpu_obj)
1289 {
1290     return object_dynamic_cast(cpu_obj, TYPE_RISCV_DYNAMIC_CPU) != NULL;
1291 }
1292 
1293 static void riscv_cpu_post_init(Object *obj)
1294 {
1295     accel_cpu_instance_init(CPU(obj));
1296 }
1297 
1298 static void riscv_cpu_init(Object *obj)
1299 {
1300     RISCVCPU *cpu = RISCV_CPU(obj);
1301 
1302 #ifndef CONFIG_USER_ONLY
1303     qdev_init_gpio_in(DEVICE(obj), riscv_cpu_set_irq,
1304                       IRQ_LOCAL_MAX + IRQ_LOCAL_GUEST_MAX);
1305 #endif /* CONFIG_USER_ONLY */
1306 
1307     general_user_opts = g_hash_table_new(g_str_hash, g_str_equal);
1308 
1309     /*
1310      * The timer and performance counter extensions were supported
1311      * in QEMU before they were added as discrete extensions in the
1312      * ISA. To keep compatibility we'll always default them to 'true'
1313      * for all CPUs. Each accelerator will decide what to do when
1314      * users disable them.
1315      */
1316     RISCV_CPU(obj)->cfg.ext_zicntr = true;
1317     RISCV_CPU(obj)->cfg.ext_zihpm = true;
1318 
1319     /* Default values for non-bool cpu properties */
1320     cpu->cfg.pmu_mask = MAKE_64BIT_MASK(3, 16);
1321     cpu->cfg.vlen = 128;
1322     cpu->env.vext_ver = VEXT_VERSION_1_00_0;
1323 }
1324 
1325 typedef struct misa_ext_info {
1326     const char *name;
1327     const char *description;
1328 } MISAExtInfo;
1329 
1330 #define MISA_INFO_IDX(_bit) \
1331     __builtin_ctz(_bit)
1332 
1333 #define MISA_EXT_INFO(_bit, _propname, _descr) \
1334     [MISA_INFO_IDX(_bit)] = {.name = _propname, .description = _descr}
1335 
1336 static const MISAExtInfo misa_ext_info_arr[] = {
1337     MISA_EXT_INFO(RVA, "a", "Atomic instructions"),
1338     MISA_EXT_INFO(RVC, "c", "Compressed instructions"),
1339     MISA_EXT_INFO(RVD, "d", "Double-precision floating point"),
1340     MISA_EXT_INFO(RVF, "f", "Single-precision floating point"),
1341     MISA_EXT_INFO(RVI, "i", "Base integer instruction set"),
1342     MISA_EXT_INFO(RVE, "e", "Base integer instruction set (embedded)"),
1343     MISA_EXT_INFO(RVM, "m", "Integer multiplication and division"),
1344     MISA_EXT_INFO(RVS, "s", "Supervisor-level instructions"),
1345     MISA_EXT_INFO(RVU, "u", "User-level instructions"),
1346     MISA_EXT_INFO(RVH, "h", "Hypervisor"),
1347     MISA_EXT_INFO(RVJ, "x-j", "Dynamically translated languages"),
1348     MISA_EXT_INFO(RVV, "v", "Vector operations"),
1349     MISA_EXT_INFO(RVG, "g", "General purpose (IMAFD_Zicsr_Zifencei)"),
1350     MISA_EXT_INFO(RVB, "x-b", "Bit manipulation (Zba_Zbb_Zbs)")
1351 };
1352 
1353 static int riscv_validate_misa_info_idx(uint32_t bit)
1354 {
1355     int idx;
1356 
1357     /*
1358      * Our lowest valid input (RVA) is 1 and
1359      * __builtin_ctz() is UB with zero.
1360      */
1361     g_assert(bit != 0);
1362     idx = MISA_INFO_IDX(bit);
1363 
1364     g_assert(idx < ARRAY_SIZE(misa_ext_info_arr));
1365     return idx;
1366 }
1367 
1368 const char *riscv_get_misa_ext_name(uint32_t bit)
1369 {
1370     int idx = riscv_validate_misa_info_idx(bit);
1371     const char *val = misa_ext_info_arr[idx].name;
1372 
1373     g_assert(val != NULL);
1374     return val;
1375 }
1376 
1377 const char *riscv_get_misa_ext_description(uint32_t bit)
1378 {
1379     int idx = riscv_validate_misa_info_idx(bit);
1380     const char *val = misa_ext_info_arr[idx].description;
1381 
1382     g_assert(val != NULL);
1383     return val;
1384 }
1385 
1386 #define MULTI_EXT_CFG_BOOL(_name, _prop, _defval) \
1387     {.name = _name, .offset = CPU_CFG_OFFSET(_prop), \
1388      .enabled = _defval}
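/*
 * For example, MULTI_EXT_CFG_BOOL("zba", ext_zba, true) describes a
 * "zba" property backed by cpu->cfg.ext_zba with a default value of
 * true.
 */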
1389 
1390 const RISCVCPUMultiExtConfig riscv_cpu_extensions[] = {
1391     /* Defaults for standard extensions */
1392     MULTI_EXT_CFG_BOOL("sscofpmf", ext_sscofpmf, false),
1393     MULTI_EXT_CFG_BOOL("zifencei", ext_zifencei, true),
1394     MULTI_EXT_CFG_BOOL("zicsr", ext_zicsr, true),
1395     MULTI_EXT_CFG_BOOL("zihintntl", ext_zihintntl, true),
1396     MULTI_EXT_CFG_BOOL("zihintpause", ext_zihintpause, true),
1397     MULTI_EXT_CFG_BOOL("zacas", ext_zacas, false),
1398     MULTI_EXT_CFG_BOOL("zawrs", ext_zawrs, true),
1399     MULTI_EXT_CFG_BOOL("zfa", ext_zfa, true),
1400     MULTI_EXT_CFG_BOOL("zfh", ext_zfh, false),
1401     MULTI_EXT_CFG_BOOL("zfhmin", ext_zfhmin, false),
1402     MULTI_EXT_CFG_BOOL("zve32f", ext_zve32f, false),
1403     MULTI_EXT_CFG_BOOL("zve64f", ext_zve64f, false),
1404     MULTI_EXT_CFG_BOOL("zve64d", ext_zve64d, false),
1405     MULTI_EXT_CFG_BOOL("sstc", ext_sstc, true),
1406 
1407     MULTI_EXT_CFG_BOOL("smepmp", ext_smepmp, false),
1408     MULTI_EXT_CFG_BOOL("smstateen", ext_smstateen, false),
1409     MULTI_EXT_CFG_BOOL("svadu", ext_svadu, true),
1410     MULTI_EXT_CFG_BOOL("svinval", ext_svinval, false),
1411     MULTI_EXT_CFG_BOOL("svnapot", ext_svnapot, false),
1412     MULTI_EXT_CFG_BOOL("svpbmt", ext_svpbmt, false),
1413 
1414     MULTI_EXT_CFG_BOOL("zicntr", ext_zicntr, true),
1415     MULTI_EXT_CFG_BOOL("zihpm", ext_zihpm, true),
1416 
1417     MULTI_EXT_CFG_BOOL("zba", ext_zba, true),
1418     MULTI_EXT_CFG_BOOL("zbb", ext_zbb, true),
1419     MULTI_EXT_CFG_BOOL("zbc", ext_zbc, true),
1420     MULTI_EXT_CFG_BOOL("zbkb", ext_zbkb, false),
1421     MULTI_EXT_CFG_BOOL("zbkc", ext_zbkc, false),
1422     MULTI_EXT_CFG_BOOL("zbkx", ext_zbkx, false),
1423     MULTI_EXT_CFG_BOOL("zbs", ext_zbs, true),
1424     MULTI_EXT_CFG_BOOL("zk", ext_zk, false),
1425     MULTI_EXT_CFG_BOOL("zkn", ext_zkn, false),
1426     MULTI_EXT_CFG_BOOL("zknd", ext_zknd, false),
1427     MULTI_EXT_CFG_BOOL("zkne", ext_zkne, false),
1428     MULTI_EXT_CFG_BOOL("zknh", ext_zknh, false),
1429     MULTI_EXT_CFG_BOOL("zkr", ext_zkr, false),
1430     MULTI_EXT_CFG_BOOL("zks", ext_zks, false),
1431     MULTI_EXT_CFG_BOOL("zksed", ext_zksed, false),
1432     MULTI_EXT_CFG_BOOL("zksh", ext_zksh, false),
1433     MULTI_EXT_CFG_BOOL("zkt", ext_zkt, false),
1434 
1435     MULTI_EXT_CFG_BOOL("zdinx", ext_zdinx, false),
1436     MULTI_EXT_CFG_BOOL("zfinx", ext_zfinx, false),
1437     MULTI_EXT_CFG_BOOL("zhinx", ext_zhinx, false),
1438     MULTI_EXT_CFG_BOOL("zhinxmin", ext_zhinxmin, false),
1439 
1440     MULTI_EXT_CFG_BOOL("zicbom", ext_zicbom, true),
1441     MULTI_EXT_CFG_BOOL("zicbop", ext_zicbop, true),
1442     MULTI_EXT_CFG_BOOL("zicboz", ext_zicboz, true),
1443 
1444     MULTI_EXT_CFG_BOOL("zmmul", ext_zmmul, false),
1445 
1446     MULTI_EXT_CFG_BOOL("zca", ext_zca, false),
1447     MULTI_EXT_CFG_BOOL("zcb", ext_zcb, false),
1448     MULTI_EXT_CFG_BOOL("zcd", ext_zcd, false),
1449     MULTI_EXT_CFG_BOOL("zce", ext_zce, false),
1450     MULTI_EXT_CFG_BOOL("zcf", ext_zcf, false),
1451     MULTI_EXT_CFG_BOOL("zcmp", ext_zcmp, false),
1452     MULTI_EXT_CFG_BOOL("zcmt", ext_zcmt, false),
1453     MULTI_EXT_CFG_BOOL("zicond", ext_zicond, false),
1454 
1455     /* Vector cryptography extensions */
1456     MULTI_EXT_CFG_BOOL("zvbb", ext_zvbb, false),
1457     MULTI_EXT_CFG_BOOL("zvbc", ext_zvbc, false),
1458     MULTI_EXT_CFG_BOOL("zvkb", ext_zvkb, false),
1459     MULTI_EXT_CFG_BOOL("zvkg", ext_zvkg, false),
1460     MULTI_EXT_CFG_BOOL("zvkned", ext_zvkned, false),
1461     MULTI_EXT_CFG_BOOL("zvknha", ext_zvknha, false),
1462     MULTI_EXT_CFG_BOOL("zvknhb", ext_zvknhb, false),
1463     MULTI_EXT_CFG_BOOL("zvksed", ext_zvksed, false),
1464     MULTI_EXT_CFG_BOOL("zvksh", ext_zvksh, false),
1465     MULTI_EXT_CFG_BOOL("zvkt", ext_zvkt, false),
1466     MULTI_EXT_CFG_BOOL("zvkn", ext_zvkn, false),
1467     MULTI_EXT_CFG_BOOL("zvknc", ext_zvknc, false),
1468     MULTI_EXT_CFG_BOOL("zvkng", ext_zvkng, false),
1469     MULTI_EXT_CFG_BOOL("zvks", ext_zvks, false),
1470     MULTI_EXT_CFG_BOOL("zvksc", ext_zvksc, false),
1471     MULTI_EXT_CFG_BOOL("zvksg", ext_zvksg, false),
1472 
1473     DEFINE_PROP_END_OF_LIST(),
1474 };
1475 
1476 const RISCVCPUMultiExtConfig riscv_cpu_vendor_exts[] = {
1477     MULTI_EXT_CFG_BOOL("xtheadba", ext_xtheadba, false),
1478     MULTI_EXT_CFG_BOOL("xtheadbb", ext_xtheadbb, false),
1479     MULTI_EXT_CFG_BOOL("xtheadbs", ext_xtheadbs, false),
1480     MULTI_EXT_CFG_BOOL("xtheadcmo", ext_xtheadcmo, false),
1481     MULTI_EXT_CFG_BOOL("xtheadcondmov", ext_xtheadcondmov, false),
1482     MULTI_EXT_CFG_BOOL("xtheadfmemidx", ext_xtheadfmemidx, false),
1483     MULTI_EXT_CFG_BOOL("xtheadfmv", ext_xtheadfmv, false),
1484     MULTI_EXT_CFG_BOOL("xtheadmac", ext_xtheadmac, false),
1485     MULTI_EXT_CFG_BOOL("xtheadmemidx", ext_xtheadmemidx, false),
1486     MULTI_EXT_CFG_BOOL("xtheadmempair", ext_xtheadmempair, false),
1487     MULTI_EXT_CFG_BOOL("xtheadsync", ext_xtheadsync, false),
1488     MULTI_EXT_CFG_BOOL("xventanacondops", ext_XVentanaCondOps, false),
1489 
1490     DEFINE_PROP_END_OF_LIST(),
1491 };
1492 
1493 /* These are experimental, so they are marked with 'x-' */
1494 const RISCVCPUMultiExtConfig riscv_cpu_experimental_exts[] = {
1495     MULTI_EXT_CFG_BOOL("x-smaia", ext_smaia, false),
1496     MULTI_EXT_CFG_BOOL("x-ssaia", ext_ssaia, false),
1497 
1498     MULTI_EXT_CFG_BOOL("x-zvfh", ext_zvfh, false),
1499     MULTI_EXT_CFG_BOOL("x-zvfhmin", ext_zvfhmin, false),
1500 
1501     MULTI_EXT_CFG_BOOL("x-zfbfmin", ext_zfbfmin, false),
1502     MULTI_EXT_CFG_BOOL("x-zvfbfmin", ext_zvfbfmin, false),
1503     MULTI_EXT_CFG_BOOL("x-zvfbfwma", ext_zvfbfwma, false),
1504 
1505     DEFINE_PROP_END_OF_LIST(),
1506 };
1507 
1508 const RISCVCPUMultiExtConfig riscv_cpu_named_features[] = {
1509     MULTI_EXT_CFG_BOOL("svade", svade, true),
1510     MULTI_EXT_CFG_BOOL("zic64b", zic64b, true),
1511 
1512     DEFINE_PROP_END_OF_LIST(),
1513 };
1514 
1515 /* Deprecated entries marked for future removal */
1516 const RISCVCPUMultiExtConfig riscv_cpu_deprecated_exts[] = {
1517     MULTI_EXT_CFG_BOOL("Zifencei", ext_zifencei, true),
1518     MULTI_EXT_CFG_BOOL("Zicsr", ext_zicsr, true),
1519     MULTI_EXT_CFG_BOOL("Zihintntl", ext_zihintntl, true),
1520     MULTI_EXT_CFG_BOOL("Zihintpause", ext_zihintpause, true),
1521     MULTI_EXT_CFG_BOOL("Zawrs", ext_zawrs, true),
1522     MULTI_EXT_CFG_BOOL("Zfa", ext_zfa, true),
1523     MULTI_EXT_CFG_BOOL("Zfh", ext_zfh, false),
1524     MULTI_EXT_CFG_BOOL("Zfhmin", ext_zfhmin, false),
1525     MULTI_EXT_CFG_BOOL("Zve32f", ext_zve32f, false),
1526     MULTI_EXT_CFG_BOOL("Zve64f", ext_zve64f, false),
1527     MULTI_EXT_CFG_BOOL("Zve64d", ext_zve64d, false),
1528 
1529     DEFINE_PROP_END_OF_LIST(),
1530 };
1531 
1532 static void cpu_set_prop_err(RISCVCPU *cpu, const char *propname,
1533                              Error **errp)
1534 {
1535     g_autofree char *cpuname = riscv_cpu_get_name(cpu);
1536     error_setg(errp, "CPU '%s' does not allow changing the value of '%s'",
1537                cpuname, propname);
1538 }
1539 
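     /*
      * "pmu-num" is the legacy way of configuring the number of
      * programmable hpm counters.  It is kept for compatibility and is
      * converted into the equivalent "pmu-mask": counters 0-2 (cycle,
      * time, instret) are fixed, so N programmable counters map to mask
      * bits 3..(N + 2), e.g. pmu-num=8 is the same as pmu-mask=0x7f8.
      */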
1540 static void prop_pmu_num_set(Object *obj, Visitor *v, const char *name,
1541                              void *opaque, Error **errp)
1542 {
1543     RISCVCPU *cpu = RISCV_CPU(obj);
1544     uint8_t pmu_num, curr_pmu_num;
1545     uint32_t pmu_mask;
1546 
1547     if (!visit_type_uint8(v, name, &pmu_num, errp)) {
             return;
         }
1548 
1549     curr_pmu_num = ctpop32(cpu->cfg.pmu_mask);
1550 
1551     if (pmu_num != curr_pmu_num && riscv_cpu_is_vendor(obj)) {
1552         cpu_set_prop_err(cpu, name, errp);
1553         error_append_hint(errp, "Current '%s' val: %u\n",
1554                           name, curr_pmu_num);
1555         return;
1556     }
1557 
1558     if (pmu_num > (RV_MAX_MHPMCOUNTERS - 3)) {
1559         error_setg(errp, "Number of counters exceeds maximum available");
1560         return;
1561     }
1562 
1563     if (pmu_num == 0) {
1564         pmu_mask = 0;
1565     } else {
1566         pmu_mask = MAKE_64BIT_MASK(3, pmu_num);
1567     }
1568 
1569     warn_report("\"pmu-num\" property is deprecated; use \"pmu-mask\"");
1570     cpu->cfg.pmu_mask = pmu_mask;
1571     cpu_option_add_user_setting("pmu-mask", pmu_mask);
1572 }
1573 
1574 static void prop_pmu_num_get(Object *obj, Visitor *v, const char *name,
1575                              void *opaque, Error **errp)
1576 {
1577     RISCVCPU *cpu = RISCV_CPU(obj);
1578     uint8_t pmu_num = ctpop32(cpu->cfg.pmu_mask);
1579 
1580     visit_type_uint8(v, name, &pmu_num, errp);
1581 }
1582 
1583 static const PropertyInfo prop_pmu_num = {
1584     .name = "pmu-num",
1585     .get = prop_pmu_num_get,
1586     .set = prop_pmu_num_set,
1587 };
1588 
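     /*
      * "pmu-mask" is a bitmask of the mhpmcounters to implement: each
      * set bit selects the corresponding counter.  Its popcount is
      * bounded by the number of programmable counters,
      * RV_MAX_MHPMCOUNTERS - 3.
      */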
1589 static void prop_pmu_mask_set(Object *obj, Visitor *v, const char *name,
1590                              void *opaque, Error **errp)
1591 {
1592     RISCVCPU *cpu = RISCV_CPU(obj);
1593     uint32_t value;
1594     uint8_t pmu_num;
1595 
1596     if (!visit_type_uint32(v, name, &value, errp)) {
             return;
         }
1597 
1598     if (value != cpu->cfg.pmu_mask && riscv_cpu_is_vendor(obj)) {
1599         cpu_set_prop_err(cpu, name, errp);
1600         error_append_hint(errp, "Current '%s' val: %x\n",
1601                           name, cpu->cfg.pmu_mask);
1602         return;
1603     }
1604 
1605     pmu_num = ctpop32(value);
1606 
1607     if (pmu_num > (RV_MAX_MHPMCOUNTERS - 3)) {
1608         error_setg(errp, "Number of counters exceeds maximum available");
1609         return;
1610     }
1611 
1612     cpu_option_add_user_setting(name, value);
1613     cpu->cfg.pmu_mask = value;
1614 }
1615 
1616 static void prop_pmu_mask_get(Object *obj, Visitor *v, const char *name,
1617                              void *opaque, Error **errp)
1618 {
1619     uint32_t pmu_mask = RISCV_CPU(obj)->cfg.pmu_mask;
1620 
1621     visit_type_uint32(v, name, &pmu_mask, errp);
1622 }
1623 
1624 static const PropertyInfo prop_pmu_mask = {
1625     .name = "pmu-mask",
1626     .get = prop_pmu_mask_get,
1627     .set = prop_pmu_mask_set,
1628 };
1629 
1630 static void prop_mmu_set(Object *obj, Visitor *v, const char *name,
1631                          void *opaque, Error **errp)
1632 {
1633     RISCVCPU *cpu = RISCV_CPU(obj);
1634     bool value;
1635 
1636     if (!visit_type_bool(v, name, &value, errp)) {
             return;
         }
1637 
1638     if (cpu->cfg.mmu != value && riscv_cpu_is_vendor(obj)) {
1639         cpu_set_prop_err(cpu, "mmu", errp);
1640         return;
1641     }
1642 
1643     cpu_option_add_user_setting(name, value);
1644     cpu->cfg.mmu = value;
1645 }
1646 
1647 static void prop_mmu_get(Object *obj, Visitor *v, const char *name,
1648                          void *opaque, Error **errp)
1649 {
1650     bool value = RISCV_CPU(obj)->cfg.mmu;
1651 
1652     visit_type_bool(v, name, &value, errp);
1653 }
1654 
1655 static const PropertyInfo prop_mmu = {
1656     .name = "mmu",
1657     .get = prop_mmu_get,
1658     .set = prop_mmu_set,
1659 };
1660 
1661 static void prop_pmp_set(Object *obj, Visitor *v, const char *name,
1662                          void *opaque, Error **errp)
1663 {
1664     RISCVCPU *cpu = RISCV_CPU(obj);
1665     bool value;
1666 
1667     if (!visit_type_bool(v, name, &value, errp)) {
             return;
         }
1668 
1669     if (cpu->cfg.pmp != value && riscv_cpu_is_vendor(obj)) {
1670         cpu_set_prop_err(cpu, name, errp);
1671         return;
1672     }
1673 
1674     cpu_option_add_user_setting(name, value);
1675     cpu->cfg.pmp = value;
1676 }
1677 
1678 static void prop_pmp_get(Object *obj, Visitor *v, const char *name,
1679                          void *opaque, Error **errp)
1680 {
1681     bool value = RISCV_CPU(obj)->cfg.pmp;
1682 
1683     visit_type_bool(v, name, &value, errp);
1684 }
1685 
1686 static const PropertyInfo prop_pmp = {
1687     .name = "pmp",
1688     .get = prop_pmp_get,
1689     .set = prop_pmp_set,
1690 };
1691 
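     /*
      * Helpers translating between the user-visible "priv_spec" version
      * strings (PRIV_VER_*_STR) and the internal PRIV_VERSION_* values.
      * priv_spec_from_str() returns a negative value for an unsupported
      * string.
      */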
1692 static int priv_spec_from_str(const char *priv_spec_str)
1693 {
1694     int priv_version = -1;
1695 
1696     if (!g_strcmp0(priv_spec_str, PRIV_VER_1_12_0_STR)) {
1697         priv_version = PRIV_VERSION_1_12_0;
1698     } else if (!g_strcmp0(priv_spec_str, PRIV_VER_1_11_0_STR)) {
1699         priv_version = PRIV_VERSION_1_11_0;
1700     } else if (!g_strcmp0(priv_spec_str, PRIV_VER_1_10_0_STR)) {
1701         priv_version = PRIV_VERSION_1_10_0;
1702     }
1703 
1704     return priv_version;
1705 }
1706 
1707 static const char *priv_spec_to_str(int priv_version)
1708 {
1709     switch (priv_version) {
1710     case PRIV_VERSION_1_10_0:
1711         return PRIV_VER_1_10_0_STR;
1712     case PRIV_VERSION_1_11_0:
1713         return PRIV_VER_1_11_0_STR;
1714     case PRIV_VERSION_1_12_0:
1715         return PRIV_VER_1_12_0_STR;
1716     default:
1717         return NULL;
1718     }
1719 }
1720 
1721 static void prop_priv_spec_set(Object *obj, Visitor *v, const char *name,
1722                                void *opaque, Error **errp)
1723 {
1724     RISCVCPU *cpu = RISCV_CPU(obj);
1725     g_autofree char *value = NULL;
1726     int priv_version = -1;
1727 
1728     if (!visit_type_str(v, name, &value, errp)) {
             return;
         }
1729 
1730     priv_version = priv_spec_from_str(value);
1731     if (priv_version < 0) {
1732         error_setg(errp, "Unsupported privilege spec version '%s'", value);
1733         return;
1734     }
1735 
1736     if (priv_version != cpu->env.priv_ver && riscv_cpu_is_vendor(obj)) {
1737         cpu_set_prop_err(cpu, name, errp);
1738         error_append_hint(errp, "Current '%s' val: %s\n", name,
1739                           object_property_get_str(obj, name, NULL));
1740         return;
1741     }
1742 
1743     cpu_option_add_user_setting(name, priv_version);
1744     cpu->env.priv_ver = priv_version;
1745 }
1746 
1747 static void prop_priv_spec_get(Object *obj, Visitor *v, const char *name,
1748                                void *opaque, Error **errp)
1749 {
1750     RISCVCPU *cpu = RISCV_CPU(obj);
1751     const char *value = priv_spec_to_str(cpu->env.priv_ver);
1752 
1753     visit_type_str(v, name, (char **)&value, errp);
1754 }
1755 
1756 static const PropertyInfo prop_priv_spec = {
1757     .name = "priv_spec",
1758     .get = prop_priv_spec_get,
1759     .set = prop_priv_spec_set,
1760 };
1761 
1762 static void prop_vext_spec_set(Object *obj, Visitor *v, const char *name,
1763                                void *opaque, Error **errp)
1764 {
1765     RISCVCPU *cpu = RISCV_CPU(obj);
1766     g_autofree char *value = NULL;
1767 
1768     if (!visit_type_str(v, name, &value, errp)) {
             return;
         }
1769 
1770     if (g_strcmp0(value, VEXT_VER_1_00_0_STR) != 0) {
1771         error_setg(errp, "Unsupported vector spec version '%s'", value);
1772         return;
1773     }
1774 
1775     cpu_option_add_user_setting(name, VEXT_VERSION_1_00_0);
1776     cpu->env.vext_ver = VEXT_VERSION_1_00_0;
1777 }
1778 
1779 static void prop_vext_spec_get(Object *obj, Visitor *v, const char *name,
1780                                void *opaque, Error **errp)
1781 {
1782     const char *value = VEXT_VER_1_00_0_STR;
1783 
1784     visit_type_str(v, name, (char **)&value, errp);
1785 }
1786 
1787 static const PropertyInfo prop_vext_spec = {
1788     .name = "vext_spec",
1789     .get = prop_vext_spec_get,
1790     .set = prop_vext_spec_set,
1791 };
1792 
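     /*
      * "vlen" is the vector register length in bits.  It must be a
      * power of two, and, like the other options, it cannot be changed
      * on a vendor CPU.
      */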
1793 static void prop_vlen_set(Object *obj, Visitor *v, const char *name,
1794                          void *opaque, Error **errp)
1795 {
1796     RISCVCPU *cpu = RISCV_CPU(obj);
1797     uint16_t value;
1798 
1799     if (!visit_type_uint16(v, name, &value, errp)) {
1800         return;
1801     }
1802 
1803     if (!is_power_of_2(value)) {
1804         error_setg(errp, "Vector extension VLEN must be power of 2");
1805         return;
1806     }
1807 
1808     if (value != cpu->cfg.vlen && riscv_cpu_is_vendor(obj)) {
1809         cpu_set_prop_err(cpu, name, errp);
1810         error_append_hint(errp, "Current '%s' val: %u\n",
1811                           name, cpu->cfg.vlen);
1812         return;
1813     }
1814 
1815     cpu_option_add_user_setting(name, value);
1816     cpu->cfg.vlen = value;
1817 }
1818 
1819 static void prop_vlen_get(Object *obj, Visitor *v, const char *name,
1820                          void *opaque, Error **errp)
1821 {
1822     uint16_t value = RISCV_CPU(obj)->cfg.vlen;
1823 
1824     visit_type_uint16(v, name, &value, errp);
1825 }
1826 
1827 static const PropertyInfo prop_vlen = {
1828     .name = "vlen",
1829     .get = prop_vlen_get,
1830     .set = prop_vlen_set,
1831 };
1832 
1833 Property riscv_cpu_options[] = {
1834     DEFINE_PROP_UINT16("elen", RISCVCPU, cfg.elen, 64),
1835 
1836     DEFINE_PROP_UINT16("cbom_blocksize", RISCVCPU, cfg.cbom_blocksize, 64),
1837     DEFINE_PROP_UINT16("cbop_blocksize", RISCVCPU, cfg.cbop_blocksize, 64),
1838     DEFINE_PROP_UINT16("cboz_blocksize", RISCVCPU, cfg.cboz_blocksize, 64),
1839 
1840     DEFINE_PROP_END_OF_LIST(),
1841 };
1842 
1843 /*
1844  * RVA22U64 defines some 'named features' or 'synthetic extensions'
1845  * that are cache-related: Za64rs, Zic64b, Ziccif, Ziccrse, Ziccamoa
1846  * and Zicclsm. We do not implement caching in QEMU, so we consider
1847  * all of these named features to be always enabled.
1848  *
1849  * There's no riscv,isa update for them (nor for zic64b, despite it
1850  * having a cfg offset) at this moment.
1851  */
1852 static RISCVCPUProfile RVA22U64 = {
1853     .parent = NULL,
1854     .name = "rva22u64",
1855     .misa_ext = RVI | RVM | RVA | RVF | RVD | RVC | RVU,
1856     .priv_spec = RISCV_PROFILE_ATTR_UNUSED,
1857     .satp_mode = RISCV_PROFILE_ATTR_UNUSED,
1858     .ext_offsets = {
1859         CPU_CFG_OFFSET(ext_zicsr), CPU_CFG_OFFSET(ext_zihintpause),
1860         CPU_CFG_OFFSET(ext_zba), CPU_CFG_OFFSET(ext_zbb),
1861         CPU_CFG_OFFSET(ext_zbs), CPU_CFG_OFFSET(ext_zfhmin),
1862         CPU_CFG_OFFSET(ext_zkt), CPU_CFG_OFFSET(ext_zicntr),
1863         CPU_CFG_OFFSET(ext_zihpm), CPU_CFG_OFFSET(ext_zicbom),
1864         CPU_CFG_OFFSET(ext_zicbop), CPU_CFG_OFFSET(ext_zicboz),
1865 
1866         /* mandatory named features for this profile */
1867         CPU_CFG_OFFSET(zic64b),
1868 
1869         RISCV_PROFILE_EXT_LIST_END
1870     }
1871 };
1872 
1873 /*
1874  * As with RVA22U64, RVA22S64 also defines 'named features'.
1875  *
1876  * Cache-related features that we consider enabled since we don't
1877  * implement caching: Ssccptr
1878  *
1879  * Other named features that we already implement: Sstvecd, Sstvala,
1880  * Sscounterenw
1881  *
1882  * Named features that we need to enable: svade
1883  *
1884  * The remaining features/extensions come from RVA22U64.
1885  */
1886 static RISCVCPUProfile RVA22S64 = {
1887     .parent = &RVA22U64,
1888     .name = "rva22s64",
1889     .misa_ext = RVS,
1890     .priv_spec = PRIV_VERSION_1_12_0,
1891     .satp_mode = VM_1_10_SV39,
1892     .ext_offsets = {
1893         /* rva22s64 exts */
1894         CPU_CFG_OFFSET(ext_zifencei), CPU_CFG_OFFSET(ext_svpbmt),
1895         CPU_CFG_OFFSET(ext_svinval),
1896 
1897         /* rva22s64 named features */
1898         CPU_CFG_OFFSET(svade),
1899 
1900         RISCV_PROFILE_EXT_LIST_END
1901     }
1902 };
1903 
1904 RISCVCPUProfile *riscv_profiles[] = {
1905     &RVA22U64,
1906     &RVA22S64,
1907     NULL,
1908 };
1909 
1910 static Property riscv_cpu_properties[] = {
1911     DEFINE_PROP_BOOL("debug", RISCVCPU, cfg.debug, true),
1912 
1913     {.name = "pmu-mask", .info = &prop_pmu_mask},
1914     {.name = "pmu-num", .info = &prop_pmu_num}, /* Deprecated */
1915 
1916     {.name = "mmu", .info = &prop_mmu},
1917     {.name = "pmp", .info = &prop_pmp},
1918 
1919     {.name = "priv_spec", .info = &prop_priv_spec},
1920     {.name = "vext_spec", .info = &prop_vext_spec},
1921 
1922     {.name = "vlen", .info = &prop_vlen},
1923 
1924 #ifndef CONFIG_USER_ONLY
1925     DEFINE_PROP_UINT64("resetvec", RISCVCPU, env.resetvec, DEFAULT_RSTVEC),
1926 #endif
1927 
1928     DEFINE_PROP_BOOL("short-isa-string", RISCVCPU, cfg.short_isa_string, false),
1929 
1930     DEFINE_PROP_BOOL("rvv_ta_all_1s", RISCVCPU, cfg.rvv_ta_all_1s, false),
1931     DEFINE_PROP_BOOL("rvv_ma_all_1s", RISCVCPU, cfg.rvv_ma_all_1s, false),
1932 
1933     /*
1934      * write_misa() is marked as experimental for now, so mark the
1935      * property with 'x-' and default it to 'false'.
1936      */
1937     DEFINE_PROP_BOOL("x-misa-w", RISCVCPU, cfg.misa_w, false),
1938     DEFINE_PROP_END_OF_LIST(),
1939 };
1940 
1941 #if defined(TARGET_RISCV64)
1942 static void rva22u64_profile_cpu_init(Object *obj)
1943 {
1944     rv64i_bare_cpu_init(obj);
1945 
1946     RVA22U64.enabled = true;
1947 }
1948 
1949 static void rva22s64_profile_cpu_init(Object *obj)
1950 {
1951     rv64i_bare_cpu_init(obj);
1952 
1953     RVA22S64.enabled = true;
1954 }
1955 #endif
1956 
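     /*
      * GDB has no rv128 architecture name, so RV128 harts are presented
      * to the debugger as riscv:rv64.
      */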
1957 static const gchar *riscv_gdb_arch_name(CPUState *cs)
1958 {
1959     RISCVCPU *cpu = RISCV_CPU(cs);
1960     CPURISCVState *env = &cpu->env;
1961 
1962     switch (riscv_cpu_mxl(env)) {
1963     case MXL_RV32:
1964         return "riscv:rv32";
1965     case MXL_RV64:
1966     case MXL_RV128:
1967         return "riscv:rv64";
1968     default:
1969         g_assert_not_reached();
1970     }
1971 }
1972 
1973 static const char *riscv_gdb_get_dynamic_xml(CPUState *cs, const char *xmlname)
1974 {
1975     RISCVCPU *cpu = RISCV_CPU(cs);
1976 
1977     if (strcmp(xmlname, "riscv-csr.xml") == 0) {
1978         return cpu->dyn_csr_xml;
1979     } else if (strcmp(xmlname, "riscv-vector.xml") == 0) {
1980         return cpu->dyn_vreg_xml;
1981     }
1982 
1983     return NULL;
1984 }
1985 
1986 #ifndef CONFIG_USER_ONLY
1987 static int64_t riscv_get_arch_id(CPUState *cs)
1988 {
1989     RISCVCPU *cpu = RISCV_CPU(cs);
1990 
1991     return cpu->env.mhartid;
1992 }
1993 
1994 #include "hw/core/sysemu-cpu-ops.h"
1995 
1996 static const struct SysemuCPUOps riscv_sysemu_ops = {
1997     .get_phys_page_debug = riscv_cpu_get_phys_page_debug,
1998     .write_elf64_note = riscv_cpu_write_elf64_note,
1999     .write_elf32_note = riscv_cpu_write_elf32_note,
2000     .legacy_vmsd = &vmstate_riscv_cpu,
2001 };
2002 #endif
2003 
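     /*
      * The machine ID registers (mvendorid, mimpid, marchid) are
      * exposed as class properties.  Only dynamic CPUs may change them;
      * for any other CPU type a write must match the current value.
      */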
2004 static void cpu_set_mvendorid(Object *obj, Visitor *v, const char *name,
2005                               void *opaque, Error **errp)
2006 {
2007     bool dynamic_cpu = riscv_cpu_is_dynamic(obj);
2008     RISCVCPU *cpu = RISCV_CPU(obj);
2009     uint32_t prev_val = cpu->cfg.mvendorid;
2010     uint32_t value;
2011 
2012     if (!visit_type_uint32(v, name, &value, errp)) {
2013         return;
2014     }
2015 
2016     if (!dynamic_cpu && prev_val != value) {
2017         error_setg(errp, "Unable to change %s mvendorid (0x%x)",
2018                    object_get_typename(obj), prev_val);
2019         return;
2020     }
2021 
2022     cpu->cfg.mvendorid = value;
2023 }
2024 
2025 static void cpu_get_mvendorid(Object *obj, Visitor *v, const char *name,
2026                               void *opaque, Error **errp)
2027 {
2028     uint32_t value = RISCV_CPU(obj)->cfg.mvendorid;
2029 
2030     visit_type_uint32(v, name, &value, errp);
2031 }
2032 
2033 static void cpu_set_mimpid(Object *obj, Visitor *v, const char *name,
2034                            void *opaque, Error **errp)
2035 {
2036     bool dynamic_cpu = riscv_cpu_is_dynamic(obj);
2037     RISCVCPU *cpu = RISCV_CPU(obj);
2038     uint64_t prev_val = cpu->cfg.mimpid;
2039     uint64_t value;
2040 
2041     if (!visit_type_uint64(v, name, &value, errp)) {
2042         return;
2043     }
2044 
2045     if (!dynamic_cpu && prev_val != value) {
2046         error_setg(errp, "Unable to change %s mimpid (0x%" PRIu64 ")",
2047                    object_get_typename(obj), prev_val);
2048         return;
2049     }
2050 
2051     cpu->cfg.mimpid = value;
2052 }
2053 
2054 static void cpu_get_mimpid(Object *obj, Visitor *v, const char *name,
2055                            void *opaque, Error **errp)
2056 {
2057     uint64_t value = RISCV_CPU(obj)->cfg.mimpid;
2058 
2059     visit_type_uint64(v, name, &value, errp);
2060 }
2061 
2062 static void cpu_set_marchid(Object *obj, Visitor *v, const char *name,
2063                             void *opaque, Error **errp)
2064 {
2065     bool dynamic_cpu = riscv_cpu_is_dynamic(obj);
2066     RISCVCPU *cpu = RISCV_CPU(obj);
2067     uint64_t prev_val = cpu->cfg.marchid;
2068     uint64_t value, invalid_val;
2069     uint32_t mxlen = 0;
2070 
2071     if (!visit_type_uint64(v, name, &value, errp)) {
2072         return;
2073     }
2074 
2075     if (!dynamic_cpu && prev_val != value) {
2076         error_setg(errp, "Unable to change %s marchid (0x%" PRIu64 ")",
2077                    object_get_typename(obj), prev_val);
2078         return;
2079     }
2080 
2081     switch (riscv_cpu_mxl(&cpu->env)) {
2082     case MXL_RV32:
2083         mxlen = 32;
2084         break;
2085     case MXL_RV64:
2086     case MXL_RV128:
2087         mxlen = 64;
2088         break;
2089     default:
2090         g_assert_not_reached();
2091     }
2092 
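         /*
          * A marchid whose only set bit is the MSB is treated as
          * invalid, so reject it here.
          */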
2093     invalid_val = 1ULL << (mxlen - 1);
2094 
2095     if (value == invalid_val) {
2096         error_setg(errp, "Unable to set marchid with MSB (%u) bit set "
2097                          "and the remaining bits zero", mxlen);
2098         return;
2099     }
2100 
2101     cpu->cfg.marchid = value;
2102 }
2103 
2104 static void cpu_get_marchid(Object *obj, Visitor *v, const char *name,
2105                            void *opaque, Error **errp)
2106 {
2107     uint64_t value = RISCV_CPU(obj)->cfg.marchid;
2108 
2109     visit_type_uint64(v, name, &value, errp);
2110 }
2111 
2112 static void riscv_cpu_class_init(ObjectClass *c, void *data)
2113 {
2114     RISCVCPUClass *mcc = RISCV_CPU_CLASS(c);
2115     CPUClass *cc = CPU_CLASS(c);
2116     DeviceClass *dc = DEVICE_CLASS(c);
2117     ResettableClass *rc = RESETTABLE_CLASS(c);
2118 
2119     device_class_set_parent_realize(dc, riscv_cpu_realize,
2120                                     &mcc->parent_realize);
2121 
2122     resettable_class_set_parent_phases(rc, NULL, riscv_cpu_reset_hold, NULL,
2123                                        &mcc->parent_phases);
2124 
2125     cc->class_by_name = riscv_cpu_class_by_name;
2126     cc->has_work = riscv_cpu_has_work;
2127     cc->mmu_index = riscv_cpu_mmu_index;
2128     cc->dump_state = riscv_cpu_dump_state;
2129     cc->set_pc = riscv_cpu_set_pc;
2130     cc->get_pc = riscv_cpu_get_pc;
2131     cc->gdb_read_register = riscv_cpu_gdb_read_register;
2132     cc->gdb_write_register = riscv_cpu_gdb_write_register;
2133     cc->gdb_num_core_regs = 33;
2134     cc->gdb_stop_before_watchpoint = true;
2135     cc->disas_set_info = riscv_cpu_disas_set_info;
2136 #ifndef CONFIG_USER_ONLY
2137     cc->sysemu_ops = &riscv_sysemu_ops;
2138     cc->get_arch_id = riscv_get_arch_id;
2139 #endif
2140     cc->gdb_arch_name = riscv_gdb_arch_name;
2141     cc->gdb_get_dynamic_xml = riscv_gdb_get_dynamic_xml;
2142 
2143     object_class_property_add(c, "mvendorid", "uint32", cpu_get_mvendorid,
2144                               cpu_set_mvendorid, NULL, NULL);
2145 
2146     object_class_property_add(c, "mimpid", "uint64", cpu_get_mimpid,
2147                               cpu_set_mimpid, NULL, NULL);
2148 
2149     object_class_property_add(c, "marchid", "uint64", cpu_get_marchid,
2150                               cpu_set_marchid, NULL, NULL);
2151 
2152     device_class_set_props(dc, riscv_cpu_properties);
2153 }
2154 
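     /*
      * Append every enabled multi-letter extension to the ISA string,
      * each one preceded by an underscore, in the canonical order of
      * isa_edata_arr[].
      */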
2155 static void riscv_isa_string_ext(RISCVCPU *cpu, char **isa_str,
2156                                  int max_str_len)
2157 {
2158     const RISCVIsaExtData *edata;
2159     char *old = *isa_str;
2160     char *new = *isa_str;
2161 
2162     for (edata = isa_edata_arr; edata && edata->name; edata++) {
2163         if (isa_ext_is_enabled(cpu, edata->ext_enable_offset)) {
2164             new = g_strconcat(old, "_", edata->name, NULL);
2165             g_free(old);
2166             old = new;
2167         }
2168     }
2169 
2170     *isa_str = new;
2171 }
2172 
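     /*
      * Build the riscv,isa style string for this CPU: "rv<XLEN>"
      * followed by the enabled single-letter extensions in canonical
      * order, then (unless short-isa-string is set) the multi-letter
      * extensions, e.g. "rv64imafdc_zicsr_zifencei..." for a typical
      * rv64 CPU.
      */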
2173 char *riscv_isa_string(RISCVCPU *cpu)
2174 {
2175     int i;
2176     const size_t maxlen = sizeof("rv128") + sizeof(riscv_single_letter_exts);
2177     char *isa_str = g_new(char, maxlen);
2178     char *p = isa_str + snprintf(isa_str, maxlen, "rv%d", TARGET_LONG_BITS);
2179     for (i = 0; i < sizeof(riscv_single_letter_exts) - 1; i++) {
2180         if (cpu->env.misa_ext & RV(riscv_single_letter_exts[i])) {
2181             *p++ = qemu_tolower(riscv_single_letter_exts[i]);
2182         }
2183     }
2184     *p = '\0';
2185     if (!cpu->cfg.short_isa_string) {
2186         riscv_isa_string_ext(cpu, &isa_str, maxlen);
2187     }
2188     return isa_str;
2189 }
2190 
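     /*
      * Convenience wrappers for the TypeInfo array below: each macro
      * ties a CPU type name and instance_init function to the right QOM
      * parent (plain, dynamic, vendor or bare CPU; profile CPUs reuse
      * the bare CPU parent).
      */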
2191 #define DEFINE_CPU(type_name, initfn)      \
2192     {                                      \
2193         .name = type_name,                 \
2194         .parent = TYPE_RISCV_CPU,          \
2195         .instance_init = initfn            \
2196     }
2197 
2198 #define DEFINE_DYNAMIC_CPU(type_name, initfn) \
2199     {                                         \
2200         .name = type_name,                    \
2201         .parent = TYPE_RISCV_DYNAMIC_CPU,     \
2202         .instance_init = initfn               \
2203     }
2204 
2205 #define DEFINE_VENDOR_CPU(type_name, initfn) \
2206     {                                        \
2207         .name = type_name,                   \
2208         .parent = TYPE_RISCV_VENDOR_CPU,     \
2209         .instance_init = initfn              \
2210     }
2211 
2212 #define DEFINE_BARE_CPU(type_name, initfn) \
2213     {                                      \
2214         .name = type_name,                 \
2215         .parent = TYPE_RISCV_BARE_CPU,     \
2216         .instance_init = initfn            \
2217     }
2218 
2219 #define DEFINE_PROFILE_CPU(type_name, initfn) \
2220     {                                         \
2221         .name = type_name,                    \
2222         .parent = TYPE_RISCV_BARE_CPU,        \
2223         .instance_init = initfn               \
2224     }
2225 
2226 static const TypeInfo riscv_cpu_type_infos[] = {
2227     {
2228         .name = TYPE_RISCV_CPU,
2229         .parent = TYPE_CPU,
2230         .instance_size = sizeof(RISCVCPU),
2231         .instance_align = __alignof(RISCVCPU),
2232         .instance_init = riscv_cpu_init,
2233         .instance_post_init = riscv_cpu_post_init,
2234         .abstract = true,
2235         .class_size = sizeof(RISCVCPUClass),
2236         .class_init = riscv_cpu_class_init,
2237     },
2238     {
2239         .name = TYPE_RISCV_DYNAMIC_CPU,
2240         .parent = TYPE_RISCV_CPU,
2241         .abstract = true,
2242     },
2243     {
2244         .name = TYPE_RISCV_VENDOR_CPU,
2245         .parent = TYPE_RISCV_CPU,
2246         .abstract = true,
2247     },
2248     {
2249         .name = TYPE_RISCV_BARE_CPU,
2250         .parent = TYPE_RISCV_CPU,
2251         .abstract = true,
2252     },
2253     DEFINE_DYNAMIC_CPU(TYPE_RISCV_CPU_ANY,      riscv_any_cpu_init),
2254     DEFINE_DYNAMIC_CPU(TYPE_RISCV_CPU_MAX,      riscv_max_cpu_init),
2255 #if defined(TARGET_RISCV32)
2256     DEFINE_DYNAMIC_CPU(TYPE_RISCV_CPU_BASE32,   rv32_base_cpu_init),
2257     DEFINE_VENDOR_CPU(TYPE_RISCV_CPU_IBEX,        rv32_ibex_cpu_init),
2258     DEFINE_VENDOR_CPU(TYPE_RISCV_CPU_SIFIVE_E31,  rv32_sifive_e_cpu_init),
2259     DEFINE_VENDOR_CPU(TYPE_RISCV_CPU_SIFIVE_E34,  rv32_imafcu_nommu_cpu_init),
2260     DEFINE_VENDOR_CPU(TYPE_RISCV_CPU_SIFIVE_U34,  rv32_sifive_u_cpu_init),
2261 #elif defined(TARGET_RISCV64)
2262     DEFINE_DYNAMIC_CPU(TYPE_RISCV_CPU_BASE64,   rv64_base_cpu_init),
2263     DEFINE_VENDOR_CPU(TYPE_RISCV_CPU_SIFIVE_E51,  rv64_sifive_e_cpu_init),
2264     DEFINE_VENDOR_CPU(TYPE_RISCV_CPU_SIFIVE_U54,  rv64_sifive_u_cpu_init),
2265     DEFINE_VENDOR_CPU(TYPE_RISCV_CPU_SHAKTI_C,    rv64_sifive_u_cpu_init),
2266     DEFINE_VENDOR_CPU(TYPE_RISCV_CPU_THEAD_C906,  rv64_thead_c906_cpu_init),
2267     DEFINE_VENDOR_CPU(TYPE_RISCV_CPU_VEYRON_V1,   rv64_veyron_v1_cpu_init),
2268     DEFINE_DYNAMIC_CPU(TYPE_RISCV_CPU_BASE128,  rv128_base_cpu_init),
2269     DEFINE_BARE_CPU(TYPE_RISCV_CPU_RV64I, rv64i_bare_cpu_init),
2270     DEFINE_PROFILE_CPU(TYPE_RISCV_CPU_RVA22U64, rva22u64_profile_cpu_init),
2271     DEFINE_PROFILE_CPU(TYPE_RISCV_CPU_RVA22S64, rva22s64_profile_cpu_init),
2272 #endif
2273 };
2274 
2275 DEFINE_TYPES(riscv_cpu_type_infos)
2276