xref: /openbmc/qemu/target/riscv/cpu.c (revision 41f2b94e)
1 /*
2  * QEMU RISC-V CPU
3  *
4  * Copyright (c) 2016-2017 Sagar Karandikar, sagark@eecs.berkeley.edu
5  * Copyright (c) 2017-2018 SiFive, Inc.
6  *
7  * This program is free software; you can redistribute it and/or modify it
8  * under the terms and conditions of the GNU General Public License,
9  * version 2 or later, as published by the Free Software Foundation.
10  *
11  * This program is distributed in the hope it will be useful, but WITHOUT
12  * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
13  * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
14  * more details.
15  *
16  * You should have received a copy of the GNU General Public License along with
17  * this program.  If not, see <http://www.gnu.org/licenses/>.
18  */
19 
20 #include "qemu/osdep.h"
21 #include "qemu/qemu-print.h"
22 #include "qemu/ctype.h"
23 #include "qemu/log.h"
24 #include "cpu.h"
25 #include "cpu_vendorid.h"
26 #include "internals.h"
27 #include "exec/exec-all.h"
28 #include "qapi/error.h"
29 #include "qapi/visitor.h"
30 #include "qemu/error-report.h"
31 #include "hw/qdev-properties.h"
32 #include "migration/vmstate.h"
33 #include "fpu/softfloat-helpers.h"
34 #include "sysemu/kvm.h"
35 #include "sysemu/tcg.h"
36 #include "kvm/kvm_riscv.h"
37 #include "tcg/tcg-cpu.h"
38 #include "tcg/tcg.h"
39 
40 /* RISC-V CPU definitions */
41 static const char riscv_single_letter_exts[] = "IEMAFDQCBPVH";
42 const uint32_t misa_bits[] = {RVI, RVE, RVM, RVA, RVF, RVD, RVV,
43                               RVC, RVS, RVU, RVH, RVJ, RVG, RVB, 0};
44 
45 /*
46  * From vector_helper.c
47  * Note that vector data is stored in host-endian 64-bit chunks,
48  * so addressing bytes needs a host-endian fixup.
49  */
50 #if HOST_BIG_ENDIAN
51 #define BYTE(x)   ((x) ^ 7)
52 #else
53 #define BYTE(x)   (x)
54 #endif
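/*
 * Illustration: each vector register is stored as host-endian uint64_t
 * chunks, so BYTE() converts an element-relative byte index into the host
 * offset.  On a little-endian host BYTE(0) == 0; on a big-endian host
 * BYTE(0) == 7, i.e. the least significant byte of the first 64-bit chunk.
 */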
55 
56 bool riscv_cpu_is_32bit(RISCVCPU *cpu)
57 {
58     return riscv_cpu_mxl(&cpu->env) == MXL_RV32;
59 }
60 
61 /* Hash that stores general user-set numeric options */
62 static GHashTable *general_user_opts;
63 
64 static void cpu_option_add_user_setting(const char *optname, uint32_t value)
65 {
66     g_hash_table_insert(general_user_opts, (gpointer)optname,
67                         GUINT_TO_POINTER(value));
68 }
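/*
 * Example: prop_pmu_mask_set() below records the user's choice with
 * cpu_option_add_user_setting("pmu-mask", value), so validation code can
 * later tell an explicit user setting apart from the CPU model default.
 * Typically only options given on the command line end up in this hash.
 */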
69 
70 #define ISA_EXT_DATA_ENTRY(_name, _min_ver, _prop) \
71     {#_name, _min_ver, CPU_CFG_OFFSET(_prop)}
72 
73 /*
74  * Here are the ordering rules of extension naming defined by RISC-V
75  * specification :
76  * 1. All extensions should be separated from other multi-letter extensions
77  *    by an underscore.
78  * 2. The first letter following the 'Z' conventionally indicates the most
79  *    closely related alphabetical extension category, IMAFDQLCBKJTPVH.
80  *    If multiple 'Z' extensions are named, they should be ordered first
81  *    by category, then alphabetically within a category.
82  * 3. Standard supervisor-level extensions (starting with 'S') should be
83  *    listed after standard unprivileged extensions.  If multiple
84  *    supervisor-level extensions are listed, they should be ordered
85  *    alphabetically.
86  * 4. Non-standard extensions (starting with 'X') must be listed after all
87  *    standard extensions. They must be separated from other multi-letter
88  *    extensions by an underscore.
89  *
90  * Single letter extensions are checked in riscv_cpu_validate_misa_priv()
91  * instead.
92  */
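/*
 * For instance (illustrative string, not an actual QEMU CPU model):
 * "rv64imafdc_zicsr_zifencei_zba_zbb_sstc_svinval_xtheadba" follows these
 * rules -- 'Z' extensions ordered by category and then alphabetically,
 * 'S' extensions after the unprivileged ones, and 'X' vendor extensions
 * last.
 */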
93 const RISCVIsaExtData isa_edata_arr[] = {
94     ISA_EXT_DATA_ENTRY(zicbom, PRIV_VERSION_1_12_0, ext_zicbom),
95     ISA_EXT_DATA_ENTRY(zicbop, PRIV_VERSION_1_12_0, ext_zicbop),
96     ISA_EXT_DATA_ENTRY(zicboz, PRIV_VERSION_1_12_0, ext_zicboz),
97     ISA_EXT_DATA_ENTRY(zicond, PRIV_VERSION_1_12_0, ext_zicond),
98     ISA_EXT_DATA_ENTRY(zicntr, PRIV_VERSION_1_12_0, ext_zicntr),
99     ISA_EXT_DATA_ENTRY(zicsr, PRIV_VERSION_1_10_0, ext_zicsr),
100     ISA_EXT_DATA_ENTRY(zifencei, PRIV_VERSION_1_10_0, ext_zifencei),
101     ISA_EXT_DATA_ENTRY(zihintntl, PRIV_VERSION_1_10_0, ext_zihintntl),
102     ISA_EXT_DATA_ENTRY(zihintpause, PRIV_VERSION_1_10_0, ext_zihintpause),
103     ISA_EXT_DATA_ENTRY(zihpm, PRIV_VERSION_1_12_0, ext_zihpm),
104     ISA_EXT_DATA_ENTRY(zmmul, PRIV_VERSION_1_12_0, ext_zmmul),
105     ISA_EXT_DATA_ENTRY(zacas, PRIV_VERSION_1_12_0, ext_zacas),
106     ISA_EXT_DATA_ENTRY(zawrs, PRIV_VERSION_1_12_0, ext_zawrs),
107     ISA_EXT_DATA_ENTRY(zfa, PRIV_VERSION_1_12_0, ext_zfa),
108     ISA_EXT_DATA_ENTRY(zfbfmin, PRIV_VERSION_1_12_0, ext_zfbfmin),
109     ISA_EXT_DATA_ENTRY(zfh, PRIV_VERSION_1_11_0, ext_zfh),
110     ISA_EXT_DATA_ENTRY(zfhmin, PRIV_VERSION_1_11_0, ext_zfhmin),
111     ISA_EXT_DATA_ENTRY(zfinx, PRIV_VERSION_1_12_0, ext_zfinx),
112     ISA_EXT_DATA_ENTRY(zdinx, PRIV_VERSION_1_12_0, ext_zdinx),
113     ISA_EXT_DATA_ENTRY(zca, PRIV_VERSION_1_12_0, ext_zca),
114     ISA_EXT_DATA_ENTRY(zcb, PRIV_VERSION_1_12_0, ext_zcb),
115     ISA_EXT_DATA_ENTRY(zcf, PRIV_VERSION_1_12_0, ext_zcf),
116     ISA_EXT_DATA_ENTRY(zcd, PRIV_VERSION_1_12_0, ext_zcd),
117     ISA_EXT_DATA_ENTRY(zce, PRIV_VERSION_1_12_0, ext_zce),
118     ISA_EXT_DATA_ENTRY(zcmp, PRIV_VERSION_1_12_0, ext_zcmp),
119     ISA_EXT_DATA_ENTRY(zcmt, PRIV_VERSION_1_12_0, ext_zcmt),
120     ISA_EXT_DATA_ENTRY(zba, PRIV_VERSION_1_12_0, ext_zba),
121     ISA_EXT_DATA_ENTRY(zbb, PRIV_VERSION_1_12_0, ext_zbb),
122     ISA_EXT_DATA_ENTRY(zbc, PRIV_VERSION_1_12_0, ext_zbc),
123     ISA_EXT_DATA_ENTRY(zbkb, PRIV_VERSION_1_12_0, ext_zbkb),
124     ISA_EXT_DATA_ENTRY(zbkc, PRIV_VERSION_1_12_0, ext_zbkc),
125     ISA_EXT_DATA_ENTRY(zbkx, PRIV_VERSION_1_12_0, ext_zbkx),
126     ISA_EXT_DATA_ENTRY(zbs, PRIV_VERSION_1_12_0, ext_zbs),
127     ISA_EXT_DATA_ENTRY(zk, PRIV_VERSION_1_12_0, ext_zk),
128     ISA_EXT_DATA_ENTRY(zkn, PRIV_VERSION_1_12_0, ext_zkn),
129     ISA_EXT_DATA_ENTRY(zknd, PRIV_VERSION_1_12_0, ext_zknd),
130     ISA_EXT_DATA_ENTRY(zkne, PRIV_VERSION_1_12_0, ext_zkne),
131     ISA_EXT_DATA_ENTRY(zknh, PRIV_VERSION_1_12_0, ext_zknh),
132     ISA_EXT_DATA_ENTRY(zkr, PRIV_VERSION_1_12_0, ext_zkr),
133     ISA_EXT_DATA_ENTRY(zks, PRIV_VERSION_1_12_0, ext_zks),
134     ISA_EXT_DATA_ENTRY(zksed, PRIV_VERSION_1_12_0, ext_zksed),
135     ISA_EXT_DATA_ENTRY(zksh, PRIV_VERSION_1_12_0, ext_zksh),
136     ISA_EXT_DATA_ENTRY(zkt, PRIV_VERSION_1_12_0, ext_zkt),
137     ISA_EXT_DATA_ENTRY(zvbb, PRIV_VERSION_1_12_0, ext_zvbb),
138     ISA_EXT_DATA_ENTRY(zvbc, PRIV_VERSION_1_12_0, ext_zvbc),
139     ISA_EXT_DATA_ENTRY(zve32f, PRIV_VERSION_1_10_0, ext_zve32f),
140     ISA_EXT_DATA_ENTRY(zve64f, PRIV_VERSION_1_10_0, ext_zve64f),
141     ISA_EXT_DATA_ENTRY(zve64d, PRIV_VERSION_1_10_0, ext_zve64d),
142     ISA_EXT_DATA_ENTRY(zvfbfmin, PRIV_VERSION_1_12_0, ext_zvfbfmin),
143     ISA_EXT_DATA_ENTRY(zvfbfwma, PRIV_VERSION_1_12_0, ext_zvfbfwma),
144     ISA_EXT_DATA_ENTRY(zvfh, PRIV_VERSION_1_12_0, ext_zvfh),
145     ISA_EXT_DATA_ENTRY(zvfhmin, PRIV_VERSION_1_12_0, ext_zvfhmin),
146     ISA_EXT_DATA_ENTRY(zvkb, PRIV_VERSION_1_12_0, ext_zvkb),
147     ISA_EXT_DATA_ENTRY(zvkg, PRIV_VERSION_1_12_0, ext_zvkg),
148     ISA_EXT_DATA_ENTRY(zvkn, PRIV_VERSION_1_12_0, ext_zvkn),
149     ISA_EXT_DATA_ENTRY(zvknc, PRIV_VERSION_1_12_0, ext_zvknc),
150     ISA_EXT_DATA_ENTRY(zvkned, PRIV_VERSION_1_12_0, ext_zvkned),
151     ISA_EXT_DATA_ENTRY(zvkng, PRIV_VERSION_1_12_0, ext_zvkng),
152     ISA_EXT_DATA_ENTRY(zvknha, PRIV_VERSION_1_12_0, ext_zvknha),
153     ISA_EXT_DATA_ENTRY(zvknhb, PRIV_VERSION_1_12_0, ext_zvknhb),
154     ISA_EXT_DATA_ENTRY(zvks, PRIV_VERSION_1_12_0, ext_zvks),
155     ISA_EXT_DATA_ENTRY(zvksc, PRIV_VERSION_1_12_0, ext_zvksc),
156     ISA_EXT_DATA_ENTRY(zvksed, PRIV_VERSION_1_12_0, ext_zvksed),
157     ISA_EXT_DATA_ENTRY(zvksg, PRIV_VERSION_1_12_0, ext_zvksg),
158     ISA_EXT_DATA_ENTRY(zvksh, PRIV_VERSION_1_12_0, ext_zvksh),
159     ISA_EXT_DATA_ENTRY(zvkt, PRIV_VERSION_1_12_0, ext_zvkt),
160     ISA_EXT_DATA_ENTRY(zhinx, PRIV_VERSION_1_12_0, ext_zhinx),
161     ISA_EXT_DATA_ENTRY(zhinxmin, PRIV_VERSION_1_12_0, ext_zhinxmin),
162     ISA_EXT_DATA_ENTRY(smaia, PRIV_VERSION_1_12_0, ext_smaia),
163     ISA_EXT_DATA_ENTRY(smepmp, PRIV_VERSION_1_12_0, ext_smepmp),
164     ISA_EXT_DATA_ENTRY(smstateen, PRIV_VERSION_1_12_0, ext_smstateen),
165     ISA_EXT_DATA_ENTRY(ssaia, PRIV_VERSION_1_12_0, ext_ssaia),
166     ISA_EXT_DATA_ENTRY(sscofpmf, PRIV_VERSION_1_12_0, ext_sscofpmf),
167     ISA_EXT_DATA_ENTRY(sstc, PRIV_VERSION_1_12_0, ext_sstc),
168     ISA_EXT_DATA_ENTRY(svadu, PRIV_VERSION_1_12_0, ext_svadu),
169     ISA_EXT_DATA_ENTRY(svinval, PRIV_VERSION_1_12_0, ext_svinval),
170     ISA_EXT_DATA_ENTRY(svnapot, PRIV_VERSION_1_12_0, ext_svnapot),
171     ISA_EXT_DATA_ENTRY(svpbmt, PRIV_VERSION_1_12_0, ext_svpbmt),
172     ISA_EXT_DATA_ENTRY(xtheadba, PRIV_VERSION_1_11_0, ext_xtheadba),
173     ISA_EXT_DATA_ENTRY(xtheadbb, PRIV_VERSION_1_11_0, ext_xtheadbb),
174     ISA_EXT_DATA_ENTRY(xtheadbs, PRIV_VERSION_1_11_0, ext_xtheadbs),
175     ISA_EXT_DATA_ENTRY(xtheadcmo, PRIV_VERSION_1_11_0, ext_xtheadcmo),
176     ISA_EXT_DATA_ENTRY(xtheadcondmov, PRIV_VERSION_1_11_0, ext_xtheadcondmov),
177     ISA_EXT_DATA_ENTRY(xtheadfmemidx, PRIV_VERSION_1_11_0, ext_xtheadfmemidx),
178     ISA_EXT_DATA_ENTRY(xtheadfmv, PRIV_VERSION_1_11_0, ext_xtheadfmv),
179     ISA_EXT_DATA_ENTRY(xtheadmac, PRIV_VERSION_1_11_0, ext_xtheadmac),
180     ISA_EXT_DATA_ENTRY(xtheadmemidx, PRIV_VERSION_1_11_0, ext_xtheadmemidx),
181     ISA_EXT_DATA_ENTRY(xtheadmempair, PRIV_VERSION_1_11_0, ext_xtheadmempair),
182     ISA_EXT_DATA_ENTRY(xtheadsync, PRIV_VERSION_1_11_0, ext_xtheadsync),
183     ISA_EXT_DATA_ENTRY(xventanacondops, PRIV_VERSION_1_12_0, ext_XVentanaCondOps),
184 
185     DEFINE_PROP_END_OF_LIST(),
186 };
187 
188 bool isa_ext_is_enabled(RISCVCPU *cpu, uint32_t ext_offset)
189 {
190     bool *ext_enabled = (void *)&cpu->cfg + ext_offset;
191 
192     return *ext_enabled;
193 }
194 
195 void isa_ext_update_enabled(RISCVCPU *cpu, uint32_t ext_offset, bool en)
196 {
197     bool *ext_enabled = (void *)&cpu->cfg + ext_offset;
198 
199     *ext_enabled = en;
200 }
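/*
 * Both helpers take 'ext_offset' as the byte offset of a bool field in the
 * cfg struct, as produced by CPU_CFG_OFFSET().  For example,
 * isa_ext_is_enabled(cpu, CPU_CFG_OFFSET(ext_zicsr)) is equivalent to
 * reading cpu->cfg.ext_zicsr.
 */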
201 
202 bool riscv_cpu_is_vendor(Object *cpu_obj)
203 {
204     return object_dynamic_cast(cpu_obj, TYPE_RISCV_VENDOR_CPU) != NULL;
205 }
206 
207 const char * const riscv_int_regnames[] = {
208     "x0/zero", "x1/ra",  "x2/sp",  "x3/gp",  "x4/tp",  "x5/t0",   "x6/t1",
209     "x7/t2",   "x8/s0",  "x9/s1",  "x10/a0", "x11/a1", "x12/a2",  "x13/a3",
210     "x14/a4",  "x15/a5", "x16/a6", "x17/a7", "x18/s2", "x19/s3",  "x20/s4",
211     "x21/s5",  "x22/s6", "x23/s7", "x24/s8", "x25/s9", "x26/s10", "x27/s11",
212     "x28/t3",  "x29/t4", "x30/t5", "x31/t6"
213 };
214 
215 const char * const riscv_int_regnamesh[] = {
216     "x0h/zeroh", "x1h/rah",  "x2h/sph",   "x3h/gph",   "x4h/tph",  "x5h/t0h",
217     "x6h/t1h",   "x7h/t2h",  "x8h/s0h",   "x9h/s1h",   "x10h/a0h", "x11h/a1h",
218     "x12h/a2h",  "x13h/a3h", "x14h/a4h",  "x15h/a5h",  "x16h/a6h", "x17h/a7h",
219     "x18h/s2h",  "x19h/s3h", "x20h/s4h",  "x21h/s5h",  "x22h/s6h", "x23h/s7h",
220     "x24h/s8h",  "x25h/s9h", "x26h/s10h", "x27h/s11h", "x28h/t3h", "x29h/t4h",
221     "x30h/t5h",  "x31h/t6h"
222 };
223 
224 const char * const riscv_fpr_regnames[] = {
225     "f0/ft0",   "f1/ft1",  "f2/ft2",   "f3/ft3",   "f4/ft4",  "f5/ft5",
226     "f6/ft6",   "f7/ft7",  "f8/fs0",   "f9/fs1",   "f10/fa0", "f11/fa1",
227     "f12/fa2",  "f13/fa3", "f14/fa4",  "f15/fa5",  "f16/fa6", "f17/fa7",
228     "f18/fs2",  "f19/fs3", "f20/fs4",  "f21/fs5",  "f22/fs6", "f23/fs7",
229     "f24/fs8",  "f25/fs9", "f26/fs10", "f27/fs11", "f28/ft8", "f29/ft9",
230     "f30/ft10", "f31/ft11"
231 };
232 
233 const char * const riscv_rvv_regnames[] = {
234   "v0",  "v1",  "v2",  "v3",  "v4",  "v5",  "v6",
235   "v7",  "v8",  "v9",  "v10", "v11", "v12", "v13",
236   "v14", "v15", "v16", "v17", "v18", "v19", "v20",
237   "v21", "v22", "v23", "v24", "v25", "v26", "v27",
238   "v28", "v29", "v30", "v31"
239 };
240 
241 static const char * const riscv_excp_names[] = {
242     "misaligned_fetch",
243     "fault_fetch",
244     "illegal_instruction",
245     "breakpoint",
246     "misaligned_load",
247     "fault_load",
248     "misaligned_store",
249     "fault_store",
250     "user_ecall",
251     "supervisor_ecall",
252     "hypervisor_ecall",
253     "machine_ecall",
254     "exec_page_fault",
255     "load_page_fault",
256     "reserved",
257     "store_page_fault",
258     "reserved",
259     "reserved",
260     "reserved",
261     "reserved",
262     "guest_exec_page_fault",
263     "guest_load_page_fault",
264     "reserved",
265     "guest_store_page_fault",
266 };
267 
268 static const char * const riscv_intr_names[] = {
269     "u_software",
270     "s_software",
271     "vs_software",
272     "m_software",
273     "u_timer",
274     "s_timer",
275     "vs_timer",
276     "m_timer",
277     "u_external",
278     "s_external",
279     "vs_external",
280     "m_external",
281     "reserved",
282     "reserved",
283     "reserved",
284     "reserved"
285 };
286 
287 const char *riscv_cpu_get_trap_name(target_ulong cause, bool async)
288 {
289     if (async) {
290         return (cause < ARRAY_SIZE(riscv_intr_names)) ?
291                riscv_intr_names[cause] : "(unknown)";
292     } else {
293         return (cause < ARRAY_SIZE(riscv_excp_names)) ?
294                riscv_excp_names[cause] : "(unknown)";
295     }
296 }
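/*
 * Example: riscv_cpu_get_trap_name(RISCV_EXCP_ILLEGAL_INST, false) yields
 * "illegal_instruction", while any cause beyond the tables above yields
 * "(unknown)".
 */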
297 
298 void riscv_cpu_set_misa(CPURISCVState *env, RISCVMXL mxl, uint32_t ext)
299 {
300     env->misa_mxl_max = env->misa_mxl = mxl;
301     env->misa_ext_mask = env->misa_ext = ext;
302 }
303 
304 #ifndef CONFIG_USER_ONLY
305 static uint8_t satp_mode_from_str(const char *satp_mode_str)
306 {
307     if (!strncmp(satp_mode_str, "mbare", 5)) {
308         return VM_1_10_MBARE;
309     }
310 
311     if (!strncmp(satp_mode_str, "sv32", 4)) {
312         return VM_1_10_SV32;
313     }
314 
315     if (!strncmp(satp_mode_str, "sv39", 4)) {
316         return VM_1_10_SV39;
317     }
318 
319     if (!strncmp(satp_mode_str, "sv48", 4)) {
320         return VM_1_10_SV48;
321     }
322 
323     if (!strncmp(satp_mode_str, "sv57", 4)) {
324         return VM_1_10_SV57;
325     }
326 
327     if (!strncmp(satp_mode_str, "sv64", 4)) {
328         return VM_1_10_SV64;
329     }
330 
331     g_assert_not_reached();
332 }
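/*
 * Example: satp_mode_from_str("sv39") returns VM_1_10_SV39; an
 * unrecognised string is a programming error and hits the assertion.
 */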
333 
334 uint8_t satp_mode_max_from_map(uint32_t map)
335 {
336     /*
337      * 'map = 0' would make __builtin_clz() undefined, and even if it
338      * returned 32 the (31 - 32) result would wrap to a bogus value.
339      * There's no good result to return if 'map = 0' (e.g. returning 0
340      * would be ambiguous with the result for 'map = 1').
341      *
342      * Assert out if map = 0. Callers will have to deal with
343      * it outside of this function.
344      */
345     g_assert(map > 0);
346 
347     /* map here has at least one bit set, so no problem with clz */
348     return 31 - __builtin_clz(map);
349 }
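/*
 * Example: with map = (1 << VM_1_10_MBARE) | (1 << VM_1_10_SV32) the
 * highest set bit wins and the function returns VM_1_10_SV32.
 */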
350 
351 const char *satp_mode_str(uint8_t satp_mode, bool is_32_bit)
352 {
353     if (is_32_bit) {
354         switch (satp_mode) {
355         case VM_1_10_SV32:
356             return "sv32";
357         case VM_1_10_MBARE:
358             return "none";
359         }
360     } else {
361         switch (satp_mode) {
362         case VM_1_10_SV64:
363             return "sv64";
364         case VM_1_10_SV57:
365             return "sv57";
366         case VM_1_10_SV48:
367             return "sv48";
368         case VM_1_10_SV39:
369             return "sv39";
370         case VM_1_10_MBARE:
371             return "none";
372         }
373     }
374 
375     g_assert_not_reached();
376 }
377 
378 static void set_satp_mode_max_supported(RISCVCPU *cpu,
379                                         uint8_t satp_mode)
380 {
381     bool rv32 = riscv_cpu_mxl(&cpu->env) == MXL_RV32;
382     const bool *valid_vm = rv32 ? valid_vm_1_10_32 : valid_vm_1_10_64;
383 
384     for (int i = 0; i <= satp_mode; ++i) {
385         if (valid_vm[i]) {
386             cpu->cfg.satp_mode.supported |= (1 << i);
387         }
388     }
389 }
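/*
 * Example: set_satp_mode_max_supported(cpu, VM_1_10_SV48) on an RV64 CPU
 * marks every mode up to and including sv48 that valid_vm_1_10_64 allows
 * (typically bare, sv39 and sv48) as supported, leaving sv57/sv64 clear.
 */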
390 
391 /* Set the default satp mode map: all supported modes */
392 static void set_satp_mode_default_map(RISCVCPU *cpu)
393 {
394     /*
395      * Bare CPUs do not default to the max available.
396      * Users must set a valid satp_mode in the command
397      * line.
398      */
399     if (object_dynamic_cast(OBJECT(cpu), TYPE_RISCV_BARE_CPU) != NULL) {
400         warn_report("No satp mode set. Defaulting to 'bare'");
401         cpu->cfg.satp_mode.map = (1 << VM_1_10_MBARE);
402         return;
403     }
404 
405     cpu->cfg.satp_mode.map = cpu->cfg.satp_mode.supported;
406 }
407 #endif
408 
409 static void riscv_any_cpu_init(Object *obj)
410 {
411     RISCVCPU *cpu = RISCV_CPU(obj);
412     CPURISCVState *env = &cpu->env;
413 #if defined(TARGET_RISCV32)
414     riscv_cpu_set_misa(env, MXL_RV32, RVI | RVM | RVA | RVF | RVD | RVC | RVU);
415 #elif defined(TARGET_RISCV64)
416     riscv_cpu_set_misa(env, MXL_RV64, RVI | RVM | RVA | RVF | RVD | RVC | RVU);
417 #endif
418 
419 #ifndef CONFIG_USER_ONLY
420     set_satp_mode_max_supported(RISCV_CPU(obj),
421         riscv_cpu_mxl(&RISCV_CPU(obj)->env) == MXL_RV32 ?
422         VM_1_10_SV32 : VM_1_10_SV57);
423 #endif
424 
425     env->priv_ver = PRIV_VERSION_LATEST;
426 
427     /* inherited from parent obj via riscv_cpu_init() */
428     cpu->cfg.ext_zifencei = true;
429     cpu->cfg.ext_zicsr = true;
430     cpu->cfg.mmu = true;
431     cpu->cfg.pmp = true;
432 }
433 
434 static void riscv_max_cpu_init(Object *obj)
435 {
436     RISCVCPU *cpu = RISCV_CPU(obj);
437     CPURISCVState *env = &cpu->env;
438     RISCVMXL mxl = MXL_RV64;
439 
440     cpu->cfg.mmu = true;
441     cpu->cfg.pmp = true;
442 
443 #ifdef TARGET_RISCV32
444     mxl = MXL_RV32;
445 #endif
446     riscv_cpu_set_misa(env, mxl, 0);
447     env->priv_ver = PRIV_VERSION_LATEST;
448 #ifndef CONFIG_USER_ONLY
449     set_satp_mode_max_supported(RISCV_CPU(obj), mxl == MXL_RV32 ?
450                                 VM_1_10_SV32 : VM_1_10_SV57);
451 #endif
452 }
453 
454 #if defined(TARGET_RISCV64)
455 static void rv64_base_cpu_init(Object *obj)
456 {
457     RISCVCPU *cpu = RISCV_CPU(obj);
458     CPURISCVState *env = &cpu->env;
459 
460     cpu->cfg.mmu = true;
461     cpu->cfg.pmp = true;
462 
463     /* We set this in the realize function */
464     riscv_cpu_set_misa(env, MXL_RV64, 0);
465     /* Set latest version of privileged specification */
466     env->priv_ver = PRIV_VERSION_LATEST;
467 #ifndef CONFIG_USER_ONLY
468     set_satp_mode_max_supported(RISCV_CPU(obj), VM_1_10_SV57);
469 #endif
470 }
471 
472 static void rv64_sifive_u_cpu_init(Object *obj)
473 {
474     RISCVCPU *cpu = RISCV_CPU(obj);
475     CPURISCVState *env = &cpu->env;
476     riscv_cpu_set_misa(env, MXL_RV64,
477                        RVI | RVM | RVA | RVF | RVD | RVC | RVS | RVU);
478     env->priv_ver = PRIV_VERSION_1_10_0;
479 #ifndef CONFIG_USER_ONLY
480     set_satp_mode_max_supported(RISCV_CPU(obj), VM_1_10_SV39);
481 #endif
482 
483     /* inherited from parent obj via riscv_cpu_init() */
484     cpu->cfg.ext_zifencei = true;
485     cpu->cfg.ext_zicsr = true;
486     cpu->cfg.mmu = true;
487     cpu->cfg.pmp = true;
488 }
489 
490 static void rv64_sifive_e_cpu_init(Object *obj)
491 {
492     CPURISCVState *env = &RISCV_CPU(obj)->env;
493     RISCVCPU *cpu = RISCV_CPU(obj);
494 
495     riscv_cpu_set_misa(env, MXL_RV64, RVI | RVM | RVA | RVC | RVU);
496     env->priv_ver = PRIV_VERSION_1_10_0;
497 #ifndef CONFIG_USER_ONLY
498     set_satp_mode_max_supported(cpu, VM_1_10_MBARE);
499 #endif
500 
501     /* inherited from parent obj via riscv_cpu_init() */
502     cpu->cfg.ext_zifencei = true;
503     cpu->cfg.ext_zicsr = true;
504     cpu->cfg.pmp = true;
505 }
506 
507 static void rv64_thead_c906_cpu_init(Object *obj)
508 {
509     CPURISCVState *env = &RISCV_CPU(obj)->env;
510     RISCVCPU *cpu = RISCV_CPU(obj);
511 
512     riscv_cpu_set_misa(env, MXL_RV64, RVG | RVC | RVS | RVU);
513     env->priv_ver = PRIV_VERSION_1_11_0;
514 
515     cpu->cfg.ext_zfa = true;
516     cpu->cfg.ext_zfh = true;
517     cpu->cfg.mmu = true;
518     cpu->cfg.ext_xtheadba = true;
519     cpu->cfg.ext_xtheadbb = true;
520     cpu->cfg.ext_xtheadbs = true;
521     cpu->cfg.ext_xtheadcmo = true;
522     cpu->cfg.ext_xtheadcondmov = true;
523     cpu->cfg.ext_xtheadfmemidx = true;
524     cpu->cfg.ext_xtheadmac = true;
525     cpu->cfg.ext_xtheadmemidx = true;
526     cpu->cfg.ext_xtheadmempair = true;
527     cpu->cfg.ext_xtheadsync = true;
528 
529     cpu->cfg.mvendorid = THEAD_VENDOR_ID;
530 #ifndef CONFIG_USER_ONLY
531     set_satp_mode_max_supported(cpu, VM_1_10_SV39);
532 #endif
533 
534     /* inherited from parent obj via riscv_cpu_init() */
535     cpu->cfg.pmp = true;
536 }
537 
538 static void rv64_veyron_v1_cpu_init(Object *obj)
539 {
540     CPURISCVState *env = &RISCV_CPU(obj)->env;
541     RISCVCPU *cpu = RISCV_CPU(obj);
542 
543     riscv_cpu_set_misa(env, MXL_RV64, RVG | RVC | RVS | RVU | RVH);
544     env->priv_ver = PRIV_VERSION_1_12_0;
545 
546     /* Enable ISA extensions */
547     cpu->cfg.mmu = true;
548     cpu->cfg.ext_zifencei = true;
549     cpu->cfg.ext_zicsr = true;
550     cpu->cfg.pmp = true;
551     cpu->cfg.ext_zicbom = true;
552     cpu->cfg.cbom_blocksize = 64;
553     cpu->cfg.cboz_blocksize = 64;
554     cpu->cfg.ext_zicboz = true;
555     cpu->cfg.ext_smaia = true;
556     cpu->cfg.ext_ssaia = true;
557     cpu->cfg.ext_sscofpmf = true;
558     cpu->cfg.ext_sstc = true;
559     cpu->cfg.ext_svinval = true;
560     cpu->cfg.ext_svnapot = true;
561     cpu->cfg.ext_svpbmt = true;
562     cpu->cfg.ext_smstateen = true;
563     cpu->cfg.ext_zba = true;
564     cpu->cfg.ext_zbb = true;
565     cpu->cfg.ext_zbc = true;
566     cpu->cfg.ext_zbs = true;
567     cpu->cfg.ext_XVentanaCondOps = true;
568 
569     cpu->cfg.mvendorid = VEYRON_V1_MVENDORID;
570     cpu->cfg.marchid = VEYRON_V1_MARCHID;
571     cpu->cfg.mimpid = VEYRON_V1_MIMPID;
572 
573 #ifndef CONFIG_USER_ONLY
574     set_satp_mode_max_supported(cpu, VM_1_10_SV48);
575 #endif
576 }
577 
578 static void rv128_base_cpu_init(Object *obj)
579 {
580     RISCVCPU *cpu = RISCV_CPU(obj);
581     CPURISCVState *env = &cpu->env;
582 
583     if (qemu_tcg_mttcg_enabled()) {
584         /* Missing 128-bit aligned atomics */
585         error_report("128-bit RISC-V currently does not work with Multi "
586                      "Threaded TCG. Please use: -accel tcg,thread=single");
587         exit(EXIT_FAILURE);
588     }
589 
590     cpu->cfg.mmu = true;
591     cpu->cfg.pmp = true;
592 
593     /* We set this in the realize function */
594     riscv_cpu_set_misa(env, MXL_RV128, 0);
595     /* Set latest version of privileged specification */
596     env->priv_ver = PRIV_VERSION_LATEST;
597 #ifndef CONFIG_USER_ONLY
598     set_satp_mode_max_supported(RISCV_CPU(obj), VM_1_10_SV57);
599 #endif
600 }
601 
602 static void rv64i_bare_cpu_init(Object *obj)
603 {
604     CPURISCVState *env = &RISCV_CPU(obj)->env;
605     riscv_cpu_set_misa(env, MXL_RV64, RVI);
606 
607     /* Remove the defaults from the parent class */
608     RISCV_CPU(obj)->cfg.ext_zicntr = false;
609     RISCV_CPU(obj)->cfg.ext_zihpm = false;
610 
611     /* Set to QEMU's first supported priv version */
612     env->priv_ver = PRIV_VERSION_1_10_0;
613 
614     /*
615      * Support all available satp_mode settings. The default
616      * value will be set to MBARE if the user doesn't set
617      * satp_mode manually (see set_satp_mode_default_map()).
618      */
619 #ifndef CONFIG_USER_ONLY
620     set_satp_mode_max_supported(RISCV_CPU(obj), VM_1_10_SV64);
621 #endif
622 }
623 #else
624 static void rv32_base_cpu_init(Object *obj)
625 {
626     RISCVCPU *cpu = RISCV_CPU(obj);
627     CPURISCVState *env = &cpu->env;
628 
629     cpu->cfg.mmu = true;
630     cpu->cfg.pmp = true;
631 
632     /* We set this in the realize function */
633     riscv_cpu_set_misa(env, MXL_RV32, 0);
634     /* Set latest version of privileged specification */
635     env->priv_ver = PRIV_VERSION_LATEST;
636 #ifndef CONFIG_USER_ONLY
637     set_satp_mode_max_supported(RISCV_CPU(obj), VM_1_10_SV32);
638 #endif
639 }
640 
641 static void rv32_sifive_u_cpu_init(Object *obj)
642 {
643     RISCVCPU *cpu = RISCV_CPU(obj);
644     CPURISCVState *env = &cpu->env;
645     riscv_cpu_set_misa(env, MXL_RV32,
646                        RVI | RVM | RVA | RVF | RVD | RVC | RVS | RVU);
647     env->priv_ver = PRIV_VERSION_1_10_0;
648 #ifndef CONFIG_USER_ONLY
649     set_satp_mode_max_supported(RISCV_CPU(obj), VM_1_10_SV32);
650 #endif
651 
652     /* inherited from parent obj via riscv_cpu_init() */
653     cpu->cfg.ext_zifencei = true;
654     cpu->cfg.ext_zicsr = true;
655     cpu->cfg.mmu = true;
656     cpu->cfg.pmp = true;
657 }
658 
659 static void rv32_sifive_e_cpu_init(Object *obj)
660 {
661     CPURISCVState *env = &RISCV_CPU(obj)->env;
662     RISCVCPU *cpu = RISCV_CPU(obj);
663 
664     riscv_cpu_set_misa(env, MXL_RV32, RVI | RVM | RVA | RVC | RVU);
665     env->priv_ver = PRIV_VERSION_1_10_0;
666 #ifndef CONFIG_USER_ONLY
667     set_satp_mode_max_supported(cpu, VM_1_10_MBARE);
668 #endif
669 
670     /* inherited from parent obj via riscv_cpu_init() */
671     cpu->cfg.ext_zifencei = true;
672     cpu->cfg.ext_zicsr = true;
673     cpu->cfg.pmp = true;
674 }
675 
676 static void rv32_ibex_cpu_init(Object *obj)
677 {
678     CPURISCVState *env = &RISCV_CPU(obj)->env;
679     RISCVCPU *cpu = RISCV_CPU(obj);
680 
681     riscv_cpu_set_misa(env, MXL_RV32, RVI | RVM | RVC | RVU);
682     env->priv_ver = PRIV_VERSION_1_12_0;
683 #ifndef CONFIG_USER_ONLY
684     set_satp_mode_max_supported(cpu, VM_1_10_MBARE);
685 #endif
686     /* inherited from parent obj via riscv_cpu_init() */
687     cpu->cfg.ext_zifencei = true;
688     cpu->cfg.ext_zicsr = true;
689     cpu->cfg.pmp = true;
690     cpu->cfg.ext_smepmp = true;
691 }
692 
693 static void rv32_imafcu_nommu_cpu_init(Object *obj)
694 {
695     CPURISCVState *env = &RISCV_CPU(obj)->env;
696     RISCVCPU *cpu = RISCV_CPU(obj);
697 
698     riscv_cpu_set_misa(env, MXL_RV32, RVI | RVM | RVA | RVF | RVC | RVU);
699     env->priv_ver = PRIV_VERSION_1_10_0;
700 #ifndef CONFIG_USER_ONLY
701     set_satp_mode_max_supported(cpu, VM_1_10_MBARE);
702 #endif
703 
704     /* inherited from parent obj via riscv_cpu_init() */
705     cpu->cfg.ext_zifencei = true;
706     cpu->cfg.ext_zicsr = true;
707     cpu->cfg.pmp = true;
708 }
709 #endif
710 
711 static ObjectClass *riscv_cpu_class_by_name(const char *cpu_model)
712 {
713     ObjectClass *oc;
714     char *typename;
715     char **cpuname;
716 
717     cpuname = g_strsplit(cpu_model, ",", 1);
718     typename = g_strdup_printf(RISCV_CPU_TYPE_NAME("%s"), cpuname[0]);
719     oc = object_class_by_name(typename);
720     g_strfreev(cpuname);
721     g_free(typename);
722 
723     return oc;
724 }
725 
726 char *riscv_cpu_get_name(RISCVCPU *cpu)
727 {
728     RISCVCPUClass *rcc = RISCV_CPU_GET_CLASS(cpu);
729     const char *typename = object_class_get_name(OBJECT_CLASS(rcc));
730 
731     g_assert(g_str_has_suffix(typename, RISCV_CPU_TYPE_SUFFIX));
732 
733     return cpu_model_from_type(typename);
734 }
735 
736 static void riscv_cpu_dump_state(CPUState *cs, FILE *f, int flags)
737 {
738     RISCVCPU *cpu = RISCV_CPU(cs);
739     CPURISCVState *env = &cpu->env;
740     int i, j;
741     uint8_t *p;
742 
743 #if !defined(CONFIG_USER_ONLY)
744     if (riscv_has_ext(env, RVH)) {
745         qemu_fprintf(f, " %s %d\n", "V      =  ", env->virt_enabled);
746     }
747 #endif
748     qemu_fprintf(f, " %s " TARGET_FMT_lx "\n", "pc      ", env->pc);
749 #ifndef CONFIG_USER_ONLY
750     {
751         static const int dump_csrs[] = {
752             CSR_MHARTID,
753             CSR_MSTATUS,
754             CSR_MSTATUSH,
755             /*
756              * CSR_SSTATUS is intentionally omitted here as its value
757              * can be figured out by looking at CSR_MSTATUS
758              */
759             CSR_HSTATUS,
760             CSR_VSSTATUS,
761             CSR_MIP,
762             CSR_MIE,
763             CSR_MIDELEG,
764             CSR_HIDELEG,
765             CSR_MEDELEG,
766             CSR_HEDELEG,
767             CSR_MTVEC,
768             CSR_STVEC,
769             CSR_VSTVEC,
770             CSR_MEPC,
771             CSR_SEPC,
772             CSR_VSEPC,
773             CSR_MCAUSE,
774             CSR_SCAUSE,
775             CSR_VSCAUSE,
776             CSR_MTVAL,
777             CSR_STVAL,
778             CSR_HTVAL,
779             CSR_MTVAL2,
780             CSR_MSCRATCH,
781             CSR_SSCRATCH,
782             CSR_SATP,
783             CSR_MMTE,
784             CSR_UPMBASE,
785             CSR_UPMMASK,
786             CSR_SPMBASE,
787             CSR_SPMMASK,
788             CSR_MPMBASE,
789             CSR_MPMMASK,
790         };
791 
792         for (i = 0; i < ARRAY_SIZE(dump_csrs); ++i) {
793             int csrno = dump_csrs[i];
794             target_ulong val = 0;
795             RISCVException res = riscv_csrrw_debug(env, csrno, &val, 0, 0);
796 
797             /*
798              * Rely on the smode, hmode, etc. predicates within csr.c
799              * to do the filtering of the registers that are present.
800              */
801             if (res == RISCV_EXCP_NONE) {
802                 qemu_fprintf(f, " %-8s " TARGET_FMT_lx "\n",
803                              csr_ops[csrno].name, val);
804             }
805         }
806     }
807 #endif
808 
809     for (i = 0; i < 32; i++) {
810         qemu_fprintf(f, " %-8s " TARGET_FMT_lx,
811                      riscv_int_regnames[i], env->gpr[i]);
812         if ((i & 3) == 3) {
813             qemu_fprintf(f, "\n");
814         }
815     }
816     if (flags & CPU_DUMP_FPU) {
817         for (i = 0; i < 32; i++) {
818             qemu_fprintf(f, " %-8s %016" PRIx64,
819                          riscv_fpr_regnames[i], env->fpr[i]);
820             if ((i & 3) == 3) {
821                 qemu_fprintf(f, "\n");
822             }
823         }
824     }
825     if (riscv_has_ext(env, RVV) && (flags & CPU_DUMP_VPU)) {
826         static const int dump_rvv_csrs[] = {
827                     CSR_VSTART,
828                     CSR_VXSAT,
829                     CSR_VXRM,
830                     CSR_VCSR,
831                     CSR_VL,
832                     CSR_VTYPE,
833                     CSR_VLENB,
834                 };
835         for (i = 0; i < ARRAY_SIZE(dump_rvv_csrs); ++i) {
836             int csrno = dump_rvv_csrs[i];
837             target_ulong val = 0;
838             RISCVException res = riscv_csrrw_debug(env, csrno, &val, 0, 0);
839 
840             /*
841              * Rely on the smode, hmode, etc. predicates within csr.c
842              * to do the filtering of the registers that are present.
843              */
844             if (res == RISCV_EXCP_NONE) {
845                 qemu_fprintf(f, " %-8s " TARGET_FMT_lx "\n",
846                              csr_ops[csrno].name, val);
847             }
848         }
849         uint16_t vlenb = cpu->cfg.vlen >> 3;
850 
851         for (i = 0; i < 32; i++) {
852             qemu_fprintf(f, " %-8s ", riscv_rvv_regnames[i]);
853             p = (uint8_t *)env->vreg;
854             for (j = vlenb - 1 ; j >= 0; j--) {
855                 qemu_fprintf(f, "%02x", *(p + i * vlenb + BYTE(j)));
856             }
857             qemu_fprintf(f, "\n");
858         }
859     }
860 }
861 
862 static void riscv_cpu_set_pc(CPUState *cs, vaddr value)
863 {
864     RISCVCPU *cpu = RISCV_CPU(cs);
865     CPURISCVState *env = &cpu->env;
866 
867     if (env->xl == MXL_RV32) {
868         env->pc = (int32_t)value;
869     } else {
870         env->pc = value;
871     }
872 }
873 
874 static vaddr riscv_cpu_get_pc(CPUState *cs)
875 {
876     RISCVCPU *cpu = RISCV_CPU(cs);
877     CPURISCVState *env = &cpu->env;
878 
879     /* Match cpu_get_tb_cpu_state. */
880     if (env->xl == MXL_RV32) {
881         return env->pc & UINT32_MAX;
882     }
883     return env->pc;
884 }
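/*
 * Example of the MXL_RV32 handling above: riscv_cpu_set_pc(cs, 0x80000000)
 * stores the value sign-extended through the (int32_t) cast, and
 * riscv_cpu_get_pc() masks it back to the 32-bit 0x80000000.
 */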
885 
886 static bool riscv_cpu_has_work(CPUState *cs)
887 {
888 #ifndef CONFIG_USER_ONLY
889     RISCVCPU *cpu = RISCV_CPU(cs);
890     CPURISCVState *env = &cpu->env;
891     /*
892      * Definition of the WFI instruction requires it to ignore the privilege
893      * mode and delegation registers, but respect individual enables
894      */
895     return riscv_cpu_all_pending(env) != 0 ||
896         riscv_cpu_sirq_pending(env) != RISCV_EXCP_NONE ||
897         riscv_cpu_vsirq_pending(env) != RISCV_EXCP_NONE;
898 #else
899     return true;
900 #endif
901 }
902 
903 static int riscv_cpu_mmu_index(CPUState *cs, bool ifetch)
904 {
905     return riscv_env_mmu_index(cpu_env(cs), ifetch);
906 }
907 
908 static void riscv_cpu_reset_hold(Object *obj)
909 {
910 #ifndef CONFIG_USER_ONLY
911     uint8_t iprio;
912     int i, irq, rdzero;
913 #endif
914     CPUState *cs = CPU(obj);
915     RISCVCPU *cpu = RISCV_CPU(cs);
916     RISCVCPUClass *mcc = RISCV_CPU_GET_CLASS(cpu);
917     CPURISCVState *env = &cpu->env;
918 
919     if (mcc->parent_phases.hold) {
920         mcc->parent_phases.hold(obj);
921     }
922 #ifndef CONFIG_USER_ONLY
923     env->misa_mxl = env->misa_mxl_max;
924     env->priv = PRV_M;
925     env->mstatus &= ~(MSTATUS_MIE | MSTATUS_MPRV);
926     if (env->misa_mxl > MXL_RV32) {
927         /*
928          * The reset status of SXL/UXL is undefined, but mstatus is WARL
929          * and we must ensure that the value after init is valid for read.
930          */
931         env->mstatus = set_field(env->mstatus, MSTATUS64_SXL, env->misa_mxl);
932         env->mstatus = set_field(env->mstatus, MSTATUS64_UXL, env->misa_mxl);
933         if (riscv_has_ext(env, RVH)) {
934             env->vsstatus = set_field(env->vsstatus,
935                                       MSTATUS64_SXL, env->misa_mxl);
936             env->vsstatus = set_field(env->vsstatus,
937                                       MSTATUS64_UXL, env->misa_mxl);
938             env->mstatus_hs = set_field(env->mstatus_hs,
939                                         MSTATUS64_SXL, env->misa_mxl);
940             env->mstatus_hs = set_field(env->mstatus_hs,
941                                         MSTATUS64_UXL, env->misa_mxl);
942         }
943     }
944     env->mcause = 0;
945     env->miclaim = MIP_SGEIP;
946     env->pc = env->resetvec;
947     env->bins = 0;
948     env->two_stage_lookup = false;
949 
950     env->menvcfg = (cpu->cfg.ext_svpbmt ? MENVCFG_PBMTE : 0) |
951                    (cpu->cfg.ext_svadu ? MENVCFG_ADUE : 0);
952     env->henvcfg = (cpu->cfg.ext_svpbmt ? HENVCFG_PBMTE : 0) |
953                    (cpu->cfg.ext_svadu ? HENVCFG_ADUE : 0);
954 
955     /* Initialize default priorities of local interrupts. */
956     for (i = 0; i < ARRAY_SIZE(env->miprio); i++) {
957         iprio = riscv_cpu_default_priority(i);
958         env->miprio[i] = (i == IRQ_M_EXT) ? 0 : iprio;
959         env->siprio[i] = (i == IRQ_S_EXT) ? 0 : iprio;
960         env->hviprio[i] = 0;
961     }
962     i = 0;
963     while (!riscv_cpu_hviprio_index2irq(i, &irq, &rdzero)) {
964         if (!rdzero) {
965             env->hviprio[irq] = env->miprio[irq];
966         }
967         i++;
968     }
969     /* mmte is supposed to have pm.current hardwired to 1 */
970     env->mmte |= (EXT_STATUS_INITIAL | MMTE_M_PM_CURRENT);
971 
972     /*
973      * Bits 2, 6, 10 and 12 of mideleg are read-only 1 when the Hypervisor
974      * extension is enabled.
975      */
976     if (riscv_has_ext(env, RVH)) {
977         env->mideleg |= HS_MODE_INTERRUPTS;
978     }
979 
980     /*
981      * Clear mseccfg and unlock all the PMP entries upon reset.
982      * This is allowed as per the priv and smepmp specifications
983      * and is needed to clear stale entries across reboots.
984      */
985     if (riscv_cpu_cfg(env)->ext_smepmp) {
986         env->mseccfg = 0;
987     }
988 
989     pmp_unlock_entries(env);
990 #endif
991     env->xl = riscv_cpu_mxl(env);
992     riscv_cpu_update_mask(env);
993     cs->exception_index = RISCV_EXCP_NONE;
994     env->load_res = -1;
995     set_default_nan_mode(1, &env->fp_status);
996 
997 #ifndef CONFIG_USER_ONLY
998     if (cpu->cfg.debug) {
999         riscv_trigger_reset_hold(env);
1000     }
1001 
1002     if (kvm_enabled()) {
1003         kvm_riscv_reset_vcpu(cpu);
1004     }
1005 #endif
1006 }
1007 
1008 static void riscv_cpu_disas_set_info(CPUState *s, disassemble_info *info)
1009 {
1010     RISCVCPU *cpu = RISCV_CPU(s);
1011     CPURISCVState *env = &cpu->env;
1012     info->target_info = &cpu->cfg;
1013 
1014     switch (env->xl) {
1015     case MXL_RV32:
1016         info->print_insn = print_insn_riscv32;
1017         break;
1018     case MXL_RV64:
1019         info->print_insn = print_insn_riscv64;
1020         break;
1021     case MXL_RV128:
1022         info->print_insn = print_insn_riscv128;
1023         break;
1024     default:
1025         g_assert_not_reached();
1026     }
1027 }
1028 
1029 #ifndef CONFIG_USER_ONLY
1030 static void riscv_cpu_satp_mode_finalize(RISCVCPU *cpu, Error **errp)
1031 {
1032     bool rv32 = riscv_cpu_is_32bit(cpu);
1033     uint8_t satp_mode_map_max, satp_mode_supported_max;
1034 
1035     /* The CPU wants the OS to decide which satp mode to use */
1036     if (cpu->cfg.satp_mode.supported == 0) {
1037         return;
1038     }
1039 
1040     satp_mode_supported_max =
1041                     satp_mode_max_from_map(cpu->cfg.satp_mode.supported);
1042 
1043     if (cpu->cfg.satp_mode.map == 0) {
1044         if (cpu->cfg.satp_mode.init == 0) {
1045             /* If unset by the user, we fall back to the default satp mode. */
1046             set_satp_mode_default_map(cpu);
1047         } else {
1048             /*
1049              * Find the lowest level that the user disabled and then
1050              * enable the first supported level below it, as listed in
1051              * valid_vm_1_10_32/64.
1052              */
1053             for (int i = 1; i < 16; ++i) {
1054                 if ((cpu->cfg.satp_mode.init & (1 << i)) &&
1055                     (cpu->cfg.satp_mode.supported & (1 << i))) {
1056                     for (int j = i - 1; j >= 0; --j) {
1057                         if (cpu->cfg.satp_mode.supported & (1 << j)) {
1058                             cpu->cfg.satp_mode.map |= (1 << j);
1059                             break;
1060                         }
1061                     }
1062                     break;
1063                 }
1064             }
1065         }
1066     }
1067 
1068     satp_mode_map_max = satp_mode_max_from_map(cpu->cfg.satp_mode.map);
1069 
1070     /* Make sure the user asked for a supported configuration (HW and QEMU) */
1071     if (satp_mode_map_max > satp_mode_supported_max) {
1072         error_setg(errp, "satp_mode %s is higher than hw max capability %s",
1073                    satp_mode_str(satp_mode_map_max, rv32),
1074                    satp_mode_str(satp_mode_supported_max, rv32));
1075         return;
1076     }
1077 
1078     /*
1079      * Make sure the user did not ask for an invalid configuration as per
1080      * the specification.
1081      */
1082     if (!rv32) {
1083         for (int i = satp_mode_map_max - 1; i >= 0; --i) {
1084             if (!(cpu->cfg.satp_mode.map & (1 << i)) &&
1085                 (cpu->cfg.satp_mode.init & (1 << i)) &&
1086                 (cpu->cfg.satp_mode.supported & (1 << i))) {
1087                 error_setg(errp, "cannot disable %s satp mode if %s "
1088                            "is enabled", satp_mode_str(i, false),
1089                            satp_mode_str(satp_mode_map_max, false));
1090                 return;
1091             }
1092         }
1093     }
1094 
1095     /* Finally expand the map so that all valid modes are set */
1096     for (int i = satp_mode_map_max - 1; i >= 0; --i) {
1097         if (cpu->cfg.satp_mode.supported & (1 << i)) {
1098             cpu->cfg.satp_mode.map |= (1 << i);
1099         }
1100     }
1101 }
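/*
 * Worked example for the selection above, assuming a CPU whose supported
 * map contains bare/sv39/sv48/sv57: with "-cpu <cpu>,sv48=off" and nothing
 * else set, satp_mode.map is 0 and satp_mode.init only has the sv48 bit,
 * so the inner loop enables sv39 (the first supported mode below sv48) and
 * the final expansion also sets every supported mode below sv39.
 */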
1102 #endif
1103 
1104 void riscv_cpu_finalize_features(RISCVCPU *cpu, Error **errp)
1105 {
1106     Error *local_err = NULL;
1107 
1108 #ifndef CONFIG_USER_ONLY
1109     riscv_cpu_satp_mode_finalize(cpu, &local_err);
1110     if (local_err != NULL) {
1111         error_propagate(errp, local_err);
1112         return;
1113     }
1114 #endif
1115 
1116     /*
1117      * KVM accel does not have a specialized finalize()
1118      * callback because its extensions are validated
1119      * in the get()/set() callbacks of each property.
1120      */
1121     if (tcg_enabled()) {
1122         riscv_tcg_cpu_finalize_features(cpu, &local_err);
1123         if (local_err != NULL) {
1124             error_propagate(errp, local_err);
1125             return;
1126         }
1127     }
1128 }
1129 
1130 static void riscv_cpu_realize(DeviceState *dev, Error **errp)
1131 {
1132     CPUState *cs = CPU(dev);
1133     RISCVCPU *cpu = RISCV_CPU(dev);
1134     RISCVCPUClass *mcc = RISCV_CPU_GET_CLASS(dev);
1135     Error *local_err = NULL;
1136 
1137     if (object_dynamic_cast(OBJECT(dev), TYPE_RISCV_CPU_ANY) != NULL) {
1138         warn_report("The 'any' CPU is deprecated and will be "
1139                     "removed in the future.");
1140     }
1141 
1142     cpu_exec_realizefn(cs, &local_err);
1143     if (local_err != NULL) {
1144         error_propagate(errp, local_err);
1145         return;
1146     }
1147 
1148     riscv_cpu_finalize_features(cpu, &local_err);
1149     if (local_err != NULL) {
1150         error_propagate(errp, local_err);
1151         return;
1152     }
1153 
1154     riscv_cpu_register_gdb_regs_for_features(cs);
1155 
1156 #ifndef CONFIG_USER_ONLY
1157     if (cpu->cfg.debug) {
1158         riscv_trigger_realize(&cpu->env);
1159     }
1160 #endif
1161 
1162     qemu_init_vcpu(cs);
1163     cpu_reset(cs);
1164 
1165     mcc->parent_realize(dev, errp);
1166 }
1167 
1168 bool riscv_cpu_accelerator_compatible(RISCVCPU *cpu)
1169 {
1170     if (tcg_enabled()) {
1171         return riscv_cpu_tcg_compatible(cpu);
1172     }
1173 
1174     return true;
1175 }
1176 
1177 #ifndef CONFIG_USER_ONLY
1178 static void cpu_riscv_get_satp(Object *obj, Visitor *v, const char *name,
1179                                void *opaque, Error **errp)
1180 {
1181     RISCVSATPMap *satp_map = opaque;
1182     uint8_t satp = satp_mode_from_str(name);
1183     bool value;
1184 
1185     value = satp_map->map & (1 << satp);
1186 
1187     visit_type_bool(v, name, &value, errp);
1188 }
1189 
1190 static void cpu_riscv_set_satp(Object *obj, Visitor *v, const char *name,
1191                                void *opaque, Error **errp)
1192 {
1193     RISCVSATPMap *satp_map = opaque;
1194     uint8_t satp = satp_mode_from_str(name);
1195     bool value;
1196 
1197     if (!visit_type_bool(v, name, &value, errp)) {
1198         return;
1199     }
1200 
1201     satp_map->map = deposit32(satp_map->map, satp, 1, value);
1202     satp_map->init |= 1 << satp;
1203 }
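/*
 * Example: "sv39=on" reaches cpu_riscv_set_satp() with satp ==
 * VM_1_10_SV39, so deposit32() sets that bit in 'map' while 'init' records
 * that the user touched the mode, which riscv_cpu_satp_mode_finalize()
 * consults later.
 */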
1204 
1205 void riscv_add_satp_mode_properties(Object *obj)
1206 {
1207     RISCVCPU *cpu = RISCV_CPU(obj);
1208 
1209     if (cpu->env.misa_mxl == MXL_RV32) {
1210         object_property_add(obj, "sv32", "bool", cpu_riscv_get_satp,
1211                             cpu_riscv_set_satp, NULL, &cpu->cfg.satp_mode);
1212     } else {
1213         object_property_add(obj, "sv39", "bool", cpu_riscv_get_satp,
1214                             cpu_riscv_set_satp, NULL, &cpu->cfg.satp_mode);
1215         object_property_add(obj, "sv48", "bool", cpu_riscv_get_satp,
1216                             cpu_riscv_set_satp, NULL, &cpu->cfg.satp_mode);
1217         object_property_add(obj, "sv57", "bool", cpu_riscv_get_satp,
1218                             cpu_riscv_set_satp, NULL, &cpu->cfg.satp_mode);
1219         object_property_add(obj, "sv64", "bool", cpu_riscv_get_satp,
1220                             cpu_riscv_set_satp, NULL, &cpu->cfg.satp_mode);
1221     }
1222 }
1223 
1224 static void riscv_cpu_set_irq(void *opaque, int irq, int level)
1225 {
1226     RISCVCPU *cpu = RISCV_CPU(opaque);
1227     CPURISCVState *env = &cpu->env;
1228 
1229     if (irq < IRQ_LOCAL_MAX) {
1230         switch (irq) {
1231         case IRQ_U_SOFT:
1232         case IRQ_S_SOFT:
1233         case IRQ_VS_SOFT:
1234         case IRQ_M_SOFT:
1235         case IRQ_U_TIMER:
1236         case IRQ_S_TIMER:
1237         case IRQ_VS_TIMER:
1238         case IRQ_M_TIMER:
1239         case IRQ_U_EXT:
1240         case IRQ_VS_EXT:
1241         case IRQ_M_EXT:
1242             if (kvm_enabled()) {
1243                 kvm_riscv_set_irq(cpu, irq, level);
1244             } else {
1245                 riscv_cpu_update_mip(env, 1 << irq, BOOL_TO_MASK(level));
1246             }
1247             break;
1248         case IRQ_S_EXT:
1249             if (kvm_enabled()) {
1250                 kvm_riscv_set_irq(cpu, irq, level);
1251             } else {
1252                 env->external_seip = level;
1253                 riscv_cpu_update_mip(env, 1 << irq,
1254                                      BOOL_TO_MASK(level | env->software_seip));
1255             }
1256             break;
1257         default:
1258             g_assert_not_reached();
1259         }
1260     } else if (irq < (IRQ_LOCAL_MAX + IRQ_LOCAL_GUEST_MAX)) {
1261         /* Require H-extension for handling guest local interrupts */
1262         if (!riscv_has_ext(env, RVH)) {
1263             g_assert_not_reached();
1264         }
1265 
1266         /* Compute bit position in HGEIP CSR */
1267         irq = irq - IRQ_LOCAL_MAX + 1;
1268         if (env->geilen < irq) {
1269             g_assert_not_reached();
1270         }
1271 
1272         /* Update HGEIP CSR */
1273         env->hgeip &= ~((target_ulong)1 << irq);
1274         if (level) {
1275             env->hgeip |= (target_ulong)1 << irq;
1276         }
1277 
1278         /* Update mip.SGEIP bit */
1279         riscv_cpu_update_mip(env, MIP_SGEIP,
1280                              BOOL_TO_MASK(!!(env->hgeie & env->hgeip)));
1281     } else {
1282         g_assert_not_reached();
1283     }
1284 }
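/*
 * Note on the guest interrupt case above: the first line after
 * IRQ_LOCAL_MAX maps to hgeip bit 1, matching the 1-based numbering of
 * guest external interrupt files (bit 0 of hgeip/hgeie is never used).
 */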
1285 #endif /* CONFIG_USER_ONLY */
1286 
1287 static bool riscv_cpu_is_dynamic(Object *cpu_obj)
1288 {
1289     return object_dynamic_cast(cpu_obj, TYPE_RISCV_DYNAMIC_CPU) != NULL;
1290 }
1291 
1292 static void riscv_cpu_post_init(Object *obj)
1293 {
1294     accel_cpu_instance_init(CPU(obj));
1295 }
1296 
1297 static void riscv_cpu_init(Object *obj)
1298 {
1299     RISCVCPU *cpu = RISCV_CPU(obj);
1300 
1301 #ifndef CONFIG_USER_ONLY
1302     qdev_init_gpio_in(DEVICE(obj), riscv_cpu_set_irq,
1303                       IRQ_LOCAL_MAX + IRQ_LOCAL_GUEST_MAX);
1304 #endif /* CONFIG_USER_ONLY */
1305 
1306     general_user_opts = g_hash_table_new(g_str_hash, g_str_equal);
1307 
1308     /*
1309      * The timer and performance counter extensions were supported
1310      * in QEMU before they were added as discrete extensions in the
1311      * ISA. To keep compatibility we'll always default them to 'true'
1312      * for all CPUs. Each accelerator will decide what to do when
1313      * users disable them.
1314      */
1315     RISCV_CPU(obj)->cfg.ext_zicntr = true;
1316     RISCV_CPU(obj)->cfg.ext_zihpm = true;
1317 
1318     /* Default values for non-bool cpu properties */
1319     cpu->cfg.pmu_mask = MAKE_64BIT_MASK(3, 16);
1320     cpu->env.vext_ver = VEXT_VERSION_1_00_0;
1321 }
1322 
1323 typedef struct misa_ext_info {
1324     const char *name;
1325     const char *description;
1326 } MISAExtInfo;
1327 
1328 #define MISA_INFO_IDX(_bit) \
1329     __builtin_ctz(_bit)
1330 
1331 #define MISA_EXT_INFO(_bit, _propname, _descr) \
1332     [MISA_INFO_IDX(_bit)] = {.name = _propname, .description = _descr}
1333 
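/*
 * MISA_INFO_IDX() turns a misa bit into an array index with
 * __builtin_ctz(), e.g. RVA (bit 0) -> 0 and RVC (bit 2) -> 2, so the
 * array below can be indexed directly by extension bit.
 */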
1334 static const MISAExtInfo misa_ext_info_arr[] = {
1335     MISA_EXT_INFO(RVA, "a", "Atomic instructions"),
1336     MISA_EXT_INFO(RVC, "c", "Compressed instructions"),
1337     MISA_EXT_INFO(RVD, "d", "Double-precision floating point"),
1338     MISA_EXT_INFO(RVF, "f", "Single-precision floating point"),
1339     MISA_EXT_INFO(RVI, "i", "Base integer instruction set"),
1340     MISA_EXT_INFO(RVE, "e", "Base integer instruction set (embedded)"),
1341     MISA_EXT_INFO(RVM, "m", "Integer multiplication and division"),
1342     MISA_EXT_INFO(RVS, "s", "Supervisor-level instructions"),
1343     MISA_EXT_INFO(RVU, "u", "User-level instructions"),
1344     MISA_EXT_INFO(RVH, "h", "Hypervisor"),
1345     MISA_EXT_INFO(RVJ, "x-j", "Dynamic translated languages"),
1346     MISA_EXT_INFO(RVV, "v", "Vector operations"),
1347     MISA_EXT_INFO(RVG, "g", "General purpose (IMAFD_Zicsr_Zifencei)"),
1348     MISA_EXT_INFO(RVB, "x-b", "Bit manipulation (Zba_Zbb_Zbs)")
1349 };
1350 
1351 static int riscv_validate_misa_info_idx(uint32_t bit)
1352 {
1353     int idx;
1354 
1355     /*
1356      * Our lowest valid input (RVA) is 1 and
1357      * __builtin_ctz() is UB with zero.
1358      */
1359     g_assert(bit != 0);
1360     idx = MISA_INFO_IDX(bit);
1361 
1362     g_assert(idx < ARRAY_SIZE(misa_ext_info_arr));
1363     return idx;
1364 }
1365 
1366 const char *riscv_get_misa_ext_name(uint32_t bit)
1367 {
1368     int idx = riscv_validate_misa_info_idx(bit);
1369     const char *val = misa_ext_info_arr[idx].name;
1370 
1371     g_assert(val != NULL);
1372     return val;
1373 }
1374 
1375 const char *riscv_get_misa_ext_description(uint32_t bit)
1376 {
1377     int idx = riscv_validate_misa_info_idx(bit);
1378     const char *val = misa_ext_info_arr[idx].description;
1379 
1380     g_assert(val != NULL);
1381     return val;
1382 }
1383 
1384 #define MULTI_EXT_CFG_BOOL(_name, _prop, _defval) \
1385     {.name = _name, .offset = CPU_CFG_OFFSET(_prop), \
1386      .enabled = _defval}
1387 
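/*
 * For example, MULTI_EXT_CFG_BOOL("zicsr", ext_zicsr, true) creates an
 * entry whose .offset is the byte offset of cfg.ext_zicsr, so accelerator
 * code can flip the flag by offset (e.g. via isa_ext_update_enabled())
 * instead of by property name.
 */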
1388 const RISCVCPUMultiExtConfig riscv_cpu_extensions[] = {
1389     /* Defaults for standard extensions */
1390     MULTI_EXT_CFG_BOOL("sscofpmf", ext_sscofpmf, false),
1391     MULTI_EXT_CFG_BOOL("zifencei", ext_zifencei, true),
1392     MULTI_EXT_CFG_BOOL("zicsr", ext_zicsr, true),
1393     MULTI_EXT_CFG_BOOL("zihintntl", ext_zihintntl, true),
1394     MULTI_EXT_CFG_BOOL("zihintpause", ext_zihintpause, true),
1395     MULTI_EXT_CFG_BOOL("zacas", ext_zacas, false),
1396     MULTI_EXT_CFG_BOOL("zawrs", ext_zawrs, true),
1397     MULTI_EXT_CFG_BOOL("zfa", ext_zfa, true),
1398     MULTI_EXT_CFG_BOOL("zfh", ext_zfh, false),
1399     MULTI_EXT_CFG_BOOL("zfhmin", ext_zfhmin, false),
1400     MULTI_EXT_CFG_BOOL("zve32f", ext_zve32f, false),
1401     MULTI_EXT_CFG_BOOL("zve64f", ext_zve64f, false),
1402     MULTI_EXT_CFG_BOOL("zve64d", ext_zve64d, false),
1403     MULTI_EXT_CFG_BOOL("sstc", ext_sstc, true),
1404 
1405     MULTI_EXT_CFG_BOOL("smepmp", ext_smepmp, false),
1406     MULTI_EXT_CFG_BOOL("smstateen", ext_smstateen, false),
1407     MULTI_EXT_CFG_BOOL("svadu", ext_svadu, true),
1408     MULTI_EXT_CFG_BOOL("svinval", ext_svinval, false),
1409     MULTI_EXT_CFG_BOOL("svnapot", ext_svnapot, false),
1410     MULTI_EXT_CFG_BOOL("svpbmt", ext_svpbmt, false),
1411 
1412     MULTI_EXT_CFG_BOOL("zicntr", ext_zicntr, true),
1413     MULTI_EXT_CFG_BOOL("zihpm", ext_zihpm, true),
1414 
1415     MULTI_EXT_CFG_BOOL("zba", ext_zba, true),
1416     MULTI_EXT_CFG_BOOL("zbb", ext_zbb, true),
1417     MULTI_EXT_CFG_BOOL("zbc", ext_zbc, true),
1418     MULTI_EXT_CFG_BOOL("zbkb", ext_zbkb, false),
1419     MULTI_EXT_CFG_BOOL("zbkc", ext_zbkc, false),
1420     MULTI_EXT_CFG_BOOL("zbkx", ext_zbkx, false),
1421     MULTI_EXT_CFG_BOOL("zbs", ext_zbs, true),
1422     MULTI_EXT_CFG_BOOL("zk", ext_zk, false),
1423     MULTI_EXT_CFG_BOOL("zkn", ext_zkn, false),
1424     MULTI_EXT_CFG_BOOL("zknd", ext_zknd, false),
1425     MULTI_EXT_CFG_BOOL("zkne", ext_zkne, false),
1426     MULTI_EXT_CFG_BOOL("zknh", ext_zknh, false),
1427     MULTI_EXT_CFG_BOOL("zkr", ext_zkr, false),
1428     MULTI_EXT_CFG_BOOL("zks", ext_zks, false),
1429     MULTI_EXT_CFG_BOOL("zksed", ext_zksed, false),
1430     MULTI_EXT_CFG_BOOL("zksh", ext_zksh, false),
1431     MULTI_EXT_CFG_BOOL("zkt", ext_zkt, false),
1432 
1433     MULTI_EXT_CFG_BOOL("zdinx", ext_zdinx, false),
1434     MULTI_EXT_CFG_BOOL("zfinx", ext_zfinx, false),
1435     MULTI_EXT_CFG_BOOL("zhinx", ext_zhinx, false),
1436     MULTI_EXT_CFG_BOOL("zhinxmin", ext_zhinxmin, false),
1437 
1438     MULTI_EXT_CFG_BOOL("zicbom", ext_zicbom, true),
1439     MULTI_EXT_CFG_BOOL("zicbop", ext_zicbop, true),
1440     MULTI_EXT_CFG_BOOL("zicboz", ext_zicboz, true),
1441 
1442     MULTI_EXT_CFG_BOOL("zmmul", ext_zmmul, false),
1443 
1444     MULTI_EXT_CFG_BOOL("zca", ext_zca, false),
1445     MULTI_EXT_CFG_BOOL("zcb", ext_zcb, false),
1446     MULTI_EXT_CFG_BOOL("zcd", ext_zcd, false),
1447     MULTI_EXT_CFG_BOOL("zce", ext_zce, false),
1448     MULTI_EXT_CFG_BOOL("zcf", ext_zcf, false),
1449     MULTI_EXT_CFG_BOOL("zcmp", ext_zcmp, false),
1450     MULTI_EXT_CFG_BOOL("zcmt", ext_zcmt, false),
1451     MULTI_EXT_CFG_BOOL("zicond", ext_zicond, false),
1452 
1453     /* Vector cryptography extensions */
1454     MULTI_EXT_CFG_BOOL("zvbb", ext_zvbb, false),
1455     MULTI_EXT_CFG_BOOL("zvbc", ext_zvbc, false),
1456     MULTI_EXT_CFG_BOOL("zvkb", ext_zvkb, false),
1457     MULTI_EXT_CFG_BOOL("zvkg", ext_zvkg, false),
1458     MULTI_EXT_CFG_BOOL("zvkned", ext_zvkned, false),
1459     MULTI_EXT_CFG_BOOL("zvknha", ext_zvknha, false),
1460     MULTI_EXT_CFG_BOOL("zvknhb", ext_zvknhb, false),
1461     MULTI_EXT_CFG_BOOL("zvksed", ext_zvksed, false),
1462     MULTI_EXT_CFG_BOOL("zvksh", ext_zvksh, false),
1463     MULTI_EXT_CFG_BOOL("zvkt", ext_zvkt, false),
1464     MULTI_EXT_CFG_BOOL("zvkn", ext_zvkn, false),
1465     MULTI_EXT_CFG_BOOL("zvknc", ext_zvknc, false),
1466     MULTI_EXT_CFG_BOOL("zvkng", ext_zvkng, false),
1467     MULTI_EXT_CFG_BOOL("zvks", ext_zvks, false),
1468     MULTI_EXT_CFG_BOOL("zvksc", ext_zvksc, false),
1469     MULTI_EXT_CFG_BOOL("zvksg", ext_zvksg, false),
1470 
1471     DEFINE_PROP_END_OF_LIST(),
1472 };
1473 
1474 const RISCVCPUMultiExtConfig riscv_cpu_vendor_exts[] = {
1475     MULTI_EXT_CFG_BOOL("xtheadba", ext_xtheadba, false),
1476     MULTI_EXT_CFG_BOOL("xtheadbb", ext_xtheadbb, false),
1477     MULTI_EXT_CFG_BOOL("xtheadbs", ext_xtheadbs, false),
1478     MULTI_EXT_CFG_BOOL("xtheadcmo", ext_xtheadcmo, false),
1479     MULTI_EXT_CFG_BOOL("xtheadcondmov", ext_xtheadcondmov, false),
1480     MULTI_EXT_CFG_BOOL("xtheadfmemidx", ext_xtheadfmemidx, false),
1481     MULTI_EXT_CFG_BOOL("xtheadfmv", ext_xtheadfmv, false),
1482     MULTI_EXT_CFG_BOOL("xtheadmac", ext_xtheadmac, false),
1483     MULTI_EXT_CFG_BOOL("xtheadmemidx", ext_xtheadmemidx, false),
1484     MULTI_EXT_CFG_BOOL("xtheadmempair", ext_xtheadmempair, false),
1485     MULTI_EXT_CFG_BOOL("xtheadsync", ext_xtheadsync, false),
1486     MULTI_EXT_CFG_BOOL("xventanacondops", ext_XVentanaCondOps, false),
1487 
1488     DEFINE_PROP_END_OF_LIST(),
1489 };
1490 
1491 /* These are experimental, so they are marked with 'x-' */
1492 const RISCVCPUMultiExtConfig riscv_cpu_experimental_exts[] = {
1493     MULTI_EXT_CFG_BOOL("x-smaia", ext_smaia, false),
1494     MULTI_EXT_CFG_BOOL("x-ssaia", ext_ssaia, false),
1495 
1496     MULTI_EXT_CFG_BOOL("x-zvfh", ext_zvfh, false),
1497     MULTI_EXT_CFG_BOOL("x-zvfhmin", ext_zvfhmin, false),
1498 
1499     MULTI_EXT_CFG_BOOL("x-zfbfmin", ext_zfbfmin, false),
1500     MULTI_EXT_CFG_BOOL("x-zvfbfmin", ext_zvfbfmin, false),
1501     MULTI_EXT_CFG_BOOL("x-zvfbfwma", ext_zvfbfwma, false),
1502 
1503     DEFINE_PROP_END_OF_LIST(),
1504 };
1505 
1506 const RISCVCPUMultiExtConfig riscv_cpu_named_features[] = {
1507     MULTI_EXT_CFG_BOOL("svade", svade, true),
1508     MULTI_EXT_CFG_BOOL("zic64b", zic64b, true),
1509 
1510     DEFINE_PROP_END_OF_LIST(),
1511 };
1512 
1513 /* Deprecated entries marked for future removal */
1514 const RISCVCPUMultiExtConfig riscv_cpu_deprecated_exts[] = {
1515     MULTI_EXT_CFG_BOOL("Zifencei", ext_zifencei, true),
1516     MULTI_EXT_CFG_BOOL("Zicsr", ext_zicsr, true),
1517     MULTI_EXT_CFG_BOOL("Zihintntl", ext_zihintntl, true),
1518     MULTI_EXT_CFG_BOOL("Zihintpause", ext_zihintpause, true),
1519     MULTI_EXT_CFG_BOOL("Zawrs", ext_zawrs, true),
1520     MULTI_EXT_CFG_BOOL("Zfa", ext_zfa, true),
1521     MULTI_EXT_CFG_BOOL("Zfh", ext_zfh, false),
1522     MULTI_EXT_CFG_BOOL("Zfhmin", ext_zfhmin, false),
1523     MULTI_EXT_CFG_BOOL("Zve32f", ext_zve32f, false),
1524     MULTI_EXT_CFG_BOOL("Zve64f", ext_zve64f, false),
1525     MULTI_EXT_CFG_BOOL("Zve64d", ext_zve64d, false),
1526 
1527     DEFINE_PROP_END_OF_LIST(),
1528 };
1529 
1530 static void cpu_set_prop_err(RISCVCPU *cpu, const char *propname,
1531                              Error **errp)
1532 {
1533     g_autofree char *cpuname = riscv_cpu_get_name(cpu);
1534     error_setg(errp, "CPU '%s' does not allow changing the value of '%s'",
1535                cpuname, propname);
1536 }
1537 
1538 static void prop_pmu_num_set(Object *obj, Visitor *v, const char *name,
1539                              void *opaque, Error **errp)
1540 {
1541     RISCVCPU *cpu = RISCV_CPU(obj);
1542     uint8_t pmu_num, curr_pmu_num;
1543     uint32_t pmu_mask;
1544 
    if (!visit_type_uint8(v, name, &pmu_num, errp)) {
        return;
    }
1546 
1547     curr_pmu_num = ctpop32(cpu->cfg.pmu_mask);
1548 
1549     if (pmu_num != curr_pmu_num && riscv_cpu_is_vendor(obj)) {
1550         cpu_set_prop_err(cpu, name, errp);
1551         error_append_hint(errp, "Current '%s' val: %u\n",
1552                           name, curr_pmu_num);
1553         return;
1554     }
1555 
1556     if (pmu_num > (RV_MAX_MHPMCOUNTERS - 3)) {
1557         error_setg(errp, "Number of counters exceeds maximum available");
1558         return;
1559     }
1560 
1561     if (pmu_num == 0) {
1562         pmu_mask = 0;
1563     } else {
1564         pmu_mask = MAKE_64BIT_MASK(3, pmu_num);
1565     }
1566 
1567     warn_report("\"pmu-num\" property is deprecated; use \"pmu-mask\"");
1568     cpu->cfg.pmu_mask = pmu_mask;
1569     cpu_option_add_user_setting("pmu-mask", pmu_mask);
1570 }
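/*
 * Worked example of the pmu-num -> pmu-mask conversion above (illustrative
 * only): "pmu-num=6" asks for six programmable counters starting at
 * mhpmcounter3, so MAKE_64BIT_MASK(3, 6) sets bits 3..8 and yields
 * pmu_mask = 0x1f8.  The same configuration can presumably be requested
 * directly with "pmu-mask=0x1f8".
 */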
1571 
1572 static void prop_pmu_num_get(Object *obj, Visitor *v, const char *name,
1573                              void *opaque, Error **errp)
1574 {
1575     RISCVCPU *cpu = RISCV_CPU(obj);
1576     uint8_t pmu_num = ctpop32(cpu->cfg.pmu_mask);
1577 
1578     visit_type_uint8(v, name, &pmu_num, errp);
1579 }
1580 
1581 static const PropertyInfo prop_pmu_num = {
1582     .name = "pmu-num",
1583     .get = prop_pmu_num_get,
1584     .set = prop_pmu_num_set,
1585 };
1586 
1587 static void prop_pmu_mask_set(Object *obj, Visitor *v, const char *name,
1588                              void *opaque, Error **errp)
1589 {
1590     RISCVCPU *cpu = RISCV_CPU(obj);
1591     uint32_t value;
1592     uint8_t pmu_num;
1593 
    if (!visit_type_uint32(v, name, &value, errp)) {
        return;
    }
1595 
1596     if (value != cpu->cfg.pmu_mask && riscv_cpu_is_vendor(obj)) {
1597         cpu_set_prop_err(cpu, name, errp);
1598         error_append_hint(errp, "Current '%s' val: %x\n",
1599                           name, cpu->cfg.pmu_mask);
1600         return;
1601     }
1602 
1603     pmu_num = ctpop32(value);
1604 
1605     if (pmu_num > (RV_MAX_MHPMCOUNTERS - 3)) {
1606         error_setg(errp, "Number of counters exceeds maximum available");
1607         return;
1608     }
1609 
1610     cpu_option_add_user_setting(name, value);
1611     cpu->cfg.pmu_mask = value;
1612 }
1613 
1614 static void prop_pmu_mask_get(Object *obj, Visitor *v, const char *name,
1615                              void *opaque, Error **errp)
1616 {
    uint32_t pmu_mask = RISCV_CPU(obj)->cfg.pmu_mask;

    visit_type_uint32(v, name, &pmu_mask, errp);
1620 }
1621 
1622 static const PropertyInfo prop_pmu_mask = {
1623     .name = "pmu-mask",
1624     .get = prop_pmu_mask_get,
1625     .set = prop_pmu_mask_set,
1626 };
1627 
1628 static void prop_mmu_set(Object *obj, Visitor *v, const char *name,
1629                          void *opaque, Error **errp)
1630 {
1631     RISCVCPU *cpu = RISCV_CPU(obj);
1632     bool value;
1633 
    if (!visit_type_bool(v, name, &value, errp)) {
        return;
    }
1635 
1636     if (cpu->cfg.mmu != value && riscv_cpu_is_vendor(obj)) {
1637         cpu_set_prop_err(cpu, "mmu", errp);
1638         return;
1639     }
1640 
1641     cpu_option_add_user_setting(name, value);
1642     cpu->cfg.mmu = value;
1643 }
1644 
1645 static void prop_mmu_get(Object *obj, Visitor *v, const char *name,
1646                          void *opaque, Error **errp)
1647 {
1648     bool value = RISCV_CPU(obj)->cfg.mmu;
1649 
1650     visit_type_bool(v, name, &value, errp);
1651 }
1652 
1653 static const PropertyInfo prop_mmu = {
1654     .name = "mmu",
1655     .get = prop_mmu_get,
1656     .set = prop_mmu_set,
1657 };
1658 
1659 static void prop_pmp_set(Object *obj, Visitor *v, const char *name,
1660                          void *opaque, Error **errp)
1661 {
1662     RISCVCPU *cpu = RISCV_CPU(obj);
1663     bool value;
1664 
    if (!visit_type_bool(v, name, &value, errp)) {
        return;
    }
1666 
1667     if (cpu->cfg.pmp != value && riscv_cpu_is_vendor(obj)) {
1668         cpu_set_prop_err(cpu, name, errp);
1669         return;
1670     }
1671 
1672     cpu_option_add_user_setting(name, value);
1673     cpu->cfg.pmp = value;
1674 }
1675 
1676 static void prop_pmp_get(Object *obj, Visitor *v, const char *name,
1677                          void *opaque, Error **errp)
1678 {
1679     bool value = RISCV_CPU(obj)->cfg.pmp;
1680 
1681     visit_type_bool(v, name, &value, errp);
1682 }
1683 
1684 static const PropertyInfo prop_pmp = {
1685     .name = "pmp",
1686     .get = prop_pmp_get,
1687     .set = prop_pmp_set,
1688 };
1689 
1690 static int priv_spec_from_str(const char *priv_spec_str)
1691 {
1692     int priv_version = -1;
1693 
1694     if (!g_strcmp0(priv_spec_str, PRIV_VER_1_12_0_STR)) {
1695         priv_version = PRIV_VERSION_1_12_0;
1696     } else if (!g_strcmp0(priv_spec_str, PRIV_VER_1_11_0_STR)) {
1697         priv_version = PRIV_VERSION_1_11_0;
1698     } else if (!g_strcmp0(priv_spec_str, PRIV_VER_1_10_0_STR)) {
1699         priv_version = PRIV_VERSION_1_10_0;
1700     }
1701 
1702     return priv_version;
1703 }
1704 
1705 static const char *priv_spec_to_str(int priv_version)
1706 {
1707     switch (priv_version) {
1708     case PRIV_VERSION_1_10_0:
1709         return PRIV_VER_1_10_0_STR;
1710     case PRIV_VERSION_1_11_0:
1711         return PRIV_VER_1_11_0_STR;
1712     case PRIV_VERSION_1_12_0:
1713         return PRIV_VER_1_12_0_STR;
1714     default:
1715         return NULL;
1716     }
1717 }
1718 
1719 static void prop_priv_spec_set(Object *obj, Visitor *v, const char *name,
1720                                void *opaque, Error **errp)
1721 {
1722     RISCVCPU *cpu = RISCV_CPU(obj);
1723     g_autofree char *value = NULL;
1724     int priv_version = -1;
1725 
    if (!visit_type_str(v, name, &value, errp)) {
        return;
    }
1727 
1728     priv_version = priv_spec_from_str(value);
1729     if (priv_version < 0) {
1730         error_setg(errp, "Unsupported privilege spec version '%s'", value);
1731         return;
1732     }
1733 
1734     if (priv_version != cpu->env.priv_ver && riscv_cpu_is_vendor(obj)) {
1735         cpu_set_prop_err(cpu, name, errp);
1736         error_append_hint(errp, "Current '%s' val: %s\n", name,
1737                           object_property_get_str(obj, name, NULL));
1738         return;
1739     }
1740 
1741     cpu_option_add_user_setting(name, priv_version);
1742     cpu->env.priv_ver = priv_version;
1743 }
1744 
1745 static void prop_priv_spec_get(Object *obj, Visitor *v, const char *name,
1746                                void *opaque, Error **errp)
1747 {
1748     RISCVCPU *cpu = RISCV_CPU(obj);
1749     const char *value = priv_spec_to_str(cpu->env.priv_ver);
1750 
1751     visit_type_str(v, name, (char **)&value, errp);
1752 }
1753 
1754 static const PropertyInfo prop_priv_spec = {
1755     .name = "priv_spec",
1756     .get = prop_priv_spec_get,
1757     .set = prop_priv_spec_set,
1758 };
1759 
1760 static void prop_vext_spec_set(Object *obj, Visitor *v, const char *name,
1761                                void *opaque, Error **errp)
1762 {
1763     RISCVCPU *cpu = RISCV_CPU(obj);
1764     g_autofree char *value = NULL;
1765 
    if (!visit_type_str(v, name, &value, errp)) {
        return;
    }
1767 
1768     if (g_strcmp0(value, VEXT_VER_1_00_0_STR) != 0) {
1769         error_setg(errp, "Unsupported vector spec version '%s'", value);
1770         return;
1771     }
1772 
1773     cpu_option_add_user_setting(name, VEXT_VERSION_1_00_0);
1774     cpu->env.vext_ver = VEXT_VERSION_1_00_0;
1775 }
1776 
1777 static void prop_vext_spec_get(Object *obj, Visitor *v, const char *name,
1778                                void *opaque, Error **errp)
1779 {
1780     const char *value = VEXT_VER_1_00_0_STR;
1781 
1782     visit_type_str(v, name, (char **)&value, errp);
1783 }
1784 
1785 static const PropertyInfo prop_vext_spec = {
1786     .name = "vext_spec",
1787     .get = prop_vext_spec_get,
1788     .set = prop_vext_spec_set,
1789 };
1790 
1791 Property riscv_cpu_options[] = {
1792     DEFINE_PROP_UINT16("vlen", RISCVCPU, cfg.vlen, 128),
1793     DEFINE_PROP_UINT16("elen", RISCVCPU, cfg.elen, 64),
1794 
1795     DEFINE_PROP_UINT16("cbom_blocksize", RISCVCPU, cfg.cbom_blocksize, 64),
1796     DEFINE_PROP_UINT16("cbop_blocksize", RISCVCPU, cfg.cbop_blocksize, 64),
1797     DEFINE_PROP_UINT16("cboz_blocksize", RISCVCPU, cfg.cboz_blocksize, 64),
1798 
1799     DEFINE_PROP_END_OF_LIST(),
1800 };
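/*
 * Illustrative note: these options are plain qdev properties, so they can be
 * combined with extension flags on the command line, e.g. (assumed
 * invocation, not taken from the QEMU docs):
 *
 *   qemu-system-riscv64 -cpu rv64,v=true,vlen=256,elen=64,cbom_blocksize=64
 *
 * Validation of the values (power-of-two block sizes, vlen limits, etc.) is
 * performed elsewhere at realize time.
 */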
1801 
1802 /*
1803  * RVA22U64 defines some 'named features' or 'synthetic extensions'
1804  * that are cache related: Za64rs, Zic64b, Ziccif, Ziccrse, Ziccamoa
1805  * and Zicclsm. We do not implement caching in QEMU so we'll consider
1806  * all these named features as always enabled.
1807  *
1808  * There's no riscv,isa update for them (nor for zic64b, despite it
1809  * having a cfg offset) at this moment.
1810  */
1811 static RISCVCPUProfile RVA22U64 = {
1812     .parent = NULL,
1813     .name = "rva22u64",
1814     .misa_ext = RVI | RVM | RVA | RVF | RVD | RVC | RVU,
1815     .priv_spec = RISCV_PROFILE_ATTR_UNUSED,
1816     .satp_mode = RISCV_PROFILE_ATTR_UNUSED,
1817     .ext_offsets = {
1818         CPU_CFG_OFFSET(ext_zicsr), CPU_CFG_OFFSET(ext_zihintpause),
1819         CPU_CFG_OFFSET(ext_zba), CPU_CFG_OFFSET(ext_zbb),
1820         CPU_CFG_OFFSET(ext_zbs), CPU_CFG_OFFSET(ext_zfhmin),
1821         CPU_CFG_OFFSET(ext_zkt), CPU_CFG_OFFSET(ext_zicntr),
1822         CPU_CFG_OFFSET(ext_zihpm), CPU_CFG_OFFSET(ext_zicbom),
1823         CPU_CFG_OFFSET(ext_zicbop), CPU_CFG_OFFSET(ext_zicboz),
1824 
1825         /* mandatory named features for this profile */
1826         CPU_CFG_OFFSET(zic64b),
1827 
1828         RISCV_PROFILE_EXT_LIST_END
1829     }
1830 };
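/*
 * Illustrative note on how a profile like RVA22U64 is meant to be consumed
 * (an assumption about code outside this file): when the profile is enabled,
 * the misa_ext bits and every extension referenced in ext_offsets[] are
 * turned on for the CPU.  With the rva22u64 profile CPU type registered
 * later in this file, a hypothetical invocation would be:
 *
 *   qemu-system-riscv64 -cpu rva22u64
 */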
1831 
1832 /*
1833  * As with RVA22U64, RVA22S64 also defines 'named features'.
1834  *
 * Cache-related features that we consider enabled since we don't
 * implement caching: Ssccptr
1837  *
1838  * Other named features that we already implement: Sstvecd, Sstvala,
1839  * Sscounterenw
1840  *
1841  * Named features that we need to enable: svade
1842  *
1843  * The remaining features/extensions comes from RVA22U64.
1844  */
1845 static RISCVCPUProfile RVA22S64 = {
1846     .parent = &RVA22U64,
1847     .name = "rva22s64",
1848     .misa_ext = RVS,
1849     .priv_spec = PRIV_VERSION_1_12_0,
1850     .satp_mode = VM_1_10_SV39,
1851     .ext_offsets = {
1852         /* rva22s64 exts */
1853         CPU_CFG_OFFSET(ext_zifencei), CPU_CFG_OFFSET(ext_svpbmt),
1854         CPU_CFG_OFFSET(ext_svinval),
1855 
1856         /* rva22s64 named features */
1857         CPU_CFG_OFFSET(svade),
1858 
1859         RISCV_PROFILE_EXT_LIST_END
1860     }
1861 };
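/*
 * Illustrative note: .parent above points at RVA22U64, so the code applying
 * this profile is expected (an assumption about the consumer of this struct)
 * to walk the parent chain and enable the parent's misa_ext and ext_offsets[]
 * as well.  A CPU satisfying rva22s64 therefore ends up with RVS plus the
 * extensions listed here on top of the full RVA22U64 set.
 */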
1862 
1863 RISCVCPUProfile *riscv_profiles[] = {
1864     &RVA22U64,
1865     &RVA22S64,
1866     NULL,
1867 };
1868 
1869 static Property riscv_cpu_properties[] = {
1870     DEFINE_PROP_BOOL("debug", RISCVCPU, cfg.debug, true),
1871 
1872     {.name = "pmu-mask", .info = &prop_pmu_mask},
1873     {.name = "pmu-num", .info = &prop_pmu_num}, /* Deprecated */
1874 
1875     {.name = "mmu", .info = &prop_mmu},
1876     {.name = "pmp", .info = &prop_pmp},
1877 
1878     {.name = "priv_spec", .info = &prop_priv_spec},
1879     {.name = "vext_spec", .info = &prop_vext_spec},
1880 
1881 #ifndef CONFIG_USER_ONLY
1882     DEFINE_PROP_UINT64("resetvec", RISCVCPU, env.resetvec, DEFAULT_RSTVEC),
1883 #endif
1884 
1885     DEFINE_PROP_BOOL("short-isa-string", RISCVCPU, cfg.short_isa_string, false),
1886 
1887     DEFINE_PROP_BOOL("rvv_ta_all_1s", RISCVCPU, cfg.rvv_ta_all_1s, false),
1888     DEFINE_PROP_BOOL("rvv_ma_all_1s", RISCVCPU, cfg.rvv_ma_all_1s, false),
1889 
1890     /*
1891      * write_misa() is marked as experimental for now so mark
1892      * it with -x and default to 'false'.
1893      */
1894     DEFINE_PROP_BOOL("x-misa-w", RISCVCPU, cfg.misa_w, false),
1895     DEFINE_PROP_END_OF_LIST(),
1896 };
1897 
1898 #if defined(TARGET_RISCV64)
1899 static void rva22u64_profile_cpu_init(Object *obj)
1900 {
1901     rv64i_bare_cpu_init(obj);
1902 
1903     RVA22U64.enabled = true;
1904 }
1905 
1906 static void rva22s64_profile_cpu_init(Object *obj)
1907 {
1908     rv64i_bare_cpu_init(obj);
1909 
1910     RVA22S64.enabled = true;
1911 }
1912 #endif
1913 
1914 static const gchar *riscv_gdb_arch_name(CPUState *cs)
1915 {
1916     RISCVCPU *cpu = RISCV_CPU(cs);
1917     CPURISCVState *env = &cpu->env;
1918 
1919     switch (riscv_cpu_mxl(env)) {
1920     case MXL_RV32:
1921         return "riscv:rv32";
1922     case MXL_RV64:
1923     case MXL_RV128:
1924         return "riscv:rv64";
1925     default:
1926         g_assert_not_reached();
1927     }
1928 }
1929 
1930 static const char *riscv_gdb_get_dynamic_xml(CPUState *cs, const char *xmlname)
1931 {
1932     RISCVCPU *cpu = RISCV_CPU(cs);
1933 
1934     if (strcmp(xmlname, "riscv-csr.xml") == 0) {
1935         return cpu->dyn_csr_xml;
1936     } else if (strcmp(xmlname, "riscv-vector.xml") == 0) {
1937         return cpu->dyn_vreg_xml;
1938     }
1939 
1940     return NULL;
1941 }
1942 
1943 #ifndef CONFIG_USER_ONLY
1944 static int64_t riscv_get_arch_id(CPUState *cs)
1945 {
1946     RISCVCPU *cpu = RISCV_CPU(cs);
1947 
1948     return cpu->env.mhartid;
1949 }
1950 
1951 #include "hw/core/sysemu-cpu-ops.h"
1952 
1953 static const struct SysemuCPUOps riscv_sysemu_ops = {
1954     .get_phys_page_debug = riscv_cpu_get_phys_page_debug,
1955     .write_elf64_note = riscv_cpu_write_elf64_note,
1956     .write_elf32_note = riscv_cpu_write_elf32_note,
1957     .legacy_vmsd = &vmstate_riscv_cpu,
1958 };
1959 #endif
1960 
1961 static void cpu_set_mvendorid(Object *obj, Visitor *v, const char *name,
1962                               void *opaque, Error **errp)
1963 {
1964     bool dynamic_cpu = riscv_cpu_is_dynamic(obj);
1965     RISCVCPU *cpu = RISCV_CPU(obj);
1966     uint32_t prev_val = cpu->cfg.mvendorid;
1967     uint32_t value;
1968 
1969     if (!visit_type_uint32(v, name, &value, errp)) {
1970         return;
1971     }
1972 
1973     if (!dynamic_cpu && prev_val != value) {
1974         error_setg(errp, "Unable to change %s mvendorid (0x%x)",
1975                    object_get_typename(obj), prev_val);
1976         return;
1977     }
1978 
1979     cpu->cfg.mvendorid = value;
1980 }
1981 
1982 static void cpu_get_mvendorid(Object *obj, Visitor *v, const char *name,
1983                               void *opaque, Error **errp)
1984 {
1985     uint32_t value = RISCV_CPU(obj)->cfg.mvendorid;
1986 
1987     visit_type_uint32(v, name, &value, errp);
1988 }
1989 
1990 static void cpu_set_mimpid(Object *obj, Visitor *v, const char *name,
1991                            void *opaque, Error **errp)
1992 {
1993     bool dynamic_cpu = riscv_cpu_is_dynamic(obj);
1994     RISCVCPU *cpu = RISCV_CPU(obj);
1995     uint64_t prev_val = cpu->cfg.mimpid;
1996     uint64_t value;
1997 
1998     if (!visit_type_uint64(v, name, &value, errp)) {
1999         return;
2000     }
2001 
2002     if (!dynamic_cpu && prev_val != value) {
        error_setg(errp, "Unable to change %s mimpid (0x%" PRIx64 ")",
2004                    object_get_typename(obj), prev_val);
2005         return;
2006     }
2007 
2008     cpu->cfg.mimpid = value;
2009 }
2010 
2011 static void cpu_get_mimpid(Object *obj, Visitor *v, const char *name,
2012                            void *opaque, Error **errp)
2013 {
2014     uint64_t value = RISCV_CPU(obj)->cfg.mimpid;
2015 
2016     visit_type_uint64(v, name, &value, errp);
2017 }
2018 
2019 static void cpu_set_marchid(Object *obj, Visitor *v, const char *name,
2020                             void *opaque, Error **errp)
2021 {
2022     bool dynamic_cpu = riscv_cpu_is_dynamic(obj);
2023     RISCVCPU *cpu = RISCV_CPU(obj);
2024     uint64_t prev_val = cpu->cfg.marchid;
2025     uint64_t value, invalid_val;
2026     uint32_t mxlen = 0;
2027 
2028     if (!visit_type_uint64(v, name, &value, errp)) {
2029         return;
2030     }
2031 
2032     if (!dynamic_cpu && prev_val != value) {
        error_setg(errp, "Unable to change %s marchid (0x%" PRIx64 ")",
2034                    object_get_typename(obj), prev_val);
2035         return;
2036     }
2037 
2038     switch (riscv_cpu_mxl(&cpu->env)) {
2039     case MXL_RV32:
2040         mxlen = 32;
2041         break;
2042     case MXL_RV64:
2043     case MXL_RV128:
2044         mxlen = 64;
2045         break;
2046     default:
2047         g_assert_not_reached();
2048     }
2049 
    invalid_val = 1ULL << (mxlen - 1);
2051 
2052     if (value == invalid_val) {
        error_setg(errp, "Unable to set marchid with only the MSB (bit %u) "
                         "set and the remaining bits zero", mxlen - 1);
2055         return;
2056     }
2057 
2058     cpu->cfg.marchid = value;
2059 }
2060 
2061 static void cpu_get_marchid(Object *obj, Visitor *v, const char *name,
2062                            void *opaque, Error **errp)
2063 {
2064     uint64_t value = RISCV_CPU(obj)->cfg.marchid;
2065 
2066     visit_type_uint64(v, name, &value, errp);
2067 }
2068 
2069 static void riscv_cpu_class_init(ObjectClass *c, void *data)
2070 {
2071     RISCVCPUClass *mcc = RISCV_CPU_CLASS(c);
2072     CPUClass *cc = CPU_CLASS(c);
2073     DeviceClass *dc = DEVICE_CLASS(c);
2074     ResettableClass *rc = RESETTABLE_CLASS(c);
2075 
2076     device_class_set_parent_realize(dc, riscv_cpu_realize,
2077                                     &mcc->parent_realize);
2078 
2079     resettable_class_set_parent_phases(rc, NULL, riscv_cpu_reset_hold, NULL,
2080                                        &mcc->parent_phases);
2081 
2082     cc->class_by_name = riscv_cpu_class_by_name;
2083     cc->has_work = riscv_cpu_has_work;
2084     cc->mmu_index = riscv_cpu_mmu_index;
2085     cc->dump_state = riscv_cpu_dump_state;
2086     cc->set_pc = riscv_cpu_set_pc;
2087     cc->get_pc = riscv_cpu_get_pc;
2088     cc->gdb_read_register = riscv_cpu_gdb_read_register;
2089     cc->gdb_write_register = riscv_cpu_gdb_write_register;
2090     cc->gdb_num_core_regs = 33;
2091     cc->gdb_stop_before_watchpoint = true;
2092     cc->disas_set_info = riscv_cpu_disas_set_info;
2093 #ifndef CONFIG_USER_ONLY
2094     cc->sysemu_ops = &riscv_sysemu_ops;
2095     cc->get_arch_id = riscv_get_arch_id;
2096 #endif
2097     cc->gdb_arch_name = riscv_gdb_arch_name;
2098     cc->gdb_get_dynamic_xml = riscv_gdb_get_dynamic_xml;
2099 
2100     object_class_property_add(c, "mvendorid", "uint32", cpu_get_mvendorid,
2101                               cpu_set_mvendorid, NULL, NULL);
2102 
2103     object_class_property_add(c, "mimpid", "uint64", cpu_get_mimpid,
2104                               cpu_set_mimpid, NULL, NULL);
2105 
2106     object_class_property_add(c, "marchid", "uint64", cpu_get_marchid,
2107                               cpu_set_marchid, NULL, NULL);
2108 
2109     device_class_set_props(dc, riscv_cpu_properties);
2110 }
2111 
2112 static void riscv_isa_string_ext(RISCVCPU *cpu, char **isa_str,
2113                                  int max_str_len)
2114 {
2115     const RISCVIsaExtData *edata;
2116     char *old = *isa_str;
2117     char *new = *isa_str;
2118 
2119     for (edata = isa_edata_arr; edata && edata->name; edata++) {
2120         if (isa_ext_is_enabled(cpu, edata->ext_enable_offset)) {
2121             new = g_strconcat(old, "_", edata->name, NULL);
2122             g_free(old);
2123             old = new;
2124         }
2125     }
2126 
2127     *isa_str = new;
2128 }
2129 
2130 char *riscv_isa_string(RISCVCPU *cpu)
2131 {
2132     int i;
2133     const size_t maxlen = sizeof("rv128") + sizeof(riscv_single_letter_exts);
2134     char *isa_str = g_new(char, maxlen);
2135     char *p = isa_str + snprintf(isa_str, maxlen, "rv%d", TARGET_LONG_BITS);
2136     for (i = 0; i < sizeof(riscv_single_letter_exts) - 1; i++) {
2137         if (cpu->env.misa_ext & RV(riscv_single_letter_exts[i])) {
2138             *p++ = qemu_tolower(riscv_single_letter_exts[i]);
2139         }
2140     }
2141     *p = '\0';
2142     if (!cpu->cfg.short_isa_string) {
2143         riscv_isa_string_ext(cpu, &isa_str, maxlen);
2144     }
2145     return isa_str;
2146 }
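/*
 * Illustrative example of the output (assuming an rv64 CPU whose misa is
 * IMAFDC and whose only enabled multi-letter extensions are Zicsr and
 * Zifencei): the single-letter part follows the order of
 * riscv_single_letter_exts, and each enabled multi-letter extension is
 * appended with a '_' separator in isa_edata_arr order, giving something
 * like:
 *
 *   "rv64imafdc_zicsr_zifencei"
 *
 * With cfg.short_isa_string set, only "rv64imafdc" would be produced.
 */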
2147 
2148 #define DEFINE_CPU(type_name, initfn)      \
2149     {                                      \
2150         .name = type_name,                 \
2151         .parent = TYPE_RISCV_CPU,          \
2152         .instance_init = initfn            \
2153     }
2154 
2155 #define DEFINE_DYNAMIC_CPU(type_name, initfn) \
2156     {                                         \
2157         .name = type_name,                    \
2158         .parent = TYPE_RISCV_DYNAMIC_CPU,     \
2159         .instance_init = initfn               \
2160     }
2161 
2162 #define DEFINE_VENDOR_CPU(type_name, initfn) \
2163     {                                        \
2164         .name = type_name,                   \
2165         .parent = TYPE_RISCV_VENDOR_CPU,     \
2166         .instance_init = initfn              \
2167     }
2168 
2169 #define DEFINE_BARE_CPU(type_name, initfn) \
2170     {                                      \
2171         .name = type_name,                 \
2172         .parent = TYPE_RISCV_BARE_CPU,     \
2173         .instance_init = initfn            \
2174     }
2175 
2176 #define DEFINE_PROFILE_CPU(type_name, initfn) \
2177     {                                         \
2178         .name = type_name,                    \
2179         .parent = TYPE_RISCV_BARE_CPU,        \
2180         .instance_init = initfn               \
2181     }
2182 
2183 static const TypeInfo riscv_cpu_type_infos[] = {
2184     {
2185         .name = TYPE_RISCV_CPU,
2186         .parent = TYPE_CPU,
2187         .instance_size = sizeof(RISCVCPU),
2188         .instance_align = __alignof(RISCVCPU),
2189         .instance_init = riscv_cpu_init,
2190         .instance_post_init = riscv_cpu_post_init,
2191         .abstract = true,
2192         .class_size = sizeof(RISCVCPUClass),
2193         .class_init = riscv_cpu_class_init,
2194     },
2195     {
2196         .name = TYPE_RISCV_DYNAMIC_CPU,
2197         .parent = TYPE_RISCV_CPU,
2198         .abstract = true,
2199     },
2200     {
2201         .name = TYPE_RISCV_VENDOR_CPU,
2202         .parent = TYPE_RISCV_CPU,
2203         .abstract = true,
2204     },
2205     {
2206         .name = TYPE_RISCV_BARE_CPU,
2207         .parent = TYPE_RISCV_CPU,
2208         .abstract = true,
2209     },
2210     DEFINE_DYNAMIC_CPU(TYPE_RISCV_CPU_ANY,      riscv_any_cpu_init),
2211     DEFINE_DYNAMIC_CPU(TYPE_RISCV_CPU_MAX,      riscv_max_cpu_init),
2212 #if defined(TARGET_RISCV32)
2213     DEFINE_DYNAMIC_CPU(TYPE_RISCV_CPU_BASE32,   rv32_base_cpu_init),
2214     DEFINE_VENDOR_CPU(TYPE_RISCV_CPU_IBEX,        rv32_ibex_cpu_init),
2215     DEFINE_VENDOR_CPU(TYPE_RISCV_CPU_SIFIVE_E31,  rv32_sifive_e_cpu_init),
2216     DEFINE_VENDOR_CPU(TYPE_RISCV_CPU_SIFIVE_E34,  rv32_imafcu_nommu_cpu_init),
2217     DEFINE_VENDOR_CPU(TYPE_RISCV_CPU_SIFIVE_U34,  rv32_sifive_u_cpu_init),
2218 #elif defined(TARGET_RISCV64)
2219     DEFINE_DYNAMIC_CPU(TYPE_RISCV_CPU_BASE64,   rv64_base_cpu_init),
2220     DEFINE_VENDOR_CPU(TYPE_RISCV_CPU_SIFIVE_E51,  rv64_sifive_e_cpu_init),
2221     DEFINE_VENDOR_CPU(TYPE_RISCV_CPU_SIFIVE_U54,  rv64_sifive_u_cpu_init),
2222     DEFINE_VENDOR_CPU(TYPE_RISCV_CPU_SHAKTI_C,    rv64_sifive_u_cpu_init),
2223     DEFINE_VENDOR_CPU(TYPE_RISCV_CPU_THEAD_C906,  rv64_thead_c906_cpu_init),
2224     DEFINE_VENDOR_CPU(TYPE_RISCV_CPU_VEYRON_V1,   rv64_veyron_v1_cpu_init),
2225     DEFINE_DYNAMIC_CPU(TYPE_RISCV_CPU_BASE128,  rv128_base_cpu_init),
2226     DEFINE_BARE_CPU(TYPE_RISCV_CPU_RV64I, rv64i_bare_cpu_init),
2227     DEFINE_PROFILE_CPU(TYPE_RISCV_CPU_RVA22U64, rva22u64_profile_cpu_init),
2228     DEFINE_PROFILE_CPU(TYPE_RISCV_CPU_RVA22S64, rva22s64_profile_cpu_init),
2229 #endif
2230 };
2231 
2232 DEFINE_TYPES(riscv_cpu_type_infos)
2233