xref: /openbmc/qemu/target/riscv/cpu.c (revision 11097be4)
1 /*
2  * QEMU RISC-V CPU
3  *
4  * Copyright (c) 2016-2017 Sagar Karandikar, sagark@eecs.berkeley.edu
5  * Copyright (c) 2017-2018 SiFive, Inc.
6  *
7  * This program is free software; you can redistribute it and/or modify it
8  * under the terms and conditions of the GNU General Public License,
9  * version 2 or later, as published by the Free Software Foundation.
10  *
11  * This program is distributed in the hope it will be useful, but WITHOUT
12  * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
13  * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
14  * more details.
15  *
16  * You should have received a copy of the GNU General Public License along with
17  * this program.  If not, see <http://www.gnu.org/licenses/>.
18  */
19 
20 #include "qemu/osdep.h"
21 #include "qemu/qemu-print.h"
22 #include "qemu/ctype.h"
23 #include "qemu/log.h"
24 #include "cpu.h"
25 #include "cpu_vendorid.h"
26 #include "internals.h"
27 #include "exec/exec-all.h"
28 #include "qapi/error.h"
29 #include "qapi/visitor.h"
30 #include "qemu/error-report.h"
31 #include "hw/qdev-properties.h"
32 #include "migration/vmstate.h"
33 #include "fpu/softfloat-helpers.h"
34 #include "sysemu/kvm.h"
35 #include "sysemu/tcg.h"
36 #include "kvm/kvm_riscv.h"
37 #include "tcg/tcg-cpu.h"
38 #include "tcg/tcg.h"
39 
/* RISC-V CPU definitions */

/*
 * Single-letter extensions recognised in ISA strings, and the matching
 * list of misa bits.  misa_bits[] is zero-terminated so callers can
 * iterate without knowing its length.
 */
static const char riscv_single_letter_exts[] = "IEMAFDQCBPVH";
const uint32_t misa_bits[] = {RVI, RVE, RVM, RVA, RVF, RVD, RVV,
                              RVC, RVS, RVU, RVH, RVJ, RVG, RVB, 0};
44 
/*
 * From vector_helper.c
 * Note that vector data is stored in host-endian 64-bit chunks,
 * so addressing bytes needs a host-endian fixup.
 */
#if HOST_BIG_ENDIAN
#define BYTE(x)   ((x) ^ 7)   /* mirror the byte index within a 64-bit chunk */
#else
#define BYTE(x)   (x)
#endif
55 
56 bool riscv_cpu_is_32bit(RISCVCPU *cpu)
57 {
58     return riscv_cpu_mxl(&cpu->env) == MXL_RV32;
59 }
60 
61 /* Hash that stores general user set numeric options */
62 static GHashTable *general_user_opts;
63 
64 static void cpu_option_add_user_setting(const char *optname, uint32_t value)
65 {
66     g_hash_table_insert(general_user_opts, (gpointer)optname,
67                         GUINT_TO_POINTER(value));
68 }
69 
/* Build one {name, min priv version, cfg-flag offset} table entry. */
#define ISA_EXT_DATA_ENTRY(_name, _min_ver, _prop) \
    {#_name, _min_ver, CPU_CFG_OFFSET(_prop)}

/*
 * Here are the ordering rules of extension naming defined by RISC-V
 * specification :
 * 1. All extensions should be separated from other multi-letter extensions
 *    by an underscore.
 * 2. The first letter following the 'Z' conventionally indicates the most
 *    closely related alphabetical extension category, IMAFDQLCBKJTPVH.
 *    If multiple 'Z' extensions are named, they should be ordered first
 *    by category, then alphabetically within a category.
 * 3. Standard supervisor-level extensions (starts with 'S') should be
 *    listed after standard unprivileged extensions.  If multiple
 *    supervisor-level extensions are listed, they should be ordered
 *    alphabetically.
 * 4. Non-standard extensions (starts with 'X') must be listed after all
 *    standard extensions. They must be separated from other multi-letter
 *    extensions by an underscore.
 *
 * Single letter extensions are checked in riscv_cpu_validate_misa_priv()
 * instead.
 */
/*
 * Multi-letter extension table: name, minimum privileged-spec version,
 * and the offset of the extension's enable flag inside RISCVCPUConfig.
 */
const RISCVIsaExtData isa_edata_arr[] = {
    ISA_EXT_DATA_ENTRY(zicbom, PRIV_VERSION_1_12_0, ext_zicbom),
    ISA_EXT_DATA_ENTRY(zicbop, PRIV_VERSION_1_12_0, ext_zicbop),
    ISA_EXT_DATA_ENTRY(zicboz, PRIV_VERSION_1_12_0, ext_zicboz),
    ISA_EXT_DATA_ENTRY(zicond, PRIV_VERSION_1_12_0, ext_zicond),
    ISA_EXT_DATA_ENTRY(zicntr, PRIV_VERSION_1_12_0, ext_zicntr),
    ISA_EXT_DATA_ENTRY(zicsr, PRIV_VERSION_1_10_0, ext_zicsr),
    ISA_EXT_DATA_ENTRY(zifencei, PRIV_VERSION_1_10_0, ext_zifencei),
    ISA_EXT_DATA_ENTRY(zihintntl, PRIV_VERSION_1_10_0, ext_zihintntl),
    ISA_EXT_DATA_ENTRY(zihintpause, PRIV_VERSION_1_10_0, ext_zihintpause),
    ISA_EXT_DATA_ENTRY(zihpm, PRIV_VERSION_1_12_0, ext_zihpm),
    ISA_EXT_DATA_ENTRY(zmmul, PRIV_VERSION_1_12_0, ext_zmmul),
    ISA_EXT_DATA_ENTRY(zacas, PRIV_VERSION_1_12_0, ext_zacas),
    ISA_EXT_DATA_ENTRY(zawrs, PRIV_VERSION_1_12_0, ext_zawrs),
    ISA_EXT_DATA_ENTRY(zfa, PRIV_VERSION_1_12_0, ext_zfa),
    ISA_EXT_DATA_ENTRY(zfbfmin, PRIV_VERSION_1_12_0, ext_zfbfmin),
    ISA_EXT_DATA_ENTRY(zfh, PRIV_VERSION_1_11_0, ext_zfh),
    ISA_EXT_DATA_ENTRY(zfhmin, PRIV_VERSION_1_11_0, ext_zfhmin),
    ISA_EXT_DATA_ENTRY(zfinx, PRIV_VERSION_1_12_0, ext_zfinx),
    ISA_EXT_DATA_ENTRY(zdinx, PRIV_VERSION_1_12_0, ext_zdinx),
    ISA_EXT_DATA_ENTRY(zca, PRIV_VERSION_1_12_0, ext_zca),
    ISA_EXT_DATA_ENTRY(zcb, PRIV_VERSION_1_12_0, ext_zcb),
    ISA_EXT_DATA_ENTRY(zcf, PRIV_VERSION_1_12_0, ext_zcf),
    ISA_EXT_DATA_ENTRY(zcd, PRIV_VERSION_1_12_0, ext_zcd),
    ISA_EXT_DATA_ENTRY(zce, PRIV_VERSION_1_12_0, ext_zce),
    ISA_EXT_DATA_ENTRY(zcmp, PRIV_VERSION_1_12_0, ext_zcmp),
    ISA_EXT_DATA_ENTRY(zcmt, PRIV_VERSION_1_12_0, ext_zcmt),
    ISA_EXT_DATA_ENTRY(zba, PRIV_VERSION_1_12_0, ext_zba),
    ISA_EXT_DATA_ENTRY(zbb, PRIV_VERSION_1_12_0, ext_zbb),
    ISA_EXT_DATA_ENTRY(zbc, PRIV_VERSION_1_12_0, ext_zbc),
    ISA_EXT_DATA_ENTRY(zbkb, PRIV_VERSION_1_12_0, ext_zbkb),
    ISA_EXT_DATA_ENTRY(zbkc, PRIV_VERSION_1_12_0, ext_zbkc),
    ISA_EXT_DATA_ENTRY(zbkx, PRIV_VERSION_1_12_0, ext_zbkx),
    ISA_EXT_DATA_ENTRY(zbs, PRIV_VERSION_1_12_0, ext_zbs),
    ISA_EXT_DATA_ENTRY(zk, PRIV_VERSION_1_12_0, ext_zk),
    ISA_EXT_DATA_ENTRY(zkn, PRIV_VERSION_1_12_0, ext_zkn),
    ISA_EXT_DATA_ENTRY(zknd, PRIV_VERSION_1_12_0, ext_zknd),
    ISA_EXT_DATA_ENTRY(zkne, PRIV_VERSION_1_12_0, ext_zkne),
    ISA_EXT_DATA_ENTRY(zknh, PRIV_VERSION_1_12_0, ext_zknh),
    ISA_EXT_DATA_ENTRY(zkr, PRIV_VERSION_1_12_0, ext_zkr),
    ISA_EXT_DATA_ENTRY(zks, PRIV_VERSION_1_12_0, ext_zks),
    ISA_EXT_DATA_ENTRY(zksed, PRIV_VERSION_1_12_0, ext_zksed),
    ISA_EXT_DATA_ENTRY(zksh, PRIV_VERSION_1_12_0, ext_zksh),
    ISA_EXT_DATA_ENTRY(zkt, PRIV_VERSION_1_12_0, ext_zkt),
    ISA_EXT_DATA_ENTRY(zvbb, PRIV_VERSION_1_12_0, ext_zvbb),
    ISA_EXT_DATA_ENTRY(zvbc, PRIV_VERSION_1_12_0, ext_zvbc),
    ISA_EXT_DATA_ENTRY(zve32f, PRIV_VERSION_1_10_0, ext_zve32f),
    ISA_EXT_DATA_ENTRY(zve64f, PRIV_VERSION_1_10_0, ext_zve64f),
    ISA_EXT_DATA_ENTRY(zve64d, PRIV_VERSION_1_10_0, ext_zve64d),
    ISA_EXT_DATA_ENTRY(zvfbfmin, PRIV_VERSION_1_12_0, ext_zvfbfmin),
    ISA_EXT_DATA_ENTRY(zvfbfwma, PRIV_VERSION_1_12_0, ext_zvfbfwma),
    ISA_EXT_DATA_ENTRY(zvfh, PRIV_VERSION_1_12_0, ext_zvfh),
    ISA_EXT_DATA_ENTRY(zvfhmin, PRIV_VERSION_1_12_0, ext_zvfhmin),
    ISA_EXT_DATA_ENTRY(zvkb, PRIV_VERSION_1_12_0, ext_zvkb),
    ISA_EXT_DATA_ENTRY(zvkg, PRIV_VERSION_1_12_0, ext_zvkg),
    ISA_EXT_DATA_ENTRY(zvkn, PRIV_VERSION_1_12_0, ext_zvkn),
    ISA_EXT_DATA_ENTRY(zvknc, PRIV_VERSION_1_12_0, ext_zvknc),
    ISA_EXT_DATA_ENTRY(zvkned, PRIV_VERSION_1_12_0, ext_zvkned),
    ISA_EXT_DATA_ENTRY(zvkng, PRIV_VERSION_1_12_0, ext_zvkng),
    ISA_EXT_DATA_ENTRY(zvknha, PRIV_VERSION_1_12_0, ext_zvknha),
    ISA_EXT_DATA_ENTRY(zvknhb, PRIV_VERSION_1_12_0, ext_zvknhb),
    ISA_EXT_DATA_ENTRY(zvks, PRIV_VERSION_1_12_0, ext_zvks),
    ISA_EXT_DATA_ENTRY(zvksc, PRIV_VERSION_1_12_0, ext_zvksc),
    ISA_EXT_DATA_ENTRY(zvksed, PRIV_VERSION_1_12_0, ext_zvksed),
    ISA_EXT_DATA_ENTRY(zvksg, PRIV_VERSION_1_12_0, ext_zvksg),
    ISA_EXT_DATA_ENTRY(zvksh, PRIV_VERSION_1_12_0, ext_zvksh),
    ISA_EXT_DATA_ENTRY(zvkt, PRIV_VERSION_1_12_0, ext_zvkt),
    ISA_EXT_DATA_ENTRY(zhinx, PRIV_VERSION_1_12_0, ext_zhinx),
    ISA_EXT_DATA_ENTRY(zhinxmin, PRIV_VERSION_1_12_0, ext_zhinxmin),
    ISA_EXT_DATA_ENTRY(smaia, PRIV_VERSION_1_12_0, ext_smaia),
    ISA_EXT_DATA_ENTRY(smepmp, PRIV_VERSION_1_12_0, ext_smepmp),
    ISA_EXT_DATA_ENTRY(smstateen, PRIV_VERSION_1_12_0, ext_smstateen),
    ISA_EXT_DATA_ENTRY(ssaia, PRIV_VERSION_1_12_0, ext_ssaia),
    ISA_EXT_DATA_ENTRY(sscofpmf, PRIV_VERSION_1_12_0, ext_sscofpmf),
    ISA_EXT_DATA_ENTRY(sstc, PRIV_VERSION_1_12_0, ext_sstc),
    ISA_EXT_DATA_ENTRY(svadu, PRIV_VERSION_1_12_0, ext_svadu),
    ISA_EXT_DATA_ENTRY(svinval, PRIV_VERSION_1_12_0, ext_svinval),
    ISA_EXT_DATA_ENTRY(svnapot, PRIV_VERSION_1_12_0, ext_svnapot),
    ISA_EXT_DATA_ENTRY(svpbmt, PRIV_VERSION_1_12_0, ext_svpbmt),
    ISA_EXT_DATA_ENTRY(xtheadba, PRIV_VERSION_1_11_0, ext_xtheadba),
    ISA_EXT_DATA_ENTRY(xtheadbb, PRIV_VERSION_1_11_0, ext_xtheadbb),
    ISA_EXT_DATA_ENTRY(xtheadbs, PRIV_VERSION_1_11_0, ext_xtheadbs),
    ISA_EXT_DATA_ENTRY(xtheadcmo, PRIV_VERSION_1_11_0, ext_xtheadcmo),
    ISA_EXT_DATA_ENTRY(xtheadcondmov, PRIV_VERSION_1_11_0, ext_xtheadcondmov),
    ISA_EXT_DATA_ENTRY(xtheadfmemidx, PRIV_VERSION_1_11_0, ext_xtheadfmemidx),
    ISA_EXT_DATA_ENTRY(xtheadfmv, PRIV_VERSION_1_11_0, ext_xtheadfmv),
    ISA_EXT_DATA_ENTRY(xtheadmac, PRIV_VERSION_1_11_0, ext_xtheadmac),
    ISA_EXT_DATA_ENTRY(xtheadmemidx, PRIV_VERSION_1_11_0, ext_xtheadmemidx),
    ISA_EXT_DATA_ENTRY(xtheadmempair, PRIV_VERSION_1_11_0, ext_xtheadmempair),
    ISA_EXT_DATA_ENTRY(xtheadsync, PRIV_VERSION_1_11_0, ext_xtheadsync),
    ISA_EXT_DATA_ENTRY(xventanacondops, PRIV_VERSION_1_12_0, ext_XVentanaCondOps),

    DEFINE_PROP_END_OF_LIST(),
};
187 
188 bool isa_ext_is_enabled(RISCVCPU *cpu, uint32_t ext_offset)
189 {
190     bool *ext_enabled = (void *)&cpu->cfg + ext_offset;
191 
192     return *ext_enabled;
193 }
194 
195 void isa_ext_update_enabled(RISCVCPU *cpu, uint32_t ext_offset, bool en)
196 {
197     bool *ext_enabled = (void *)&cpu->cfg + ext_offset;
198 
199     *ext_enabled = en;
200 }
201 
202 bool riscv_cpu_is_vendor(Object *cpu_obj)
203 {
204     return object_dynamic_cast(cpu_obj, TYPE_RISCV_VENDOR_CPU) != NULL;
205 }
206 
/*
 * Integer register names as "architectural/ABI" pairs, indexed by
 * register number; used by riscv_cpu_dump_state() below.
 */
const char * const riscv_int_regnames[] = {
    "x0/zero", "x1/ra",  "x2/sp",  "x3/gp",  "x4/tp",  "x5/t0",   "x6/t1",
    "x7/t2",   "x8/s0",  "x9/s1",  "x10/a0", "x11/a1", "x12/a2",  "x13/a3",
    "x14/a4",  "x15/a5", "x16/a6", "x17/a7", "x18/s2", "x19/s3",  "x20/s4",
    "x21/s5",  "x22/s6", "x23/s7", "x24/s8", "x25/s9", "x26/s10", "x27/s11",
    "x28/t3",  "x29/t4", "x30/t5", "x31/t6"
};

/*
 * "High half" integer register names, same order as riscv_int_regnames.
 * NOTE(review): presumably the upper halves used with MXL_RV128; no user
 * of this array is visible in this part of the file — confirm at callers.
 */
const char * const riscv_int_regnamesh[] = {
    "x0h/zeroh", "x1h/rah",  "x2h/sph",   "x3h/gph",   "x4h/tph",  "x5h/t0h",
    "x6h/t1h",   "x7h/t2h",  "x8h/s0h",   "x9h/s1h",   "x10h/a0h", "x11h/a1h",
    "x12h/a2h",  "x13h/a3h", "x14h/a4h",  "x15h/a5h",  "x16h/a6h", "x17h/a7h",
    "x18h/s2h",  "x19h/s3h", "x20h/s4h",  "x21h/s5h",  "x22h/s6h", "x23h/s7h",
    "x24h/s8h",  "x25h/s9h", "x26h/s10h", "x27h/s11h", "x28h/t3h", "x29h/t4h",
    "x30h/t5h",  "x31h/t6h"
};

/*
 * Floating point register names as "architectural/ABI" pairs;
 * used by riscv_cpu_dump_state().
 */
const char * const riscv_fpr_regnames[] = {
    "f0/ft0",   "f1/ft1",  "f2/ft2",   "f3/ft3",   "f4/ft4",  "f5/ft5",
    "f6/ft6",   "f7/ft7",  "f8/fs0",   "f9/fs1",   "f10/fa0", "f11/fa1",
    "f12/fa2",  "f13/fa3", "f14/fa4",  "f15/fa5",  "f16/fa6", "f17/fa7",
    "f18/fs2",  "f19/fs3", "f20/fs4",  "f21/fs5",  "f22/fs6", "f23/fs7",
    "f24/fs8",  "f25/fs9", "f26/fs10", "f27/fs11", "f28/ft8", "f29/ft9",
    "f30/ft10", "f31/ft11"
};

/* Vector register names; used by riscv_cpu_dump_state(). */
const char * const riscv_rvv_regnames[] = {
  "v0",  "v1",  "v2",  "v3",  "v4",  "v5",  "v6",
  "v7",  "v8",  "v9",  "v10", "v11", "v12", "v13",
  "v14", "v15", "v16", "v17", "v18", "v19", "v20",
  "v21", "v22", "v23", "v24", "v25", "v26", "v27",
  "v28", "v29", "v30", "v31"
};
240 
/*
 * Synchronous exception names, indexed by cause number; "reserved" fills
 * the gaps.  Consumed by riscv_cpu_get_trap_name().
 */
static const char * const riscv_excp_names[] = {
    "misaligned_fetch",
    "fault_fetch",
    "illegal_instruction",
    "breakpoint",
    "misaligned_load",
    "fault_load",
    "misaligned_store",
    "fault_store",
    "user_ecall",
    "supervisor_ecall",
    "hypervisor_ecall",
    "machine_ecall",
    "exec_page_fault",
    "load_page_fault",
    "reserved",
    "store_page_fault",
    "reserved",
    "reserved",
    "reserved",
    "reserved",
    "guest_exec_page_fault",
    "guest_load_page_fault",
    "reserved",
    "guest_store_page_fault",
};

/*
 * Interrupt names, indexed by cause number; consumed by
 * riscv_cpu_get_trap_name().
 */
static const char * const riscv_intr_names[] = {
    "u_software",
    "s_software",
    "vs_software",
    "m_software",
    "u_timer",
    "s_timer",
    "vs_timer",
    "m_timer",
    "u_external",
    "s_external",
    "vs_external",
    "m_external",
    "reserved",
    "reserved",
    "reserved",
    "reserved"
};
286 
287 const char *riscv_cpu_get_trap_name(target_ulong cause, bool async)
288 {
289     if (async) {
290         return (cause < ARRAY_SIZE(riscv_intr_names)) ?
291                riscv_intr_names[cause] : "(unknown)";
292     } else {
293         return (cause < ARRAY_SIZE(riscv_excp_names)) ?
294                riscv_excp_names[cause] : "(unknown)";
295     }
296 }
297 
298 void riscv_cpu_set_misa(CPURISCVState *env, RISCVMXL mxl, uint32_t ext)
299 {
300     env->misa_mxl_max = env->misa_mxl = mxl;
301     env->misa_ext_mask = env->misa_ext = ext;
302 }
303 
304 #ifndef CONFIG_USER_ONLY
305 static uint8_t satp_mode_from_str(const char *satp_mode_str)
306 {
307     if (!strncmp(satp_mode_str, "mbare", 5)) {
308         return VM_1_10_MBARE;
309     }
310 
311     if (!strncmp(satp_mode_str, "sv32", 4)) {
312         return VM_1_10_SV32;
313     }
314 
315     if (!strncmp(satp_mode_str, "sv39", 4)) {
316         return VM_1_10_SV39;
317     }
318 
319     if (!strncmp(satp_mode_str, "sv48", 4)) {
320         return VM_1_10_SV48;
321     }
322 
323     if (!strncmp(satp_mode_str, "sv57", 4)) {
324         return VM_1_10_SV57;
325     }
326 
327     if (!strncmp(satp_mode_str, "sv64", 4)) {
328         return VM_1_10_SV64;
329     }
330 
331     g_assert_not_reached();
332 }
333 
334 uint8_t satp_mode_max_from_map(uint32_t map)
335 {
336     /*
337      * 'map = 0' will make us return (31 - 32), which C will
338      * happily overflow to UINT_MAX. There's no good result to
339      * return if 'map = 0' (e.g. returning 0 will be ambiguous
340      * with the result for 'map = 1').
341      *
342      * Assert out if map = 0. Callers will have to deal with
343      * it outside of this function.
344      */
345     g_assert(map > 0);
346 
347     /* map here has at least one bit set, so no problem with clz */
348     return 31 - __builtin_clz(map);
349 }
350 
351 const char *satp_mode_str(uint8_t satp_mode, bool is_32_bit)
352 {
353     if (is_32_bit) {
354         switch (satp_mode) {
355         case VM_1_10_SV32:
356             return "sv32";
357         case VM_1_10_MBARE:
358             return "none";
359         }
360     } else {
361         switch (satp_mode) {
362         case VM_1_10_SV64:
363             return "sv64";
364         case VM_1_10_SV57:
365             return "sv57";
366         case VM_1_10_SV48:
367             return "sv48";
368         case VM_1_10_SV39:
369             return "sv39";
370         case VM_1_10_MBARE:
371             return "none";
372         }
373     }
374 
375     g_assert_not_reached();
376 }
377 
378 static void set_satp_mode_max_supported(RISCVCPU *cpu,
379                                         uint8_t satp_mode)
380 {
381     bool rv32 = riscv_cpu_mxl(&cpu->env) == MXL_RV32;
382     const bool *valid_vm = rv32 ? valid_vm_1_10_32 : valid_vm_1_10_64;
383 
384     for (int i = 0; i <= satp_mode; ++i) {
385         if (valid_vm[i]) {
386             cpu->cfg.satp_mode.supported |= (1 << i);
387         }
388     }
389 }
390 
391 /* Set the satp mode to the max supported */
392 static void set_satp_mode_default_map(RISCVCPU *cpu)
393 {
394     /*
395      * Bare CPUs do not default to the max available.
396      * Users must set a valid satp_mode in the command
397      * line.
398      */
399     if (object_dynamic_cast(OBJECT(cpu), TYPE_RISCV_BARE_CPU) != NULL) {
400         warn_report("No satp mode set. Defaulting to 'bare'");
401         cpu->cfg.satp_mode.map = (1 << VM_1_10_MBARE);
402         return;
403     }
404 
405     cpu->cfg.satp_mode.map = cpu->cfg.satp_mode.supported;
406 }
407 #endif
408 
409 static void riscv_any_cpu_init(Object *obj)
410 {
411     RISCVCPU *cpu = RISCV_CPU(obj);
412     CPURISCVState *env = &cpu->env;
413 #if defined(TARGET_RISCV32)
414     riscv_cpu_set_misa(env, MXL_RV32, RVI | RVM | RVA | RVF | RVD | RVC | RVU);
415 #elif defined(TARGET_RISCV64)
416     riscv_cpu_set_misa(env, MXL_RV64, RVI | RVM | RVA | RVF | RVD | RVC | RVU);
417 #endif
418 
419 #ifndef CONFIG_USER_ONLY
420     set_satp_mode_max_supported(RISCV_CPU(obj),
421         riscv_cpu_mxl(&RISCV_CPU(obj)->env) == MXL_RV32 ?
422         VM_1_10_SV32 : VM_1_10_SV57);
423 #endif
424 
425     env->priv_ver = PRIV_VERSION_LATEST;
426 
427     /* inherited from parent obj via riscv_cpu_init() */
428     cpu->cfg.ext_zifencei = true;
429     cpu->cfg.ext_zicsr = true;
430     cpu->cfg.mmu = true;
431     cpu->cfg.pmp = true;
432 }
433 
434 static void riscv_max_cpu_init(Object *obj)
435 {
436     RISCVCPU *cpu = RISCV_CPU(obj);
437     CPURISCVState *env = &cpu->env;
438     RISCVMXL mlx = MXL_RV64;
439 
440     cpu->cfg.mmu = true;
441     cpu->cfg.pmp = true;
442 
443 #ifdef TARGET_RISCV32
444     mlx = MXL_RV32;
445 #endif
446     riscv_cpu_set_misa(env, mlx, 0);
447     env->priv_ver = PRIV_VERSION_LATEST;
448 #ifndef CONFIG_USER_ONLY
449     set_satp_mode_max_supported(RISCV_CPU(obj), mlx == MXL_RV32 ?
450                                 VM_1_10_SV32 : VM_1_10_SV57);
451 #endif
452 }
453 
454 #if defined(TARGET_RISCV64)
455 static void rv64_base_cpu_init(Object *obj)
456 {
457     RISCVCPU *cpu = RISCV_CPU(obj);
458     CPURISCVState *env = &cpu->env;
459 
460     cpu->cfg.mmu = true;
461     cpu->cfg.pmp = true;
462 
463     /* We set this in the realise function */
464     riscv_cpu_set_misa(env, MXL_RV64, 0);
465     /* Set latest version of privileged specification */
466     env->priv_ver = PRIV_VERSION_LATEST;
467 #ifndef CONFIG_USER_ONLY
468     set_satp_mode_max_supported(RISCV_CPU(obj), VM_1_10_SV57);
469 #endif
470 }
471 
472 static void rv64_sifive_u_cpu_init(Object *obj)
473 {
474     RISCVCPU *cpu = RISCV_CPU(obj);
475     CPURISCVState *env = &cpu->env;
476     riscv_cpu_set_misa(env, MXL_RV64,
477                        RVI | RVM | RVA | RVF | RVD | RVC | RVS | RVU);
478     env->priv_ver = PRIV_VERSION_1_10_0;
479 #ifndef CONFIG_USER_ONLY
480     set_satp_mode_max_supported(RISCV_CPU(obj), VM_1_10_SV39);
481 #endif
482 
483     /* inherited from parent obj via riscv_cpu_init() */
484     cpu->cfg.ext_zifencei = true;
485     cpu->cfg.ext_zicsr = true;
486     cpu->cfg.mmu = true;
487     cpu->cfg.pmp = true;
488 }
489 
490 static void rv64_sifive_e_cpu_init(Object *obj)
491 {
492     CPURISCVState *env = &RISCV_CPU(obj)->env;
493     RISCVCPU *cpu = RISCV_CPU(obj);
494 
495     riscv_cpu_set_misa(env, MXL_RV64, RVI | RVM | RVA | RVC | RVU);
496     env->priv_ver = PRIV_VERSION_1_10_0;
497 #ifndef CONFIG_USER_ONLY
498     set_satp_mode_max_supported(cpu, VM_1_10_MBARE);
499 #endif
500 
501     /* inherited from parent obj via riscv_cpu_init() */
502     cpu->cfg.ext_zifencei = true;
503     cpu->cfg.ext_zicsr = true;
504     cpu->cfg.pmp = true;
505 }
506 
/* Init the T-Head C906 vendor model (mvendorid = THEAD_VENDOR_ID). */
static void rv64_thead_c906_cpu_init(Object *obj)
{
    CPURISCVState *env = &RISCV_CPU(obj)->env;
    RISCVCPU *cpu = RISCV_CPU(obj);

    riscv_cpu_set_misa(env, MXL_RV64, RVG | RVC | RVS | RVU);
    env->priv_ver = PRIV_VERSION_1_11_0;

    cpu->cfg.ext_zfa = true;
    cpu->cfg.ext_zfh = true;
    cpu->cfg.mmu = true;
    /* Vendor-specific xthead* extensions implemented by this core */
    cpu->cfg.ext_xtheadba = true;
    cpu->cfg.ext_xtheadbb = true;
    cpu->cfg.ext_xtheadbs = true;
    cpu->cfg.ext_xtheadcmo = true;
    cpu->cfg.ext_xtheadcondmov = true;
    cpu->cfg.ext_xtheadfmemidx = true;
    cpu->cfg.ext_xtheadmac = true;
    cpu->cfg.ext_xtheadmemidx = true;
    cpu->cfg.ext_xtheadmempair = true;
    cpu->cfg.ext_xtheadsync = true;

    cpu->cfg.mvendorid = THEAD_VENDOR_ID;
#ifndef CONFIG_USER_ONLY
    set_satp_mode_max_supported(cpu, VM_1_10_SV39);
#endif

    /* inherited from parent obj via riscv_cpu_init() */
    cpu->cfg.pmp = true;
}
537 
/* Init the Ventana Veyron V1 vendor model (mvendorid/marchid/mimpid set). */
static void rv64_veyron_v1_cpu_init(Object *obj)
{
    CPURISCVState *env = &RISCV_CPU(obj)->env;
    RISCVCPU *cpu = RISCV_CPU(obj);

    riscv_cpu_set_misa(env, MXL_RV64, RVG | RVC | RVS | RVU | RVH);
    env->priv_ver = PRIV_VERSION_1_12_0;

    /* Enable ISA extensions */
    cpu->cfg.mmu = true;
    cpu->cfg.ext_zifencei = true;
    cpu->cfg.ext_zicsr = true;
    cpu->cfg.pmp = true;
    cpu->cfg.ext_zicbom = true;
    /* Cache-block operation sizes, in bytes */
    cpu->cfg.cbom_blocksize = 64;
    cpu->cfg.cboz_blocksize = 64;
    cpu->cfg.ext_zicboz = true;
    cpu->cfg.ext_smaia = true;
    cpu->cfg.ext_ssaia = true;
    cpu->cfg.ext_sscofpmf = true;
    cpu->cfg.ext_sstc = true;
    cpu->cfg.ext_svinval = true;
    cpu->cfg.ext_svnapot = true;
    cpu->cfg.ext_svpbmt = true;
    cpu->cfg.ext_smstateen = true;
    cpu->cfg.ext_zba = true;
    cpu->cfg.ext_zbb = true;
    cpu->cfg.ext_zbc = true;
    cpu->cfg.ext_zbs = true;
    cpu->cfg.ext_XVentanaCondOps = true;

    cpu->cfg.mvendorid = VEYRON_V1_MVENDORID;
    cpu->cfg.marchid = VEYRON_V1_MARCHID;
    cpu->cfg.mimpid = VEYRON_V1_MIMPID;

#ifndef CONFIG_USER_ONLY
    set_satp_mode_max_supported(cpu, VM_1_10_SV48);
#endif
}
577 
578 static void rv128_base_cpu_init(Object *obj)
579 {
580     RISCVCPU *cpu = RISCV_CPU(obj);
581     CPURISCVState *env = &cpu->env;
582 
583     if (qemu_tcg_mttcg_enabled()) {
584         /* Missing 128-bit aligned atomics */
585         error_report("128-bit RISC-V currently does not work with Multi "
586                      "Threaded TCG. Please use: -accel tcg,thread=single");
587         exit(EXIT_FAILURE);
588     }
589 
590     cpu->cfg.mmu = true;
591     cpu->cfg.pmp = true;
592 
593     /* We set this in the realise function */
594     riscv_cpu_set_misa(env, MXL_RV128, 0);
595     /* Set latest version of privileged specification */
596     env->priv_ver = PRIV_VERSION_LATEST;
597 #ifndef CONFIG_USER_ONLY
598     set_satp_mode_max_supported(RISCV_CPU(obj), VM_1_10_SV57);
599 #endif
600 }
601 
602 static void rv64i_bare_cpu_init(Object *obj)
603 {
604     CPURISCVState *env = &RISCV_CPU(obj)->env;
605     riscv_cpu_set_misa(env, MXL_RV64, RVI);
606 
607     /* Remove the defaults from the parent class */
608     RISCV_CPU(obj)->cfg.ext_zicntr = false;
609     RISCV_CPU(obj)->cfg.ext_zihpm = false;
610 
611     /* Set to QEMU's first supported priv version */
612     env->priv_ver = PRIV_VERSION_1_10_0;
613 
614     /*
615      * Support all available satp_mode settings. The default
616      * value will be set to MBARE if the user doesn't set
617      * satp_mode manually (see set_satp_mode_default()).
618      */
619 #ifndef CONFIG_USER_ONLY
620     set_satp_mode_max_supported(RISCV_CPU(obj), VM_1_10_SV64);
621 #endif
622 }
623 #else
624 static void rv32_base_cpu_init(Object *obj)
625 {
626     RISCVCPU *cpu = RISCV_CPU(obj);
627     CPURISCVState *env = &cpu->env;
628 
629     cpu->cfg.mmu = true;
630     cpu->cfg.pmp = true;
631 
632     /* We set this in the realise function */
633     riscv_cpu_set_misa(env, MXL_RV32, 0);
634     /* Set latest version of privileged specification */
635     env->priv_ver = PRIV_VERSION_LATEST;
636 #ifndef CONFIG_USER_ONLY
637     set_satp_mode_max_supported(RISCV_CPU(obj), VM_1_10_SV32);
638 #endif
639 }
640 
641 static void rv32_sifive_u_cpu_init(Object *obj)
642 {
643     RISCVCPU *cpu = RISCV_CPU(obj);
644     CPURISCVState *env = &cpu->env;
645     riscv_cpu_set_misa(env, MXL_RV32,
646                        RVI | RVM | RVA | RVF | RVD | RVC | RVS | RVU);
647     env->priv_ver = PRIV_VERSION_1_10_0;
648 #ifndef CONFIG_USER_ONLY
649     set_satp_mode_max_supported(RISCV_CPU(obj), VM_1_10_SV32);
650 #endif
651 
652     /* inherited from parent obj via riscv_cpu_init() */
653     cpu->cfg.ext_zifencei = true;
654     cpu->cfg.ext_zicsr = true;
655     cpu->cfg.mmu = true;
656     cpu->cfg.pmp = true;
657 }
658 
659 static void rv32_sifive_e_cpu_init(Object *obj)
660 {
661     CPURISCVState *env = &RISCV_CPU(obj)->env;
662     RISCVCPU *cpu = RISCV_CPU(obj);
663 
664     riscv_cpu_set_misa(env, MXL_RV32, RVI | RVM | RVA | RVC | RVU);
665     env->priv_ver = PRIV_VERSION_1_10_0;
666 #ifndef CONFIG_USER_ONLY
667     set_satp_mode_max_supported(cpu, VM_1_10_MBARE);
668 #endif
669 
670     /* inherited from parent obj via riscv_cpu_init() */
671     cpu->cfg.ext_zifencei = true;
672     cpu->cfg.ext_zicsr = true;
673     cpu->cfg.pmp = true;
674 }
675 
676 static void rv32_ibex_cpu_init(Object *obj)
677 {
678     CPURISCVState *env = &RISCV_CPU(obj)->env;
679     RISCVCPU *cpu = RISCV_CPU(obj);
680 
681     riscv_cpu_set_misa(env, MXL_RV32, RVI | RVM | RVC | RVU);
682     env->priv_ver = PRIV_VERSION_1_12_0;
683 #ifndef CONFIG_USER_ONLY
684     set_satp_mode_max_supported(cpu, VM_1_10_MBARE);
685 #endif
686     /* inherited from parent obj via riscv_cpu_init() */
687     cpu->cfg.ext_zifencei = true;
688     cpu->cfg.ext_zicsr = true;
689     cpu->cfg.pmp = true;
690     cpu->cfg.ext_smepmp = true;
691 }
692 
693 static void rv32_imafcu_nommu_cpu_init(Object *obj)
694 {
695     CPURISCVState *env = &RISCV_CPU(obj)->env;
696     RISCVCPU *cpu = RISCV_CPU(obj);
697 
698     riscv_cpu_set_misa(env, MXL_RV32, RVI | RVM | RVA | RVF | RVC | RVU);
699     env->priv_ver = PRIV_VERSION_1_10_0;
700 #ifndef CONFIG_USER_ONLY
701     set_satp_mode_max_supported(cpu, VM_1_10_MBARE);
702 #endif
703 
704     /* inherited from parent obj via riscv_cpu_init() */
705     cpu->cfg.ext_zifencei = true;
706     cpu->cfg.ext_zicsr = true;
707     cpu->cfg.pmp = true;
708 }
709 #endif
710 
711 static ObjectClass *riscv_cpu_class_by_name(const char *cpu_model)
712 {
713     ObjectClass *oc;
714     char *typename;
715     char **cpuname;
716 
717     cpuname = g_strsplit(cpu_model, ",", 1);
718     typename = g_strdup_printf(RISCV_CPU_TYPE_NAME("%s"), cpuname[0]);
719     oc = object_class_by_name(typename);
720     g_strfreev(cpuname);
721     g_free(typename);
722 
723     return oc;
724 }
725 
726 char *riscv_cpu_get_name(RISCVCPU *cpu)
727 {
728     RISCVCPUClass *rcc = RISCV_CPU_GET_CLASS(cpu);
729     const char *typename = object_class_get_name(OBJECT_CLASS(rcc));
730 
731     g_assert(g_str_has_suffix(typename, RISCV_CPU_TYPE_SUFFIX));
732 
733     return cpu_model_from_type(typename);
734 }
735 
/*
 * Dump CPU state to @f: pc, a selection of CSRs (system emulation only),
 * the GPRs, and - depending on @flags - the FPRs and vector state.
 */
static void riscv_cpu_dump_state(CPUState *cs, FILE *f, int flags)
{
    RISCVCPU *cpu = RISCV_CPU(cs);
    CPURISCVState *env = &cpu->env;
    int i, j;
    uint8_t *p;

#if !defined(CONFIG_USER_ONLY)
    /* Virtualization mode is only meaningful with the hypervisor extension */
    if (riscv_has_ext(env, RVH)) {
        qemu_fprintf(f, " %s %d\n", "V      =  ", env->virt_enabled);
    }
#endif
    qemu_fprintf(f, " %s " TARGET_FMT_lx "\n", "pc      ", env->pc);
#ifndef CONFIG_USER_ONLY
    {
        /* CSRs worth printing; order here is the output order */
        static const int dump_csrs[] = {
            CSR_MHARTID,
            CSR_MSTATUS,
            CSR_MSTATUSH,
            /*
             * CSR_SSTATUS is intentionally omitted here as its value
             * can be figured out by looking at CSR_MSTATUS
             */
            CSR_HSTATUS,
            CSR_VSSTATUS,
            CSR_MIP,
            CSR_MIE,
            CSR_MIDELEG,
            CSR_HIDELEG,
            CSR_MEDELEG,
            CSR_HEDELEG,
            CSR_MTVEC,
            CSR_STVEC,
            CSR_VSTVEC,
            CSR_MEPC,
            CSR_SEPC,
            CSR_VSEPC,
            CSR_MCAUSE,
            CSR_SCAUSE,
            CSR_VSCAUSE,
            CSR_MTVAL,
            CSR_STVAL,
            CSR_HTVAL,
            CSR_MTVAL2,
            CSR_MSCRATCH,
            CSR_SSCRATCH,
            CSR_SATP,
            CSR_MMTE,
            CSR_UPMBASE,
            CSR_UPMMASK,
            CSR_SPMBASE,
            CSR_SPMMASK,
            CSR_MPMBASE,
            CSR_MPMMASK,
        };

        for (i = 0; i < ARRAY_SIZE(dump_csrs); ++i) {
            int csrno = dump_csrs[i];
            target_ulong val = 0;
            RISCVException res = riscv_csrrw_debug(env, csrno, &val, 0, 0);

            /*
             * Rely on the smode, hmode, etc, predicates within csr.c
             * to do the filtering of the registers that are present.
             */
            if (res == RISCV_EXCP_NONE) {
                qemu_fprintf(f, " %-8s " TARGET_FMT_lx "\n",
                             csr_ops[csrno].name, val);
            }
        }
    }
#endif

    /* GPRs, four per output line */
    for (i = 0; i < 32; i++) {
        qemu_fprintf(f, " %-8s " TARGET_FMT_lx,
                     riscv_int_regnames[i], env->gpr[i]);
        if ((i & 3) == 3) {
            qemu_fprintf(f, "\n");
        }
    }
    if (flags & CPU_DUMP_FPU) {
        /* FPRs as raw 64-bit values, four per output line */
        for (i = 0; i < 32; i++) {
            qemu_fprintf(f, " %-8s %016" PRIx64,
                         riscv_fpr_regnames[i], env->fpr[i]);
            if ((i & 3) == 3) {
                qemu_fprintf(f, "\n");
            }
        }
    }
    if (riscv_has_ext(env, RVV) && (flags & CPU_DUMP_VPU)) {
        static const int dump_rvv_csrs[] = {
                    CSR_VSTART,
                    CSR_VXSAT,
                    CSR_VXRM,
                    CSR_VCSR,
                    CSR_VL,
                    CSR_VTYPE,
                    CSR_VLENB,
                };
        for (i = 0; i < ARRAY_SIZE(dump_rvv_csrs); ++i) {
            int csrno = dump_rvv_csrs[i];
            target_ulong val = 0;
            RISCVException res = riscv_csrrw_debug(env, csrno, &val, 0, 0);

            /*
             * Rely on the smode, hmode, etc, predicates within csr.c
             * to do the filtering of the registers that are present.
             */
            if (res == RISCV_EXCP_NONE) {
                qemu_fprintf(f, " %-8s " TARGET_FMT_lx "\n",
                             csr_ops[csrno].name, val);
            }
        }
        /* Vector register length in bytes (vlen is in bits) */
        uint16_t vlenb = cpu->cfg.vlen >> 3;

        /* Each vector register, printed most-significant byte first */
        for (i = 0; i < 32; i++) {
            qemu_fprintf(f, " %-8s ", riscv_rvv_regnames[i]);
            p = (uint8_t *)env->vreg;
            for (j = vlenb - 1 ; j >= 0; j--) {
                /* BYTE() corrects the byte index on big-endian hosts */
                qemu_fprintf(f, "%02x", *(p + i * vlenb + BYTE(j)));
            }
            qemu_fprintf(f, "\n");
        }
    }
}
861 
862 static void riscv_cpu_set_pc(CPUState *cs, vaddr value)
863 {
864     RISCVCPU *cpu = RISCV_CPU(cs);
865     CPURISCVState *env = &cpu->env;
866 
867     if (env->xl == MXL_RV32) {
868         env->pc = (int32_t)value;
869     } else {
870         env->pc = value;
871     }
872 }
873 
874 static vaddr riscv_cpu_get_pc(CPUState *cs)
875 {
876     RISCVCPU *cpu = RISCV_CPU(cs);
877     CPURISCVState *env = &cpu->env;
878 
879     /* Match cpu_get_tb_cpu_state. */
880     if (env->xl == MXL_RV32) {
881         return env->pc & UINT32_MAX;
882     }
883     return env->pc;
884 }
885 
/* Report whether the vCPU has a pending local interrupt that should end WFI. */
static bool riscv_cpu_has_work(CPUState *cs)
{
#ifndef CONFIG_USER_ONLY
    RISCVCPU *cpu = RISCV_CPU(cs);
    CPURISCVState *env = &cpu->env;
    /*
     * Definition of the WFI instruction requires it to ignore the privilege
     * mode and delegation registers, but respect individual enables
     */
    return riscv_cpu_all_pending(env) != 0 ||
        riscv_cpu_sirq_pending(env) != RISCV_EXCP_NONE ||
        riscv_cpu_vsirq_pending(env) != RISCV_EXCP_NONE;
#else
    /* User-mode emulation has no interrupt sources: always runnable. */
    return true;
#endif
}
902 
903 static int riscv_cpu_mmu_index(CPUState *cs, bool ifetch)
904 {
905     return riscv_env_mmu_index(cpu_env(cs), ifetch);
906 }
907 
/*
 * "hold" phase of the three-phase reset: put the CPU back into its
 * architectural reset state (M-mode, interrupts off, PC at resetvec).
 */
static void riscv_cpu_reset_hold(Object *obj)
{
#ifndef CONFIG_USER_ONLY
    uint8_t iprio;
    int i, irq, rdzero;
#endif
    CPUState *cs = CPU(obj);
    RISCVCPU *cpu = RISCV_CPU(cs);
    RISCVCPUClass *mcc = RISCV_CPU_GET_CLASS(cpu);
    CPURISCVState *env = &cpu->env;

    /* Let the parent class reset its state first. */
    if (mcc->parent_phases.hold) {
        mcc->parent_phases.hold(obj);
    }
#ifndef CONFIG_USER_ONLY
    /* Come out of reset in M-mode with MIE/MPRV cleared. */
    env->misa_mxl = env->misa_mxl_max;
    env->priv = PRV_M;
    env->mstatus &= ~(MSTATUS_MIE | MSTATUS_MPRV);
    if (env->misa_mxl > MXL_RV32) {
        /*
         * The reset status of SXL/UXL is undefined, but mstatus is WARL
         * and we must ensure that the value after init is valid for read.
         */
        env->mstatus = set_field(env->mstatus, MSTATUS64_SXL, env->misa_mxl);
        env->mstatus = set_field(env->mstatus, MSTATUS64_UXL, env->misa_mxl);
        if (riscv_has_ext(env, RVH)) {
            /* Apply the same WARL fix-up to the virtualization copies. */
            env->vsstatus = set_field(env->vsstatus,
                                      MSTATUS64_SXL, env->misa_mxl);
            env->vsstatus = set_field(env->vsstatus,
                                      MSTATUS64_UXL, env->misa_mxl);
            env->mstatus_hs = set_field(env->mstatus_hs,
                                        MSTATUS64_SXL, env->misa_mxl);
            env->mstatus_hs = set_field(env->mstatus_hs,
                                        MSTATUS64_UXL, env->misa_mxl);
        }
    }
    env->mcause = 0;
    env->miclaim = MIP_SGEIP;
    env->pc = env->resetvec;
    env->bins = 0;
    env->two_stage_lookup = false;

    /* menvcfg/henvcfg enable bits track the configured extensions. */
    env->menvcfg = (cpu->cfg.ext_svpbmt ? MENVCFG_PBMTE : 0) |
                   (cpu->cfg.ext_svadu ? MENVCFG_ADUE : 0);
    env->henvcfg = (cpu->cfg.ext_svpbmt ? HENVCFG_PBMTE : 0) |
                   (cpu->cfg.ext_svadu ? HENVCFG_ADUE : 0);

    /* Initialized default priorities of local interrupts. */
    for (i = 0; i < ARRAY_SIZE(env->miprio); i++) {
        iprio = riscv_cpu_default_priority(i);
        env->miprio[i] = (i == IRQ_M_EXT) ? 0 : iprio;
        env->siprio[i] = (i == IRQ_S_EXT) ? 0 : iprio;
        env->hviprio[i] = 0;
    }
    i = 0;
    /* Seed hviprio from miprio for every index that is not read-only-zero. */
    while (!riscv_cpu_hviprio_index2irq(i, &irq, &rdzero)) {
        if (!rdzero) {
            env->hviprio[irq] = env->miprio[irq];
        }
        i++;
    }
    /* mmte is supposed to have pm.current hardwired to 1 */
    env->mmte |= (EXT_STATUS_INITIAL | MMTE_M_PM_CURRENT);

    /*
     * Bits 10, 6, 2 and 12 of mideleg are read only 1 when the Hypervisor
     * extension is enabled.
     */
    if (riscv_has_ext(env, RVH)) {
        env->mideleg |= HS_MODE_INTERRUPTS;
    }

    /*
     * Clear mseccfg and unlock all the PMP entries upon reset.
     * This is allowed as per the priv and smepmp specifications
     * and is needed to clear stale entries across reboots.
     */
    if (riscv_cpu_cfg(env)->ext_smepmp) {
        env->mseccfg = 0;
    }

    pmp_unlock_entries(env);
#endif
    env->xl = riscv_cpu_mxl(env);
    riscv_cpu_update_mask(env);
    cs->exception_index = RISCV_EXCP_NONE;
    env->load_res = -1;
    set_default_nan_mode(1, &env->fp_status);

#ifndef CONFIG_USER_ONLY
    if (cpu->cfg.debug) {
        riscv_trigger_reset_hold(env);
    }

    /* KVM keeps its own copy of the vCPU state; reset it as well. */
    if (kvm_enabled()) {
        kvm_riscv_reset_vcpu(cpu);
    }
#endif
}
1007 
1008 static void riscv_cpu_disas_set_info(CPUState *s, disassemble_info *info)
1009 {
1010     RISCVCPU *cpu = RISCV_CPU(s);
1011     CPURISCVState *env = &cpu->env;
1012     info->target_info = &cpu->cfg;
1013 
1014     switch (env->xl) {
1015     case MXL_RV32:
1016         info->print_insn = print_insn_riscv32;
1017         break;
1018     case MXL_RV64:
1019         info->print_insn = print_insn_riscv64;
1020         break;
1021     case MXL_RV128:
1022         info->print_insn = print_insn_riscv128;
1023         break;
1024     default:
1025         g_assert_not_reached();
1026     }
1027 }
1028 
1029 #ifndef CONFIG_USER_ONLY
/*
 * Validate and expand cfg.satp_mode.map against the modes the CPU
 * supports (.supported) and the modes the user explicitly touched
 * (.init).  On success .map has every supported mode at or below the
 * selected maximum set; on bad user input *errp is set.
 */
static void riscv_cpu_satp_mode_finalize(RISCVCPU *cpu, Error **errp)
{
    bool rv32 = riscv_cpu_is_32bit(cpu);
    uint8_t satp_mode_map_max, satp_mode_supported_max;

    /* The CPU wants the OS to decide which satp mode to use */
    if (cpu->cfg.satp_mode.supported == 0) {
        return;
    }

    satp_mode_supported_max =
                    satp_mode_max_from_map(cpu->cfg.satp_mode.supported);

    if (cpu->cfg.satp_mode.map == 0) {
        if (cpu->cfg.satp_mode.init == 0) {
            /* If unset by the user, we fallback to the default satp mode. */
            set_satp_mode_default_map(cpu);
        } else {
            /*
             * Find the lowest level that was disabled and then enable the
             * first valid level below which can be found in
             * valid_vm_1_10_32/64.
             */
            for (int i = 1; i < 16; ++i) {
                if ((cpu->cfg.satp_mode.init & (1 << i)) &&
                    (cpu->cfg.satp_mode.supported & (1 << i))) {
                    for (int j = i - 1; j >= 0; --j) {
                        if (cpu->cfg.satp_mode.supported & (1 << j)) {
                            cpu->cfg.satp_mode.map |= (1 << j);
                            break;
                        }
                    }
                    break;
                }
            }
        }
    }

    satp_mode_map_max = satp_mode_max_from_map(cpu->cfg.satp_mode.map);

    /* Make sure the user asked for a supported configuration (HW and qemu) */
    if (satp_mode_map_max > satp_mode_supported_max) {
        error_setg(errp, "satp_mode %s is higher than hw max capability %s",
                   satp_mode_str(satp_mode_map_max, rv32),
                   satp_mode_str(satp_mode_supported_max, rv32));
        return;
    }

    /*
     * Make sure the user did not ask for an invalid configuration as per
     * the specification.
     */
    if (!rv32) {
        /* Reject a supported mode explicitly disabled below the chosen max. */
        for (int i = satp_mode_map_max - 1; i >= 0; --i) {
            if (!(cpu->cfg.satp_mode.map & (1 << i)) &&
                (cpu->cfg.satp_mode.init & (1 << i)) &&
                (cpu->cfg.satp_mode.supported & (1 << i))) {
                error_setg(errp, "cannot disable %s satp mode if %s "
                           "is enabled", satp_mode_str(i, false),
                           satp_mode_str(satp_mode_map_max, false));
                return;
            }
        }
    }

    /* Finally expand the map so that all valid modes are set */
    for (int i = satp_mode_map_max - 1; i >= 0; --i) {
        if (cpu->cfg.satp_mode.supported & (1 << i)) {
            cpu->cfg.satp_mode.map |= (1 << i);
        }
    }
}
1102 #endif
1103 
1104 void riscv_cpu_finalize_features(RISCVCPU *cpu, Error **errp)
1105 {
1106     Error *local_err = NULL;
1107 
1108 #ifndef CONFIG_USER_ONLY
1109     riscv_cpu_satp_mode_finalize(cpu, &local_err);
1110     if (local_err != NULL) {
1111         error_propagate(errp, local_err);
1112         return;
1113     }
1114 #endif
1115 
1116     /*
1117      * KVM accel does not have a specialized finalize()
1118      * callback because its extensions are validated
1119      * in the get()/set() callbacks of each property.
1120      */
1121     if (tcg_enabled()) {
1122         riscv_tcg_cpu_finalize_features(cpu, &local_err);
1123         if (local_err != NULL) {
1124             error_propagate(errp, local_err);
1125             return;
1126         }
1127     }
1128 }
1129 
/*
 * DeviceClass::realize for RISC-V CPUs: run common CPU realization,
 * finalize the feature set, then bring the vCPU up and reset it.
 * The ordering below is significant (features must be final before
 * gdb registration and vCPU init).
 */
static void riscv_cpu_realize(DeviceState *dev, Error **errp)
{
    CPUState *cs = CPU(dev);
    RISCVCPU *cpu = RISCV_CPU(dev);
    RISCVCPUClass *mcc = RISCV_CPU_GET_CLASS(dev);
    Error *local_err = NULL;

    if (object_dynamic_cast(OBJECT(dev), TYPE_RISCV_CPU_ANY) != NULL) {
        warn_report("The 'any' CPU is deprecated and will be "
                    "removed in the future.");
    }

    cpu_exec_realizefn(cs, &local_err);
    if (local_err != NULL) {
        error_propagate(errp, local_err);
        return;
    }

    riscv_cpu_finalize_features(cpu, &local_err);
    if (local_err != NULL) {
        error_propagate(errp, local_err);
        return;
    }

    riscv_cpu_register_gdb_regs_for_features(cs);

#ifndef CONFIG_USER_ONLY
    if (cpu->cfg.debug) {
        riscv_trigger_realize(&cpu->env);
    }
#endif

    qemu_init_vcpu(cs);
    cpu_reset(cs);

    mcc->parent_realize(dev, errp);
}
1167 
1168 bool riscv_cpu_accelerator_compatible(RISCVCPU *cpu)
1169 {
1170     if (tcg_enabled()) {
1171         return riscv_cpu_tcg_compatible(cpu);
1172     }
1173 
1174     return true;
1175 }
1176 
1177 #ifndef CONFIG_USER_ONLY
1178 static void cpu_riscv_get_satp(Object *obj, Visitor *v, const char *name,
1179                                void *opaque, Error **errp)
1180 {
1181     RISCVSATPMap *satp_map = opaque;
1182     uint8_t satp = satp_mode_from_str(name);
1183     bool value;
1184 
1185     value = satp_map->map & (1 << satp);
1186 
1187     visit_type_bool(v, name, &value, errp);
1188 }
1189 
1190 static void cpu_riscv_set_satp(Object *obj, Visitor *v, const char *name,
1191                                void *opaque, Error **errp)
1192 {
1193     RISCVSATPMap *satp_map = opaque;
1194     uint8_t satp = satp_mode_from_str(name);
1195     bool value;
1196 
1197     if (!visit_type_bool(v, name, &value, errp)) {
1198         return;
1199     }
1200 
1201     satp_map->map = deposit32(satp_map->map, satp, 1, value);
1202     satp_map->init |= 1 << satp;
1203 }
1204 
1205 void riscv_add_satp_mode_properties(Object *obj)
1206 {
1207     RISCVCPU *cpu = RISCV_CPU(obj);
1208 
1209     if (cpu->env.misa_mxl == MXL_RV32) {
1210         object_property_add(obj, "sv32", "bool", cpu_riscv_get_satp,
1211                             cpu_riscv_set_satp, NULL, &cpu->cfg.satp_mode);
1212     } else {
1213         object_property_add(obj, "sv39", "bool", cpu_riscv_get_satp,
1214                             cpu_riscv_set_satp, NULL, &cpu->cfg.satp_mode);
1215         object_property_add(obj, "sv48", "bool", cpu_riscv_get_satp,
1216                             cpu_riscv_set_satp, NULL, &cpu->cfg.satp_mode);
1217         object_property_add(obj, "sv57", "bool", cpu_riscv_get_satp,
1218                             cpu_riscv_set_satp, NULL, &cpu->cfg.satp_mode);
1219         object_property_add(obj, "sv64", "bool", cpu_riscv_get_satp,
1220                             cpu_riscv_set_satp, NULL, &cpu->cfg.satp_mode);
1221     }
1222 }
1223 
/*
 * GPIO input handler: route an incoming interrupt line to either the
 * local mip bits (irq < IRQ_LOCAL_MAX) or the hypervisor guest external
 * interrupt file (HGEIP).
 */
static void riscv_cpu_set_irq(void *opaque, int irq, int level)
{
    RISCVCPU *cpu = RISCV_CPU(opaque);
    CPURISCVState *env = &cpu->env;

    if (irq < IRQ_LOCAL_MAX) {
        switch (irq) {
        case IRQ_U_SOFT:
        case IRQ_S_SOFT:
        case IRQ_VS_SOFT:
        case IRQ_M_SOFT:
        case IRQ_U_TIMER:
        case IRQ_S_TIMER:
        case IRQ_VS_TIMER:
        case IRQ_M_TIMER:
        case IRQ_U_EXT:
        case IRQ_VS_EXT:
        case IRQ_M_EXT:
            if (kvm_enabled()) {
                kvm_riscv_set_irq(cpu, irq, level);
            } else {
                /* Local interrupt numbers map 1:1 onto mip bits. */
                riscv_cpu_update_mip(env, 1 << irq, BOOL_TO_MASK(level));
            }
             break;
        case IRQ_S_EXT:
            if (kvm_enabled()) {
                kvm_riscv_set_irq(cpu, irq, level);
            } else {
                /*
                 * SEIP is special: keep the externally driven level so it
                 * can be OR-ed with a software-injected SEIP.
                 */
                env->external_seip = level;
                riscv_cpu_update_mip(env, 1 << irq,
                                     BOOL_TO_MASK(level | env->software_seip));
            }
            break;
        default:
            g_assert_not_reached();
        }
    } else if (irq < (IRQ_LOCAL_MAX + IRQ_LOCAL_GUEST_MAX)) {
        /* Require H-extension for handling guest local interrupts */
        if (!riscv_has_ext(env, RVH)) {
            g_assert_not_reached();
        }

        /* Compute bit position in HGEIP CSR */
        irq = irq - IRQ_LOCAL_MAX + 1;
        if (env->geilen < irq) {
            g_assert_not_reached();
        }

        /* Update HGEIP CSR */
        env->hgeip &= ~((target_ulong)1 << irq);
        if (level) {
            env->hgeip |= (target_ulong)1 << irq;
        }

        /* Update mip.SGEIP bit */
        riscv_cpu_update_mip(env, MIP_SGEIP,
                             BOOL_TO_MASK(!!(env->hgeie & env->hgeip)));
    } else {
        g_assert_not_reached();
    }
}
1285 #endif /* CONFIG_USER_ONLY */
1286 
1287 static bool riscv_cpu_is_dynamic(Object *cpu_obj)
1288 {
1289     return object_dynamic_cast(cpu_obj, TYPE_RISCV_DYNAMIC_CPU) != NULL;
1290 }
1291 
static void riscv_cpu_post_init(Object *obj)
{
    /* Let the active accelerator hook into instance initialization. */
    accel_cpu_instance_init(CPU(obj));
}
1296 
1297 static void riscv_cpu_init(Object *obj)
1298 {
1299     RISCVCPU *cpu = RISCV_CPU(obj);
1300 
1301 #ifndef CONFIG_USER_ONLY
1302     qdev_init_gpio_in(DEVICE(obj), riscv_cpu_set_irq,
1303                       IRQ_LOCAL_MAX + IRQ_LOCAL_GUEST_MAX);
1304 #endif /* CONFIG_USER_ONLY */
1305 
1306     general_user_opts = g_hash_table_new(g_str_hash, g_str_equal);
1307 
1308     /*
1309      * The timer and performance counters extensions were supported
1310      * in QEMU before they were added as discrete extensions in the
1311      * ISA. To keep compatibility we'll always default them to 'true'
1312      * for all CPUs. Each accelerator will decide what to do when
1313      * users disable them.
1314      */
1315     RISCV_CPU(obj)->cfg.ext_zicntr = true;
1316     RISCV_CPU(obj)->cfg.ext_zihpm = true;
1317 
1318     /* Default values for non-bool cpu properties */
1319     cpu->cfg.pmu_mask = MAKE_64BIT_MASK(3, 16);
1320 }
1321 
/* Property name / human-readable description for one MISA extension bit. */
typedef struct misa_ext_info {
    const char *name;        /* QOM property name, e.g. "a" or "x-j" */
    const char *description;
} MISAExtInfo;

/* Map a single MISA bit to a dense array index via its trailing-zero count. */
#define MISA_INFO_IDX(_bit) \
    __builtin_ctz(_bit)

/* Designated-initializer helper for misa_ext_info_arr entries. */
#define MISA_EXT_INFO(_bit, _propname, _descr) \
    [MISA_INFO_IDX(_bit)] = {.name = _propname, .description = _descr}
1332 
/*
 * Indexed by MISA_INFO_IDX(bit); consumed by riscv_get_misa_ext_name()
 * and riscv_get_misa_ext_description() below.
 */
static const MISAExtInfo misa_ext_info_arr[] = {
    MISA_EXT_INFO(RVA, "a", "Atomic instructions"),
    MISA_EXT_INFO(RVC, "c", "Compressed instructions"),
    MISA_EXT_INFO(RVD, "d", "Double-precision float point"),
    MISA_EXT_INFO(RVF, "f", "Single-precision float point"),
    MISA_EXT_INFO(RVI, "i", "Base integer instruction set"),
    MISA_EXT_INFO(RVE, "e", "Base integer instruction set (embedded)"),
    MISA_EXT_INFO(RVM, "m", "Integer multiplication and division"),
    MISA_EXT_INFO(RVS, "s", "Supervisor-level instructions"),
    MISA_EXT_INFO(RVU, "u", "User-level instructions"),
    MISA_EXT_INFO(RVH, "h", "Hypervisor"),
    MISA_EXT_INFO(RVJ, "x-j", "Dynamic translated languages"),
    MISA_EXT_INFO(RVV, "v", "Vector operations"),
    MISA_EXT_INFO(RVG, "g", "General purpose (IMAFD_Zicsr_Zifencei)"),
    MISA_EXT_INFO(RVB, "x-b", "Bit manipulation (Zba_Zbb_Zbs)")
};
1349 
1350 static int riscv_validate_misa_info_idx(uint32_t bit)
1351 {
1352     int idx;
1353 
1354     /*
1355      * Our lowest valid input (RVA) is 1 and
1356      * __builtin_ctz() is UB with zero.
1357      */
1358     g_assert(bit != 0);
1359     idx = MISA_INFO_IDX(bit);
1360 
1361     g_assert(idx < ARRAY_SIZE(misa_ext_info_arr));
1362     return idx;
1363 }
1364 
1365 const char *riscv_get_misa_ext_name(uint32_t bit)
1366 {
1367     int idx = riscv_validate_misa_info_idx(bit);
1368     const char *val = misa_ext_info_arr[idx].name;
1369 
1370     g_assert(val != NULL);
1371     return val;
1372 }
1373 
1374 const char *riscv_get_misa_ext_description(uint32_t bit)
1375 {
1376     int idx = riscv_validate_misa_info_idx(bit);
1377     const char *val = misa_ext_info_arr[idx].description;
1378 
1379     g_assert(val != NULL);
1380     return val;
1381 }
1382 
/*
 * Shorthand for a boolean multi-extension config entry: QOM property
 * name, offset of the backing cfg field, and its default value.
 */
#define MULTI_EXT_CFG_BOOL(_name, _prop, _defval) \
    {.name = _name, .offset = CPU_CFG_OFFSET(_prop), \
     .enabled = _defval}
1386 
1387 const RISCVCPUMultiExtConfig riscv_cpu_extensions[] = {
1388     /* Defaults for standard extensions */
1389     MULTI_EXT_CFG_BOOL("sscofpmf", ext_sscofpmf, false),
1390     MULTI_EXT_CFG_BOOL("zifencei", ext_zifencei, true),
1391     MULTI_EXT_CFG_BOOL("zicsr", ext_zicsr, true),
1392     MULTI_EXT_CFG_BOOL("zihintntl", ext_zihintntl, true),
1393     MULTI_EXT_CFG_BOOL("zihintpause", ext_zihintpause, true),
1394     MULTI_EXT_CFG_BOOL("zacas", ext_zacas, false),
1395     MULTI_EXT_CFG_BOOL("zawrs", ext_zawrs, true),
1396     MULTI_EXT_CFG_BOOL("zfa", ext_zfa, true),
1397     MULTI_EXT_CFG_BOOL("zfh", ext_zfh, false),
1398     MULTI_EXT_CFG_BOOL("zfhmin", ext_zfhmin, false),
1399     MULTI_EXT_CFG_BOOL("zve32f", ext_zve32f, false),
1400     MULTI_EXT_CFG_BOOL("zve64f", ext_zve64f, false),
1401     MULTI_EXT_CFG_BOOL("zve64d", ext_zve64d, false),
1402     MULTI_EXT_CFG_BOOL("sstc", ext_sstc, true),
1403 
1404     MULTI_EXT_CFG_BOOL("smepmp", ext_smepmp, false),
1405     MULTI_EXT_CFG_BOOL("smstateen", ext_smstateen, false),
1406     MULTI_EXT_CFG_BOOL("svadu", ext_svadu, true),
1407     MULTI_EXT_CFG_BOOL("svinval", ext_svinval, false),
1408     MULTI_EXT_CFG_BOOL("svnapot", ext_svnapot, false),
1409     MULTI_EXT_CFG_BOOL("svpbmt", ext_svpbmt, false),
1410 
1411     MULTI_EXT_CFG_BOOL("zicntr", ext_zicntr, true),
1412     MULTI_EXT_CFG_BOOL("zihpm", ext_zihpm, true),
1413 
1414     MULTI_EXT_CFG_BOOL("zba", ext_zba, true),
1415     MULTI_EXT_CFG_BOOL("zbb", ext_zbb, true),
1416     MULTI_EXT_CFG_BOOL("zbc", ext_zbc, true),
1417     MULTI_EXT_CFG_BOOL("zbkb", ext_zbkb, false),
1418     MULTI_EXT_CFG_BOOL("zbkc", ext_zbkc, false),
1419     MULTI_EXT_CFG_BOOL("zbkx", ext_zbkx, false),
1420     MULTI_EXT_CFG_BOOL("zbs", ext_zbs, true),
1421     MULTI_EXT_CFG_BOOL("zk", ext_zk, false),
1422     MULTI_EXT_CFG_BOOL("zkn", ext_zkn, false),
1423     MULTI_EXT_CFG_BOOL("zknd", ext_zknd, false),
1424     MULTI_EXT_CFG_BOOL("zkne", ext_zkne, false),
1425     MULTI_EXT_CFG_BOOL("zknh", ext_zknh, false),
1426     MULTI_EXT_CFG_BOOL("zkr", ext_zkr, false),
1427     MULTI_EXT_CFG_BOOL("zks", ext_zks, false),
1428     MULTI_EXT_CFG_BOOL("zksed", ext_zksed, false),
1429     MULTI_EXT_CFG_BOOL("zksh", ext_zksh, false),
1430     MULTI_EXT_CFG_BOOL("zkt", ext_zkt, false),
1431 
1432     MULTI_EXT_CFG_BOOL("zdinx", ext_zdinx, false),
1433     MULTI_EXT_CFG_BOOL("zfinx", ext_zfinx, false),
1434     MULTI_EXT_CFG_BOOL("zhinx", ext_zhinx, false),
1435     MULTI_EXT_CFG_BOOL("zhinxmin", ext_zhinxmin, false),
1436 
1437     MULTI_EXT_CFG_BOOL("zicbom", ext_zicbom, true),
1438     MULTI_EXT_CFG_BOOL("zicbop", ext_zicbop, true),
1439     MULTI_EXT_CFG_BOOL("zicboz", ext_zicboz, true),
1440 
1441     MULTI_EXT_CFG_BOOL("zmmul", ext_zmmul, false),
1442 
1443     MULTI_EXT_CFG_BOOL("zca", ext_zca, false),
1444     MULTI_EXT_CFG_BOOL("zcb", ext_zcb, false),
1445     MULTI_EXT_CFG_BOOL("zcd", ext_zcd, false),
1446     MULTI_EXT_CFG_BOOL("zce", ext_zce, false),
1447     MULTI_EXT_CFG_BOOL("zcf", ext_zcf, false),
1448     MULTI_EXT_CFG_BOOL("zcmp", ext_zcmp, false),
1449     MULTI_EXT_CFG_BOOL("zcmt", ext_zcmt, false),
1450     MULTI_EXT_CFG_BOOL("zicond", ext_zicond, false),
1451 
1452     /* Vector cryptography extensions */
1453     MULTI_EXT_CFG_BOOL("zvbb", ext_zvbb, false),
1454     MULTI_EXT_CFG_BOOL("zvbc", ext_zvbc, false),
1455     MULTI_EXT_CFG_BOOL("zvkb", ext_zvkg, false),
1456     MULTI_EXT_CFG_BOOL("zvkg", ext_zvkg, false),
1457     MULTI_EXT_CFG_BOOL("zvkned", ext_zvkned, false),
1458     MULTI_EXT_CFG_BOOL("zvknha", ext_zvknha, false),
1459     MULTI_EXT_CFG_BOOL("zvknhb", ext_zvknhb, false),
1460     MULTI_EXT_CFG_BOOL("zvksed", ext_zvksed, false),
1461     MULTI_EXT_CFG_BOOL("zvksh", ext_zvksh, false),
1462     MULTI_EXT_CFG_BOOL("zvkt", ext_zvkt, false),
1463     MULTI_EXT_CFG_BOOL("zvkn", ext_zvkn, false),
1464     MULTI_EXT_CFG_BOOL("zvknc", ext_zvknc, false),
1465     MULTI_EXT_CFG_BOOL("zvkng", ext_zvkng, false),
1466     MULTI_EXT_CFG_BOOL("zvks", ext_zvks, false),
1467     MULTI_EXT_CFG_BOOL("zvksc", ext_zvksc, false),
1468     MULTI_EXT_CFG_BOOL("zvksg", ext_zvksg, false),
1469 
1470     DEFINE_PROP_END_OF_LIST(),
1471 };
1472 
/* Vendor-specific custom extensions (T-Head, Ventana); all default off. */
const RISCVCPUMultiExtConfig riscv_cpu_vendor_exts[] = {
    MULTI_EXT_CFG_BOOL("xtheadba", ext_xtheadba, false),
    MULTI_EXT_CFG_BOOL("xtheadbb", ext_xtheadbb, false),
    MULTI_EXT_CFG_BOOL("xtheadbs", ext_xtheadbs, false),
    MULTI_EXT_CFG_BOOL("xtheadcmo", ext_xtheadcmo, false),
    MULTI_EXT_CFG_BOOL("xtheadcondmov", ext_xtheadcondmov, false),
    MULTI_EXT_CFG_BOOL("xtheadfmemidx", ext_xtheadfmemidx, false),
    MULTI_EXT_CFG_BOOL("xtheadfmv", ext_xtheadfmv, false),
    MULTI_EXT_CFG_BOOL("xtheadmac", ext_xtheadmac, false),
    MULTI_EXT_CFG_BOOL("xtheadmemidx", ext_xtheadmemidx, false),
    MULTI_EXT_CFG_BOOL("xtheadmempair", ext_xtheadmempair, false),
    MULTI_EXT_CFG_BOOL("xtheadsync", ext_xtheadsync, false),
    MULTI_EXT_CFG_BOOL("xventanacondops", ext_XVentanaCondOps, false),

    DEFINE_PROP_END_OF_LIST(),
};
1489 
1490 /* These are experimental so mark with 'x-' */
const RISCVCPUMultiExtConfig riscv_cpu_experimental_exts[] = {
    /* AIA machine/supervisor interrupt controller extensions */
    MULTI_EXT_CFG_BOOL("x-smaia", ext_smaia, false),
    MULTI_EXT_CFG_BOOL("x-ssaia", ext_ssaia, false),

    /* Vector half-precision float */
    MULTI_EXT_CFG_BOOL("x-zvfh", ext_zvfh, false),
    MULTI_EXT_CFG_BOOL("x-zvfhmin", ext_zvfhmin, false),

    /* Scalar and vector BF16 */
    MULTI_EXT_CFG_BOOL("x-zfbfmin", ext_zfbfmin, false),
    MULTI_EXT_CFG_BOOL("x-zvfbfmin", ext_zvfbfmin, false),
    MULTI_EXT_CFG_BOOL("x-zvfbfwma", ext_zvfbfwma, false),

    DEFINE_PROP_END_OF_LIST(),
};
1504 
/* Profile "named features" (synthetic extensions) exposed as properties. */
const RISCVCPUMultiExtConfig riscv_cpu_named_features[] = {
    MULTI_EXT_CFG_BOOL("svade", svade, true),
    MULTI_EXT_CFG_BOOL("zic64b", zic64b, true),

    DEFINE_PROP_END_OF_LIST(),
};
1511 
1512 /* Deprecated entries marked for future removal */
/* Capitalized aliases kept for backward compatibility only. */
const RISCVCPUMultiExtConfig riscv_cpu_deprecated_exts[] = {
    MULTI_EXT_CFG_BOOL("Zifencei", ext_zifencei, true),
    MULTI_EXT_CFG_BOOL("Zicsr", ext_zicsr, true),
    MULTI_EXT_CFG_BOOL("Zihintntl", ext_zihintntl, true),
    MULTI_EXT_CFG_BOOL("Zihintpause", ext_zihintpause, true),
    MULTI_EXT_CFG_BOOL("Zawrs", ext_zawrs, true),
    MULTI_EXT_CFG_BOOL("Zfa", ext_zfa, true),
    MULTI_EXT_CFG_BOOL("Zfh", ext_zfh, false),
    MULTI_EXT_CFG_BOOL("Zfhmin", ext_zfhmin, false),
    MULTI_EXT_CFG_BOOL("Zve32f", ext_zve32f, false),
    MULTI_EXT_CFG_BOOL("Zve64f", ext_zve64f, false),
    MULTI_EXT_CFG_BOOL("Zve64d", ext_zve64d, false),

    DEFINE_PROP_END_OF_LIST(),
};
1528 
1529 static void cpu_set_prop_err(RISCVCPU *cpu, const char *propname,
1530                              Error **errp)
1531 {
1532     g_autofree char *cpuname = riscv_cpu_get_name(cpu);
1533     error_setg(errp, "CPU '%s' does not allow changing the value of '%s'",
1534                cpuname, propname);
1535 }
1536 
1537 static void prop_pmu_num_set(Object *obj, Visitor *v, const char *name,
1538                              void *opaque, Error **errp)
1539 {
1540     RISCVCPU *cpu = RISCV_CPU(obj);
1541     uint8_t pmu_num, curr_pmu_num;
1542     uint32_t pmu_mask;
1543 
1544     visit_type_uint8(v, name, &pmu_num, errp);
1545 
1546     curr_pmu_num = ctpop32(cpu->cfg.pmu_mask);
1547 
1548     if (pmu_num != curr_pmu_num && riscv_cpu_is_vendor(obj)) {
1549         cpu_set_prop_err(cpu, name, errp);
1550         error_append_hint(errp, "Current '%s' val: %u\n",
1551                           name, curr_pmu_num);
1552         return;
1553     }
1554 
1555     if (pmu_num > (RV_MAX_MHPMCOUNTERS - 3)) {
1556         error_setg(errp, "Number of counters exceeds maximum available");
1557         return;
1558     }
1559 
1560     if (pmu_num == 0) {
1561         pmu_mask = 0;
1562     } else {
1563         pmu_mask = MAKE_64BIT_MASK(3, pmu_num);
1564     }
1565 
1566     warn_report("\"pmu-num\" property is deprecated; use \"pmu-mask\"");
1567     cpu->cfg.pmu_mask = pmu_mask;
1568     cpu_option_add_user_setting("pmu-mask", pmu_mask);
1569 }
1570 
1571 static void prop_pmu_num_get(Object *obj, Visitor *v, const char *name,
1572                              void *opaque, Error **errp)
1573 {
1574     RISCVCPU *cpu = RISCV_CPU(obj);
1575     uint8_t pmu_num = ctpop32(cpu->cfg.pmu_mask);
1576 
1577     visit_type_uint8(v, name, &pmu_num, errp);
1578 }
1579 
/* Deprecated "pmu-num" property; kept as an alias for "pmu-mask". */
static const PropertyInfo prop_pmu_num = {
    .name = "pmu-num",
    .get = prop_pmu_num_get,
    .set = prop_pmu_num_set,
};
1585 
1586 static void prop_pmu_mask_set(Object *obj, Visitor *v, const char *name,
1587                              void *opaque, Error **errp)
1588 {
1589     RISCVCPU *cpu = RISCV_CPU(obj);
1590     uint32_t value;
1591     uint8_t pmu_num;
1592 
1593     visit_type_uint32(v, name, &value, errp);
1594 
1595     if (value != cpu->cfg.pmu_mask && riscv_cpu_is_vendor(obj)) {
1596         cpu_set_prop_err(cpu, name, errp);
1597         error_append_hint(errp, "Current '%s' val: %x\n",
1598                           name, cpu->cfg.pmu_mask);
1599         return;
1600     }
1601 
1602     pmu_num = ctpop32(value);
1603 
1604     if (pmu_num > (RV_MAX_MHPMCOUNTERS - 3)) {
1605         error_setg(errp, "Number of counters exceeds maximum available");
1606         return;
1607     }
1608 
1609     cpu_option_add_user_setting(name, value);
1610     cpu->cfg.pmu_mask = value;
1611 }
1612 
1613 static void prop_pmu_mask_get(Object *obj, Visitor *v, const char *name,
1614                              void *opaque, Error **errp)
1615 {
1616     uint8_t pmu_mask = RISCV_CPU(obj)->cfg.pmu_mask;
1617 
1618     visit_type_uint8(v, name, &pmu_mask, errp);
1619 }
1620 
/* "pmu-mask": bitmap of active hpmcounters (preferred over "pmu-num"). */
static const PropertyInfo prop_pmu_mask = {
    .name = "pmu-mask",
    .get = prop_pmu_mask_get,
    .set = prop_pmu_mask_set,
};
1626 
1627 static void prop_mmu_set(Object *obj, Visitor *v, const char *name,
1628                          void *opaque, Error **errp)
1629 {
1630     RISCVCPU *cpu = RISCV_CPU(obj);
1631     bool value;
1632 
1633     visit_type_bool(v, name, &value, errp);
1634 
1635     if (cpu->cfg.mmu != value && riscv_cpu_is_vendor(obj)) {
1636         cpu_set_prop_err(cpu, "mmu", errp);
1637         return;
1638     }
1639 
1640     cpu_option_add_user_setting(name, value);
1641     cpu->cfg.mmu = value;
1642 }
1643 
1644 static void prop_mmu_get(Object *obj, Visitor *v, const char *name,
1645                          void *opaque, Error **errp)
1646 {
1647     bool value = RISCV_CPU(obj)->cfg.mmu;
1648 
1649     visit_type_bool(v, name, &value, errp);
1650 }
1651 
/* "mmu": whether the CPU implements address translation. */
static const PropertyInfo prop_mmu = {
    .name = "mmu",
    .get = prop_mmu_get,
    .set = prop_mmu_set,
};
1657 
1658 static void prop_pmp_set(Object *obj, Visitor *v, const char *name,
1659                          void *opaque, Error **errp)
1660 {
1661     RISCVCPU *cpu = RISCV_CPU(obj);
1662     bool value;
1663 
1664     visit_type_bool(v, name, &value, errp);
1665 
1666     if (cpu->cfg.pmp != value && riscv_cpu_is_vendor(obj)) {
1667         cpu_set_prop_err(cpu, name, errp);
1668         return;
1669     }
1670 
1671     cpu_option_add_user_setting(name, value);
1672     cpu->cfg.pmp = value;
1673 }
1674 
1675 static void prop_pmp_get(Object *obj, Visitor *v, const char *name,
1676                          void *opaque, Error **errp)
1677 {
1678     bool value = RISCV_CPU(obj)->cfg.pmp;
1679 
1680     visit_type_bool(v, name, &value, errp);
1681 }
1682 
/* "pmp": whether the CPU implements physical memory protection. */
static const PropertyInfo prop_pmp = {
    .name = "pmp",
    .get = prop_pmp_get,
    .set = prop_pmp_set,
};
1688 
/* Non-boolean tunables shared by all CPU models. */
Property riscv_cpu_options[] = {
    DEFINE_PROP_STRING("priv_spec", RISCVCPU, cfg.priv_spec),
    DEFINE_PROP_STRING("vext_spec", RISCVCPU, cfg.vext_spec),

    /* Vector register length and max element width, in bits. */
    DEFINE_PROP_UINT16("vlen", RISCVCPU, cfg.vlen, 128),
    DEFINE_PROP_UINT16("elen", RISCVCPU, cfg.elen, 64),

    /* Cache-block sizes (bytes) for the Zicbom/Zicbop/Zicboz operations. */
    DEFINE_PROP_UINT16("cbom_blocksize", RISCVCPU, cfg.cbom_blocksize, 64),
    DEFINE_PROP_UINT16("cbop_blocksize", RISCVCPU, cfg.cbop_blocksize, 64),
    DEFINE_PROP_UINT16("cboz_blocksize", RISCVCPU, cfg.cboz_blocksize, 64),

    DEFINE_PROP_END_OF_LIST(),
};
1702 
1703 /*
1704  * RVA22U64 defines some 'named features' or 'synthetic extensions'
1705  * that are cache related: Za64rs, Zic64b, Ziccif, Ziccrse, Ziccamoa
1706  * and Zicclsm. We do not implement caching in QEMU so we'll consider
1707  * all these named features as always enabled.
1708  *
1709  * There's no riscv,isa update for them (nor for zic64b, despite it
1710  * having a cfg offset) at this moment.
1711  */
static RISCVCPUProfile RVA22U64 = {
    .parent = NULL,            /* base profile: no parent to inherit from */
    .name = "rva22u64",
    .misa_ext = RVI | RVM | RVA | RVF | RVD | RVC | RVU,
    .priv_spec = RISCV_PROFILE_ATTR_UNUSED,
    .satp_mode = RISCV_PROFILE_ATTR_UNUSED,
    .ext_offsets = {
        CPU_CFG_OFFSET(ext_zicsr), CPU_CFG_OFFSET(ext_zihintpause),
        CPU_CFG_OFFSET(ext_zba), CPU_CFG_OFFSET(ext_zbb),
        CPU_CFG_OFFSET(ext_zbs), CPU_CFG_OFFSET(ext_zfhmin),
        CPU_CFG_OFFSET(ext_zkt), CPU_CFG_OFFSET(ext_zicntr),
        CPU_CFG_OFFSET(ext_zihpm), CPU_CFG_OFFSET(ext_zicbom),
        CPU_CFG_OFFSET(ext_zicbop), CPU_CFG_OFFSET(ext_zicboz),

        /* mandatory named features for this profile */
        CPU_CFG_OFFSET(zic64b),

        RISCV_PROFILE_EXT_LIST_END
    }
};
1732 
1733 /*
1734  * As with RVA22U64, RVA22S64 also defines 'named features'.
1735  *
1736  * Cache related features that we consider enabled since we don't
1737  * implement cache: Ssccptr
1738  *
1739  * Other named features that we already implement: Sstvecd, Sstvala,
1740  * Sscounterenw
1741  *
1742  * Named features that we need to enable: svade
1743  *
1744  * The remaining features/extensions comes from RVA22U64.
1745  */
static RISCVCPUProfile RVA22S64 = {
    .parent = &RVA22U64,       /* inherits all of the RVA22U64 requirements */
    .name = "rva22s64",
    .misa_ext = RVS,
    .priv_spec = PRIV_VERSION_1_12_0,
    .satp_mode = VM_1_10_SV39,
    .ext_offsets = {
        /* rva22s64 exts */
        CPU_CFG_OFFSET(ext_zifencei), CPU_CFG_OFFSET(ext_svpbmt),
        CPU_CFG_OFFSET(ext_svinval),

        /* rva22s64 named features */
        CPU_CFG_OFFSET(svade),

        RISCV_PROFILE_EXT_LIST_END
    }
};
1763 
/* NULL-terminated list of all supported CPU profiles */
RISCVCPUProfile *riscv_profiles[] = {
    &RVA22U64,
    &RVA22S64,
    NULL,
};
1769 
/* QOM properties common to every RISC-V CPU model */
static Property riscv_cpu_properties[] = {
    DEFINE_PROP_BOOL("debug", RISCVCPU, cfg.debug, true),

    /* Custom-visitor properties: setter/getter live in the prop_* infos */
    {.name = "pmu-mask", .info = &prop_pmu_mask},
    {.name = "pmu-num", .info = &prop_pmu_num}, /* Deprecated */

    {.name = "mmu", .info = &prop_mmu},
    {.name = "pmp", .info = &prop_pmp},

#ifndef CONFIG_USER_ONLY
    /* Reset vector only makes sense for system emulation */
    DEFINE_PROP_UINT64("resetvec", RISCVCPU, env.resetvec, DEFAULT_RSTVEC),
#endif

    DEFINE_PROP_BOOL("short-isa-string", RISCVCPU, cfg.short_isa_string, false),

    DEFINE_PROP_BOOL("rvv_ta_all_1s", RISCVCPU, cfg.rvv_ta_all_1s, false),
    DEFINE_PROP_BOOL("rvv_ma_all_1s", RISCVCPU, cfg.rvv_ma_all_1s, false),

    /*
     * write_misa() is marked as experimental for now so mark
     * it with -x and default to 'false'.
     */
    DEFINE_PROP_BOOL("x-misa-w", RISCVCPU, cfg.misa_w, false),
    DEFINE_PROP_END_OF_LIST(),
};
1795 
#if defined(TARGET_RISCV64)
/*
 * Profile CPUs start from the rv64i bare CPU and then flag their
 * profile as enabled; the profile's misa_ext/ext_offsets lists are
 * presumably applied later during realize — confirm in profile
 * handling code.
 */
static void rva22u64_profile_cpu_init(Object *obj)
{
    rv64i_bare_cpu_init(obj);

    RVA22U64.enabled = true;
}

/* Same pattern as rva22u64, for the supervisor-mode profile */
static void rva22s64_profile_cpu_init(Object *obj)
{
    rv64i_bare_cpu_init(obj);

    RVA22S64.enabled = true;
}
#endif
1811 
1812 static const gchar *riscv_gdb_arch_name(CPUState *cs)
1813 {
1814     RISCVCPU *cpu = RISCV_CPU(cs);
1815     CPURISCVState *env = &cpu->env;
1816 
1817     switch (riscv_cpu_mxl(env)) {
1818     case MXL_RV32:
1819         return "riscv:rv32";
1820     case MXL_RV64:
1821     case MXL_RV128:
1822         return "riscv:rv64";
1823     default:
1824         g_assert_not_reached();
1825     }
1826 }
1827 
1828 static const char *riscv_gdb_get_dynamic_xml(CPUState *cs, const char *xmlname)
1829 {
1830     RISCVCPU *cpu = RISCV_CPU(cs);
1831 
1832     if (strcmp(xmlname, "riscv-csr.xml") == 0) {
1833         return cpu->dyn_csr_xml;
1834     } else if (strcmp(xmlname, "riscv-vector.xml") == 0) {
1835         return cpu->dyn_vreg_xml;
1836     }
1837 
1838     return NULL;
1839 }
1840 
#ifndef CONFIG_USER_ONLY
/* Arch-specific CPU id exposed to generic code: the hart id */
static int64_t riscv_get_arch_id(CPUState *cs)
{
    RISCVCPU *cpu = RISCV_CPU(cs);

    return cpu->env.mhartid;
}

#include "hw/core/sysemu-cpu-ops.h"

/* System-emulation hooks: debug translation, ELF core notes, migration */
static const struct SysemuCPUOps riscv_sysemu_ops = {
    .get_phys_page_debug = riscv_cpu_get_phys_page_debug,
    .write_elf64_note = riscv_cpu_write_elf64_note,
    .write_elf32_note = riscv_cpu_write_elf32_note,
    .legacy_vmsd = &vmstate_riscv_cpu,
};
#endif
1858 
1859 static void cpu_set_mvendorid(Object *obj, Visitor *v, const char *name,
1860                               void *opaque, Error **errp)
1861 {
1862     bool dynamic_cpu = riscv_cpu_is_dynamic(obj);
1863     RISCVCPU *cpu = RISCV_CPU(obj);
1864     uint32_t prev_val = cpu->cfg.mvendorid;
1865     uint32_t value;
1866 
1867     if (!visit_type_uint32(v, name, &value, errp)) {
1868         return;
1869     }
1870 
1871     if (!dynamic_cpu && prev_val != value) {
1872         error_setg(errp, "Unable to change %s mvendorid (0x%x)",
1873                    object_get_typename(obj), prev_val);
1874         return;
1875     }
1876 
1877     cpu->cfg.mvendorid = value;
1878 }
1879 
1880 static void cpu_get_mvendorid(Object *obj, Visitor *v, const char *name,
1881                               void *opaque, Error **errp)
1882 {
1883     uint32_t value = RISCV_CPU(obj)->cfg.mvendorid;
1884 
1885     visit_type_uint32(v, name, &value, errp);
1886 }
1887 
1888 static void cpu_set_mimpid(Object *obj, Visitor *v, const char *name,
1889                            void *opaque, Error **errp)
1890 {
1891     bool dynamic_cpu = riscv_cpu_is_dynamic(obj);
1892     RISCVCPU *cpu = RISCV_CPU(obj);
1893     uint64_t prev_val = cpu->cfg.mimpid;
1894     uint64_t value;
1895 
1896     if (!visit_type_uint64(v, name, &value, errp)) {
1897         return;
1898     }
1899 
1900     if (!dynamic_cpu && prev_val != value) {
1901         error_setg(errp, "Unable to change %s mimpid (0x%" PRIu64 ")",
1902                    object_get_typename(obj), prev_val);
1903         return;
1904     }
1905 
1906     cpu->cfg.mimpid = value;
1907 }
1908 
1909 static void cpu_get_mimpid(Object *obj, Visitor *v, const char *name,
1910                            void *opaque, Error **errp)
1911 {
1912     uint64_t value = RISCV_CPU(obj)->cfg.mimpid;
1913 
1914     visit_type_uint64(v, name, &value, errp);
1915 }
1916 
1917 static void cpu_set_marchid(Object *obj, Visitor *v, const char *name,
1918                             void *opaque, Error **errp)
1919 {
1920     bool dynamic_cpu = riscv_cpu_is_dynamic(obj);
1921     RISCVCPU *cpu = RISCV_CPU(obj);
1922     uint64_t prev_val = cpu->cfg.marchid;
1923     uint64_t value, invalid_val;
1924     uint32_t mxlen = 0;
1925 
1926     if (!visit_type_uint64(v, name, &value, errp)) {
1927         return;
1928     }
1929 
1930     if (!dynamic_cpu && prev_val != value) {
1931         error_setg(errp, "Unable to change %s marchid (0x%" PRIu64 ")",
1932                    object_get_typename(obj), prev_val);
1933         return;
1934     }
1935 
1936     switch (riscv_cpu_mxl(&cpu->env)) {
1937     case MXL_RV32:
1938         mxlen = 32;
1939         break;
1940     case MXL_RV64:
1941     case MXL_RV128:
1942         mxlen = 64;
1943         break;
1944     default:
1945         g_assert_not_reached();
1946     }
1947 
1948     invalid_val = 1LL << (mxlen - 1);
1949 
1950     if (value == invalid_val) {
1951         error_setg(errp, "Unable to set marchid with MSB (%u) bit set "
1952                          "and the remaining bits zero", mxlen);
1953         return;
1954     }
1955 
1956     cpu->cfg.marchid = value;
1957 }
1958 
1959 static void cpu_get_marchid(Object *obj, Visitor *v, const char *name,
1960                            void *opaque, Error **errp)
1961 {
1962     uint64_t value = RISCV_CPU(obj)->cfg.marchid;
1963 
1964     visit_type_uint64(v, name, &value, errp);
1965 }
1966 
/*
 * Class init for the abstract TYPE_RISCV_CPU: wires up realize/reset,
 * the generic CPUClass hooks, gdb support and the QOM properties
 * shared by all RISC-V CPU models.
 */
static void riscv_cpu_class_init(ObjectClass *c, void *data)
{
    RISCVCPUClass *mcc = RISCV_CPU_CLASS(c);
    CPUClass *cc = CPU_CLASS(c);
    DeviceClass *dc = DEVICE_CLASS(c);
    ResettableClass *rc = RESETTABLE_CLASS(c);

    /* Chain our realize/reset while keeping the parent's reachable */
    device_class_set_parent_realize(dc, riscv_cpu_realize,
                                    &mcc->parent_realize);

    resettable_class_set_parent_phases(rc, NULL, riscv_cpu_reset_hold, NULL,
                                       &mcc->parent_phases);

    cc->class_by_name = riscv_cpu_class_by_name;
    cc->has_work = riscv_cpu_has_work;
    cc->mmu_index = riscv_cpu_mmu_index;
    cc->dump_state = riscv_cpu_dump_state;
    cc->set_pc = riscv_cpu_set_pc;
    cc->get_pc = riscv_cpu_get_pc;
    cc->gdb_read_register = riscv_cpu_gdb_read_register;
    cc->gdb_write_register = riscv_cpu_gdb_write_register;
    /* 32 GPRs + pc */
    cc->gdb_num_core_regs = 33;
    cc->gdb_stop_before_watchpoint = true;
    cc->disas_set_info = riscv_cpu_disas_set_info;
#ifndef CONFIG_USER_ONLY
    cc->sysemu_ops = &riscv_sysemu_ops;
    cc->get_arch_id = riscv_get_arch_id;
#endif
    cc->gdb_arch_name = riscv_gdb_arch_name;
    cc->gdb_get_dynamic_xml = riscv_gdb_get_dynamic_xml;

    /* Machine-id registers use custom setters to guard vendor CPUs */
    object_class_property_add(c, "mvendorid", "uint32", cpu_get_mvendorid,
                              cpu_set_mvendorid, NULL, NULL);

    object_class_property_add(c, "mimpid", "uint64", cpu_get_mimpid,
                              cpu_set_mimpid, NULL, NULL);

    object_class_property_add(c, "marchid", "uint64", cpu_get_marchid,
                              cpu_set_marchid, NULL, NULL);

    device_class_set_props(dc, riscv_cpu_properties);
}
2009 
2010 static void riscv_isa_string_ext(RISCVCPU *cpu, char **isa_str,
2011                                  int max_str_len)
2012 {
2013     const RISCVIsaExtData *edata;
2014     char *old = *isa_str;
2015     char *new = *isa_str;
2016 
2017     for (edata = isa_edata_arr; edata && edata->name; edata++) {
2018         if (isa_ext_is_enabled(cpu, edata->ext_enable_offset)) {
2019             new = g_strconcat(old, "_", edata->name, NULL);
2020             g_free(old);
2021             old = new;
2022         }
2023     }
2024 
2025     *isa_str = new;
2026 }
2027 
2028 char *riscv_isa_string(RISCVCPU *cpu)
2029 {
2030     int i;
2031     const size_t maxlen = sizeof("rv128") + sizeof(riscv_single_letter_exts);
2032     char *isa_str = g_new(char, maxlen);
2033     char *p = isa_str + snprintf(isa_str, maxlen, "rv%d", TARGET_LONG_BITS);
2034     for (i = 0; i < sizeof(riscv_single_letter_exts) - 1; i++) {
2035         if (cpu->env.misa_ext & RV(riscv_single_letter_exts[i])) {
2036             *p++ = qemu_tolower(riscv_single_letter_exts[i]);
2037         }
2038     }
2039     *p = '\0';
2040     if (!cpu->cfg.short_isa_string) {
2041         riscv_isa_string_ext(cpu, &isa_str, maxlen);
2042     }
2043     return isa_str;
2044 }
2045 
/* TypeInfo helper: concrete CPU deriving from the abstract base type */
#define DEFINE_CPU(type_name, initfn)      \
    {                                      \
        .name = type_name,                 \
        .parent = TYPE_RISCV_CPU,          \
        .instance_init = initfn            \
    }

/* TypeInfo helper: generic CPU whose features may be freely changed */
#define DEFINE_DYNAMIC_CPU(type_name, initfn) \
    {                                         \
        .name = type_name,                    \
        .parent = TYPE_RISCV_DYNAMIC_CPU,     \
        .instance_init = initfn               \
    }

/* TypeInfo helper: vendor CPU with fixed mvendorid/marchid/mimpid */
#define DEFINE_VENDOR_CPU(type_name, initfn) \
    {                                        \
        .name = type_name,                   \
        .parent = TYPE_RISCV_VENDOR_CPU,     \
        .instance_init = initfn              \
    }

/* TypeInfo helper: bare CPU starting with a minimal extension set */
#define DEFINE_BARE_CPU(type_name, initfn) \
    {                                      \
        .name = type_name,                 \
        .parent = TYPE_RISCV_BARE_CPU,     \
        .instance_init = initfn            \
    }

/* TypeInfo helper: profile CPU; also derives from the bare CPU type */
#define DEFINE_PROFILE_CPU(type_name, initfn) \
    {                                         \
        .name = type_name,                    \
        .parent = TYPE_RISCV_BARE_CPU,        \
        .instance_init = initfn               \
    }
2080 
/* All QOM types registered by this file: abstract bases + CPU models */
static const TypeInfo riscv_cpu_type_infos[] = {
    {
        /* Abstract root type every RISC-V CPU derives from */
        .name = TYPE_RISCV_CPU,
        .parent = TYPE_CPU,
        .instance_size = sizeof(RISCVCPU),
        .instance_align = __alignof(RISCVCPU),
        .instance_init = riscv_cpu_init,
        .instance_post_init = riscv_cpu_post_init,
        .abstract = true,
        .class_size = sizeof(RISCVCPUClass),
        .class_init = riscv_cpu_class_init,
    },
    {
        .name = TYPE_RISCV_DYNAMIC_CPU,
        .parent = TYPE_RISCV_CPU,
        .abstract = true,
    },
    {
        .name = TYPE_RISCV_VENDOR_CPU,
        .parent = TYPE_RISCV_CPU,
        .abstract = true,
    },
    {
        .name = TYPE_RISCV_BARE_CPU,
        .parent = TYPE_RISCV_CPU,
        .abstract = true,
    },
    DEFINE_DYNAMIC_CPU(TYPE_RISCV_CPU_ANY,      riscv_any_cpu_init),
    DEFINE_DYNAMIC_CPU(TYPE_RISCV_CPU_MAX,      riscv_max_cpu_init),
#if defined(TARGET_RISCV32)
    DEFINE_DYNAMIC_CPU(TYPE_RISCV_CPU_BASE32,   rv32_base_cpu_init),
    DEFINE_VENDOR_CPU(TYPE_RISCV_CPU_IBEX,        rv32_ibex_cpu_init),
    DEFINE_VENDOR_CPU(TYPE_RISCV_CPU_SIFIVE_E31,  rv32_sifive_e_cpu_init),
    DEFINE_VENDOR_CPU(TYPE_RISCV_CPU_SIFIVE_E34,  rv32_imafcu_nommu_cpu_init),
    DEFINE_VENDOR_CPU(TYPE_RISCV_CPU_SIFIVE_U34,  rv32_sifive_u_cpu_init),
#elif defined(TARGET_RISCV64)
    DEFINE_DYNAMIC_CPU(TYPE_RISCV_CPU_BASE64,   rv64_base_cpu_init),
    DEFINE_VENDOR_CPU(TYPE_RISCV_CPU_SIFIVE_E51,  rv64_sifive_e_cpu_init),
    DEFINE_VENDOR_CPU(TYPE_RISCV_CPU_SIFIVE_U54,  rv64_sifive_u_cpu_init),
    /* Shakti C reuses the SiFive U init: same extension set */
    DEFINE_VENDOR_CPU(TYPE_RISCV_CPU_SHAKTI_C,    rv64_sifive_u_cpu_init),
    DEFINE_VENDOR_CPU(TYPE_RISCV_CPU_THEAD_C906,  rv64_thead_c906_cpu_init),
    DEFINE_VENDOR_CPU(TYPE_RISCV_CPU_VEYRON_V1,   rv64_veyron_v1_cpu_init),
    DEFINE_DYNAMIC_CPU(TYPE_RISCV_CPU_BASE128,  rv128_base_cpu_init),
    DEFINE_BARE_CPU(TYPE_RISCV_CPU_RV64I, rv64i_bare_cpu_init),
    DEFINE_PROFILE_CPU(TYPE_RISCV_CPU_RVA22U64, rva22u64_profile_cpu_init),
    DEFINE_PROFILE_CPU(TYPE_RISCV_CPU_RVA22S64, rva22s64_profile_cpu_init),
#endif
};

DEFINE_TYPES(riscv_cpu_type_infos)
2131