xref: /openbmc/qemu/target/riscv/cpu.c (revision d06f28db)
1 /*
2  * QEMU RISC-V CPU
3  *
4  * Copyright (c) 2016-2017 Sagar Karandikar, sagark@eecs.berkeley.edu
5  * Copyright (c) 2017-2018 SiFive, Inc.
6  *
7  * This program is free software; you can redistribute it and/or modify it
8  * under the terms and conditions of the GNU General Public License,
9  * version 2 or later, as published by the Free Software Foundation.
10  *
11  * This program is distributed in the hope it will be useful, but WITHOUT
12  * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
13  * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
14  * more details.
15  *
16  * You should have received a copy of the GNU General Public License along with
17  * this program.  If not, see <http://www.gnu.org/licenses/>.
18  */
19 
20 #include "qemu/osdep.h"
21 #include "qemu/qemu-print.h"
22 #include "qemu/ctype.h"
23 #include "qemu/log.h"
24 #include "cpu.h"
25 #include "cpu_vendorid.h"
26 #include "internals.h"
27 #include "exec/exec-all.h"
28 #include "qapi/error.h"
29 #include "qapi/visitor.h"
30 #include "qemu/error-report.h"
31 #include "hw/qdev-properties.h"
32 #include "migration/vmstate.h"
33 #include "fpu/softfloat-helpers.h"
34 #include "sysemu/kvm.h"
35 #include "sysemu/tcg.h"
36 #include "kvm/kvm_riscv.h"
37 #include "tcg/tcg-cpu.h"
38 #include "tcg/tcg.h"
39 
40 /* RISC-V CPU definitions */
41 static const char riscv_single_letter_exts[] = "IEMAFDQCBPVH";
42 const uint32_t misa_bits[] = {RVI, RVE, RVM, RVA, RVF, RVD, RVV,
43                               RVC, RVS, RVU, RVH, RVJ, RVG, RVB, 0};
44 
45 /*
46  * From vector_helper.c
47  * Note that vector data is stored in host-endian 64-bit chunks,
48  * so addressing bytes needs a host-endian fixup.
49  */
50 #if HOST_BIG_ENDIAN
51 #define BYTE(x)   ((x) ^ 7)
52 #else
53 #define BYTE(x)   (x)
54 #endif
55 
56 bool riscv_cpu_is_32bit(RISCVCPU *cpu)
57 {
58     return riscv_cpu_mxl(&cpu->env) == MXL_RV32;
59 }
60 
61 /* Hash that stores general user set numeric options */
62 static GHashTable *general_user_opts;
63 
64 static void cpu_option_add_user_setting(const char *optname, uint32_t value)
65 {
66     g_hash_table_insert(general_user_opts, (gpointer)optname,
67                         GUINT_TO_POINTER(value));
68 }
69 
70 #define ISA_EXT_DATA_ENTRY(_name, _min_ver, _prop) \
71     {#_name, _min_ver, CPU_CFG_OFFSET(_prop)}
72 
73 /*
74  * Here are the ordering rules of extension naming defined by RISC-V
75  * specification :
76  * 1. All extensions should be separated from other multi-letter extensions
77  *    by an underscore.
78  * 2. The first letter following the 'Z' conventionally indicates the most
79  *    closely related alphabetical extension category, IMAFDQLCBKJTPVH.
80  *    If multiple 'Z' extensions are named, they should be ordered first
81  *    by category, then alphabetically within a category.
82  * 3. Standard supervisor-level extensions (starts with 'S') should be
83  *    listed after standard unprivileged extensions.  If multiple
84  *    supervisor-level extensions are listed, they should be ordered
85  *    alphabetically.
86  * 4. Non-standard extensions (starts with 'X') must be listed after all
87  *    standard extensions. They must be separated from other multi-letter
88  *    extensions by an underscore.
89  *
90  * Single letter extensions are checked in riscv_cpu_validate_misa_priv()
91  * instead.
92  */
93 const RISCVIsaExtData isa_edata_arr[] = {
94     ISA_EXT_DATA_ENTRY(zicbom, PRIV_VERSION_1_12_0, ext_zicbom),
95     ISA_EXT_DATA_ENTRY(zicbop, PRIV_VERSION_1_12_0, ext_zicbop),
96     ISA_EXT_DATA_ENTRY(zicboz, PRIV_VERSION_1_12_0, ext_zicboz),
97     ISA_EXT_DATA_ENTRY(zicond, PRIV_VERSION_1_12_0, ext_zicond),
98     ISA_EXT_DATA_ENTRY(zicntr, PRIV_VERSION_1_12_0, ext_zicntr),
99     ISA_EXT_DATA_ENTRY(zicsr, PRIV_VERSION_1_10_0, ext_zicsr),
100     ISA_EXT_DATA_ENTRY(zifencei, PRIV_VERSION_1_10_0, ext_zifencei),
101     ISA_EXT_DATA_ENTRY(zihintntl, PRIV_VERSION_1_10_0, ext_zihintntl),
102     ISA_EXT_DATA_ENTRY(zihintpause, PRIV_VERSION_1_10_0, ext_zihintpause),
103     ISA_EXT_DATA_ENTRY(zihpm, PRIV_VERSION_1_12_0, ext_zihpm),
104     ISA_EXT_DATA_ENTRY(zmmul, PRIV_VERSION_1_12_0, ext_zmmul),
105     ISA_EXT_DATA_ENTRY(zacas, PRIV_VERSION_1_12_0, ext_zacas),
106     ISA_EXT_DATA_ENTRY(zawrs, PRIV_VERSION_1_12_0, ext_zawrs),
107     ISA_EXT_DATA_ENTRY(zfa, PRIV_VERSION_1_12_0, ext_zfa),
108     ISA_EXT_DATA_ENTRY(zfbfmin, PRIV_VERSION_1_12_0, ext_zfbfmin),
109     ISA_EXT_DATA_ENTRY(zfh, PRIV_VERSION_1_11_0, ext_zfh),
110     ISA_EXT_DATA_ENTRY(zfhmin, PRIV_VERSION_1_11_0, ext_zfhmin),
111     ISA_EXT_DATA_ENTRY(zfinx, PRIV_VERSION_1_12_0, ext_zfinx),
112     ISA_EXT_DATA_ENTRY(zdinx, PRIV_VERSION_1_12_0, ext_zdinx),
113     ISA_EXT_DATA_ENTRY(zca, PRIV_VERSION_1_12_0, ext_zca),
114     ISA_EXT_DATA_ENTRY(zcb, PRIV_VERSION_1_12_0, ext_zcb),
115     ISA_EXT_DATA_ENTRY(zcf, PRIV_VERSION_1_12_0, ext_zcf),
116     ISA_EXT_DATA_ENTRY(zcd, PRIV_VERSION_1_12_0, ext_zcd),
117     ISA_EXT_DATA_ENTRY(zce, PRIV_VERSION_1_12_0, ext_zce),
118     ISA_EXT_DATA_ENTRY(zcmp, PRIV_VERSION_1_12_0, ext_zcmp),
119     ISA_EXT_DATA_ENTRY(zcmt, PRIV_VERSION_1_12_0, ext_zcmt),
120     ISA_EXT_DATA_ENTRY(zba, PRIV_VERSION_1_12_0, ext_zba),
121     ISA_EXT_DATA_ENTRY(zbb, PRIV_VERSION_1_12_0, ext_zbb),
122     ISA_EXT_DATA_ENTRY(zbc, PRIV_VERSION_1_12_0, ext_zbc),
123     ISA_EXT_DATA_ENTRY(zbkb, PRIV_VERSION_1_12_0, ext_zbkb),
124     ISA_EXT_DATA_ENTRY(zbkc, PRIV_VERSION_1_12_0, ext_zbkc),
125     ISA_EXT_DATA_ENTRY(zbkx, PRIV_VERSION_1_12_0, ext_zbkx),
126     ISA_EXT_DATA_ENTRY(zbs, PRIV_VERSION_1_12_0, ext_zbs),
127     ISA_EXT_DATA_ENTRY(zk, PRIV_VERSION_1_12_0, ext_zk),
128     ISA_EXT_DATA_ENTRY(zkn, PRIV_VERSION_1_12_0, ext_zkn),
129     ISA_EXT_DATA_ENTRY(zknd, PRIV_VERSION_1_12_0, ext_zknd),
130     ISA_EXT_DATA_ENTRY(zkne, PRIV_VERSION_1_12_0, ext_zkne),
131     ISA_EXT_DATA_ENTRY(zknh, PRIV_VERSION_1_12_0, ext_zknh),
132     ISA_EXT_DATA_ENTRY(zkr, PRIV_VERSION_1_12_0, ext_zkr),
133     ISA_EXT_DATA_ENTRY(zks, PRIV_VERSION_1_12_0, ext_zks),
134     ISA_EXT_DATA_ENTRY(zksed, PRIV_VERSION_1_12_0, ext_zksed),
135     ISA_EXT_DATA_ENTRY(zksh, PRIV_VERSION_1_12_0, ext_zksh),
136     ISA_EXT_DATA_ENTRY(zkt, PRIV_VERSION_1_12_0, ext_zkt),
137     ISA_EXT_DATA_ENTRY(zvbb, PRIV_VERSION_1_12_0, ext_zvbb),
138     ISA_EXT_DATA_ENTRY(zvbc, PRIV_VERSION_1_12_0, ext_zvbc),
139     ISA_EXT_DATA_ENTRY(zve32f, PRIV_VERSION_1_10_0, ext_zve32f),
140     ISA_EXT_DATA_ENTRY(zve64f, PRIV_VERSION_1_10_0, ext_zve64f),
141     ISA_EXT_DATA_ENTRY(zve64d, PRIV_VERSION_1_10_0, ext_zve64d),
142     ISA_EXT_DATA_ENTRY(zvfbfmin, PRIV_VERSION_1_12_0, ext_zvfbfmin),
143     ISA_EXT_DATA_ENTRY(zvfbfwma, PRIV_VERSION_1_12_0, ext_zvfbfwma),
144     ISA_EXT_DATA_ENTRY(zvfh, PRIV_VERSION_1_12_0, ext_zvfh),
145     ISA_EXT_DATA_ENTRY(zvfhmin, PRIV_VERSION_1_12_0, ext_zvfhmin),
146     ISA_EXT_DATA_ENTRY(zvkb, PRIV_VERSION_1_12_0, ext_zvkb),
147     ISA_EXT_DATA_ENTRY(zvkg, PRIV_VERSION_1_12_0, ext_zvkg),
148     ISA_EXT_DATA_ENTRY(zvkn, PRIV_VERSION_1_12_0, ext_zvkn),
149     ISA_EXT_DATA_ENTRY(zvknc, PRIV_VERSION_1_12_0, ext_zvknc),
150     ISA_EXT_DATA_ENTRY(zvkned, PRIV_VERSION_1_12_0, ext_zvkned),
151     ISA_EXT_DATA_ENTRY(zvkng, PRIV_VERSION_1_12_0, ext_zvkng),
152     ISA_EXT_DATA_ENTRY(zvknha, PRIV_VERSION_1_12_0, ext_zvknha),
153     ISA_EXT_DATA_ENTRY(zvknhb, PRIV_VERSION_1_12_0, ext_zvknhb),
154     ISA_EXT_DATA_ENTRY(zvks, PRIV_VERSION_1_12_0, ext_zvks),
155     ISA_EXT_DATA_ENTRY(zvksc, PRIV_VERSION_1_12_0, ext_zvksc),
156     ISA_EXT_DATA_ENTRY(zvksed, PRIV_VERSION_1_12_0, ext_zvksed),
157     ISA_EXT_DATA_ENTRY(zvksg, PRIV_VERSION_1_12_0, ext_zvksg),
158     ISA_EXT_DATA_ENTRY(zvksh, PRIV_VERSION_1_12_0, ext_zvksh),
159     ISA_EXT_DATA_ENTRY(zvkt, PRIV_VERSION_1_12_0, ext_zvkt),
160     ISA_EXT_DATA_ENTRY(zhinx, PRIV_VERSION_1_12_0, ext_zhinx),
161     ISA_EXT_DATA_ENTRY(zhinxmin, PRIV_VERSION_1_12_0, ext_zhinxmin),
162     ISA_EXT_DATA_ENTRY(smaia, PRIV_VERSION_1_12_0, ext_smaia),
163     ISA_EXT_DATA_ENTRY(smepmp, PRIV_VERSION_1_12_0, ext_smepmp),
164     ISA_EXT_DATA_ENTRY(smstateen, PRIV_VERSION_1_12_0, ext_smstateen),
165     ISA_EXT_DATA_ENTRY(ssaia, PRIV_VERSION_1_12_0, ext_ssaia),
166     ISA_EXT_DATA_ENTRY(sscofpmf, PRIV_VERSION_1_12_0, ext_sscofpmf),
167     ISA_EXT_DATA_ENTRY(sstc, PRIV_VERSION_1_12_0, ext_sstc),
168     ISA_EXT_DATA_ENTRY(svadu, PRIV_VERSION_1_12_0, ext_svadu),
169     ISA_EXT_DATA_ENTRY(svinval, PRIV_VERSION_1_12_0, ext_svinval),
170     ISA_EXT_DATA_ENTRY(svnapot, PRIV_VERSION_1_12_0, ext_svnapot),
171     ISA_EXT_DATA_ENTRY(svpbmt, PRIV_VERSION_1_12_0, ext_svpbmt),
172     ISA_EXT_DATA_ENTRY(xtheadba, PRIV_VERSION_1_11_0, ext_xtheadba),
173     ISA_EXT_DATA_ENTRY(xtheadbb, PRIV_VERSION_1_11_0, ext_xtheadbb),
174     ISA_EXT_DATA_ENTRY(xtheadbs, PRIV_VERSION_1_11_0, ext_xtheadbs),
175     ISA_EXT_DATA_ENTRY(xtheadcmo, PRIV_VERSION_1_11_0, ext_xtheadcmo),
176     ISA_EXT_DATA_ENTRY(xtheadcondmov, PRIV_VERSION_1_11_0, ext_xtheadcondmov),
177     ISA_EXT_DATA_ENTRY(xtheadfmemidx, PRIV_VERSION_1_11_0, ext_xtheadfmemidx),
178     ISA_EXT_DATA_ENTRY(xtheadfmv, PRIV_VERSION_1_11_0, ext_xtheadfmv),
179     ISA_EXT_DATA_ENTRY(xtheadmac, PRIV_VERSION_1_11_0, ext_xtheadmac),
180     ISA_EXT_DATA_ENTRY(xtheadmemidx, PRIV_VERSION_1_11_0, ext_xtheadmemidx),
181     ISA_EXT_DATA_ENTRY(xtheadmempair, PRIV_VERSION_1_11_0, ext_xtheadmempair),
182     ISA_EXT_DATA_ENTRY(xtheadsync, PRIV_VERSION_1_11_0, ext_xtheadsync),
183     ISA_EXT_DATA_ENTRY(xventanacondops, PRIV_VERSION_1_12_0, ext_XVentanaCondOps),
184 
185     DEFINE_PROP_END_OF_LIST(),
186 };
187 
188 bool isa_ext_is_enabled(RISCVCPU *cpu, uint32_t ext_offset)
189 {
190     bool *ext_enabled = (void *)&cpu->cfg + ext_offset;
191 
192     return *ext_enabled;
193 }
194 
195 void isa_ext_update_enabled(RISCVCPU *cpu, uint32_t ext_offset, bool en)
196 {
197     bool *ext_enabled = (void *)&cpu->cfg + ext_offset;
198 
199     *ext_enabled = en;
200 }
201 
202 bool riscv_cpu_is_vendor(Object *cpu_obj)
203 {
204     return object_dynamic_cast(cpu_obj, TYPE_RISCV_VENDOR_CPU) != NULL;
205 }
206 
207 const char * const riscv_int_regnames[] = {
208     "x0/zero", "x1/ra",  "x2/sp",  "x3/gp",  "x4/tp",  "x5/t0",   "x6/t1",
209     "x7/t2",   "x8/s0",  "x9/s1",  "x10/a0", "x11/a1", "x12/a2",  "x13/a3",
210     "x14/a4",  "x15/a5", "x16/a6", "x17/a7", "x18/s2", "x19/s3",  "x20/s4",
211     "x21/s5",  "x22/s6", "x23/s7", "x24/s8", "x25/s9", "x26/s10", "x27/s11",
212     "x28/t3",  "x29/t4", "x30/t5", "x31/t6"
213 };
214 
215 const char * const riscv_int_regnamesh[] = {
216     "x0h/zeroh", "x1h/rah",  "x2h/sph",   "x3h/gph",   "x4h/tph",  "x5h/t0h",
217     "x6h/t1h",   "x7h/t2h",  "x8h/s0h",   "x9h/s1h",   "x10h/a0h", "x11h/a1h",
218     "x12h/a2h",  "x13h/a3h", "x14h/a4h",  "x15h/a5h",  "x16h/a6h", "x17h/a7h",
219     "x18h/s2h",  "x19h/s3h", "x20h/s4h",  "x21h/s5h",  "x22h/s6h", "x23h/s7h",
220     "x24h/s8h",  "x25h/s9h", "x26h/s10h", "x27h/s11h", "x28h/t3h", "x29h/t4h",
221     "x30h/t5h",  "x31h/t6h"
222 };
223 
224 const char * const riscv_fpr_regnames[] = {
225     "f0/ft0",   "f1/ft1",  "f2/ft2",   "f3/ft3",   "f4/ft4",  "f5/ft5",
226     "f6/ft6",   "f7/ft7",  "f8/fs0",   "f9/fs1",   "f10/fa0", "f11/fa1",
227     "f12/fa2",  "f13/fa3", "f14/fa4",  "f15/fa5",  "f16/fa6", "f17/fa7",
228     "f18/fs2",  "f19/fs3", "f20/fs4",  "f21/fs5",  "f22/fs6", "f23/fs7",
229     "f24/fs8",  "f25/fs9", "f26/fs10", "f27/fs11", "f28/ft8", "f29/ft9",
230     "f30/ft10", "f31/ft11"
231 };
232 
233 const char * const riscv_rvv_regnames[] = {
234   "v0",  "v1",  "v2",  "v3",  "v4",  "v5",  "v6",
235   "v7",  "v8",  "v9",  "v10", "v11", "v12", "v13",
236   "v14", "v15", "v16", "v17", "v18", "v19", "v20",
237   "v21", "v22", "v23", "v24", "v25", "v26", "v27",
238   "v28", "v29", "v30", "v31"
239 };
240 
241 static const char * const riscv_excp_names[] = {
242     "misaligned_fetch",
243     "fault_fetch",
244     "illegal_instruction",
245     "breakpoint",
246     "misaligned_load",
247     "fault_load",
248     "misaligned_store",
249     "fault_store",
250     "user_ecall",
251     "supervisor_ecall",
252     "hypervisor_ecall",
253     "machine_ecall",
254     "exec_page_fault",
255     "load_page_fault",
256     "reserved",
257     "store_page_fault",
258     "reserved",
259     "reserved",
260     "reserved",
261     "reserved",
262     "guest_exec_page_fault",
263     "guest_load_page_fault",
264     "reserved",
265     "guest_store_page_fault",
266 };
267 
268 static const char * const riscv_intr_names[] = {
269     "u_software",
270     "s_software",
271     "vs_software",
272     "m_software",
273     "u_timer",
274     "s_timer",
275     "vs_timer",
276     "m_timer",
277     "u_external",
278     "s_external",
279     "vs_external",
280     "m_external",
281     "reserved",
282     "reserved",
283     "reserved",
284     "reserved"
285 };
286 
287 const char *riscv_cpu_get_trap_name(target_ulong cause, bool async)
288 {
289     if (async) {
290         return (cause < ARRAY_SIZE(riscv_intr_names)) ?
291                riscv_intr_names[cause] : "(unknown)";
292     } else {
293         return (cause < ARRAY_SIZE(riscv_excp_names)) ?
294                riscv_excp_names[cause] : "(unknown)";
295     }
296 }
297 
298 void riscv_cpu_set_misa(CPURISCVState *env, RISCVMXL mxl, uint32_t ext)
299 {
300     env->misa_mxl_max = env->misa_mxl = mxl;
301     env->misa_ext_mask = env->misa_ext = ext;
302 }
303 
304 #ifndef CONFIG_USER_ONLY
305 static uint8_t satp_mode_from_str(const char *satp_mode_str)
306 {
307     if (!strncmp(satp_mode_str, "mbare", 5)) {
308         return VM_1_10_MBARE;
309     }
310 
311     if (!strncmp(satp_mode_str, "sv32", 4)) {
312         return VM_1_10_SV32;
313     }
314 
315     if (!strncmp(satp_mode_str, "sv39", 4)) {
316         return VM_1_10_SV39;
317     }
318 
319     if (!strncmp(satp_mode_str, "sv48", 4)) {
320         return VM_1_10_SV48;
321     }
322 
323     if (!strncmp(satp_mode_str, "sv57", 4)) {
324         return VM_1_10_SV57;
325     }
326 
327     if (!strncmp(satp_mode_str, "sv64", 4)) {
328         return VM_1_10_SV64;
329     }
330 
331     g_assert_not_reached();
332 }
333 
334 uint8_t satp_mode_max_from_map(uint32_t map)
335 {
336     /*
337      * 'map = 0' will make us return (31 - 32), which C will
338      * happily overflow to UINT_MAX. There's no good result to
339      * return if 'map = 0' (e.g. returning 0 will be ambiguous
340      * with the result for 'map = 1').
341      *
342      * Assert out if map = 0. Callers will have to deal with
343      * it outside of this function.
344      */
345     g_assert(map > 0);
346 
347     /* map here has at least one bit set, so no problem with clz */
348     return 31 - __builtin_clz(map);
349 }
350 
351 const char *satp_mode_str(uint8_t satp_mode, bool is_32_bit)
352 {
353     if (is_32_bit) {
354         switch (satp_mode) {
355         case VM_1_10_SV32:
356             return "sv32";
357         case VM_1_10_MBARE:
358             return "none";
359         }
360     } else {
361         switch (satp_mode) {
362         case VM_1_10_SV64:
363             return "sv64";
364         case VM_1_10_SV57:
365             return "sv57";
366         case VM_1_10_SV48:
367             return "sv48";
368         case VM_1_10_SV39:
369             return "sv39";
370         case VM_1_10_MBARE:
371             return "none";
372         }
373     }
374 
375     g_assert_not_reached();
376 }
377 
378 static void set_satp_mode_max_supported(RISCVCPU *cpu,
379                                         uint8_t satp_mode)
380 {
381     bool rv32 = riscv_cpu_mxl(&cpu->env) == MXL_RV32;
382     const bool *valid_vm = rv32 ? valid_vm_1_10_32 : valid_vm_1_10_64;
383 
384     for (int i = 0; i <= satp_mode; ++i) {
385         if (valid_vm[i]) {
386             cpu->cfg.satp_mode.supported |= (1 << i);
387         }
388     }
389 }
390 
391 /* Set the satp mode to the max supported */
392 static void set_satp_mode_default_map(RISCVCPU *cpu)
393 {
394     /*
395      * Bare CPUs do not default to the max available.
396      * Users must set a valid satp_mode in the command
397      * line.
398      */
399     if (object_dynamic_cast(OBJECT(cpu), TYPE_RISCV_BARE_CPU) != NULL) {
400         warn_report("No satp mode set. Defaulting to 'bare'");
401         cpu->cfg.satp_mode.map = (1 << VM_1_10_MBARE);
402         return;
403     }
404 
405     cpu->cfg.satp_mode.map = cpu->cfg.satp_mode.supported;
406 }
407 #endif
408 
409 static void riscv_any_cpu_init(Object *obj)
410 {
411     RISCVCPU *cpu = RISCV_CPU(obj);
412     CPURISCVState *env = &cpu->env;
413 #if defined(TARGET_RISCV32)
414     riscv_cpu_set_misa(env, MXL_RV32, RVI | RVM | RVA | RVF | RVD | RVC | RVU);
415 #elif defined(TARGET_RISCV64)
416     riscv_cpu_set_misa(env, MXL_RV64, RVI | RVM | RVA | RVF | RVD | RVC | RVU);
417 #endif
418 
419 #ifndef CONFIG_USER_ONLY
420     set_satp_mode_max_supported(RISCV_CPU(obj),
421         riscv_cpu_mxl(&RISCV_CPU(obj)->env) == MXL_RV32 ?
422         VM_1_10_SV32 : VM_1_10_SV57);
423 #endif
424 
425     env->priv_ver = PRIV_VERSION_LATEST;
426 
427     /* inherited from parent obj via riscv_cpu_init() */
428     cpu->cfg.ext_zifencei = true;
429     cpu->cfg.ext_zicsr = true;
430     cpu->cfg.mmu = true;
431     cpu->cfg.pmp = true;
432 }
433 
434 static void riscv_max_cpu_init(Object *obj)
435 {
436     RISCVCPU *cpu = RISCV_CPU(obj);
437     CPURISCVState *env = &cpu->env;
438     RISCVMXL mlx = MXL_RV64;
439 
440     cpu->cfg.mmu = true;
441 
442 #ifdef TARGET_RISCV32
443     mlx = MXL_RV32;
444 #endif
445     riscv_cpu_set_misa(env, mlx, 0);
446     env->priv_ver = PRIV_VERSION_LATEST;
447 #ifndef CONFIG_USER_ONLY
448     set_satp_mode_max_supported(RISCV_CPU(obj), mlx == MXL_RV32 ?
449                                 VM_1_10_SV32 : VM_1_10_SV57);
450 #endif
451 }
452 
453 #if defined(TARGET_RISCV64)
454 static void rv64_base_cpu_init(Object *obj)
455 {
456     RISCVCPU *cpu = RISCV_CPU(obj);
457     CPURISCVState *env = &cpu->env;
458 
459     cpu->cfg.mmu = true;
460 
461     /* We set this in the realise function */
462     riscv_cpu_set_misa(env, MXL_RV64, 0);
463     /* Set latest version of privileged specification */
464     env->priv_ver = PRIV_VERSION_LATEST;
465 #ifndef CONFIG_USER_ONLY
466     set_satp_mode_max_supported(RISCV_CPU(obj), VM_1_10_SV57);
467 #endif
468 }
469 
470 static void rv64_sifive_u_cpu_init(Object *obj)
471 {
472     RISCVCPU *cpu = RISCV_CPU(obj);
473     CPURISCVState *env = &cpu->env;
474     riscv_cpu_set_misa(env, MXL_RV64,
475                        RVI | RVM | RVA | RVF | RVD | RVC | RVS | RVU);
476     env->priv_ver = PRIV_VERSION_1_10_0;
477 #ifndef CONFIG_USER_ONLY
478     set_satp_mode_max_supported(RISCV_CPU(obj), VM_1_10_SV39);
479 #endif
480 
481     /* inherited from parent obj via riscv_cpu_init() */
482     cpu->cfg.ext_zifencei = true;
483     cpu->cfg.ext_zicsr = true;
484     cpu->cfg.mmu = true;
485     cpu->cfg.pmp = true;
486 }
487 
488 static void rv64_sifive_e_cpu_init(Object *obj)
489 {
490     CPURISCVState *env = &RISCV_CPU(obj)->env;
491     RISCVCPU *cpu = RISCV_CPU(obj);
492 
493     riscv_cpu_set_misa(env, MXL_RV64, RVI | RVM | RVA | RVC | RVU);
494     env->priv_ver = PRIV_VERSION_1_10_0;
495 #ifndef CONFIG_USER_ONLY
496     set_satp_mode_max_supported(cpu, VM_1_10_MBARE);
497 #endif
498 
499     /* inherited from parent obj via riscv_cpu_init() */
500     cpu->cfg.ext_zifencei = true;
501     cpu->cfg.ext_zicsr = true;
502     cpu->cfg.pmp = true;
503 }
504 
505 static void rv64_thead_c906_cpu_init(Object *obj)
506 {
507     CPURISCVState *env = &RISCV_CPU(obj)->env;
508     RISCVCPU *cpu = RISCV_CPU(obj);
509 
510     riscv_cpu_set_misa(env, MXL_RV64, RVG | RVC | RVS | RVU);
511     env->priv_ver = PRIV_VERSION_1_11_0;
512 
513     cpu->cfg.ext_zfa = true;
514     cpu->cfg.ext_zfh = true;
515     cpu->cfg.mmu = true;
516     cpu->cfg.ext_xtheadba = true;
517     cpu->cfg.ext_xtheadbb = true;
518     cpu->cfg.ext_xtheadbs = true;
519     cpu->cfg.ext_xtheadcmo = true;
520     cpu->cfg.ext_xtheadcondmov = true;
521     cpu->cfg.ext_xtheadfmemidx = true;
522     cpu->cfg.ext_xtheadmac = true;
523     cpu->cfg.ext_xtheadmemidx = true;
524     cpu->cfg.ext_xtheadmempair = true;
525     cpu->cfg.ext_xtheadsync = true;
526 
527     cpu->cfg.mvendorid = THEAD_VENDOR_ID;
528 #ifndef CONFIG_USER_ONLY
529     set_satp_mode_max_supported(cpu, VM_1_10_SV39);
530 #endif
531 
532     /* inherited from parent obj via riscv_cpu_init() */
533     cpu->cfg.pmp = true;
534 }
535 
536 static void rv64_veyron_v1_cpu_init(Object *obj)
537 {
538     CPURISCVState *env = &RISCV_CPU(obj)->env;
539     RISCVCPU *cpu = RISCV_CPU(obj);
540 
541     riscv_cpu_set_misa(env, MXL_RV64, RVG | RVC | RVS | RVU | RVH);
542     env->priv_ver = PRIV_VERSION_1_12_0;
543 
544     /* Enable ISA extensions */
545     cpu->cfg.mmu = true;
546     cpu->cfg.ext_zifencei = true;
547     cpu->cfg.ext_zicsr = true;
548     cpu->cfg.pmp = true;
549     cpu->cfg.ext_zicbom = true;
550     cpu->cfg.cbom_blocksize = 64;
551     cpu->cfg.cboz_blocksize = 64;
552     cpu->cfg.ext_zicboz = true;
553     cpu->cfg.ext_smaia = true;
554     cpu->cfg.ext_ssaia = true;
555     cpu->cfg.ext_sscofpmf = true;
556     cpu->cfg.ext_sstc = true;
557     cpu->cfg.ext_svinval = true;
558     cpu->cfg.ext_svnapot = true;
559     cpu->cfg.ext_svpbmt = true;
560     cpu->cfg.ext_smstateen = true;
561     cpu->cfg.ext_zba = true;
562     cpu->cfg.ext_zbb = true;
563     cpu->cfg.ext_zbc = true;
564     cpu->cfg.ext_zbs = true;
565     cpu->cfg.ext_XVentanaCondOps = true;
566 
567     cpu->cfg.mvendorid = VEYRON_V1_MVENDORID;
568     cpu->cfg.marchid = VEYRON_V1_MARCHID;
569     cpu->cfg.mimpid = VEYRON_V1_MIMPID;
570 
571 #ifndef CONFIG_USER_ONLY
572     set_satp_mode_max_supported(cpu, VM_1_10_SV48);
573 #endif
574 }
575 
576 static void rv128_base_cpu_init(Object *obj)
577 {
578     RISCVCPU *cpu = RISCV_CPU(obj);
579     CPURISCVState *env = &cpu->env;
580 
581     if (qemu_tcg_mttcg_enabled()) {
582         /* Missing 128-bit aligned atomics */
583         error_report("128-bit RISC-V currently does not work with Multi "
584                      "Threaded TCG. Please use: -accel tcg,thread=single");
585         exit(EXIT_FAILURE);
586     }
587 
588     cpu->cfg.mmu = true;
589 
590     /* We set this in the realise function */
591     riscv_cpu_set_misa(env, MXL_RV128, 0);
592     /* Set latest version of privileged specification */
593     env->priv_ver = PRIV_VERSION_LATEST;
594 #ifndef CONFIG_USER_ONLY
595     set_satp_mode_max_supported(RISCV_CPU(obj), VM_1_10_SV57);
596 #endif
597 }
598 
599 static void rv64i_bare_cpu_init(Object *obj)
600 {
601     CPURISCVState *env = &RISCV_CPU(obj)->env;
602     riscv_cpu_set_misa(env, MXL_RV64, RVI);
603 
604     /* Remove the defaults from the parent class */
605     RISCV_CPU(obj)->cfg.ext_zicntr = false;
606     RISCV_CPU(obj)->cfg.ext_zihpm = false;
607 
608     /* Set to QEMU's first supported priv version */
609     env->priv_ver = PRIV_VERSION_1_10_0;
610 
611     /*
612      * Support all available satp_mode settings. The default
613      * value will be set to MBARE if the user doesn't set
614      * satp_mode manually (see set_satp_mode_default()).
615      */
616 #ifndef CONFIG_USER_ONLY
617     set_satp_mode_max_supported(RISCV_CPU(obj), VM_1_10_SV64);
618 #endif
619 }
620 #else
621 static void rv32_base_cpu_init(Object *obj)
622 {
623     RISCVCPU *cpu = RISCV_CPU(obj);
624     CPURISCVState *env = &cpu->env;
625 
626     cpu->cfg.mmu = true;
627 
628     /* We set this in the realise function */
629     riscv_cpu_set_misa(env, MXL_RV32, 0);
630     /* Set latest version of privileged specification */
631     env->priv_ver = PRIV_VERSION_LATEST;
632 #ifndef CONFIG_USER_ONLY
633     set_satp_mode_max_supported(RISCV_CPU(obj), VM_1_10_SV32);
634 #endif
635 }
636 
637 static void rv32_sifive_u_cpu_init(Object *obj)
638 {
639     RISCVCPU *cpu = RISCV_CPU(obj);
640     CPURISCVState *env = &cpu->env;
641     riscv_cpu_set_misa(env, MXL_RV32,
642                        RVI | RVM | RVA | RVF | RVD | RVC | RVS | RVU);
643     env->priv_ver = PRIV_VERSION_1_10_0;
644 #ifndef CONFIG_USER_ONLY
645     set_satp_mode_max_supported(RISCV_CPU(obj), VM_1_10_SV32);
646 #endif
647 
648     /* inherited from parent obj via riscv_cpu_init() */
649     cpu->cfg.ext_zifencei = true;
650     cpu->cfg.ext_zicsr = true;
651     cpu->cfg.mmu = true;
652     cpu->cfg.pmp = true;
653 }
654 
655 static void rv32_sifive_e_cpu_init(Object *obj)
656 {
657     CPURISCVState *env = &RISCV_CPU(obj)->env;
658     RISCVCPU *cpu = RISCV_CPU(obj);
659 
660     riscv_cpu_set_misa(env, MXL_RV32, RVI | RVM | RVA | RVC | RVU);
661     env->priv_ver = PRIV_VERSION_1_10_0;
662 #ifndef CONFIG_USER_ONLY
663     set_satp_mode_max_supported(cpu, VM_1_10_MBARE);
664 #endif
665 
666     /* inherited from parent obj via riscv_cpu_init() */
667     cpu->cfg.ext_zifencei = true;
668     cpu->cfg.ext_zicsr = true;
669     cpu->cfg.pmp = true;
670 }
671 
672 static void rv32_ibex_cpu_init(Object *obj)
673 {
674     CPURISCVState *env = &RISCV_CPU(obj)->env;
675     RISCVCPU *cpu = RISCV_CPU(obj);
676 
677     riscv_cpu_set_misa(env, MXL_RV32, RVI | RVM | RVC | RVU);
678     env->priv_ver = PRIV_VERSION_1_12_0;
679 #ifndef CONFIG_USER_ONLY
680     set_satp_mode_max_supported(cpu, VM_1_10_MBARE);
681 #endif
682     /* inherited from parent obj via riscv_cpu_init() */
683     cpu->cfg.ext_zifencei = true;
684     cpu->cfg.ext_zicsr = true;
685     cpu->cfg.pmp = true;
686     cpu->cfg.ext_smepmp = true;
687 }
688 
689 static void rv32_imafcu_nommu_cpu_init(Object *obj)
690 {
691     CPURISCVState *env = &RISCV_CPU(obj)->env;
692     RISCVCPU *cpu = RISCV_CPU(obj);
693 
694     riscv_cpu_set_misa(env, MXL_RV32, RVI | RVM | RVA | RVF | RVC | RVU);
695     env->priv_ver = PRIV_VERSION_1_10_0;
696 #ifndef CONFIG_USER_ONLY
697     set_satp_mode_max_supported(cpu, VM_1_10_MBARE);
698 #endif
699 
700     /* inherited from parent obj via riscv_cpu_init() */
701     cpu->cfg.ext_zifencei = true;
702     cpu->cfg.ext_zicsr = true;
703     cpu->cfg.pmp = true;
704 }
705 #endif
706 
707 static ObjectClass *riscv_cpu_class_by_name(const char *cpu_model)
708 {
709     ObjectClass *oc;
710     char *typename;
711     char **cpuname;
712 
713     cpuname = g_strsplit(cpu_model, ",", 1);
714     typename = g_strdup_printf(RISCV_CPU_TYPE_NAME("%s"), cpuname[0]);
715     oc = object_class_by_name(typename);
716     g_strfreev(cpuname);
717     g_free(typename);
718 
719     return oc;
720 }
721 
722 char *riscv_cpu_get_name(RISCVCPU *cpu)
723 {
724     RISCVCPUClass *rcc = RISCV_CPU_GET_CLASS(cpu);
725     const char *typename = object_class_get_name(OBJECT_CLASS(rcc));
726 
727     g_assert(g_str_has_suffix(typename, RISCV_CPU_TYPE_SUFFIX));
728 
729     return cpu_model_from_type(typename);
730 }
731 
732 static void riscv_cpu_dump_state(CPUState *cs, FILE *f, int flags)
733 {
734     RISCVCPU *cpu = RISCV_CPU(cs);
735     CPURISCVState *env = &cpu->env;
736     int i, j;
737     uint8_t *p;
738 
739 #if !defined(CONFIG_USER_ONLY)
740     if (riscv_has_ext(env, RVH)) {
741         qemu_fprintf(f, " %s %d\n", "V      =  ", env->virt_enabled);
742     }
743 #endif
744     qemu_fprintf(f, " %s " TARGET_FMT_lx "\n", "pc      ", env->pc);
745 #ifndef CONFIG_USER_ONLY
746     {
747         static const int dump_csrs[] = {
748             CSR_MHARTID,
749             CSR_MSTATUS,
750             CSR_MSTATUSH,
751             /*
752              * CSR_SSTATUS is intentionally omitted here as its value
753              * can be figured out by looking at CSR_MSTATUS
754              */
755             CSR_HSTATUS,
756             CSR_VSSTATUS,
757             CSR_MIP,
758             CSR_MIE,
759             CSR_MIDELEG,
760             CSR_HIDELEG,
761             CSR_MEDELEG,
762             CSR_HEDELEG,
763             CSR_MTVEC,
764             CSR_STVEC,
765             CSR_VSTVEC,
766             CSR_MEPC,
767             CSR_SEPC,
768             CSR_VSEPC,
769             CSR_MCAUSE,
770             CSR_SCAUSE,
771             CSR_VSCAUSE,
772             CSR_MTVAL,
773             CSR_STVAL,
774             CSR_HTVAL,
775             CSR_MTVAL2,
776             CSR_MSCRATCH,
777             CSR_SSCRATCH,
778             CSR_SATP,
779             CSR_MMTE,
780             CSR_UPMBASE,
781             CSR_UPMMASK,
782             CSR_SPMBASE,
783             CSR_SPMMASK,
784             CSR_MPMBASE,
785             CSR_MPMMASK,
786         };
787 
788         for (i = 0; i < ARRAY_SIZE(dump_csrs); ++i) {
789             int csrno = dump_csrs[i];
790             target_ulong val = 0;
791             RISCVException res = riscv_csrrw_debug(env, csrno, &val, 0, 0);
792 
793             /*
794              * Rely on the smode, hmode, etc, predicates within csr.c
795              * to do the filtering of the registers that are present.
796              */
797             if (res == RISCV_EXCP_NONE) {
798                 qemu_fprintf(f, " %-8s " TARGET_FMT_lx "\n",
799                              csr_ops[csrno].name, val);
800             }
801         }
802     }
803 #endif
804 
805     for (i = 0; i < 32; i++) {
806         qemu_fprintf(f, " %-8s " TARGET_FMT_lx,
807                      riscv_int_regnames[i], env->gpr[i]);
808         if ((i & 3) == 3) {
809             qemu_fprintf(f, "\n");
810         }
811     }
812     if (flags & CPU_DUMP_FPU) {
813         for (i = 0; i < 32; i++) {
814             qemu_fprintf(f, " %-8s %016" PRIx64,
815                          riscv_fpr_regnames[i], env->fpr[i]);
816             if ((i & 3) == 3) {
817                 qemu_fprintf(f, "\n");
818             }
819         }
820     }
821     if (riscv_has_ext(env, RVV) && (flags & CPU_DUMP_VPU)) {
822         static const int dump_rvv_csrs[] = {
823                     CSR_VSTART,
824                     CSR_VXSAT,
825                     CSR_VXRM,
826                     CSR_VCSR,
827                     CSR_VL,
828                     CSR_VTYPE,
829                     CSR_VLENB,
830                 };
831         for (i = 0; i < ARRAY_SIZE(dump_rvv_csrs); ++i) {
832             int csrno = dump_rvv_csrs[i];
833             target_ulong val = 0;
834             RISCVException res = riscv_csrrw_debug(env, csrno, &val, 0, 0);
835 
836             /*
837              * Rely on the smode, hmode, etc, predicates within csr.c
838              * to do the filtering of the registers that are present.
839              */
840             if (res == RISCV_EXCP_NONE) {
841                 qemu_fprintf(f, " %-8s " TARGET_FMT_lx "\n",
842                              csr_ops[csrno].name, val);
843             }
844         }
845         uint16_t vlenb = cpu->cfg.vlen >> 3;
846 
847         for (i = 0; i < 32; i++) {
848             qemu_fprintf(f, " %-8s ", riscv_rvv_regnames[i]);
849             p = (uint8_t *)env->vreg;
850             for (j = vlenb - 1 ; j >= 0; j--) {
851                 qemu_fprintf(f, "%02x", *(p + i * vlenb + BYTE(j)));
852             }
853             qemu_fprintf(f, "\n");
854         }
855     }
856 }
857 
858 static void riscv_cpu_set_pc(CPUState *cs, vaddr value)
859 {
860     RISCVCPU *cpu = RISCV_CPU(cs);
861     CPURISCVState *env = &cpu->env;
862 
863     if (env->xl == MXL_RV32) {
864         env->pc = (int32_t)value;
865     } else {
866         env->pc = value;
867     }
868 }
869 
870 static vaddr riscv_cpu_get_pc(CPUState *cs)
871 {
872     RISCVCPU *cpu = RISCV_CPU(cs);
873     CPURISCVState *env = &cpu->env;
874 
875     /* Match cpu_get_tb_cpu_state. */
876     if (env->xl == MXL_RV32) {
877         return env->pc & UINT32_MAX;
878     }
879     return env->pc;
880 }
881 
882 static bool riscv_cpu_has_work(CPUState *cs)
883 {
884 #ifndef CONFIG_USER_ONLY
885     RISCVCPU *cpu = RISCV_CPU(cs);
886     CPURISCVState *env = &cpu->env;
887     /*
888      * Definition of the WFI instruction requires it to ignore the privilege
889      * mode and delegation registers, but respect individual enables
890      */
891     return riscv_cpu_all_pending(env) != 0 ||
892         riscv_cpu_sirq_pending(env) != RISCV_EXCP_NONE ||
893         riscv_cpu_vsirq_pending(env) != RISCV_EXCP_NONE;
894 #else
895     return true;
896 #endif
897 }
898 
899 static int riscv_cpu_mmu_index(CPUState *cs, bool ifetch)
900 {
901     return riscv_env_mmu_index(cpu_env(cs), ifetch);
902 }
903 
904 static void riscv_cpu_reset_hold(Object *obj)
905 {
906 #ifndef CONFIG_USER_ONLY
907     uint8_t iprio;
908     int i, irq, rdzero;
909 #endif
910     CPUState *cs = CPU(obj);
911     RISCVCPU *cpu = RISCV_CPU(cs);
912     RISCVCPUClass *mcc = RISCV_CPU_GET_CLASS(cpu);
913     CPURISCVState *env = &cpu->env;
914 
915     if (mcc->parent_phases.hold) {
916         mcc->parent_phases.hold(obj);
917     }
918 #ifndef CONFIG_USER_ONLY
919     env->misa_mxl = env->misa_mxl_max;
920     env->priv = PRV_M;
921     env->mstatus &= ~(MSTATUS_MIE | MSTATUS_MPRV);
922     if (env->misa_mxl > MXL_RV32) {
923         /*
924          * The reset status of SXL/UXL is undefined, but mstatus is WARL
925          * and we must ensure that the value after init is valid for read.
926          */
927         env->mstatus = set_field(env->mstatus, MSTATUS64_SXL, env->misa_mxl);
928         env->mstatus = set_field(env->mstatus, MSTATUS64_UXL, env->misa_mxl);
929         if (riscv_has_ext(env, RVH)) {
930             env->vsstatus = set_field(env->vsstatus,
931                                       MSTATUS64_SXL, env->misa_mxl);
932             env->vsstatus = set_field(env->vsstatus,
933                                       MSTATUS64_UXL, env->misa_mxl);
934             env->mstatus_hs = set_field(env->mstatus_hs,
935                                         MSTATUS64_SXL, env->misa_mxl);
936             env->mstatus_hs = set_field(env->mstatus_hs,
937                                         MSTATUS64_UXL, env->misa_mxl);
938         }
939     }
940     env->mcause = 0;
941     env->miclaim = MIP_SGEIP;
942     env->pc = env->resetvec;
943     env->bins = 0;
944     env->two_stage_lookup = false;
945 
946     env->menvcfg = (cpu->cfg.ext_svpbmt ? MENVCFG_PBMTE : 0) |
947                    (cpu->cfg.ext_svadu ? MENVCFG_ADUE : 0);
948     env->henvcfg = (cpu->cfg.ext_svpbmt ? HENVCFG_PBMTE : 0) |
949                    (cpu->cfg.ext_svadu ? HENVCFG_ADUE : 0);
950 
951     /* Initialized default priorities of local interrupts. */
952     for (i = 0; i < ARRAY_SIZE(env->miprio); i++) {
953         iprio = riscv_cpu_default_priority(i);
954         env->miprio[i] = (i == IRQ_M_EXT) ? 0 : iprio;
955         env->siprio[i] = (i == IRQ_S_EXT) ? 0 : iprio;
956         env->hviprio[i] = 0;
957     }
958     i = 0;
959     while (!riscv_cpu_hviprio_index2irq(i, &irq, &rdzero)) {
960         if (!rdzero) {
961             env->hviprio[irq] = env->miprio[irq];
962         }
963         i++;
964     }
965     /* mmte is supposed to have pm.current hardwired to 1 */
966     env->mmte |= (EXT_STATUS_INITIAL | MMTE_M_PM_CURRENT);
967 
968     /*
969      * Bits 10, 6, 2 and 12 of mideleg are read only 1 when the Hypervisor
970      * extension is enabled.
971      */
972     if (riscv_has_ext(env, RVH)) {
973         env->mideleg |= HS_MODE_INTERRUPTS;
974     }
975 
976     /*
977      * Clear mseccfg and unlock all the PMP entries upon reset.
978      * This is allowed as per the priv and smepmp specifications
979      * and is needed to clear stale entries across reboots.
980      */
981     if (riscv_cpu_cfg(env)->ext_smepmp) {
982         env->mseccfg = 0;
983     }
984 
985     pmp_unlock_entries(env);
986 #endif
987     env->xl = riscv_cpu_mxl(env);
988     riscv_cpu_update_mask(env);
989     cs->exception_index = RISCV_EXCP_NONE;
990     env->load_res = -1;
991     set_default_nan_mode(1, &env->fp_status);
992 
993 #ifndef CONFIG_USER_ONLY
994     if (cpu->cfg.debug) {
995         riscv_trigger_reset_hold(env);
996     }
997 
998     if (kvm_enabled()) {
999         kvm_riscv_reset_vcpu(cpu);
1000     }
1001 #endif
1002 }
1003 
1004 static void riscv_cpu_disas_set_info(CPUState *s, disassemble_info *info)
1005 {
1006     RISCVCPU *cpu = RISCV_CPU(s);
1007     CPURISCVState *env = &cpu->env;
1008     info->target_info = &cpu->cfg;
1009 
1010     switch (env->xl) {
1011     case MXL_RV32:
1012         info->print_insn = print_insn_riscv32;
1013         break;
1014     case MXL_RV64:
1015         info->print_insn = print_insn_riscv64;
1016         break;
1017     case MXL_RV128:
1018         info->print_insn = print_insn_riscv128;
1019         break;
1020     default:
1021         g_assert_not_reached();
1022     }
1023 }
1024 
1025 #ifndef CONFIG_USER_ONLY
1026 static void riscv_cpu_satp_mode_finalize(RISCVCPU *cpu, Error **errp)
1027 {
1028     bool rv32 = riscv_cpu_is_32bit(cpu);
1029     uint8_t satp_mode_map_max, satp_mode_supported_max;
1030 
1031     /* The CPU wants the OS to decide which satp mode to use */
1032     if (cpu->cfg.satp_mode.supported == 0) {
1033         return;
1034     }
1035 
1036     satp_mode_supported_max =
1037                     satp_mode_max_from_map(cpu->cfg.satp_mode.supported);
1038 
1039     if (cpu->cfg.satp_mode.map == 0) {
1040         if (cpu->cfg.satp_mode.init == 0) {
1041             /* If unset by the user, we fallback to the default satp mode. */
1042             set_satp_mode_default_map(cpu);
1043         } else {
1044             /*
1045              * Find the lowest level that was disabled and then enable the
1046              * first valid level below which can be found in
1047              * valid_vm_1_10_32/64.
1048              */
1049             for (int i = 1; i < 16; ++i) {
1050                 if ((cpu->cfg.satp_mode.init & (1 << i)) &&
1051                     (cpu->cfg.satp_mode.supported & (1 << i))) {
1052                     for (int j = i - 1; j >= 0; --j) {
1053                         if (cpu->cfg.satp_mode.supported & (1 << j)) {
1054                             cpu->cfg.satp_mode.map |= (1 << j);
1055                             break;
1056                         }
1057                     }
1058                     break;
1059                 }
1060             }
1061         }
1062     }
1063 
1064     satp_mode_map_max = satp_mode_max_from_map(cpu->cfg.satp_mode.map);
1065 
1066     /* Make sure the user asked for a supported configuration (HW and qemu) */
1067     if (satp_mode_map_max > satp_mode_supported_max) {
1068         error_setg(errp, "satp_mode %s is higher than hw max capability %s",
1069                    satp_mode_str(satp_mode_map_max, rv32),
1070                    satp_mode_str(satp_mode_supported_max, rv32));
1071         return;
1072     }
1073 
1074     /*
1075      * Make sure the user did not ask for an invalid configuration as per
1076      * the specification.
1077      */
1078     if (!rv32) {
1079         for (int i = satp_mode_map_max - 1; i >= 0; --i) {
1080             if (!(cpu->cfg.satp_mode.map & (1 << i)) &&
1081                 (cpu->cfg.satp_mode.init & (1 << i)) &&
1082                 (cpu->cfg.satp_mode.supported & (1 << i))) {
1083                 error_setg(errp, "cannot disable %s satp mode if %s "
1084                            "is enabled", satp_mode_str(i, false),
1085                            satp_mode_str(satp_mode_map_max, false));
1086                 return;
1087             }
1088         }
1089     }
1090 
1091     /* Finally expand the map so that all valid modes are set */
1092     for (int i = satp_mode_map_max - 1; i >= 0; --i) {
1093         if (cpu->cfg.satp_mode.supported & (1 << i)) {
1094             cpu->cfg.satp_mode.map |= (1 << i);
1095         }
1096     }
1097 }
1098 #endif
1099 
1100 void riscv_cpu_finalize_features(RISCVCPU *cpu, Error **errp)
1101 {
1102     Error *local_err = NULL;
1103 
1104 #ifndef CONFIG_USER_ONLY
1105     riscv_cpu_satp_mode_finalize(cpu, &local_err);
1106     if (local_err != NULL) {
1107         error_propagate(errp, local_err);
1108         return;
1109     }
1110 #endif
1111 
1112     /*
1113      * KVM accel does not have a specialized finalize()
1114      * callback because its extensions are validated
1115      * in the get()/set() callbacks of each property.
1116      */
1117     if (tcg_enabled()) {
1118         riscv_tcg_cpu_finalize_features(cpu, &local_err);
1119         if (local_err != NULL) {
1120             error_propagate(errp, local_err);
1121             return;
1122         }
1123     }
1124 }
1125 
1126 static void riscv_cpu_realize(DeviceState *dev, Error **errp)
1127 {
1128     CPUState *cs = CPU(dev);
1129     RISCVCPU *cpu = RISCV_CPU(dev);
1130     RISCVCPUClass *mcc = RISCV_CPU_GET_CLASS(dev);
1131     Error *local_err = NULL;
1132 
1133     if (object_dynamic_cast(OBJECT(dev), TYPE_RISCV_CPU_ANY) != NULL) {
1134         warn_report("The 'any' CPU is deprecated and will be "
1135                     "removed in the future.");
1136     }
1137 
1138     cpu_exec_realizefn(cs, &local_err);
1139     if (local_err != NULL) {
1140         error_propagate(errp, local_err);
1141         return;
1142     }
1143 
1144     riscv_cpu_finalize_features(cpu, &local_err);
1145     if (local_err != NULL) {
1146         error_propagate(errp, local_err);
1147         return;
1148     }
1149 
1150     riscv_cpu_register_gdb_regs_for_features(cs);
1151 
1152 #ifndef CONFIG_USER_ONLY
1153     if (cpu->cfg.debug) {
1154         riscv_trigger_realize(&cpu->env);
1155     }
1156 #endif
1157 
1158     qemu_init_vcpu(cs);
1159     cpu_reset(cs);
1160 
1161     mcc->parent_realize(dev, errp);
1162 }
1163 
1164 bool riscv_cpu_accelerator_compatible(RISCVCPU *cpu)
1165 {
1166     if (tcg_enabled()) {
1167         return riscv_cpu_tcg_compatible(cpu);
1168     }
1169 
1170     return true;
1171 }
1172 
1173 #ifndef CONFIG_USER_ONLY
1174 static void cpu_riscv_get_satp(Object *obj, Visitor *v, const char *name,
1175                                void *opaque, Error **errp)
1176 {
1177     RISCVSATPMap *satp_map = opaque;
1178     uint8_t satp = satp_mode_from_str(name);
1179     bool value;
1180 
1181     value = satp_map->map & (1 << satp);
1182 
1183     visit_type_bool(v, name, &value, errp);
1184 }
1185 
1186 static void cpu_riscv_set_satp(Object *obj, Visitor *v, const char *name,
1187                                void *opaque, Error **errp)
1188 {
1189     RISCVSATPMap *satp_map = opaque;
1190     uint8_t satp = satp_mode_from_str(name);
1191     bool value;
1192 
1193     if (!visit_type_bool(v, name, &value, errp)) {
1194         return;
1195     }
1196 
1197     satp_map->map = deposit32(satp_map->map, satp, 1, value);
1198     satp_map->init |= 1 << satp;
1199 }
1200 
1201 void riscv_add_satp_mode_properties(Object *obj)
1202 {
1203     RISCVCPU *cpu = RISCV_CPU(obj);
1204 
1205     if (cpu->env.misa_mxl == MXL_RV32) {
1206         object_property_add(obj, "sv32", "bool", cpu_riscv_get_satp,
1207                             cpu_riscv_set_satp, NULL, &cpu->cfg.satp_mode);
1208     } else {
1209         object_property_add(obj, "sv39", "bool", cpu_riscv_get_satp,
1210                             cpu_riscv_set_satp, NULL, &cpu->cfg.satp_mode);
1211         object_property_add(obj, "sv48", "bool", cpu_riscv_get_satp,
1212                             cpu_riscv_set_satp, NULL, &cpu->cfg.satp_mode);
1213         object_property_add(obj, "sv57", "bool", cpu_riscv_get_satp,
1214                             cpu_riscv_set_satp, NULL, &cpu->cfg.satp_mode);
1215         object_property_add(obj, "sv64", "bool", cpu_riscv_get_satp,
1216                             cpu_riscv_set_satp, NULL, &cpu->cfg.satp_mode);
1217     }
1218 }
1219 
1220 static void riscv_cpu_set_irq(void *opaque, int irq, int level)
1221 {
1222     RISCVCPU *cpu = RISCV_CPU(opaque);
1223     CPURISCVState *env = &cpu->env;
1224 
1225     if (irq < IRQ_LOCAL_MAX) {
1226         switch (irq) {
1227         case IRQ_U_SOFT:
1228         case IRQ_S_SOFT:
1229         case IRQ_VS_SOFT:
1230         case IRQ_M_SOFT:
1231         case IRQ_U_TIMER:
1232         case IRQ_S_TIMER:
1233         case IRQ_VS_TIMER:
1234         case IRQ_M_TIMER:
1235         case IRQ_U_EXT:
1236         case IRQ_VS_EXT:
1237         case IRQ_M_EXT:
1238             if (kvm_enabled()) {
1239                 kvm_riscv_set_irq(cpu, irq, level);
1240             } else {
1241                 riscv_cpu_update_mip(env, 1 << irq, BOOL_TO_MASK(level));
1242             }
1243              break;
1244         case IRQ_S_EXT:
1245             if (kvm_enabled()) {
1246                 kvm_riscv_set_irq(cpu, irq, level);
1247             } else {
1248                 env->external_seip = level;
1249                 riscv_cpu_update_mip(env, 1 << irq,
1250                                      BOOL_TO_MASK(level | env->software_seip));
1251             }
1252             break;
1253         default:
1254             g_assert_not_reached();
1255         }
1256     } else if (irq < (IRQ_LOCAL_MAX + IRQ_LOCAL_GUEST_MAX)) {
1257         /* Require H-extension for handling guest local interrupts */
1258         if (!riscv_has_ext(env, RVH)) {
1259             g_assert_not_reached();
1260         }
1261 
1262         /* Compute bit position in HGEIP CSR */
1263         irq = irq - IRQ_LOCAL_MAX + 1;
1264         if (env->geilen < irq) {
1265             g_assert_not_reached();
1266         }
1267 
1268         /* Update HGEIP CSR */
1269         env->hgeip &= ~((target_ulong)1 << irq);
1270         if (level) {
1271             env->hgeip |= (target_ulong)1 << irq;
1272         }
1273 
1274         /* Update mip.SGEIP bit */
1275         riscv_cpu_update_mip(env, MIP_SGEIP,
1276                              BOOL_TO_MASK(!!(env->hgeie & env->hgeip)));
1277     } else {
1278         g_assert_not_reached();
1279     }
1280 }
1281 #endif /* CONFIG_USER_ONLY */
1282 
1283 static bool riscv_cpu_is_dynamic(Object *cpu_obj)
1284 {
1285     return object_dynamic_cast(cpu_obj, TYPE_RISCV_DYNAMIC_CPU) != NULL;
1286 }
1287 
1288 static void riscv_cpu_post_init(Object *obj)
1289 {
1290     accel_cpu_instance_init(CPU(obj));
1291 }
1292 
1293 static void riscv_cpu_init(Object *obj)
1294 {
1295     RISCVCPU *cpu = RISCV_CPU(obj);
1296 
1297 #ifndef CONFIG_USER_ONLY
1298     qdev_init_gpio_in(DEVICE(obj), riscv_cpu_set_irq,
1299                       IRQ_LOCAL_MAX + IRQ_LOCAL_GUEST_MAX);
1300 #endif /* CONFIG_USER_ONLY */
1301 
1302     general_user_opts = g_hash_table_new(g_str_hash, g_str_equal);
1303 
1304     /*
1305      * The timer and performance counters extensions were supported
1306      * in QEMU before they were added as discrete extensions in the
1307      * ISA. To keep compatibility we'll always default them to 'true'
1308      * for all CPUs. Each accelerator will decide what to do when
1309      * users disable them.
1310      */
1311     RISCV_CPU(obj)->cfg.ext_zicntr = true;
1312     RISCV_CPU(obj)->cfg.ext_zihpm = true;
1313 
1314     /* Default values for non-bool cpu properties */
1315     cpu->cfg.pmu_mask = MAKE_64BIT_MASK(3, 16);
1316 }
1317 
1318 typedef struct misa_ext_info {
1319     const char *name;
1320     const char *description;
1321 } MISAExtInfo;
1322 
1323 #define MISA_INFO_IDX(_bit) \
1324     __builtin_ctz(_bit)
1325 
1326 #define MISA_EXT_INFO(_bit, _propname, _descr) \
1327     [MISA_INFO_IDX(_bit)] = {.name = _propname, .description = _descr}
1328 
1329 static const MISAExtInfo misa_ext_info_arr[] = {
1330     MISA_EXT_INFO(RVA, "a", "Atomic instructions"),
1331     MISA_EXT_INFO(RVC, "c", "Compressed instructions"),
1332     MISA_EXT_INFO(RVD, "d", "Double-precision float point"),
1333     MISA_EXT_INFO(RVF, "f", "Single-precision float point"),
1334     MISA_EXT_INFO(RVI, "i", "Base integer instruction set"),
1335     MISA_EXT_INFO(RVE, "e", "Base integer instruction set (embedded)"),
1336     MISA_EXT_INFO(RVM, "m", "Integer multiplication and division"),
1337     MISA_EXT_INFO(RVS, "s", "Supervisor-level instructions"),
1338     MISA_EXT_INFO(RVU, "u", "User-level instructions"),
1339     MISA_EXT_INFO(RVH, "h", "Hypervisor"),
1340     MISA_EXT_INFO(RVJ, "x-j", "Dynamic translated languages"),
1341     MISA_EXT_INFO(RVV, "v", "Vector operations"),
1342     MISA_EXT_INFO(RVG, "g", "General purpose (IMAFD_Zicsr_Zifencei)"),
1343     MISA_EXT_INFO(RVB, "x-b", "Bit manipulation (Zba_Zbb_Zbs)")
1344 };
1345 
1346 static int riscv_validate_misa_info_idx(uint32_t bit)
1347 {
1348     int idx;
1349 
1350     /*
1351      * Our lowest valid input (RVA) is 1 and
1352      * __builtin_ctz() is UB with zero.
1353      */
1354     g_assert(bit != 0);
1355     idx = MISA_INFO_IDX(bit);
1356 
1357     g_assert(idx < ARRAY_SIZE(misa_ext_info_arr));
1358     return idx;
1359 }
1360 
1361 const char *riscv_get_misa_ext_name(uint32_t bit)
1362 {
1363     int idx = riscv_validate_misa_info_idx(bit);
1364     const char *val = misa_ext_info_arr[idx].name;
1365 
1366     g_assert(val != NULL);
1367     return val;
1368 }
1369 
1370 const char *riscv_get_misa_ext_description(uint32_t bit)
1371 {
1372     int idx = riscv_validate_misa_info_idx(bit);
1373     const char *val = misa_ext_info_arr[idx].description;
1374 
1375     g_assert(val != NULL);
1376     return val;
1377 }
1378 
1379 #define MULTI_EXT_CFG_BOOL(_name, _prop, _defval) \
1380     {.name = _name, .offset = CPU_CFG_OFFSET(_prop), \
1381      .enabled = _defval}
1382 
1383 const RISCVCPUMultiExtConfig riscv_cpu_extensions[] = {
1384     /* Defaults for standard extensions */
1385     MULTI_EXT_CFG_BOOL("sscofpmf", ext_sscofpmf, false),
1386     MULTI_EXT_CFG_BOOL("zifencei", ext_zifencei, true),
1387     MULTI_EXT_CFG_BOOL("zicsr", ext_zicsr, true),
1388     MULTI_EXT_CFG_BOOL("zihintntl", ext_zihintntl, true),
1389     MULTI_EXT_CFG_BOOL("zihintpause", ext_zihintpause, true),
1390     MULTI_EXT_CFG_BOOL("zacas", ext_zacas, false),
1391     MULTI_EXT_CFG_BOOL("zawrs", ext_zawrs, true),
1392     MULTI_EXT_CFG_BOOL("zfa", ext_zfa, true),
1393     MULTI_EXT_CFG_BOOL("zfh", ext_zfh, false),
1394     MULTI_EXT_CFG_BOOL("zfhmin", ext_zfhmin, false),
1395     MULTI_EXT_CFG_BOOL("zve32f", ext_zve32f, false),
1396     MULTI_EXT_CFG_BOOL("zve64f", ext_zve64f, false),
1397     MULTI_EXT_CFG_BOOL("zve64d", ext_zve64d, false),
1398     MULTI_EXT_CFG_BOOL("sstc", ext_sstc, true),
1399 
1400     MULTI_EXT_CFG_BOOL("smepmp", ext_smepmp, false),
1401     MULTI_EXT_CFG_BOOL("smstateen", ext_smstateen, false),
1402     MULTI_EXT_CFG_BOOL("svadu", ext_svadu, true),
1403     MULTI_EXT_CFG_BOOL("svinval", ext_svinval, false),
1404     MULTI_EXT_CFG_BOOL("svnapot", ext_svnapot, false),
1405     MULTI_EXT_CFG_BOOL("svpbmt", ext_svpbmt, false),
1406 
1407     MULTI_EXT_CFG_BOOL("zicntr", ext_zicntr, true),
1408     MULTI_EXT_CFG_BOOL("zihpm", ext_zihpm, true),
1409 
1410     MULTI_EXT_CFG_BOOL("zba", ext_zba, true),
1411     MULTI_EXT_CFG_BOOL("zbb", ext_zbb, true),
1412     MULTI_EXT_CFG_BOOL("zbc", ext_zbc, true),
1413     MULTI_EXT_CFG_BOOL("zbkb", ext_zbkb, false),
1414     MULTI_EXT_CFG_BOOL("zbkc", ext_zbkc, false),
1415     MULTI_EXT_CFG_BOOL("zbkx", ext_zbkx, false),
1416     MULTI_EXT_CFG_BOOL("zbs", ext_zbs, true),
1417     MULTI_EXT_CFG_BOOL("zk", ext_zk, false),
1418     MULTI_EXT_CFG_BOOL("zkn", ext_zkn, false),
1419     MULTI_EXT_CFG_BOOL("zknd", ext_zknd, false),
1420     MULTI_EXT_CFG_BOOL("zkne", ext_zkne, false),
1421     MULTI_EXT_CFG_BOOL("zknh", ext_zknh, false),
1422     MULTI_EXT_CFG_BOOL("zkr", ext_zkr, false),
1423     MULTI_EXT_CFG_BOOL("zks", ext_zks, false),
1424     MULTI_EXT_CFG_BOOL("zksed", ext_zksed, false),
1425     MULTI_EXT_CFG_BOOL("zksh", ext_zksh, false),
1426     MULTI_EXT_CFG_BOOL("zkt", ext_zkt, false),
1427 
1428     MULTI_EXT_CFG_BOOL("zdinx", ext_zdinx, false),
1429     MULTI_EXT_CFG_BOOL("zfinx", ext_zfinx, false),
1430     MULTI_EXT_CFG_BOOL("zhinx", ext_zhinx, false),
1431     MULTI_EXT_CFG_BOOL("zhinxmin", ext_zhinxmin, false),
1432 
1433     MULTI_EXT_CFG_BOOL("zicbom", ext_zicbom, true),
1434     MULTI_EXT_CFG_BOOL("zicbop", ext_zicbop, true),
1435     MULTI_EXT_CFG_BOOL("zicboz", ext_zicboz, true),
1436 
1437     MULTI_EXT_CFG_BOOL("zmmul", ext_zmmul, false),
1438 
1439     MULTI_EXT_CFG_BOOL("zca", ext_zca, false),
1440     MULTI_EXT_CFG_BOOL("zcb", ext_zcb, false),
1441     MULTI_EXT_CFG_BOOL("zcd", ext_zcd, false),
1442     MULTI_EXT_CFG_BOOL("zce", ext_zce, false),
1443     MULTI_EXT_CFG_BOOL("zcf", ext_zcf, false),
1444     MULTI_EXT_CFG_BOOL("zcmp", ext_zcmp, false),
1445     MULTI_EXT_CFG_BOOL("zcmt", ext_zcmt, false),
1446     MULTI_EXT_CFG_BOOL("zicond", ext_zicond, false),
1447 
1448     /* Vector cryptography extensions */
1449     MULTI_EXT_CFG_BOOL("zvbb", ext_zvbb, false),
1450     MULTI_EXT_CFG_BOOL("zvbc", ext_zvbc, false),
1451     MULTI_EXT_CFG_BOOL("zvkb", ext_zvkg, false),
1452     MULTI_EXT_CFG_BOOL("zvkg", ext_zvkg, false),
1453     MULTI_EXT_CFG_BOOL("zvkned", ext_zvkned, false),
1454     MULTI_EXT_CFG_BOOL("zvknha", ext_zvknha, false),
1455     MULTI_EXT_CFG_BOOL("zvknhb", ext_zvknhb, false),
1456     MULTI_EXT_CFG_BOOL("zvksed", ext_zvksed, false),
1457     MULTI_EXT_CFG_BOOL("zvksh", ext_zvksh, false),
1458     MULTI_EXT_CFG_BOOL("zvkt", ext_zvkt, false),
1459     MULTI_EXT_CFG_BOOL("zvkn", ext_zvkn, false),
1460     MULTI_EXT_CFG_BOOL("zvknc", ext_zvknc, false),
1461     MULTI_EXT_CFG_BOOL("zvkng", ext_zvkng, false),
1462     MULTI_EXT_CFG_BOOL("zvks", ext_zvks, false),
1463     MULTI_EXT_CFG_BOOL("zvksc", ext_zvksc, false),
1464     MULTI_EXT_CFG_BOOL("zvksg", ext_zvksg, false),
1465 
1466     DEFINE_PROP_END_OF_LIST(),
1467 };
1468 
1469 const RISCVCPUMultiExtConfig riscv_cpu_vendor_exts[] = {
1470     MULTI_EXT_CFG_BOOL("xtheadba", ext_xtheadba, false),
1471     MULTI_EXT_CFG_BOOL("xtheadbb", ext_xtheadbb, false),
1472     MULTI_EXT_CFG_BOOL("xtheadbs", ext_xtheadbs, false),
1473     MULTI_EXT_CFG_BOOL("xtheadcmo", ext_xtheadcmo, false),
1474     MULTI_EXT_CFG_BOOL("xtheadcondmov", ext_xtheadcondmov, false),
1475     MULTI_EXT_CFG_BOOL("xtheadfmemidx", ext_xtheadfmemidx, false),
1476     MULTI_EXT_CFG_BOOL("xtheadfmv", ext_xtheadfmv, false),
1477     MULTI_EXT_CFG_BOOL("xtheadmac", ext_xtheadmac, false),
1478     MULTI_EXT_CFG_BOOL("xtheadmemidx", ext_xtheadmemidx, false),
1479     MULTI_EXT_CFG_BOOL("xtheadmempair", ext_xtheadmempair, false),
1480     MULTI_EXT_CFG_BOOL("xtheadsync", ext_xtheadsync, false),
1481     MULTI_EXT_CFG_BOOL("xventanacondops", ext_XVentanaCondOps, false),
1482 
1483     DEFINE_PROP_END_OF_LIST(),
1484 };
1485 
1486 /* These are experimental so mark with 'x-' */
1487 const RISCVCPUMultiExtConfig riscv_cpu_experimental_exts[] = {
1488     MULTI_EXT_CFG_BOOL("x-smaia", ext_smaia, false),
1489     MULTI_EXT_CFG_BOOL("x-ssaia", ext_ssaia, false),
1490 
1491     MULTI_EXT_CFG_BOOL("x-zvfh", ext_zvfh, false),
1492     MULTI_EXT_CFG_BOOL("x-zvfhmin", ext_zvfhmin, false),
1493 
1494     MULTI_EXT_CFG_BOOL("x-zfbfmin", ext_zfbfmin, false),
1495     MULTI_EXT_CFG_BOOL("x-zvfbfmin", ext_zvfbfmin, false),
1496     MULTI_EXT_CFG_BOOL("x-zvfbfwma", ext_zvfbfwma, false),
1497 
1498     DEFINE_PROP_END_OF_LIST(),
1499 };
1500 
1501 const RISCVCPUMultiExtConfig riscv_cpu_named_features[] = {
1502     MULTI_EXT_CFG_BOOL("svade", svade, true),
1503     MULTI_EXT_CFG_BOOL("zic64b", zic64b, true),
1504 
1505     DEFINE_PROP_END_OF_LIST(),
1506 };
1507 
1508 /* Deprecated entries marked for future removal */
1509 const RISCVCPUMultiExtConfig riscv_cpu_deprecated_exts[] = {
1510     MULTI_EXT_CFG_BOOL("Zifencei", ext_zifencei, true),
1511     MULTI_EXT_CFG_BOOL("Zicsr", ext_zicsr, true),
1512     MULTI_EXT_CFG_BOOL("Zihintntl", ext_zihintntl, true),
1513     MULTI_EXT_CFG_BOOL("Zihintpause", ext_zihintpause, true),
1514     MULTI_EXT_CFG_BOOL("Zawrs", ext_zawrs, true),
1515     MULTI_EXT_CFG_BOOL("Zfa", ext_zfa, true),
1516     MULTI_EXT_CFG_BOOL("Zfh", ext_zfh, false),
1517     MULTI_EXT_CFG_BOOL("Zfhmin", ext_zfhmin, false),
1518     MULTI_EXT_CFG_BOOL("Zve32f", ext_zve32f, false),
1519     MULTI_EXT_CFG_BOOL("Zve64f", ext_zve64f, false),
1520     MULTI_EXT_CFG_BOOL("Zve64d", ext_zve64d, false),
1521 
1522     DEFINE_PROP_END_OF_LIST(),
1523 };
1524 
1525 static void cpu_set_prop_err(RISCVCPU *cpu, const char *propname,
1526                              Error **errp)
1527 {
1528     g_autofree char *cpuname = riscv_cpu_get_name(cpu);
1529     error_setg(errp, "CPU '%s' does not allow changing the value of '%s'",
1530                cpuname, propname);
1531 }
1532 
1533 static void prop_pmu_num_set(Object *obj, Visitor *v, const char *name,
1534                              void *opaque, Error **errp)
1535 {
1536     RISCVCPU *cpu = RISCV_CPU(obj);
1537     uint8_t pmu_num, curr_pmu_num;
1538     uint32_t pmu_mask;
1539 
1540     visit_type_uint8(v, name, &pmu_num, errp);
1541 
1542     curr_pmu_num = ctpop32(cpu->cfg.pmu_mask);
1543 
1544     if (pmu_num != curr_pmu_num && riscv_cpu_is_vendor(obj)) {
1545         cpu_set_prop_err(cpu, name, errp);
1546         error_append_hint(errp, "Current '%s' val: %u\n",
1547                           name, curr_pmu_num);
1548         return;
1549     }
1550 
1551     if (pmu_num > (RV_MAX_MHPMCOUNTERS - 3)) {
1552         error_setg(errp, "Number of counters exceeds maximum available");
1553         return;
1554     }
1555 
1556     if (pmu_num == 0) {
1557         pmu_mask = 0;
1558     } else {
1559         pmu_mask = MAKE_64BIT_MASK(3, pmu_num);
1560     }
1561 
1562     warn_report("\"pmu-num\" property is deprecated; use \"pmu-mask\"");
1563     cpu->cfg.pmu_mask = pmu_mask;
1564     cpu_option_add_user_setting("pmu-mask", pmu_mask);
1565 }
1566 
1567 static void prop_pmu_num_get(Object *obj, Visitor *v, const char *name,
1568                              void *opaque, Error **errp)
1569 {
1570     RISCVCPU *cpu = RISCV_CPU(obj);
1571     uint8_t pmu_num = ctpop32(cpu->cfg.pmu_mask);
1572 
1573     visit_type_uint8(v, name, &pmu_num, errp);
1574 }
1575 
1576 static const PropertyInfo prop_pmu_num = {
1577     .name = "pmu-num",
1578     .get = prop_pmu_num_get,
1579     .set = prop_pmu_num_set,
1580 };
1581 
1582 static void prop_pmu_mask_set(Object *obj, Visitor *v, const char *name,
1583                              void *opaque, Error **errp)
1584 {
1585     RISCVCPU *cpu = RISCV_CPU(obj);
1586     uint32_t value;
1587     uint8_t pmu_num;
1588 
1589     visit_type_uint32(v, name, &value, errp);
1590 
1591     if (value != cpu->cfg.pmu_mask && riscv_cpu_is_vendor(obj)) {
1592         cpu_set_prop_err(cpu, name, errp);
1593         error_append_hint(errp, "Current '%s' val: %x\n",
1594                           name, cpu->cfg.pmu_mask);
1595         return;
1596     }
1597 
1598     pmu_num = ctpop32(value);
1599 
1600     if (pmu_num > (RV_MAX_MHPMCOUNTERS - 3)) {
1601         error_setg(errp, "Number of counters exceeds maximum available");
1602         return;
1603     }
1604 
1605     cpu_option_add_user_setting(name, value);
1606     cpu->cfg.pmu_mask = value;
1607 }
1608 
1609 static void prop_pmu_mask_get(Object *obj, Visitor *v, const char *name,
1610                              void *opaque, Error **errp)
1611 {
1612     uint8_t pmu_mask = RISCV_CPU(obj)->cfg.pmu_mask;
1613 
1614     visit_type_uint8(v, name, &pmu_mask, errp);
1615 }
1616 
1617 static const PropertyInfo prop_pmu_mask = {
1618     .name = "pmu-mask",
1619     .get = prop_pmu_mask_get,
1620     .set = prop_pmu_mask_set,
1621 };
1622 
1623 static void prop_mmu_set(Object *obj, Visitor *v, const char *name,
1624                          void *opaque, Error **errp)
1625 {
1626     RISCVCPU *cpu = RISCV_CPU(obj);
1627     bool value;
1628 
1629     visit_type_bool(v, name, &value, errp);
1630 
1631     if (cpu->cfg.mmu != value && riscv_cpu_is_vendor(obj)) {
1632         cpu_set_prop_err(cpu, "mmu", errp);
1633         return;
1634     }
1635 
1636     cpu_option_add_user_setting(name, value);
1637     cpu->cfg.mmu = value;
1638 }
1639 
1640 static void prop_mmu_get(Object *obj, Visitor *v, const char *name,
1641                          void *opaque, Error **errp)
1642 {
1643     bool value = RISCV_CPU(obj)->cfg.mmu;
1644 
1645     visit_type_bool(v, name, &value, errp);
1646 }
1647 
1648 static const PropertyInfo prop_mmu = {
1649     .name = "mmu",
1650     .get = prop_mmu_get,
1651     .set = prop_mmu_set,
1652 };
1653 
1654 Property riscv_cpu_options[] = {
1655     DEFINE_PROP_BOOL("pmp", RISCVCPU, cfg.pmp, true),
1656 
1657     DEFINE_PROP_STRING("priv_spec", RISCVCPU, cfg.priv_spec),
1658     DEFINE_PROP_STRING("vext_spec", RISCVCPU, cfg.vext_spec),
1659 
1660     DEFINE_PROP_UINT16("vlen", RISCVCPU, cfg.vlen, 128),
1661     DEFINE_PROP_UINT16("elen", RISCVCPU, cfg.elen, 64),
1662 
1663     DEFINE_PROP_UINT16("cbom_blocksize", RISCVCPU, cfg.cbom_blocksize, 64),
1664     DEFINE_PROP_UINT16("cbop_blocksize", RISCVCPU, cfg.cbop_blocksize, 64),
1665     DEFINE_PROP_UINT16("cboz_blocksize", RISCVCPU, cfg.cboz_blocksize, 64),
1666 
1667     DEFINE_PROP_END_OF_LIST(),
1668 };
1669 
1670 /*
1671  * RVA22U64 defines some 'named features' or 'synthetic extensions'
1672  * that are cache related: Za64rs, Zic64b, Ziccif, Ziccrse, Ziccamoa
1673  * and Zicclsm. We do not implement caching in QEMU so we'll consider
1674  * all these named features as always enabled.
1675  *
1676  * There's no riscv,isa update for them (nor for zic64b, despite it
1677  * having a cfg offset) at this moment.
1678  */
1679 static RISCVCPUProfile RVA22U64 = {
1680     .parent = NULL,
1681     .name = "rva22u64",
1682     .misa_ext = RVI | RVM | RVA | RVF | RVD | RVC | RVU,
1683     .priv_spec = RISCV_PROFILE_ATTR_UNUSED,
1684     .satp_mode = RISCV_PROFILE_ATTR_UNUSED,
1685     .ext_offsets = {
1686         CPU_CFG_OFFSET(ext_zicsr), CPU_CFG_OFFSET(ext_zihintpause),
1687         CPU_CFG_OFFSET(ext_zba), CPU_CFG_OFFSET(ext_zbb),
1688         CPU_CFG_OFFSET(ext_zbs), CPU_CFG_OFFSET(ext_zfhmin),
1689         CPU_CFG_OFFSET(ext_zkt), CPU_CFG_OFFSET(ext_zicntr),
1690         CPU_CFG_OFFSET(ext_zihpm), CPU_CFG_OFFSET(ext_zicbom),
1691         CPU_CFG_OFFSET(ext_zicbop), CPU_CFG_OFFSET(ext_zicboz),
1692 
1693         /* mandatory named features for this profile */
1694         CPU_CFG_OFFSET(zic64b),
1695 
1696         RISCV_PROFILE_EXT_LIST_END
1697     }
1698 };
1699 
1700 /*
1701  * As with RVA22U64, RVA22S64 also defines 'named features'.
1702  *
1703  * Cache related features that we consider enabled since we don't
1704  * implement cache: Ssccptr
1705  *
1706  * Other named features that we already implement: Sstvecd, Sstvala,
1707  * Sscounterenw
1708  *
1709  * Named features that we need to enable: svade
1710  *
1711  * The remaining features/extensions comes from RVA22U64.
1712  */
1713 static RISCVCPUProfile RVA22S64 = {
1714     .parent = &RVA22U64,
1715     .name = "rva22s64",
1716     .misa_ext = RVS,
1717     .priv_spec = PRIV_VERSION_1_12_0,
1718     .satp_mode = VM_1_10_SV39,
1719     .ext_offsets = {
1720         /* rva22s64 exts */
1721         CPU_CFG_OFFSET(ext_zifencei), CPU_CFG_OFFSET(ext_svpbmt),
1722         CPU_CFG_OFFSET(ext_svinval),
1723 
1724         /* rva22s64 named features */
1725         CPU_CFG_OFFSET(svade),
1726 
1727         RISCV_PROFILE_EXT_LIST_END
1728     }
1729 };
1730 
1731 RISCVCPUProfile *riscv_profiles[] = {
1732     &RVA22U64,
1733     &RVA22S64,
1734     NULL,
1735 };
1736 
1737 static Property riscv_cpu_properties[] = {
1738     DEFINE_PROP_BOOL("debug", RISCVCPU, cfg.debug, true),
1739 
1740     {.name = "pmu-mask", .info = &prop_pmu_mask},
1741     {.name = "pmu-num", .info = &prop_pmu_num}, /* Deprecated */
1742 
1743     {.name = "mmu", .info = &prop_mmu},
1744 
1745 #ifndef CONFIG_USER_ONLY
1746     DEFINE_PROP_UINT64("resetvec", RISCVCPU, env.resetvec, DEFAULT_RSTVEC),
1747 #endif
1748 
1749     DEFINE_PROP_BOOL("short-isa-string", RISCVCPU, cfg.short_isa_string, false),
1750 
1751     DEFINE_PROP_BOOL("rvv_ta_all_1s", RISCVCPU, cfg.rvv_ta_all_1s, false),
1752     DEFINE_PROP_BOOL("rvv_ma_all_1s", RISCVCPU, cfg.rvv_ma_all_1s, false),
1753 
1754     /*
1755      * write_misa() is marked as experimental for now so mark
1756      * it with -x and default to 'false'.
1757      */
1758     DEFINE_PROP_BOOL("x-misa-w", RISCVCPU, cfg.misa_w, false),
1759     DEFINE_PROP_END_OF_LIST(),
1760 };
1761 
1762 #if defined(TARGET_RISCV64)
1763 static void rva22u64_profile_cpu_init(Object *obj)
1764 {
1765     rv64i_bare_cpu_init(obj);
1766 
1767     RVA22U64.enabled = true;
1768 }
1769 
1770 static void rva22s64_profile_cpu_init(Object *obj)
1771 {
1772     rv64i_bare_cpu_init(obj);
1773 
1774     RVA22S64.enabled = true;
1775 }
1776 #endif
1777 
1778 static const gchar *riscv_gdb_arch_name(CPUState *cs)
1779 {
1780     RISCVCPU *cpu = RISCV_CPU(cs);
1781     CPURISCVState *env = &cpu->env;
1782 
1783     switch (riscv_cpu_mxl(env)) {
1784     case MXL_RV32:
1785         return "riscv:rv32";
1786     case MXL_RV64:
1787     case MXL_RV128:
1788         return "riscv:rv64";
1789     default:
1790         g_assert_not_reached();
1791     }
1792 }
1793 
1794 static const char *riscv_gdb_get_dynamic_xml(CPUState *cs, const char *xmlname)
1795 {
1796     RISCVCPU *cpu = RISCV_CPU(cs);
1797 
1798     if (strcmp(xmlname, "riscv-csr.xml") == 0) {
1799         return cpu->dyn_csr_xml;
1800     } else if (strcmp(xmlname, "riscv-vector.xml") == 0) {
1801         return cpu->dyn_vreg_xml;
1802     }
1803 
1804     return NULL;
1805 }
1806 
1807 #ifndef CONFIG_USER_ONLY
1808 static int64_t riscv_get_arch_id(CPUState *cs)
1809 {
1810     RISCVCPU *cpu = RISCV_CPU(cs);
1811 
1812     return cpu->env.mhartid;
1813 }
1814 
1815 #include "hw/core/sysemu-cpu-ops.h"
1816 
1817 static const struct SysemuCPUOps riscv_sysemu_ops = {
1818     .get_phys_page_debug = riscv_cpu_get_phys_page_debug,
1819     .write_elf64_note = riscv_cpu_write_elf64_note,
1820     .write_elf32_note = riscv_cpu_write_elf32_note,
1821     .legacy_vmsd = &vmstate_riscv_cpu,
1822 };
1823 #endif
1824 
1825 static void cpu_set_mvendorid(Object *obj, Visitor *v, const char *name,
1826                               void *opaque, Error **errp)
1827 {
1828     bool dynamic_cpu = riscv_cpu_is_dynamic(obj);
1829     RISCVCPU *cpu = RISCV_CPU(obj);
1830     uint32_t prev_val = cpu->cfg.mvendorid;
1831     uint32_t value;
1832 
1833     if (!visit_type_uint32(v, name, &value, errp)) {
1834         return;
1835     }
1836 
1837     if (!dynamic_cpu && prev_val != value) {
1838         error_setg(errp, "Unable to change %s mvendorid (0x%x)",
1839                    object_get_typename(obj), prev_val);
1840         return;
1841     }
1842 
1843     cpu->cfg.mvendorid = value;
1844 }
1845 
1846 static void cpu_get_mvendorid(Object *obj, Visitor *v, const char *name,
1847                               void *opaque, Error **errp)
1848 {
1849     uint32_t value = RISCV_CPU(obj)->cfg.mvendorid;
1850 
1851     visit_type_uint32(v, name, &value, errp);
1852 }
1853 
1854 static void cpu_set_mimpid(Object *obj, Visitor *v, const char *name,
1855                            void *opaque, Error **errp)
1856 {
1857     bool dynamic_cpu = riscv_cpu_is_dynamic(obj);
1858     RISCVCPU *cpu = RISCV_CPU(obj);
1859     uint64_t prev_val = cpu->cfg.mimpid;
1860     uint64_t value;
1861 
1862     if (!visit_type_uint64(v, name, &value, errp)) {
1863         return;
1864     }
1865 
1866     if (!dynamic_cpu && prev_val != value) {
1867         error_setg(errp, "Unable to change %s mimpid (0x%" PRIu64 ")",
1868                    object_get_typename(obj), prev_val);
1869         return;
1870     }
1871 
1872     cpu->cfg.mimpid = value;
1873 }
1874 
1875 static void cpu_get_mimpid(Object *obj, Visitor *v, const char *name,
1876                            void *opaque, Error **errp)
1877 {
1878     uint64_t value = RISCV_CPU(obj)->cfg.mimpid;
1879 
1880     visit_type_uint64(v, name, &value, errp);
1881 }
1882 
1883 static void cpu_set_marchid(Object *obj, Visitor *v, const char *name,
1884                             void *opaque, Error **errp)
1885 {
1886     bool dynamic_cpu = riscv_cpu_is_dynamic(obj);
1887     RISCVCPU *cpu = RISCV_CPU(obj);
1888     uint64_t prev_val = cpu->cfg.marchid;
1889     uint64_t value, invalid_val;
1890     uint32_t mxlen = 0;
1891 
1892     if (!visit_type_uint64(v, name, &value, errp)) {
1893         return;
1894     }
1895 
1896     if (!dynamic_cpu && prev_val != value) {
1897         error_setg(errp, "Unable to change %s marchid (0x%" PRIu64 ")",
1898                    object_get_typename(obj), prev_val);
1899         return;
1900     }
1901 
1902     switch (riscv_cpu_mxl(&cpu->env)) {
1903     case MXL_RV32:
1904         mxlen = 32;
1905         break;
1906     case MXL_RV64:
1907     case MXL_RV128:
1908         mxlen = 64;
1909         break;
1910     default:
1911         g_assert_not_reached();
1912     }
1913 
1914     invalid_val = 1LL << (mxlen - 1);
1915 
1916     if (value == invalid_val) {
1917         error_setg(errp, "Unable to set marchid with MSB (%u) bit set "
1918                          "and the remaining bits zero", mxlen);
1919         return;
1920     }
1921 
1922     cpu->cfg.marchid = value;
1923 }
1924 
1925 static void cpu_get_marchid(Object *obj, Visitor *v, const char *name,
1926                            void *opaque, Error **errp)
1927 {
1928     uint64_t value = RISCV_CPU(obj)->cfg.marchid;
1929 
1930     visit_type_uint64(v, name, &value, errp);
1931 }
1932 
1933 static void riscv_cpu_class_init(ObjectClass *c, void *data)
1934 {
1935     RISCVCPUClass *mcc = RISCV_CPU_CLASS(c);
1936     CPUClass *cc = CPU_CLASS(c);
1937     DeviceClass *dc = DEVICE_CLASS(c);
1938     ResettableClass *rc = RESETTABLE_CLASS(c);
1939 
1940     device_class_set_parent_realize(dc, riscv_cpu_realize,
1941                                     &mcc->parent_realize);
1942 
1943     resettable_class_set_parent_phases(rc, NULL, riscv_cpu_reset_hold, NULL,
1944                                        &mcc->parent_phases);
1945 
1946     cc->class_by_name = riscv_cpu_class_by_name;
1947     cc->has_work = riscv_cpu_has_work;
1948     cc->mmu_index = riscv_cpu_mmu_index;
1949     cc->dump_state = riscv_cpu_dump_state;
1950     cc->set_pc = riscv_cpu_set_pc;
1951     cc->get_pc = riscv_cpu_get_pc;
1952     cc->gdb_read_register = riscv_cpu_gdb_read_register;
1953     cc->gdb_write_register = riscv_cpu_gdb_write_register;
1954     cc->gdb_num_core_regs = 33;
1955     cc->gdb_stop_before_watchpoint = true;
1956     cc->disas_set_info = riscv_cpu_disas_set_info;
1957 #ifndef CONFIG_USER_ONLY
1958     cc->sysemu_ops = &riscv_sysemu_ops;
1959     cc->get_arch_id = riscv_get_arch_id;
1960 #endif
1961     cc->gdb_arch_name = riscv_gdb_arch_name;
1962     cc->gdb_get_dynamic_xml = riscv_gdb_get_dynamic_xml;
1963 
1964     object_class_property_add(c, "mvendorid", "uint32", cpu_get_mvendorid,
1965                               cpu_set_mvendorid, NULL, NULL);
1966 
1967     object_class_property_add(c, "mimpid", "uint64", cpu_get_mimpid,
1968                               cpu_set_mimpid, NULL, NULL);
1969 
1970     object_class_property_add(c, "marchid", "uint64", cpu_get_marchid,
1971                               cpu_set_marchid, NULL, NULL);
1972 
1973     device_class_set_props(dc, riscv_cpu_properties);
1974 }
1975 
1976 static void riscv_isa_string_ext(RISCVCPU *cpu, char **isa_str,
1977                                  int max_str_len)
1978 {
1979     const RISCVIsaExtData *edata;
1980     char *old = *isa_str;
1981     char *new = *isa_str;
1982 
1983     for (edata = isa_edata_arr; edata && edata->name; edata++) {
1984         if (isa_ext_is_enabled(cpu, edata->ext_enable_offset)) {
1985             new = g_strconcat(old, "_", edata->name, NULL);
1986             g_free(old);
1987             old = new;
1988         }
1989     }
1990 
1991     *isa_str = new;
1992 }
1993 
1994 char *riscv_isa_string(RISCVCPU *cpu)
1995 {
1996     int i;
1997     const size_t maxlen = sizeof("rv128") + sizeof(riscv_single_letter_exts);
1998     char *isa_str = g_new(char, maxlen);
1999     char *p = isa_str + snprintf(isa_str, maxlen, "rv%d", TARGET_LONG_BITS);
2000     for (i = 0; i < sizeof(riscv_single_letter_exts) - 1; i++) {
2001         if (cpu->env.misa_ext & RV(riscv_single_letter_exts[i])) {
2002             *p++ = qemu_tolower(riscv_single_letter_exts[i]);
2003         }
2004     }
2005     *p = '\0';
2006     if (!cpu->cfg.short_isa_string) {
2007         riscv_isa_string_ext(cpu, &isa_str, maxlen);
2008     }
2009     return isa_str;
2010 }
2011 
2012 #define DEFINE_CPU(type_name, initfn)      \
2013     {                                      \
2014         .name = type_name,                 \
2015         .parent = TYPE_RISCV_CPU,          \
2016         .instance_init = initfn            \
2017     }
2018 
2019 #define DEFINE_DYNAMIC_CPU(type_name, initfn) \
2020     {                                         \
2021         .name = type_name,                    \
2022         .parent = TYPE_RISCV_DYNAMIC_CPU,     \
2023         .instance_init = initfn               \
2024     }
2025 
2026 #define DEFINE_VENDOR_CPU(type_name, initfn) \
2027     {                                        \
2028         .name = type_name,                   \
2029         .parent = TYPE_RISCV_VENDOR_CPU,     \
2030         .instance_init = initfn              \
2031     }
2032 
2033 #define DEFINE_BARE_CPU(type_name, initfn) \
2034     {                                      \
2035         .name = type_name,                 \
2036         .parent = TYPE_RISCV_BARE_CPU,     \
2037         .instance_init = initfn            \
2038     }
2039 
2040 #define DEFINE_PROFILE_CPU(type_name, initfn) \
2041     {                                         \
2042         .name = type_name,                    \
2043         .parent = TYPE_RISCV_BARE_CPU,        \
2044         .instance_init = initfn               \
2045     }
2046 
2047 static const TypeInfo riscv_cpu_type_infos[] = {
2048     {
2049         .name = TYPE_RISCV_CPU,
2050         .parent = TYPE_CPU,
2051         .instance_size = sizeof(RISCVCPU),
2052         .instance_align = __alignof(RISCVCPU),
2053         .instance_init = riscv_cpu_init,
2054         .instance_post_init = riscv_cpu_post_init,
2055         .abstract = true,
2056         .class_size = sizeof(RISCVCPUClass),
2057         .class_init = riscv_cpu_class_init,
2058     },
2059     {
2060         .name = TYPE_RISCV_DYNAMIC_CPU,
2061         .parent = TYPE_RISCV_CPU,
2062         .abstract = true,
2063     },
2064     {
2065         .name = TYPE_RISCV_VENDOR_CPU,
2066         .parent = TYPE_RISCV_CPU,
2067         .abstract = true,
2068     },
2069     {
2070         .name = TYPE_RISCV_BARE_CPU,
2071         .parent = TYPE_RISCV_CPU,
2072         .abstract = true,
2073     },
2074     DEFINE_DYNAMIC_CPU(TYPE_RISCV_CPU_ANY,      riscv_any_cpu_init),
2075     DEFINE_DYNAMIC_CPU(TYPE_RISCV_CPU_MAX,      riscv_max_cpu_init),
2076 #if defined(TARGET_RISCV32)
2077     DEFINE_DYNAMIC_CPU(TYPE_RISCV_CPU_BASE32,   rv32_base_cpu_init),
2078     DEFINE_VENDOR_CPU(TYPE_RISCV_CPU_IBEX,        rv32_ibex_cpu_init),
2079     DEFINE_VENDOR_CPU(TYPE_RISCV_CPU_SIFIVE_E31,  rv32_sifive_e_cpu_init),
2080     DEFINE_VENDOR_CPU(TYPE_RISCV_CPU_SIFIVE_E34,  rv32_imafcu_nommu_cpu_init),
2081     DEFINE_VENDOR_CPU(TYPE_RISCV_CPU_SIFIVE_U34,  rv32_sifive_u_cpu_init),
2082 #elif defined(TARGET_RISCV64)
2083     DEFINE_DYNAMIC_CPU(TYPE_RISCV_CPU_BASE64,   rv64_base_cpu_init),
2084     DEFINE_VENDOR_CPU(TYPE_RISCV_CPU_SIFIVE_E51,  rv64_sifive_e_cpu_init),
2085     DEFINE_VENDOR_CPU(TYPE_RISCV_CPU_SIFIVE_U54,  rv64_sifive_u_cpu_init),
2086     DEFINE_VENDOR_CPU(TYPE_RISCV_CPU_SHAKTI_C,    rv64_sifive_u_cpu_init),
2087     DEFINE_VENDOR_CPU(TYPE_RISCV_CPU_THEAD_C906,  rv64_thead_c906_cpu_init),
2088     DEFINE_VENDOR_CPU(TYPE_RISCV_CPU_VEYRON_V1,   rv64_veyron_v1_cpu_init),
2089     DEFINE_DYNAMIC_CPU(TYPE_RISCV_CPU_BASE128,  rv128_base_cpu_init),
2090     DEFINE_BARE_CPU(TYPE_RISCV_CPU_RV64I, rv64i_bare_cpu_init),
2091     DEFINE_PROFILE_CPU(TYPE_RISCV_CPU_RVA22U64, rva22u64_profile_cpu_init),
2092     DEFINE_PROFILE_CPU(TYPE_RISCV_CPU_RVA22S64, rva22s64_profile_cpu_init),
2093 #endif
2094 };
2095 
2096 DEFINE_TYPES(riscv_cpu_type_infos)
2097