xref: /openbmc/qemu/target/riscv/cpu.c (revision 811ef853)
1 /*
2  * QEMU RISC-V CPU
3  *
4  * Copyright (c) 2016-2017 Sagar Karandikar, sagark@eecs.berkeley.edu
5  * Copyright (c) 2017-2018 SiFive, Inc.
6  *
7  * This program is free software; you can redistribute it and/or modify it
8  * under the terms and conditions of the GNU General Public License,
9  * version 2 or later, as published by the Free Software Foundation.
10  *
11  * This program is distributed in the hope it will be useful, but WITHOUT
12  * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
13  * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
14  * more details.
15  *
16  * You should have received a copy of the GNU General Public License along with
17  * this program.  If not, see <http://www.gnu.org/licenses/>.
18  */
19 
20 #include "qemu/osdep.h"
21 #include "qemu/qemu-print.h"
22 #include "qemu/ctype.h"
23 #include "qemu/log.h"
24 #include "cpu.h"
25 #include "cpu_vendorid.h"
26 #include "internals.h"
27 #include "exec/exec-all.h"
28 #include "qapi/error.h"
29 #include "qapi/visitor.h"
30 #include "qemu/error-report.h"
31 #include "hw/qdev-properties.h"
32 #include "hw/core/qdev-prop-internal.h"
33 #include "migration/vmstate.h"
34 #include "fpu/softfloat-helpers.h"
35 #include "sysemu/kvm.h"
36 #include "sysemu/tcg.h"
37 #include "kvm/kvm_riscv.h"
38 #include "tcg/tcg-cpu.h"
39 #include "tcg/tcg.h"
40 
41 /* RISC-V CPU definitions */
42 static const char riscv_single_letter_exts[] = "IEMAFDQCBPVH";
43 const uint32_t misa_bits[] = {RVI, RVE, RVM, RVA, RVF, RVD, RVV,
44                               RVC, RVS, RVU, RVH, RVJ, RVG, RVB, 0};
45 
46 /*
47  * From vector_helper.c
48  * Note that vector data is stored in host-endian 64-bit chunks,
49  * so addressing bytes needs a host-endian fixup.
50  */
51 #if HOST_BIG_ENDIAN
52 #define BYTE(x)   ((x) ^ 7)
53 #else
54 #define BYTE(x)   (x)
55 #endif
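/*
 * e.g. on a big-endian host BYTE(0) == 7, so element byte 0 is read from
 * the least significant byte of each 64-bit host chunk.
 */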
56 
57 bool riscv_cpu_is_32bit(RISCVCPU *cpu)
58 {
59     return riscv_cpu_mxl(&cpu->env) == MXL_RV32;
60 }
61 
62 /* Hash table that stores user-set numeric CPU options */
63 static GHashTable *general_user_opts;
64 
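/*
 * Record a numeric option explicitly set by the user. The option name is
 * stored by reference (the hash table does not copy it), so callers are
 * expected to pass property names with static storage duration.
 */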
65 static void cpu_option_add_user_setting(const char *optname, uint32_t value)
66 {
67     g_hash_table_insert(general_user_opts, (gpointer)optname,
68                         GUINT_TO_POINTER(value));
69 }
70 
71 bool riscv_cpu_option_set(const char *optname)
72 {
73     return g_hash_table_contains(general_user_opts, optname);
74 }
75 
76 #define ISA_EXT_DATA_ENTRY(_name, _min_ver, _prop) \
77     {#_name, _min_ver, CPU_CFG_OFFSET(_prop)}
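/*
 * Each isa_edata_arr[] entry ties a multi-letter extension name to the
 * minimum privileged spec version it requires and to its enable flag
 * inside RISCVCPUConfig.
 */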
78 
79 /*
80  * Here are the ordering rules of extension naming defined by the RISC-V
81  * specification:
82  * 1. All extensions should be separated from other multi-letter extensions
83  *    by an underscore.
84  * 2. The first letter following the 'Z' conventionally indicates the most
85  *    closely related alphabetical extension category, IMAFDQLCBKJTPVH.
86  *    If multiple 'Z' extensions are named, they should be ordered first
87  *    by category, then alphabetically within a category.
88  * 3. Standard supervisor-level extensions (starting with 'S') should be
89  *    listed after standard unprivileged extensions.  If multiple
90  *    supervisor-level extensions are listed, they should be ordered
91  *    alphabetically.
92  * 4. Non-standard extensions (starting with 'X') must be listed after all
93  *    standard extensions. They must be separated from other multi-letter
94  *    extensions by an underscore.
95  *
96  * Single-letter extensions are checked in riscv_cpu_validate_misa_priv()
97  * instead.
98  */
99 const RISCVIsaExtData isa_edata_arr[] = {
100     ISA_EXT_DATA_ENTRY(zicbom, PRIV_VERSION_1_12_0, ext_zicbom),
101     ISA_EXT_DATA_ENTRY(zicbop, PRIV_VERSION_1_12_0, ext_zicbop),
102     ISA_EXT_DATA_ENTRY(zicboz, PRIV_VERSION_1_12_0, ext_zicboz),
103     ISA_EXT_DATA_ENTRY(zicond, PRIV_VERSION_1_12_0, ext_zicond),
104     ISA_EXT_DATA_ENTRY(zicntr, PRIV_VERSION_1_12_0, ext_zicntr),
105     ISA_EXT_DATA_ENTRY(zicsr, PRIV_VERSION_1_10_0, ext_zicsr),
106     ISA_EXT_DATA_ENTRY(zifencei, PRIV_VERSION_1_10_0, ext_zifencei),
107     ISA_EXT_DATA_ENTRY(zihintntl, PRIV_VERSION_1_10_0, ext_zihintntl),
108     ISA_EXT_DATA_ENTRY(zihintpause, PRIV_VERSION_1_10_0, ext_zihintpause),
109     ISA_EXT_DATA_ENTRY(zihpm, PRIV_VERSION_1_12_0, ext_zihpm),
110     ISA_EXT_DATA_ENTRY(zmmul, PRIV_VERSION_1_12_0, ext_zmmul),
111     ISA_EXT_DATA_ENTRY(zacas, PRIV_VERSION_1_12_0, ext_zacas),
112     ISA_EXT_DATA_ENTRY(zawrs, PRIV_VERSION_1_12_0, ext_zawrs),
113     ISA_EXT_DATA_ENTRY(zfa, PRIV_VERSION_1_12_0, ext_zfa),
114     ISA_EXT_DATA_ENTRY(zfbfmin, PRIV_VERSION_1_12_0, ext_zfbfmin),
115     ISA_EXT_DATA_ENTRY(zfh, PRIV_VERSION_1_11_0, ext_zfh),
116     ISA_EXT_DATA_ENTRY(zfhmin, PRIV_VERSION_1_11_0, ext_zfhmin),
117     ISA_EXT_DATA_ENTRY(zfinx, PRIV_VERSION_1_12_0, ext_zfinx),
118     ISA_EXT_DATA_ENTRY(zdinx, PRIV_VERSION_1_12_0, ext_zdinx),
119     ISA_EXT_DATA_ENTRY(zca, PRIV_VERSION_1_12_0, ext_zca),
120     ISA_EXT_DATA_ENTRY(zcb, PRIV_VERSION_1_12_0, ext_zcb),
121     ISA_EXT_DATA_ENTRY(zcf, PRIV_VERSION_1_12_0, ext_zcf),
122     ISA_EXT_DATA_ENTRY(zcd, PRIV_VERSION_1_12_0, ext_zcd),
123     ISA_EXT_DATA_ENTRY(zce, PRIV_VERSION_1_12_0, ext_zce),
124     ISA_EXT_DATA_ENTRY(zcmp, PRIV_VERSION_1_12_0, ext_zcmp),
125     ISA_EXT_DATA_ENTRY(zcmt, PRIV_VERSION_1_12_0, ext_zcmt),
126     ISA_EXT_DATA_ENTRY(zba, PRIV_VERSION_1_12_0, ext_zba),
127     ISA_EXT_DATA_ENTRY(zbb, PRIV_VERSION_1_12_0, ext_zbb),
128     ISA_EXT_DATA_ENTRY(zbc, PRIV_VERSION_1_12_0, ext_zbc),
129     ISA_EXT_DATA_ENTRY(zbkb, PRIV_VERSION_1_12_0, ext_zbkb),
130     ISA_EXT_DATA_ENTRY(zbkc, PRIV_VERSION_1_12_0, ext_zbkc),
131     ISA_EXT_DATA_ENTRY(zbkx, PRIV_VERSION_1_12_0, ext_zbkx),
132     ISA_EXT_DATA_ENTRY(zbs, PRIV_VERSION_1_12_0, ext_zbs),
133     ISA_EXT_DATA_ENTRY(zk, PRIV_VERSION_1_12_0, ext_zk),
134     ISA_EXT_DATA_ENTRY(zkn, PRIV_VERSION_1_12_0, ext_zkn),
135     ISA_EXT_DATA_ENTRY(zknd, PRIV_VERSION_1_12_0, ext_zknd),
136     ISA_EXT_DATA_ENTRY(zkne, PRIV_VERSION_1_12_0, ext_zkne),
137     ISA_EXT_DATA_ENTRY(zknh, PRIV_VERSION_1_12_0, ext_zknh),
138     ISA_EXT_DATA_ENTRY(zkr, PRIV_VERSION_1_12_0, ext_zkr),
139     ISA_EXT_DATA_ENTRY(zks, PRIV_VERSION_1_12_0, ext_zks),
140     ISA_EXT_DATA_ENTRY(zksed, PRIV_VERSION_1_12_0, ext_zksed),
141     ISA_EXT_DATA_ENTRY(zksh, PRIV_VERSION_1_12_0, ext_zksh),
142     ISA_EXT_DATA_ENTRY(zkt, PRIV_VERSION_1_12_0, ext_zkt),
143     ISA_EXT_DATA_ENTRY(zvbb, PRIV_VERSION_1_12_0, ext_zvbb),
144     ISA_EXT_DATA_ENTRY(zvbc, PRIV_VERSION_1_12_0, ext_zvbc),
145     ISA_EXT_DATA_ENTRY(zve32f, PRIV_VERSION_1_10_0, ext_zve32f),
146     ISA_EXT_DATA_ENTRY(zve64f, PRIV_VERSION_1_10_0, ext_zve64f),
147     ISA_EXT_DATA_ENTRY(zve64d, PRIV_VERSION_1_10_0, ext_zve64d),
148     ISA_EXT_DATA_ENTRY(zvfbfmin, PRIV_VERSION_1_12_0, ext_zvfbfmin),
149     ISA_EXT_DATA_ENTRY(zvfbfwma, PRIV_VERSION_1_12_0, ext_zvfbfwma),
150     ISA_EXT_DATA_ENTRY(zvfh, PRIV_VERSION_1_12_0, ext_zvfh),
151     ISA_EXT_DATA_ENTRY(zvfhmin, PRIV_VERSION_1_12_0, ext_zvfhmin),
152     ISA_EXT_DATA_ENTRY(zvkb, PRIV_VERSION_1_12_0, ext_zvkb),
153     ISA_EXT_DATA_ENTRY(zvkg, PRIV_VERSION_1_12_0, ext_zvkg),
154     ISA_EXT_DATA_ENTRY(zvkn, PRIV_VERSION_1_12_0, ext_zvkn),
155     ISA_EXT_DATA_ENTRY(zvknc, PRIV_VERSION_1_12_0, ext_zvknc),
156     ISA_EXT_DATA_ENTRY(zvkned, PRIV_VERSION_1_12_0, ext_zvkned),
157     ISA_EXT_DATA_ENTRY(zvkng, PRIV_VERSION_1_12_0, ext_zvkng),
158     ISA_EXT_DATA_ENTRY(zvknha, PRIV_VERSION_1_12_0, ext_zvknha),
159     ISA_EXT_DATA_ENTRY(zvknhb, PRIV_VERSION_1_12_0, ext_zvknhb),
160     ISA_EXT_DATA_ENTRY(zvks, PRIV_VERSION_1_12_0, ext_zvks),
161     ISA_EXT_DATA_ENTRY(zvksc, PRIV_VERSION_1_12_0, ext_zvksc),
162     ISA_EXT_DATA_ENTRY(zvksed, PRIV_VERSION_1_12_0, ext_zvksed),
163     ISA_EXT_DATA_ENTRY(zvksg, PRIV_VERSION_1_12_0, ext_zvksg),
164     ISA_EXT_DATA_ENTRY(zvksh, PRIV_VERSION_1_12_0, ext_zvksh),
165     ISA_EXT_DATA_ENTRY(zvkt, PRIV_VERSION_1_12_0, ext_zvkt),
166     ISA_EXT_DATA_ENTRY(zhinx, PRIV_VERSION_1_12_0, ext_zhinx),
167     ISA_EXT_DATA_ENTRY(zhinxmin, PRIV_VERSION_1_12_0, ext_zhinxmin),
168     ISA_EXT_DATA_ENTRY(smaia, PRIV_VERSION_1_12_0, ext_smaia),
169     ISA_EXT_DATA_ENTRY(smepmp, PRIV_VERSION_1_12_0, ext_smepmp),
170     ISA_EXT_DATA_ENTRY(smstateen, PRIV_VERSION_1_12_0, ext_smstateen),
171     ISA_EXT_DATA_ENTRY(ssaia, PRIV_VERSION_1_12_0, ext_ssaia),
172     ISA_EXT_DATA_ENTRY(sscofpmf, PRIV_VERSION_1_12_0, ext_sscofpmf),
173     ISA_EXT_DATA_ENTRY(sstc, PRIV_VERSION_1_12_0, ext_sstc),
174     ISA_EXT_DATA_ENTRY(svadu, PRIV_VERSION_1_12_0, ext_svadu),
175     ISA_EXT_DATA_ENTRY(svinval, PRIV_VERSION_1_12_0, ext_svinval),
176     ISA_EXT_DATA_ENTRY(svnapot, PRIV_VERSION_1_12_0, ext_svnapot),
177     ISA_EXT_DATA_ENTRY(svpbmt, PRIV_VERSION_1_12_0, ext_svpbmt),
178     ISA_EXT_DATA_ENTRY(xtheadba, PRIV_VERSION_1_11_0, ext_xtheadba),
179     ISA_EXT_DATA_ENTRY(xtheadbb, PRIV_VERSION_1_11_0, ext_xtheadbb),
180     ISA_EXT_DATA_ENTRY(xtheadbs, PRIV_VERSION_1_11_0, ext_xtheadbs),
181     ISA_EXT_DATA_ENTRY(xtheadcmo, PRIV_VERSION_1_11_0, ext_xtheadcmo),
182     ISA_EXT_DATA_ENTRY(xtheadcondmov, PRIV_VERSION_1_11_0, ext_xtheadcondmov),
183     ISA_EXT_DATA_ENTRY(xtheadfmemidx, PRIV_VERSION_1_11_0, ext_xtheadfmemidx),
184     ISA_EXT_DATA_ENTRY(xtheadfmv, PRIV_VERSION_1_11_0, ext_xtheadfmv),
185     ISA_EXT_DATA_ENTRY(xtheadmac, PRIV_VERSION_1_11_0, ext_xtheadmac),
186     ISA_EXT_DATA_ENTRY(xtheadmemidx, PRIV_VERSION_1_11_0, ext_xtheadmemidx),
187     ISA_EXT_DATA_ENTRY(xtheadmempair, PRIV_VERSION_1_11_0, ext_xtheadmempair),
188     ISA_EXT_DATA_ENTRY(xtheadsync, PRIV_VERSION_1_11_0, ext_xtheadsync),
189     ISA_EXT_DATA_ENTRY(xventanacondops, PRIV_VERSION_1_12_0, ext_XVentanaCondOps),
190 
191     DEFINE_PROP_END_OF_LIST(),
192 };
193 
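/*
 * 'ext_offset' in the two helpers below is the byte offset of a bool field
 * inside RISCVCPUConfig, as produced by CPU_CFG_OFFSET(), which allows the
 * extension flags to be accessed generically via pointer arithmetic.
 */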
194 bool isa_ext_is_enabled(RISCVCPU *cpu, uint32_t ext_offset)
195 {
196     bool *ext_enabled = (void *)&cpu->cfg + ext_offset;
197 
198     return *ext_enabled;
199 }
200 
201 void isa_ext_update_enabled(RISCVCPU *cpu, uint32_t ext_offset, bool en)
202 {
203     bool *ext_enabled = (void *)&cpu->cfg + ext_offset;
204 
205     *ext_enabled = en;
206 }
207 
208 bool riscv_cpu_is_vendor(Object *cpu_obj)
209 {
210     return object_dynamic_cast(cpu_obj, TYPE_RISCV_VENDOR_CPU) != NULL;
211 }
212 
213 const char * const riscv_int_regnames[] = {
214     "x0/zero", "x1/ra",  "x2/sp",  "x3/gp",  "x4/tp",  "x5/t0",   "x6/t1",
215     "x7/t2",   "x8/s0",  "x9/s1",  "x10/a0", "x11/a1", "x12/a2",  "x13/a3",
216     "x14/a4",  "x15/a5", "x16/a6", "x17/a7", "x18/s2", "x19/s3",  "x20/s4",
217     "x21/s5",  "x22/s6", "x23/s7", "x24/s8", "x25/s9", "x26/s10", "x27/s11",
218     "x28/t3",  "x29/t4", "x30/t5", "x31/t6"
219 };
220 
221 const char * const riscv_int_regnamesh[] = {
222     "x0h/zeroh", "x1h/rah",  "x2h/sph",   "x3h/gph",   "x4h/tph",  "x5h/t0h",
223     "x6h/t1h",   "x7h/t2h",  "x8h/s0h",   "x9h/s1h",   "x10h/a0h", "x11h/a1h",
224     "x12h/a2h",  "x13h/a3h", "x14h/a4h",  "x15h/a5h",  "x16h/a6h", "x17h/a7h",
225     "x18h/s2h",  "x19h/s3h", "x20h/s4h",  "x21h/s5h",  "x22h/s6h", "x23h/s7h",
226     "x24h/s8h",  "x25h/s9h", "x26h/s10h", "x27h/s11h", "x28h/t3h", "x29h/t4h",
227     "x30h/t5h",  "x31h/t6h"
228 };
229 
230 const char * const riscv_fpr_regnames[] = {
231     "f0/ft0",   "f1/ft1",  "f2/ft2",   "f3/ft3",   "f4/ft4",  "f5/ft5",
232     "f6/ft6",   "f7/ft7",  "f8/fs0",   "f9/fs1",   "f10/fa0", "f11/fa1",
233     "f12/fa2",  "f13/fa3", "f14/fa4",  "f15/fa5",  "f16/fa6", "f17/fa7",
234     "f18/fs2",  "f19/fs3", "f20/fs4",  "f21/fs5",  "f22/fs6", "f23/fs7",
235     "f24/fs8",  "f25/fs9", "f26/fs10", "f27/fs11", "f28/ft8", "f29/ft9",
236     "f30/ft10", "f31/ft11"
237 };
238 
239 const char * const riscv_rvv_regnames[] = {
240   "v0",  "v1",  "v2",  "v3",  "v4",  "v5",  "v6",
241   "v7",  "v8",  "v9",  "v10", "v11", "v12", "v13",
242   "v14", "v15", "v16", "v17", "v18", "v19", "v20",
243   "v21", "v22", "v23", "v24", "v25", "v26", "v27",
244   "v28", "v29", "v30", "v31"
245 };
246 
247 static const char * const riscv_excp_names[] = {
248     "misaligned_fetch",
249     "fault_fetch",
250     "illegal_instruction",
251     "breakpoint",
252     "misaligned_load",
253     "fault_load",
254     "misaligned_store",
255     "fault_store",
256     "user_ecall",
257     "supervisor_ecall",
258     "hypervisor_ecall",
259     "machine_ecall",
260     "exec_page_fault",
261     "load_page_fault",
262     "reserved",
263     "store_page_fault",
264     "reserved",
265     "reserved",
266     "reserved",
267     "reserved",
268     "guest_exec_page_fault",
269     "guest_load_page_fault",
270     "reserved",
271     "guest_store_page_fault",
272 };
273 
274 static const char * const riscv_intr_names[] = {
275     "u_software",
276     "s_software",
277     "vs_software",
278     "m_software",
279     "u_timer",
280     "s_timer",
281     "vs_timer",
282     "m_timer",
283     "u_external",
284     "s_external",
285     "vs_external",
286     "m_external",
287     "reserved",
288     "reserved",
289     "reserved",
290     "reserved"
291 };
292 
293 const char *riscv_cpu_get_trap_name(target_ulong cause, bool async)
294 {
295     if (async) {
296         return (cause < ARRAY_SIZE(riscv_intr_names)) ?
297                riscv_intr_names[cause] : "(unknown)";
298     } else {
299         return (cause < ARRAY_SIZE(riscv_excp_names)) ?
300                riscv_excp_names[cause] : "(unknown)";
301     }
302 }
303 
304 void riscv_cpu_set_misa(CPURISCVState *env, RISCVMXL mxl, uint32_t ext)
305 {
306     env->misa_mxl_max = env->misa_mxl = mxl;
307     env->misa_ext_mask = env->misa_ext = ext;
308 }
309 
310 #ifndef CONFIG_USER_ONLY
311 static uint8_t satp_mode_from_str(const char *satp_mode_str)
312 {
313     if (!strncmp(satp_mode_str, "mbare", 5)) {
314         return VM_1_10_MBARE;
315     }
316 
317     if (!strncmp(satp_mode_str, "sv32", 4)) {
318         return VM_1_10_SV32;
319     }
320 
321     if (!strncmp(satp_mode_str, "sv39", 4)) {
322         return VM_1_10_SV39;
323     }
324 
325     if (!strncmp(satp_mode_str, "sv48", 4)) {
326         return VM_1_10_SV48;
327     }
328 
329     if (!strncmp(satp_mode_str, "sv57", 4)) {
330         return VM_1_10_SV57;
331     }
332 
333     if (!strncmp(satp_mode_str, "sv64", 4)) {
334         return VM_1_10_SV64;
335     }
336 
337     g_assert_not_reached();
338 }
339 
340 uint8_t satp_mode_max_from_map(uint32_t map)
341 {
342     /*
343      * 'map = 0' would make __builtin_clz() undefined behaviour and,
344      * even if it returned 32, (31 - 32) would wrap around in the
345      * uint8_t return value. There's no good result to return for
346      * 'map = 0' anyway (e.g. 0 is ambiguous with 'map = 1').
347      *
348      * Assert out if map = 0. Callers will have to deal with
349      * it outside of this function.
350      */
351     g_assert(map > 0);
352 
353     /* map here has at least one bit set, so no problem with clz */
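    /*
     * e.g. a map with both SV39 and SV48 set returns the index of the
     * highest bit that is set, i.e. VM_1_10_SV48.
     */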
354     return 31 - __builtin_clz(map);
355 }
356 
357 const char *satp_mode_str(uint8_t satp_mode, bool is_32_bit)
358 {
359     if (is_32_bit) {
360         switch (satp_mode) {
361         case VM_1_10_SV32:
362             return "sv32";
363         case VM_1_10_MBARE:
364             return "none";
365         }
366     } else {
367         switch (satp_mode) {
368         case VM_1_10_SV64:
369             return "sv64";
370         case VM_1_10_SV57:
371             return "sv57";
372         case VM_1_10_SV48:
373             return "sv48";
374         case VM_1_10_SV39:
375             return "sv39";
376         case VM_1_10_MBARE:
377             return "none";
378         }
379     }
380 
381     g_assert_not_reached();
382 }
383 
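/*
 * Mark every satp mode up to and including 'satp_mode' that is valid for
 * the current MXL (see valid_vm_1_10_32/64) as supported.
 */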
384 static void set_satp_mode_max_supported(RISCVCPU *cpu,
385                                         uint8_t satp_mode)
386 {
387     bool rv32 = riscv_cpu_mxl(&cpu->env) == MXL_RV32;
388     const bool *valid_vm = rv32 ? valid_vm_1_10_32 : valid_vm_1_10_64;
389 
390     for (int i = 0; i <= satp_mode; ++i) {
391         if (valid_vm[i]) {
392             cpu->cfg.satp_mode.supported |= (1 << i);
393         }
394     }
395 }
396 
397 /* Set the satp mode to the max supported */
398 static void set_satp_mode_default_map(RISCVCPU *cpu)
399 {
400     /*
401      * Bare CPUs do not default to the max available.
402      * Users must set a valid satp_mode on the command
403      * line.
404      */
405     if (object_dynamic_cast(OBJECT(cpu), TYPE_RISCV_BARE_CPU) != NULL) {
406         warn_report("No satp mode set. Defaulting to 'bare'");
407         cpu->cfg.satp_mode.map = (1 << VM_1_10_MBARE);
408         return;
409     }
410 
411     cpu->cfg.satp_mode.map = cpu->cfg.satp_mode.supported;
412 }
413 #endif
414 
415 static void riscv_any_cpu_init(Object *obj)
416 {
417     RISCVCPU *cpu = RISCV_CPU(obj);
418     CPURISCVState *env = &cpu->env;
419 #if defined(TARGET_RISCV32)
420     riscv_cpu_set_misa(env, MXL_RV32, RVI | RVM | RVA | RVF | RVD | RVC | RVU);
421 #elif defined(TARGET_RISCV64)
422     riscv_cpu_set_misa(env, MXL_RV64, RVI | RVM | RVA | RVF | RVD | RVC | RVU);
423 #endif
424 
425 #ifndef CONFIG_USER_ONLY
426     set_satp_mode_max_supported(RISCV_CPU(obj),
427         riscv_cpu_mxl(&RISCV_CPU(obj)->env) == MXL_RV32 ?
428         VM_1_10_SV32 : VM_1_10_SV57);
429 #endif
430 
431     env->priv_ver = PRIV_VERSION_LATEST;
432 
433     /* inherited from parent obj via riscv_cpu_init() */
434     cpu->cfg.ext_zifencei = true;
435     cpu->cfg.ext_zicsr = true;
436     cpu->cfg.mmu = true;
437     cpu->cfg.pmp = true;
438 }
439 
440 static void riscv_max_cpu_init(Object *obj)
441 {
442     RISCVCPU *cpu = RISCV_CPU(obj);
443     CPURISCVState *env = &cpu->env;
444     RISCVMXL mlx = MXL_RV64;
445 
446     cpu->cfg.mmu = true;
447     cpu->cfg.pmp = true;
448 
449 #ifdef TARGET_RISCV32
450     mlx = MXL_RV32;
451 #endif
452     riscv_cpu_set_misa(env, mlx, 0);
453     env->priv_ver = PRIV_VERSION_LATEST;
454 #ifndef CONFIG_USER_ONLY
455     set_satp_mode_max_supported(RISCV_CPU(obj), mlx == MXL_RV32 ?
456                                 VM_1_10_SV32 : VM_1_10_SV57);
457 #endif
458 }
459 
460 #if defined(TARGET_RISCV64)
461 static void rv64_base_cpu_init(Object *obj)
462 {
463     RISCVCPU *cpu = RISCV_CPU(obj);
464     CPURISCVState *env = &cpu->env;
465 
466     cpu->cfg.mmu = true;
467     cpu->cfg.pmp = true;
468 
469     /* The misa extensions are set later, in the realize function */
470     riscv_cpu_set_misa(env, MXL_RV64, 0);
471     /* Set latest version of privileged specification */
472     env->priv_ver = PRIV_VERSION_LATEST;
473 #ifndef CONFIG_USER_ONLY
474     set_satp_mode_max_supported(RISCV_CPU(obj), VM_1_10_SV57);
475 #endif
476 }
477 
478 static void rv64_sifive_u_cpu_init(Object *obj)
479 {
480     RISCVCPU *cpu = RISCV_CPU(obj);
481     CPURISCVState *env = &cpu->env;
482     riscv_cpu_set_misa(env, MXL_RV64,
483                        RVI | RVM | RVA | RVF | RVD | RVC | RVS | RVU);
484     env->priv_ver = PRIV_VERSION_1_10_0;
485 #ifndef CONFIG_USER_ONLY
486     set_satp_mode_max_supported(RISCV_CPU(obj), VM_1_10_SV39);
487 #endif
488 
489     /* inherited from parent obj via riscv_cpu_init() */
490     cpu->cfg.ext_zifencei = true;
491     cpu->cfg.ext_zicsr = true;
492     cpu->cfg.mmu = true;
493     cpu->cfg.pmp = true;
494 }
495 
496 static void rv64_sifive_e_cpu_init(Object *obj)
497 {
498     CPURISCVState *env = &RISCV_CPU(obj)->env;
499     RISCVCPU *cpu = RISCV_CPU(obj);
500 
501     riscv_cpu_set_misa(env, MXL_RV64, RVI | RVM | RVA | RVC | RVU);
502     env->priv_ver = PRIV_VERSION_1_10_0;
503 #ifndef CONFIG_USER_ONLY
504     set_satp_mode_max_supported(cpu, VM_1_10_MBARE);
505 #endif
506 
507     /* inherited from parent obj via riscv_cpu_init() */
508     cpu->cfg.ext_zifencei = true;
509     cpu->cfg.ext_zicsr = true;
510     cpu->cfg.pmp = true;
511 }
512 
513 static void rv64_thead_c906_cpu_init(Object *obj)
514 {
515     CPURISCVState *env = &RISCV_CPU(obj)->env;
516     RISCVCPU *cpu = RISCV_CPU(obj);
517 
518     riscv_cpu_set_misa(env, MXL_RV64, RVG | RVC | RVS | RVU);
519     env->priv_ver = PRIV_VERSION_1_11_0;
520 
521     cpu->cfg.ext_zfa = true;
522     cpu->cfg.ext_zfh = true;
523     cpu->cfg.mmu = true;
524     cpu->cfg.ext_xtheadba = true;
525     cpu->cfg.ext_xtheadbb = true;
526     cpu->cfg.ext_xtheadbs = true;
527     cpu->cfg.ext_xtheadcmo = true;
528     cpu->cfg.ext_xtheadcondmov = true;
529     cpu->cfg.ext_xtheadfmemidx = true;
530     cpu->cfg.ext_xtheadmac = true;
531     cpu->cfg.ext_xtheadmemidx = true;
532     cpu->cfg.ext_xtheadmempair = true;
533     cpu->cfg.ext_xtheadsync = true;
534 
535     cpu->cfg.mvendorid = THEAD_VENDOR_ID;
536 #ifndef CONFIG_USER_ONLY
537     set_satp_mode_max_supported(cpu, VM_1_10_SV39);
538 #endif
539 
540     /* inherited from parent obj via riscv_cpu_init() */
541     cpu->cfg.pmp = true;
542 }
543 
544 static void rv64_veyron_v1_cpu_init(Object *obj)
545 {
546     CPURISCVState *env = &RISCV_CPU(obj)->env;
547     RISCVCPU *cpu = RISCV_CPU(obj);
548 
549     riscv_cpu_set_misa(env, MXL_RV64, RVG | RVC | RVS | RVU | RVH);
550     env->priv_ver = PRIV_VERSION_1_12_0;
551 
552     /* Enable ISA extensions */
553     cpu->cfg.mmu = true;
554     cpu->cfg.ext_zifencei = true;
555     cpu->cfg.ext_zicsr = true;
556     cpu->cfg.pmp = true;
557     cpu->cfg.ext_zicbom = true;
558     cpu->cfg.cbom_blocksize = 64;
559     cpu->cfg.cboz_blocksize = 64;
560     cpu->cfg.ext_zicboz = true;
561     cpu->cfg.ext_smaia = true;
562     cpu->cfg.ext_ssaia = true;
563     cpu->cfg.ext_sscofpmf = true;
564     cpu->cfg.ext_sstc = true;
565     cpu->cfg.ext_svinval = true;
566     cpu->cfg.ext_svnapot = true;
567     cpu->cfg.ext_svpbmt = true;
568     cpu->cfg.ext_smstateen = true;
569     cpu->cfg.ext_zba = true;
570     cpu->cfg.ext_zbb = true;
571     cpu->cfg.ext_zbc = true;
572     cpu->cfg.ext_zbs = true;
573     cpu->cfg.ext_XVentanaCondOps = true;
574 
575     cpu->cfg.mvendorid = VEYRON_V1_MVENDORID;
576     cpu->cfg.marchid = VEYRON_V1_MARCHID;
577     cpu->cfg.mimpid = VEYRON_V1_MIMPID;
578 
579 #ifndef CONFIG_USER_ONLY
580     set_satp_mode_max_supported(cpu, VM_1_10_SV48);
581 #endif
582 }
583 
584 static void rv128_base_cpu_init(Object *obj)
585 {
586     RISCVCPU *cpu = RISCV_CPU(obj);
587     CPURISCVState *env = &cpu->env;
588 
589     if (qemu_tcg_mttcg_enabled()) {
590         /* Missing 128-bit aligned atomics */
591         error_report("128-bit RISC-V currently does not work with Multi "
592                      "Threaded TCG. Please use: -accel tcg,thread=single");
593         exit(EXIT_FAILURE);
594     }
595 
596     cpu->cfg.mmu = true;
597     cpu->cfg.pmp = true;
598 
599     /* The misa extensions are set later, in the realize function */
600     riscv_cpu_set_misa(env, MXL_RV128, 0);
601     /* Set latest version of privileged specification */
602     env->priv_ver = PRIV_VERSION_LATEST;
603 #ifndef CONFIG_USER_ONLY
604     set_satp_mode_max_supported(RISCV_CPU(obj), VM_1_10_SV57);
605 #endif
606 }
607 
608 static void rv64i_bare_cpu_init(Object *obj)
609 {
610     CPURISCVState *env = &RISCV_CPU(obj)->env;
611     riscv_cpu_set_misa(env, MXL_RV64, RVI);
612 
613     /* Remove the defaults from the parent class */
614     RISCV_CPU(obj)->cfg.ext_zicntr = false;
615     RISCV_CPU(obj)->cfg.ext_zihpm = false;
616 
617     /* Set to QEMU's first supported priv version */
618     env->priv_ver = PRIV_VERSION_1_10_0;
619 
620     /*
621      * Support all available satp_mode settings. The default
622      * value will be set to MBARE if the user doesn't set
623      * satp_mode manually (see set_satp_mode_default_map()).
624      */
625 #ifndef CONFIG_USER_ONLY
626     set_satp_mode_max_supported(RISCV_CPU(obj), VM_1_10_SV64);
627 #endif
628 }
629 #else
630 static void rv32_base_cpu_init(Object *obj)
631 {
632     RISCVCPU *cpu = RISCV_CPU(obj);
633     CPURISCVState *env = &cpu->env;
634 
635     cpu->cfg.mmu = true;
636     cpu->cfg.pmp = true;
637 
638     /* The misa extensions are set later, in the realize function */
639     riscv_cpu_set_misa(env, MXL_RV32, 0);
640     /* Set latest version of privileged specification */
641     env->priv_ver = PRIV_VERSION_LATEST;
642 #ifndef CONFIG_USER_ONLY
643     set_satp_mode_max_supported(RISCV_CPU(obj), VM_1_10_SV32);
644 #endif
645 }
646 
647 static void rv32_sifive_u_cpu_init(Object *obj)
648 {
649     RISCVCPU *cpu = RISCV_CPU(obj);
650     CPURISCVState *env = &cpu->env;
651     riscv_cpu_set_misa(env, MXL_RV32,
652                        RVI | RVM | RVA | RVF | RVD | RVC | RVS | RVU);
653     env->priv_ver = PRIV_VERSION_1_10_0;
654 #ifndef CONFIG_USER_ONLY
655     set_satp_mode_max_supported(RISCV_CPU(obj), VM_1_10_SV32);
656 #endif
657 
658     /* inherited from parent obj via riscv_cpu_init() */
659     cpu->cfg.ext_zifencei = true;
660     cpu->cfg.ext_zicsr = true;
661     cpu->cfg.mmu = true;
662     cpu->cfg.pmp = true;
663 }
664 
665 static void rv32_sifive_e_cpu_init(Object *obj)
666 {
667     CPURISCVState *env = &RISCV_CPU(obj)->env;
668     RISCVCPU *cpu = RISCV_CPU(obj);
669 
670     riscv_cpu_set_misa(env, MXL_RV32, RVI | RVM | RVA | RVC | RVU);
671     env->priv_ver = PRIV_VERSION_1_10_0;
672 #ifndef CONFIG_USER_ONLY
673     set_satp_mode_max_supported(cpu, VM_1_10_MBARE);
674 #endif
675 
676     /* inherited from parent obj via riscv_cpu_init() */
677     cpu->cfg.ext_zifencei = true;
678     cpu->cfg.ext_zicsr = true;
679     cpu->cfg.pmp = true;
680 }
681 
682 static void rv32_ibex_cpu_init(Object *obj)
683 {
684     CPURISCVState *env = &RISCV_CPU(obj)->env;
685     RISCVCPU *cpu = RISCV_CPU(obj);
686 
687     riscv_cpu_set_misa(env, MXL_RV32, RVI | RVM | RVC | RVU);
688     env->priv_ver = PRIV_VERSION_1_12_0;
689 #ifndef CONFIG_USER_ONLY
690     set_satp_mode_max_supported(cpu, VM_1_10_MBARE);
691 #endif
692     /* inherited from parent obj via riscv_cpu_init() */
693     cpu->cfg.ext_zifencei = true;
694     cpu->cfg.ext_zicsr = true;
695     cpu->cfg.pmp = true;
696     cpu->cfg.ext_smepmp = true;
697 }
698 
699 static void rv32_imafcu_nommu_cpu_init(Object *obj)
700 {
701     CPURISCVState *env = &RISCV_CPU(obj)->env;
702     RISCVCPU *cpu = RISCV_CPU(obj);
703 
704     riscv_cpu_set_misa(env, MXL_RV32, RVI | RVM | RVA | RVF | RVC | RVU);
705     env->priv_ver = PRIV_VERSION_1_10_0;
706 #ifndef CONFIG_USER_ONLY
707     set_satp_mode_max_supported(cpu, VM_1_10_MBARE);
708 #endif
709 
710     /* inherited from parent obj via riscv_cpu_init() */
711     cpu->cfg.ext_zifencei = true;
712     cpu->cfg.ext_zicsr = true;
713     cpu->cfg.pmp = true;
714 }
715 #endif
716 
717 static ObjectClass *riscv_cpu_class_by_name(const char *cpu_model)
718 {
719     ObjectClass *oc;
720     char *typename;
721     char **cpuname;
722 
723     cpuname = g_strsplit(cpu_model, ",", 1);
724     typename = g_strdup_printf(RISCV_CPU_TYPE_NAME("%s"), cpuname[0]);
725     oc = object_class_by_name(typename);
726     g_strfreev(cpuname);
727     g_free(typename);
728 
729     return oc;
730 }
731 
732 char *riscv_cpu_get_name(RISCVCPU *cpu)
733 {
734     RISCVCPUClass *rcc = RISCV_CPU_GET_CLASS(cpu);
735     const char *typename = object_class_get_name(OBJECT_CLASS(rcc));
736 
737     g_assert(g_str_has_suffix(typename, RISCV_CPU_TYPE_SUFFIX));
738 
739     return cpu_model_from_type(typename);
740 }
741 
742 static void riscv_cpu_dump_state(CPUState *cs, FILE *f, int flags)
743 {
744     RISCVCPU *cpu = RISCV_CPU(cs);
745     CPURISCVState *env = &cpu->env;
746     int i, j;
747     uint8_t *p;
748 
749 #if !defined(CONFIG_USER_ONLY)
750     if (riscv_has_ext(env, RVH)) {
751         qemu_fprintf(f, " %s %d\n", "V      =  ", env->virt_enabled);
752     }
753 #endif
754     qemu_fprintf(f, " %s " TARGET_FMT_lx "\n", "pc      ", env->pc);
755 #ifndef CONFIG_USER_ONLY
756     {
757         static const int dump_csrs[] = {
758             CSR_MHARTID,
759             CSR_MSTATUS,
760             CSR_MSTATUSH,
761             /*
762              * CSR_SSTATUS is intentionally omitted here as its value
763              * can be figured out by looking at CSR_MSTATUS
764              */
765             CSR_HSTATUS,
766             CSR_VSSTATUS,
767             CSR_MIP,
768             CSR_MIE,
769             CSR_MIDELEG,
770             CSR_HIDELEG,
771             CSR_MEDELEG,
772             CSR_HEDELEG,
773             CSR_MTVEC,
774             CSR_STVEC,
775             CSR_VSTVEC,
776             CSR_MEPC,
777             CSR_SEPC,
778             CSR_VSEPC,
779             CSR_MCAUSE,
780             CSR_SCAUSE,
781             CSR_VSCAUSE,
782             CSR_MTVAL,
783             CSR_STVAL,
784             CSR_HTVAL,
785             CSR_MTVAL2,
786             CSR_MSCRATCH,
787             CSR_SSCRATCH,
788             CSR_SATP,
789             CSR_MMTE,
790             CSR_UPMBASE,
791             CSR_UPMMASK,
792             CSR_SPMBASE,
793             CSR_SPMMASK,
794             CSR_MPMBASE,
795             CSR_MPMMASK,
796         };
797 
798         for (i = 0; i < ARRAY_SIZE(dump_csrs); ++i) {
799             int csrno = dump_csrs[i];
800             target_ulong val = 0;
801             RISCVException res = riscv_csrrw_debug(env, csrno, &val, 0, 0);
802 
803             /*
804              * Rely on the smode, hmode, etc, predicates within csr.c
805              * to do the filtering of the registers that are present.
806              */
807             if (res == RISCV_EXCP_NONE) {
808                 qemu_fprintf(f, " %-8s " TARGET_FMT_lx "\n",
809                              csr_ops[csrno].name, val);
810             }
811         }
812     }
813 #endif
814 
815     for (i = 0; i < 32; i++) {
816         qemu_fprintf(f, " %-8s " TARGET_FMT_lx,
817                      riscv_int_regnames[i], env->gpr[i]);
818         if ((i & 3) == 3) {
819             qemu_fprintf(f, "\n");
820         }
821     }
822     if (flags & CPU_DUMP_FPU) {
823         for (i = 0; i < 32; i++) {
824             qemu_fprintf(f, " %-8s %016" PRIx64,
825                          riscv_fpr_regnames[i], env->fpr[i]);
826             if ((i & 3) == 3) {
827                 qemu_fprintf(f, "\n");
828             }
829         }
830     }
831     if (riscv_has_ext(env, RVV) && (flags & CPU_DUMP_VPU)) {
832         static const int dump_rvv_csrs[] = {
833                     CSR_VSTART,
834                     CSR_VXSAT,
835                     CSR_VXRM,
836                     CSR_VCSR,
837                     CSR_VL,
838                     CSR_VTYPE,
839                     CSR_VLENB,
840                 };
841         for (i = 0; i < ARRAY_SIZE(dump_rvv_csrs); ++i) {
842             int csrno = dump_rvv_csrs[i];
843             target_ulong val = 0;
844             RISCVException res = riscv_csrrw_debug(env, csrno, &val, 0, 0);
845 
846             /*
847              * Rely on the smode, hmode, etc, predicates within csr.c
848              * to do the filtering of the registers that are present.
849              */
850             if (res == RISCV_EXCP_NONE) {
851                 qemu_fprintf(f, " %-8s " TARGET_FMT_lx "\n",
852                              csr_ops[csrno].name, val);
853             }
854         }
855         uint16_t vlenb = cpu->cfg.vlen >> 3;
856 
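        /*
         * Dump each vector register byte by byte, most significant byte
         * first, using BYTE() to undo the host-endian storage layout.
         */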
857         for (i = 0; i < 32; i++) {
858             qemu_fprintf(f, " %-8s ", riscv_rvv_regnames[i]);
859             p = (uint8_t *)env->vreg;
860             for (j = vlenb - 1 ; j >= 0; j--) {
861                 qemu_fprintf(f, "%02x", *(p + i * vlenb + BYTE(j)));
862             }
863             qemu_fprintf(f, "\n");
864         }
865     }
866 }
867 
868 static void riscv_cpu_set_pc(CPUState *cs, vaddr value)
869 {
870     RISCVCPU *cpu = RISCV_CPU(cs);
871     CPURISCVState *env = &cpu->env;
872 
873     if (env->xl == MXL_RV32) {
874         env->pc = (int32_t)value;
875     } else {
876         env->pc = value;
877     }
878 }
879 
880 static vaddr riscv_cpu_get_pc(CPUState *cs)
881 {
882     RISCVCPU *cpu = RISCV_CPU(cs);
883     CPURISCVState *env = &cpu->env;
884 
885     /* Match cpu_get_tb_cpu_state. */
886     if (env->xl == MXL_RV32) {
887         return env->pc & UINT32_MAX;
888     }
889     return env->pc;
890 }
891 
892 static bool riscv_cpu_has_work(CPUState *cs)
893 {
894 #ifndef CONFIG_USER_ONLY
895     RISCVCPU *cpu = RISCV_CPU(cs);
896     CPURISCVState *env = &cpu->env;
897     /*
898      * Definition of the WFI instruction requires it to ignore the privilege
899      * mode and delegation registers, but respect individual enables
900      */
901     return riscv_cpu_all_pending(env) != 0 ||
902         riscv_cpu_sirq_pending(env) != RISCV_EXCP_NONE ||
903         riscv_cpu_vsirq_pending(env) != RISCV_EXCP_NONE;
904 #else
905     return true;
906 #endif
907 }
908 
909 static int riscv_cpu_mmu_index(CPUState *cs, bool ifetch)
910 {
911     return riscv_env_mmu_index(cpu_env(cs), ifetch);
912 }
913 
914 static void riscv_cpu_reset_hold(Object *obj)
915 {
916 #ifndef CONFIG_USER_ONLY
917     uint8_t iprio;
918     int i, irq, rdzero;
919 #endif
920     CPUState *cs = CPU(obj);
921     RISCVCPU *cpu = RISCV_CPU(cs);
922     RISCVCPUClass *mcc = RISCV_CPU_GET_CLASS(cpu);
923     CPURISCVState *env = &cpu->env;
924 
925     if (mcc->parent_phases.hold) {
926         mcc->parent_phases.hold(obj);
927     }
928 #ifndef CONFIG_USER_ONLY
929     env->misa_mxl = env->misa_mxl_max;
930     env->priv = PRV_M;
931     env->mstatus &= ~(MSTATUS_MIE | MSTATUS_MPRV);
932     if (env->misa_mxl > MXL_RV32) {
933         /*
934          * The reset status of SXL/UXL is undefined, but mstatus is WARL
935          * and we must ensure that the value after init is valid for read.
936          */
937         env->mstatus = set_field(env->mstatus, MSTATUS64_SXL, env->misa_mxl);
938         env->mstatus = set_field(env->mstatus, MSTATUS64_UXL, env->misa_mxl);
939         if (riscv_has_ext(env, RVH)) {
940             env->vsstatus = set_field(env->vsstatus,
941                                       MSTATUS64_SXL, env->misa_mxl);
942             env->vsstatus = set_field(env->vsstatus,
943                                       MSTATUS64_UXL, env->misa_mxl);
944             env->mstatus_hs = set_field(env->mstatus_hs,
945                                         MSTATUS64_SXL, env->misa_mxl);
946             env->mstatus_hs = set_field(env->mstatus_hs,
947                                         MSTATUS64_UXL, env->misa_mxl);
948         }
949     }
950     env->mcause = 0;
951     env->miclaim = MIP_SGEIP;
952     env->pc = env->resetvec;
953     env->bins = 0;
954     env->two_stage_lookup = false;
955 
956     env->menvcfg = (cpu->cfg.ext_svpbmt ? MENVCFG_PBMTE : 0) |
957                    (cpu->cfg.ext_svadu ? MENVCFG_ADUE : 0);
958     env->henvcfg = (cpu->cfg.ext_svpbmt ? HENVCFG_PBMTE : 0) |
959                    (cpu->cfg.ext_svadu ? HENVCFG_ADUE : 0);
960 
961     /* Initialize default priorities of local interrupts. */
962     for (i = 0; i < ARRAY_SIZE(env->miprio); i++) {
963         iprio = riscv_cpu_default_priority(i);
964         env->miprio[i] = (i == IRQ_M_EXT) ? 0 : iprio;
965         env->siprio[i] = (i == IRQ_S_EXT) ? 0 : iprio;
966         env->hviprio[i] = 0;
967     }
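    /*
     * Mirror the machine-level defaults into hviprio for every
     * implemented hviprio index that is not hardwired to read zero.
     */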
968     i = 0;
969     while (!riscv_cpu_hviprio_index2irq(i, &irq, &rdzero)) {
970         if (!rdzero) {
971             env->hviprio[irq] = env->miprio[irq];
972         }
973         i++;
974     }
975     /* mmte is supposed to have pm.current hardwired to 1 */
976     env->mmte |= (EXT_STATUS_INITIAL | MMTE_M_PM_CURRENT);
977 
978     /*
979      * Bits 10, 6, 2 and 12 of mideleg are read only 1 when the Hypervisor
980      * extension is enabled.
981      */
982     if (riscv_has_ext(env, RVH)) {
983         env->mideleg |= HS_MODE_INTERRUPTS;
984     }
985 
986     /*
987      * Clear mseccfg and unlock all the PMP entries upon reset.
988      * This is allowed as per the priv and smepmp specifications
989      * and is needed to clear stale entries across reboots.
990      */
991     if (riscv_cpu_cfg(env)->ext_smepmp) {
992         env->mseccfg = 0;
993     }
994 
995     pmp_unlock_entries(env);
996 #endif
997     env->xl = riscv_cpu_mxl(env);
998     riscv_cpu_update_mask(env);
999     cs->exception_index = RISCV_EXCP_NONE;
1000     env->load_res = -1;
1001     set_default_nan_mode(1, &env->fp_status);
1002 
1003 #ifndef CONFIG_USER_ONLY
1004     if (cpu->cfg.debug) {
1005         riscv_trigger_reset_hold(env);
1006     }
1007 
1008     if (kvm_enabled()) {
1009         kvm_riscv_reset_vcpu(cpu);
1010     }
1011 #endif
1012 }
1013 
1014 static void riscv_cpu_disas_set_info(CPUState *s, disassemble_info *info)
1015 {
1016     RISCVCPU *cpu = RISCV_CPU(s);
1017     CPURISCVState *env = &cpu->env;
1018     info->target_info = &cpu->cfg;
1019 
1020     switch (env->xl) {
1021     case MXL_RV32:
1022         info->print_insn = print_insn_riscv32;
1023         break;
1024     case MXL_RV64:
1025         info->print_insn = print_insn_riscv64;
1026         break;
1027     case MXL_RV128:
1028         info->print_insn = print_insn_riscv128;
1029         break;
1030     default:
1031         g_assert_not_reached();
1032     }
1033 }
1034 
1035 #ifndef CONFIG_USER_ONLY
1036 static void riscv_cpu_satp_mode_finalize(RISCVCPU *cpu, Error **errp)
1037 {
1038     bool rv32 = riscv_cpu_is_32bit(cpu);
1039     uint8_t satp_mode_map_max, satp_mode_supported_max;
1040 
1041     /* The CPU wants the OS to decide which satp mode to use */
1042     if (cpu->cfg.satp_mode.supported == 0) {
1043         return;
1044     }
1045 
1046     satp_mode_supported_max =
1047                     satp_mode_max_from_map(cpu->cfg.satp_mode.supported);
1048 
1049     if (cpu->cfg.satp_mode.map == 0) {
1050         if (cpu->cfg.satp_mode.init == 0) {
1051             /* If unset by the user, we fallback to the default satp mode. */
1052             set_satp_mode_default_map(cpu);
1053         } else {
1054             /*
1055              * Find the lowest level that the user disabled and then
1056              * enable the first supported level below it, as given by
1057              * valid_vm_1_10_32/64.
1058              */
1059             for (int i = 1; i < 16; ++i) {
1060                 if ((cpu->cfg.satp_mode.init & (1 << i)) &&
1061                     (cpu->cfg.satp_mode.supported & (1 << i))) {
1062                     for (int j = i - 1; j >= 0; --j) {
1063                         if (cpu->cfg.satp_mode.supported & (1 << j)) {
1064                             cpu->cfg.satp_mode.map |= (1 << j);
1065                             break;
1066                         }
1067                     }
1068                     break;
1069                 }
1070             }
1071         }
1072     }
1073 
1074     satp_mode_map_max = satp_mode_max_from_map(cpu->cfg.satp_mode.map);
1075 
1076     /* Make sure the user asked for a supported configuration (HW and QEMU) */
1077     if (satp_mode_map_max > satp_mode_supported_max) {
1078         error_setg(errp, "satp_mode %s is higher than hw max capability %s",
1079                    satp_mode_str(satp_mode_map_max, rv32),
1080                    satp_mode_str(satp_mode_supported_max, rv32));
1081         return;
1082     }
1083 
1084     /*
1085      * Make sure the user did not ask for an invalid configuration as per
1086      * the specification.
1087      */
1088     if (!rv32) {
1089         for (int i = satp_mode_map_max - 1; i >= 0; --i) {
1090             if (!(cpu->cfg.satp_mode.map & (1 << i)) &&
1091                 (cpu->cfg.satp_mode.init & (1 << i)) &&
1092                 (cpu->cfg.satp_mode.supported & (1 << i))) {
1093                 error_setg(errp, "cannot disable %s satp mode if %s "
1094                            "is enabled", satp_mode_str(i, false),
1095                            satp_mode_str(satp_mode_map_max, false));
1096                 return;
1097             }
1098         }
1099     }
1100 
1101     /* Finally expand the map so that all valid modes are set */
1102     for (int i = satp_mode_map_max - 1; i >= 0; --i) {
1103         if (cpu->cfg.satp_mode.supported & (1 << i)) {
1104             cpu->cfg.satp_mode.map |= (1 << i);
1105         }
1106     }
1107 }
1108 #endif
1109 
1110 void riscv_cpu_finalize_features(RISCVCPU *cpu, Error **errp)
1111 {
1112     Error *local_err = NULL;
1113 
1114 #ifndef CONFIG_USER_ONLY
1115     riscv_cpu_satp_mode_finalize(cpu, &local_err);
1116     if (local_err != NULL) {
1117         error_propagate(errp, local_err);
1118         return;
1119     }
1120 #endif
1121 
1122     if (tcg_enabled()) {
1123         riscv_tcg_cpu_finalize_features(cpu, &local_err);
1124         if (local_err != NULL) {
1125             error_propagate(errp, local_err);
1126             return;
1127         }
1128     } else if (kvm_enabled()) {
1129         riscv_kvm_cpu_finalize_features(cpu, &local_err);
1130         if (local_err != NULL) {
1131             error_propagate(errp, local_err);
1132             return;
1133         }
1134     }
1135 }
1136 
1137 static void riscv_cpu_realize(DeviceState *dev, Error **errp)
1138 {
1139     CPUState *cs = CPU(dev);
1140     RISCVCPU *cpu = RISCV_CPU(dev);
1141     RISCVCPUClass *mcc = RISCV_CPU_GET_CLASS(dev);
1142     Error *local_err = NULL;
1143 
1144     if (object_dynamic_cast(OBJECT(dev), TYPE_RISCV_CPU_ANY) != NULL) {
1145         warn_report("The 'any' CPU is deprecated and will be "
1146                     "removed in the future.");
1147     }
1148 
1149     cpu_exec_realizefn(cs, &local_err);
1150     if (local_err != NULL) {
1151         error_propagate(errp, local_err);
1152         return;
1153     }
1154 
1155     riscv_cpu_finalize_features(cpu, &local_err);
1156     if (local_err != NULL) {
1157         error_propagate(errp, local_err);
1158         return;
1159     }
1160 
1161     riscv_cpu_register_gdb_regs_for_features(cs);
1162 
1163 #ifndef CONFIG_USER_ONLY
1164     if (cpu->cfg.debug) {
1165         riscv_trigger_realize(&cpu->env);
1166     }
1167 #endif
1168 
1169     qemu_init_vcpu(cs);
1170     cpu_reset(cs);
1171 
1172     mcc->parent_realize(dev, errp);
1173 }
1174 
1175 bool riscv_cpu_accelerator_compatible(RISCVCPU *cpu)
1176 {
1177     if (tcg_enabled()) {
1178         return riscv_cpu_tcg_compatible(cpu);
1179     }
1180 
1181     return true;
1182 }
1183 
1184 #ifndef CONFIG_USER_ONLY
1185 static void cpu_riscv_get_satp(Object *obj, Visitor *v, const char *name,
1186                                void *opaque, Error **errp)
1187 {
1188     RISCVSATPMap *satp_map = opaque;
1189     uint8_t satp = satp_mode_from_str(name);
1190     bool value;
1191 
1192     value = satp_map->map & (1 << satp);
1193 
1194     visit_type_bool(v, name, &value, errp);
1195 }
1196 
1197 static void cpu_riscv_set_satp(Object *obj, Visitor *v, const char *name,
1198                                void *opaque, Error **errp)
1199 {
1200     RISCVSATPMap *satp_map = opaque;
1201     uint8_t satp = satp_mode_from_str(name);
1202     bool value;
1203 
1204     if (!visit_type_bool(v, name, &value, errp)) {
1205         return;
1206     }
1207 
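    /*
     * 'map' records the requested on/off state of this mode, while 'init'
     * records that the user set it explicitly; both are consumed later by
     * riscv_cpu_satp_mode_finalize().
     */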
1208     satp_map->map = deposit32(satp_map->map, satp, 1, value);
1209     satp_map->init |= 1 << satp;
1210 }
1211 
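/*
 * Expose one boolean QOM property per satp mode so that users can pick the
 * address translation mode on the command line, e.g. "-cpu rv64,sv48=on".
 */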
1212 void riscv_add_satp_mode_properties(Object *obj)
1213 {
1214     RISCVCPU *cpu = RISCV_CPU(obj);
1215 
1216     if (cpu->env.misa_mxl == MXL_RV32) {
1217         object_property_add(obj, "sv32", "bool", cpu_riscv_get_satp,
1218                             cpu_riscv_set_satp, NULL, &cpu->cfg.satp_mode);
1219     } else {
1220         object_property_add(obj, "sv39", "bool", cpu_riscv_get_satp,
1221                             cpu_riscv_set_satp, NULL, &cpu->cfg.satp_mode);
1222         object_property_add(obj, "sv48", "bool", cpu_riscv_get_satp,
1223                             cpu_riscv_set_satp, NULL, &cpu->cfg.satp_mode);
1224         object_property_add(obj, "sv57", "bool", cpu_riscv_get_satp,
1225                             cpu_riscv_set_satp, NULL, &cpu->cfg.satp_mode);
1226         object_property_add(obj, "sv64", "bool", cpu_riscv_get_satp,
1227                             cpu_riscv_set_satp, NULL, &cpu->cfg.satp_mode);
1228     }
1229 }
1230 
1231 static void riscv_cpu_set_irq(void *opaque, int irq, int level)
1232 {
1233     RISCVCPU *cpu = RISCV_CPU(opaque);
1234     CPURISCVState *env = &cpu->env;
1235 
1236     if (irq < IRQ_LOCAL_MAX) {
1237         switch (irq) {
1238         case IRQ_U_SOFT:
1239         case IRQ_S_SOFT:
1240         case IRQ_VS_SOFT:
1241         case IRQ_M_SOFT:
1242         case IRQ_U_TIMER:
1243         case IRQ_S_TIMER:
1244         case IRQ_VS_TIMER:
1245         case IRQ_M_TIMER:
1246         case IRQ_U_EXT:
1247         case IRQ_VS_EXT:
1248         case IRQ_M_EXT:
1249             if (kvm_enabled()) {
1250                 kvm_riscv_set_irq(cpu, irq, level);
1251             } else {
1252                 riscv_cpu_update_mip(env, 1 << irq, BOOL_TO_MASK(level));
1253             }
1254             break;
1255         case IRQ_S_EXT:
1256             if (kvm_enabled()) {
1257                 kvm_riscv_set_irq(cpu, irq, level);
1258             } else {
1259                 env->external_seip = level;
1260                 riscv_cpu_update_mip(env, 1 << irq,
1261                                      BOOL_TO_MASK(level | env->software_seip));
1262             }
1263             break;
1264         default:
1265             g_assert_not_reached();
1266         }
1267     } else if (irq < (IRQ_LOCAL_MAX + IRQ_LOCAL_GUEST_MAX)) {
1268         /* Require H-extension for handling guest local interrupts */
1269         if (!riscv_has_ext(env, RVH)) {
1270             g_assert_not_reached();
1271         }
1272 
1273         /* Compute bit position in HGEIP CSR */
1274         irq = irq - IRQ_LOCAL_MAX + 1;
1275         if (env->geilen < irq) {
1276             g_assert_not_reached();
1277         }
1278 
1279         /* Update HGEIP CSR */
1280         env->hgeip &= ~((target_ulong)1 << irq);
1281         if (level) {
1282             env->hgeip |= (target_ulong)1 << irq;
1283         }
1284 
1285         /* Update mip.SGEIP bit */
1286         riscv_cpu_update_mip(env, MIP_SGEIP,
1287                              BOOL_TO_MASK(!!(env->hgeie & env->hgeip)));
1288     } else {
1289         g_assert_not_reached();
1290     }
1291 }
1292 #endif /* CONFIG_USER_ONLY */
1293 
1294 static bool riscv_cpu_is_dynamic(Object *cpu_obj)
1295 {
1296     return object_dynamic_cast(cpu_obj, TYPE_RISCV_DYNAMIC_CPU) != NULL;
1297 }
1298 
1299 static void riscv_cpu_post_init(Object *obj)
1300 {
1301     accel_cpu_instance_init(CPU(obj));
1302 }
1303 
1304 static void riscv_cpu_init(Object *obj)
1305 {
1306     RISCVCPU *cpu = RISCV_CPU(obj);
1307 
1308 #ifndef CONFIG_USER_ONLY
1309     qdev_init_gpio_in(DEVICE(obj), riscv_cpu_set_irq,
1310                       IRQ_LOCAL_MAX + IRQ_LOCAL_GUEST_MAX);
1311 #endif /* CONFIG_USER_ONLY */
1312 
1313     general_user_opts = g_hash_table_new(g_str_hash, g_str_equal);
1314 
1315     /*
1316      * The timer and performance counters extensions were supported
1317      * in QEMU before they were added as discrete extensions in the
1318      * ISA. To keep compatibility we'll always default them to 'true'
1319      * for all CPUs. Each accelerator will decide what to do when
1320      * users disable them.
1321      */
1322     RISCV_CPU(obj)->cfg.ext_zicntr = true;
1323     RISCV_CPU(obj)->cfg.ext_zihpm = true;
1324 
1325     /* Default values for non-bool cpu properties */
1326     cpu->cfg.pmu_mask = MAKE_64BIT_MASK(3, 16);
1327     cpu->cfg.vlen = 128;
1328     cpu->cfg.elen = 64;
1329     cpu->cfg.cbom_blocksize = 64;
1330     cpu->cfg.cbop_blocksize = 64;
1331     cpu->env.vext_ver = VEXT_VERSION_1_00_0;
1332 }
1333 
1334 typedef struct misa_ext_info {
1335     const char *name;
1336     const char *description;
1337 } MISAExtInfo;
1338 
1339 #define MISA_INFO_IDX(_bit) \
1340     __builtin_ctz(_bit)
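/* e.g. MISA_INFO_IDX(RVC) == 2, since RVC is bit ('C' - 'A') of misa */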
1341 
1342 #define MISA_EXT_INFO(_bit, _propname, _descr) \
1343     [MISA_INFO_IDX(_bit)] = {.name = _propname, .description = _descr}
1344 
1345 static const MISAExtInfo misa_ext_info_arr[] = {
1346     MISA_EXT_INFO(RVA, "a", "Atomic instructions"),
1347     MISA_EXT_INFO(RVC, "c", "Compressed instructions"),
1348     MISA_EXT_INFO(RVD, "d", "Double-precision floating point"),
1349     MISA_EXT_INFO(RVF, "f", "Single-precision floating point"),
1350     MISA_EXT_INFO(RVI, "i", "Base integer instruction set"),
1351     MISA_EXT_INFO(RVE, "e", "Base integer instruction set (embedded)"),
1352     MISA_EXT_INFO(RVM, "m", "Integer multiplication and division"),
1353     MISA_EXT_INFO(RVS, "s", "Supervisor-level instructions"),
1354     MISA_EXT_INFO(RVU, "u", "User-level instructions"),
1355     MISA_EXT_INFO(RVH, "h", "Hypervisor"),
1356     MISA_EXT_INFO(RVJ, "x-j", "Dynamic translated languages"),
1357     MISA_EXT_INFO(RVV, "v", "Vector operations"),
1358     MISA_EXT_INFO(RVG, "g", "General purpose (IMAFD_Zicsr_Zifencei)"),
1359     MISA_EXT_INFO(RVB, "x-b", "Bit manipulation (Zba_Zbb_Zbs)")
1360 };
1361 
1362 static int riscv_validate_misa_info_idx(uint32_t bit)
1363 {
1364     int idx;
1365 
1366     /*
1367      * Our lowest valid input (RVA) is 1 and
1368      * __builtin_ctz() is UB with zero.
1369      */
1370     g_assert(bit != 0);
1371     idx = MISA_INFO_IDX(bit);
1372 
1373     g_assert(idx < ARRAY_SIZE(misa_ext_info_arr));
1374     return idx;
1375 }
1376 
1377 const char *riscv_get_misa_ext_name(uint32_t bit)
1378 {
1379     int idx = riscv_validate_misa_info_idx(bit);
1380     const char *val = misa_ext_info_arr[idx].name;
1381 
1382     g_assert(val != NULL);
1383     return val;
1384 }
1385 
1386 const char *riscv_get_misa_ext_description(uint32_t bit)
1387 {
1388     int idx = riscv_validate_misa_info_idx(bit);
1389     const char *val = misa_ext_info_arr[idx].description;
1390 
1391     g_assert(val != NULL);
1392     return val;
1393 }
1394 
1395 #define MULTI_EXT_CFG_BOOL(_name, _prop, _defval) \
1396     {.name = _name, .offset = CPU_CFG_OFFSET(_prop), \
1397      .enabled = _defval}
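/*
 * e.g. MULTI_EXT_CFG_BOOL("zba", ext_zba, true) exposes a "zba" property
 * backed by cpu->cfg.ext_zba that defaults to enabled.
 */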
1398 
1399 const RISCVCPUMultiExtConfig riscv_cpu_extensions[] = {
1400     /* Defaults for standard extensions */
1401     MULTI_EXT_CFG_BOOL("sscofpmf", ext_sscofpmf, false),
1402     MULTI_EXT_CFG_BOOL("zifencei", ext_zifencei, true),
1403     MULTI_EXT_CFG_BOOL("zicsr", ext_zicsr, true),
1404     MULTI_EXT_CFG_BOOL("zihintntl", ext_zihintntl, true),
1405     MULTI_EXT_CFG_BOOL("zihintpause", ext_zihintpause, true),
1406     MULTI_EXT_CFG_BOOL("zacas", ext_zacas, false),
1407     MULTI_EXT_CFG_BOOL("zawrs", ext_zawrs, true),
1408     MULTI_EXT_CFG_BOOL("zfa", ext_zfa, true),
1409     MULTI_EXT_CFG_BOOL("zfh", ext_zfh, false),
1410     MULTI_EXT_CFG_BOOL("zfhmin", ext_zfhmin, false),
1411     MULTI_EXT_CFG_BOOL("zve32f", ext_zve32f, false),
1412     MULTI_EXT_CFG_BOOL("zve64f", ext_zve64f, false),
1413     MULTI_EXT_CFG_BOOL("zve64d", ext_zve64d, false),
1414     MULTI_EXT_CFG_BOOL("sstc", ext_sstc, true),
1415 
1416     MULTI_EXT_CFG_BOOL("smepmp", ext_smepmp, false),
1417     MULTI_EXT_CFG_BOOL("smstateen", ext_smstateen, false),
1418     MULTI_EXT_CFG_BOOL("svadu", ext_svadu, true),
1419     MULTI_EXT_CFG_BOOL("svinval", ext_svinval, false),
1420     MULTI_EXT_CFG_BOOL("svnapot", ext_svnapot, false),
1421     MULTI_EXT_CFG_BOOL("svpbmt", ext_svpbmt, false),
1422 
1423     MULTI_EXT_CFG_BOOL("zicntr", ext_zicntr, true),
1424     MULTI_EXT_CFG_BOOL("zihpm", ext_zihpm, true),
1425 
1426     MULTI_EXT_CFG_BOOL("zba", ext_zba, true),
1427     MULTI_EXT_CFG_BOOL("zbb", ext_zbb, true),
1428     MULTI_EXT_CFG_BOOL("zbc", ext_zbc, true),
1429     MULTI_EXT_CFG_BOOL("zbkb", ext_zbkb, false),
1430     MULTI_EXT_CFG_BOOL("zbkc", ext_zbkc, false),
1431     MULTI_EXT_CFG_BOOL("zbkx", ext_zbkx, false),
1432     MULTI_EXT_CFG_BOOL("zbs", ext_zbs, true),
1433     MULTI_EXT_CFG_BOOL("zk", ext_zk, false),
1434     MULTI_EXT_CFG_BOOL("zkn", ext_zkn, false),
1435     MULTI_EXT_CFG_BOOL("zknd", ext_zknd, false),
1436     MULTI_EXT_CFG_BOOL("zkne", ext_zkne, false),
1437     MULTI_EXT_CFG_BOOL("zknh", ext_zknh, false),
1438     MULTI_EXT_CFG_BOOL("zkr", ext_zkr, false),
1439     MULTI_EXT_CFG_BOOL("zks", ext_zks, false),
1440     MULTI_EXT_CFG_BOOL("zksed", ext_zksed, false),
1441     MULTI_EXT_CFG_BOOL("zksh", ext_zksh, false),
1442     MULTI_EXT_CFG_BOOL("zkt", ext_zkt, false),
1443 
1444     MULTI_EXT_CFG_BOOL("zdinx", ext_zdinx, false),
1445     MULTI_EXT_CFG_BOOL("zfinx", ext_zfinx, false),
1446     MULTI_EXT_CFG_BOOL("zhinx", ext_zhinx, false),
1447     MULTI_EXT_CFG_BOOL("zhinxmin", ext_zhinxmin, false),
1448 
1449     MULTI_EXT_CFG_BOOL("zicbom", ext_zicbom, true),
1450     MULTI_EXT_CFG_BOOL("zicbop", ext_zicbop, true),
1451     MULTI_EXT_CFG_BOOL("zicboz", ext_zicboz, true),
1452 
1453     MULTI_EXT_CFG_BOOL("zmmul", ext_zmmul, false),
1454 
1455     MULTI_EXT_CFG_BOOL("zca", ext_zca, false),
1456     MULTI_EXT_CFG_BOOL("zcb", ext_zcb, false),
1457     MULTI_EXT_CFG_BOOL("zcd", ext_zcd, false),
1458     MULTI_EXT_CFG_BOOL("zce", ext_zce, false),
1459     MULTI_EXT_CFG_BOOL("zcf", ext_zcf, false),
1460     MULTI_EXT_CFG_BOOL("zcmp", ext_zcmp, false),
1461     MULTI_EXT_CFG_BOOL("zcmt", ext_zcmt, false),
1462     MULTI_EXT_CFG_BOOL("zicond", ext_zicond, false),
1463 
1464     /* Vector cryptography extensions */
1465     MULTI_EXT_CFG_BOOL("zvbb", ext_zvbb, false),
1466     MULTI_EXT_CFG_BOOL("zvbc", ext_zvbc, false),
1467     MULTI_EXT_CFG_BOOL("zvkb", ext_zvkg, false),
1468     MULTI_EXT_CFG_BOOL("zvkg", ext_zvkg, false),
1469     MULTI_EXT_CFG_BOOL("zvkned", ext_zvkned, false),
1470     MULTI_EXT_CFG_BOOL("zvknha", ext_zvknha, false),
1471     MULTI_EXT_CFG_BOOL("zvknhb", ext_zvknhb, false),
1472     MULTI_EXT_CFG_BOOL("zvksed", ext_zvksed, false),
1473     MULTI_EXT_CFG_BOOL("zvksh", ext_zvksh, false),
1474     MULTI_EXT_CFG_BOOL("zvkt", ext_zvkt, false),
1475     MULTI_EXT_CFG_BOOL("zvkn", ext_zvkn, false),
1476     MULTI_EXT_CFG_BOOL("zvknc", ext_zvknc, false),
1477     MULTI_EXT_CFG_BOOL("zvkng", ext_zvkng, false),
1478     MULTI_EXT_CFG_BOOL("zvks", ext_zvks, false),
1479     MULTI_EXT_CFG_BOOL("zvksc", ext_zvksc, false),
1480     MULTI_EXT_CFG_BOOL("zvksg", ext_zvksg, false),
1481 
1482     DEFINE_PROP_END_OF_LIST(),
1483 };
1484 
1485 const RISCVCPUMultiExtConfig riscv_cpu_vendor_exts[] = {
1486     MULTI_EXT_CFG_BOOL("xtheadba", ext_xtheadba, false),
1487     MULTI_EXT_CFG_BOOL("xtheadbb", ext_xtheadbb, false),
1488     MULTI_EXT_CFG_BOOL("xtheadbs", ext_xtheadbs, false),
1489     MULTI_EXT_CFG_BOOL("xtheadcmo", ext_xtheadcmo, false),
1490     MULTI_EXT_CFG_BOOL("xtheadcondmov", ext_xtheadcondmov, false),
1491     MULTI_EXT_CFG_BOOL("xtheadfmemidx", ext_xtheadfmemidx, false),
1492     MULTI_EXT_CFG_BOOL("xtheadfmv", ext_xtheadfmv, false),
1493     MULTI_EXT_CFG_BOOL("xtheadmac", ext_xtheadmac, false),
1494     MULTI_EXT_CFG_BOOL("xtheadmemidx", ext_xtheadmemidx, false),
1495     MULTI_EXT_CFG_BOOL("xtheadmempair", ext_xtheadmempair, false),
1496     MULTI_EXT_CFG_BOOL("xtheadsync", ext_xtheadsync, false),
1497     MULTI_EXT_CFG_BOOL("xventanacondops", ext_XVentanaCondOps, false),
1498 
1499     DEFINE_PROP_END_OF_LIST(),
1500 };
1501 
1502 /* These are experimental so mark with 'x-' */
1503 const RISCVCPUMultiExtConfig riscv_cpu_experimental_exts[] = {
1504     MULTI_EXT_CFG_BOOL("x-smaia", ext_smaia, false),
1505     MULTI_EXT_CFG_BOOL("x-ssaia", ext_ssaia, false),
1506 
1507     MULTI_EXT_CFG_BOOL("x-zvfh", ext_zvfh, false),
1508     MULTI_EXT_CFG_BOOL("x-zvfhmin", ext_zvfhmin, false),
1509 
1510     MULTI_EXT_CFG_BOOL("x-zfbfmin", ext_zfbfmin, false),
1511     MULTI_EXT_CFG_BOOL("x-zvfbfmin", ext_zvfbfmin, false),
1512     MULTI_EXT_CFG_BOOL("x-zvfbfwma", ext_zvfbfwma, false),
1513 
1514     DEFINE_PROP_END_OF_LIST(),
1515 };
1516 
1517 const RISCVCPUMultiExtConfig riscv_cpu_named_features[] = {
1518     MULTI_EXT_CFG_BOOL("svade", svade, true),
1519     MULTI_EXT_CFG_BOOL("zic64b", zic64b, true),
1520 
1521     DEFINE_PROP_END_OF_LIST(),
1522 };
1523 
1524 /* Deprecated entries marked for future removal */
1525 const RISCVCPUMultiExtConfig riscv_cpu_deprecated_exts[] = {
1526     MULTI_EXT_CFG_BOOL("Zifencei", ext_zifencei, true),
1527     MULTI_EXT_CFG_BOOL("Zicsr", ext_zicsr, true),
1528     MULTI_EXT_CFG_BOOL("Zihintntl", ext_zihintntl, true),
1529     MULTI_EXT_CFG_BOOL("Zihintpause", ext_zihintpause, true),
1530     MULTI_EXT_CFG_BOOL("Zawrs", ext_zawrs, true),
1531     MULTI_EXT_CFG_BOOL("Zfa", ext_zfa, true),
1532     MULTI_EXT_CFG_BOOL("Zfh", ext_zfh, false),
1533     MULTI_EXT_CFG_BOOL("Zfhmin", ext_zfhmin, false),
1534     MULTI_EXT_CFG_BOOL("Zve32f", ext_zve32f, false),
1535     MULTI_EXT_CFG_BOOL("Zve64f", ext_zve64f, false),
1536     MULTI_EXT_CFG_BOOL("Zve64d", ext_zve64d, false),
1537 
1538     DEFINE_PROP_END_OF_LIST(),
1539 };
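
/*
 * The capitalized spellings above are kept for backwards
 * compatibility only: something like "-cpu rv64,Zifencei=true" is
 * still accepted, but the lowercase properties declared with the
 * other extensions are the canonical names.
 */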
1540 
1541 static void cpu_set_prop_err(RISCVCPU *cpu, const char *propname,
1542                              Error **errp)
1543 {
1544     g_autofree char *cpuname = riscv_cpu_get_name(cpu);
1545     error_setg(errp, "CPU '%s' does not allow changing the value of '%s'",
1546                cpuname, propname);
1547 }
1548 
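/*
 * Deprecated "pmu-num" handling: the value is translated into an
 * equivalent "pmu-mask" that skips the three fixed counters
 * (cycle, time, instret).  For example, "-cpu rv64,pmu-num=6" ends
 * up as pmu_mask = MAKE_64BIT_MASK(3, 6) = 0x1f8, i.e. programmable
 * counters 3..8.
 */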
1549 static void prop_pmu_num_set(Object *obj, Visitor *v, const char *name,
1550                              void *opaque, Error **errp)
1551 {
1552     RISCVCPU *cpu = RISCV_CPU(obj);
1553     uint8_t pmu_num, curr_pmu_num;
1554     uint32_t pmu_mask;
1555 
1556     if (!visit_type_uint8(v, name, &pmu_num, errp)) {
             return;
         }
1557 
1558     curr_pmu_num = ctpop32(cpu->cfg.pmu_mask);
1559 
1560     if (pmu_num != curr_pmu_num && riscv_cpu_is_vendor(obj)) {
1561         cpu_set_prop_err(cpu, name, errp);
1562         error_append_hint(errp, "Current '%s' val: %u\n",
1563                           name, curr_pmu_num);
1564         return;
1565     }
1566 
1567     if (pmu_num > (RV_MAX_MHPMCOUNTERS - 3)) {
1568         error_setg(errp, "Number of counters exceeds maximum available");
1569         return;
1570     }
1571 
1572     if (pmu_num == 0) {
1573         pmu_mask = 0;
1574     } else {
1575         pmu_mask = MAKE_64BIT_MASK(3, pmu_num);
1576     }
1577 
1578     warn_report("\"pmu-num\" property is deprecated; use \"pmu-mask\"");
1579     cpu->cfg.pmu_mask = pmu_mask;
1580     cpu_option_add_user_setting("pmu-mask", pmu_mask);
1581 }
1582 
1583 static void prop_pmu_num_get(Object *obj, Visitor *v, const char *name,
1584                              void *opaque, Error **errp)
1585 {
1586     RISCVCPU *cpu = RISCV_CPU(obj);
1587     uint8_t pmu_num = ctpop32(cpu->cfg.pmu_mask);
1588 
1589     visit_type_uint8(v, name, &pmu_num, errp);
1590 }
1591 
1592 static const PropertyInfo prop_pmu_num = {
1593     .name = "pmu-num",
1594     .get = prop_pmu_num_get,
1595     .set = prop_pmu_num_set,
1596 };
1597 
1598 static void prop_pmu_mask_set(Object *obj, Visitor *v, const char *name,
1599                              void *opaque, Error **errp)
1600 {
1601     RISCVCPU *cpu = RISCV_CPU(obj);
1602     uint32_t value;
1603     uint8_t pmu_num;
1604 
1605     if (!visit_type_uint32(v, name, &value, errp)) {
             return;
         }
1606 
1607     if (value != cpu->cfg.pmu_mask && riscv_cpu_is_vendor(obj)) {
1608         cpu_set_prop_err(cpu, name, errp);
1609         error_append_hint(errp, "Current '%s' val: %x\n",
1610                           name, cpu->cfg.pmu_mask);
1611         return;
1612     }
1613 
1614     pmu_num = ctpop32(value);
1615 
1616     if (pmu_num > (RV_MAX_MHPMCOUNTERS - 3)) {
1617         error_setg(errp, "Number of counters exceeds maximum available");
1618         return;
1619     }
1620 
1621     cpu_option_add_user_setting(name, value);
1622     cpu->cfg.pmu_mask = value;
1623 }
1624 
1625 static void prop_pmu_mask_get(Object *obj, Visitor *v, const char *name,
1626                              void *opaque, Error **errp)
1627 {
1628     uint32_t pmu_mask = RISCV_CPU(obj)->cfg.pmu_mask;
1629 
1630     visit_type_uint32(v, name, &pmu_mask, errp);
1631 }
1632 
1633 static const PropertyInfo prop_pmu_mask = {
1634     .name = "pmu-mask",
1635     .get = prop_pmu_mask_get,
1636     .set = prop_pmu_mask_set,
1637 };
1638 
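/*
 * Like most setters in this file, "mmu" and "pmp" may be freely
 * toggled on dynamic and bare CPUs (e.g. "-cpu rv64,mmu=off") but
 * refuse to take a value different from the preset on vendor CPUs.
 */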
1639 static void prop_mmu_set(Object *obj, Visitor *v, const char *name,
1640                          void *opaque, Error **errp)
1641 {
1642     RISCVCPU *cpu = RISCV_CPU(obj);
1643     bool value;
1644 
1645     if (!visit_type_bool(v, name, &value, errp)) {
             return;
         }
1646 
1647     if (cpu->cfg.mmu != value && riscv_cpu_is_vendor(obj)) {
1648         cpu_set_prop_err(cpu, "mmu", errp);
1649         return;
1650     }
1651 
1652     cpu_option_add_user_setting(name, value);
1653     cpu->cfg.mmu = value;
1654 }
1655 
1656 static void prop_mmu_get(Object *obj, Visitor *v, const char *name,
1657                          void *opaque, Error **errp)
1658 {
1659     bool value = RISCV_CPU(obj)->cfg.mmu;
1660 
1661     visit_type_bool(v, name, &value, errp);
1662 }
1663 
1664 static const PropertyInfo prop_mmu = {
1665     .name = "mmu",
1666     .get = prop_mmu_get,
1667     .set = prop_mmu_set,
1668 };
1669 
1670 static void prop_pmp_set(Object *obj, Visitor *v, const char *name,
1671                          void *opaque, Error **errp)
1672 {
1673     RISCVCPU *cpu = RISCV_CPU(obj);
1674     bool value;
1675 
1676     if (!visit_type_bool(v, name, &value, errp)) {
             return;
         }
1677 
1678     if (cpu->cfg.pmp != value && riscv_cpu_is_vendor(obj)) {
1679         cpu_set_prop_err(cpu, name, errp);
1680         return;
1681     }
1682 
1683     cpu_option_add_user_setting(name, value);
1684     cpu->cfg.pmp = value;
1685 }
1686 
1687 static void prop_pmp_get(Object *obj, Visitor *v, const char *name,
1688                          void *opaque, Error **errp)
1689 {
1690     bool value = RISCV_CPU(obj)->cfg.pmp;
1691 
1692     visit_type_bool(v, name, &value, errp);
1693 }
1694 
1695 static const PropertyInfo prop_pmp = {
1696     .name = "pmp",
1697     .get = prop_pmp_get,
1698     .set = prop_pmp_set,
1699 };
1700 
1701 static int priv_spec_from_str(const char *priv_spec_str)
1702 {
1703     int priv_version = -1;
1704 
1705     if (!g_strcmp0(priv_spec_str, PRIV_VER_1_12_0_STR)) {
1706         priv_version = PRIV_VERSION_1_12_0;
1707     } else if (!g_strcmp0(priv_spec_str, PRIV_VER_1_11_0_STR)) {
1708         priv_version = PRIV_VERSION_1_11_0;
1709     } else if (!g_strcmp0(priv_spec_str, PRIV_VER_1_10_0_STR)) {
1710         priv_version = PRIV_VERSION_1_10_0;
1711     }
1712 
1713     return priv_version;
1714 }
1715 
1716 static const char *priv_spec_to_str(int priv_version)
1717 {
1718     switch (priv_version) {
1719     case PRIV_VERSION_1_10_0:
1720         return PRIV_VER_1_10_0_STR;
1721     case PRIV_VERSION_1_11_0:
1722         return PRIV_VER_1_11_0_STR;
1723     case PRIV_VERSION_1_12_0:
1724         return PRIV_VER_1_12_0_STR;
1725     default:
1726         return NULL;
1727     }
1728 }
1729 
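/*
 * "priv_spec" takes one of the PRIV_VER_*_STR spellings handled in
 * priv_spec_from_str() above, e.g. something like
 * "-cpu rv64,priv_spec=v1.11.0"; anything else is rejected.
 */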
1730 static void prop_priv_spec_set(Object *obj, Visitor *v, const char *name,
1731                                void *opaque, Error **errp)
1732 {
1733     RISCVCPU *cpu = RISCV_CPU(obj);
1734     g_autofree char *value = NULL;
1735     int priv_version = -1;
1736 
1737     if (!visit_type_str(v, name, &value, errp)) {
             return;
         }
1738 
1739     priv_version = priv_spec_from_str(value);
1740     if (priv_version < 0) {
1741         error_setg(errp, "Unsupported privilege spec version '%s'", value);
1742         return;
1743     }
1744 
1745     if (priv_version != cpu->env.priv_ver && riscv_cpu_is_vendor(obj)) {
1746         cpu_set_prop_err(cpu, name, errp);
1747         error_append_hint(errp, "Current '%s' val: %s\n", name,
1748                           object_property_get_str(obj, name, NULL));
1749         return;
1750     }
1751 
1752     cpu_option_add_user_setting(name, priv_version);
1753     cpu->env.priv_ver = priv_version;
1754 }
1755 
1756 static void prop_priv_spec_get(Object *obj, Visitor *v, const char *name,
1757                                void *opaque, Error **errp)
1758 {
1759     RISCVCPU *cpu = RISCV_CPU(obj);
1760     const char *value = priv_spec_to_str(cpu->env.priv_ver);
1761 
1762     visit_type_str(v, name, (char **)&value, errp);
1763 }
1764 
1765 static const PropertyInfo prop_priv_spec = {
1766     .name = "priv_spec",
1767     .get = prop_priv_spec_get,
1768     .set = prop_priv_spec_set,
1769 };
1770 
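/*
 * "vext_spec" only accepts VEXT_VER_1_00_0_STR, i.e. the ratified
 * 1.0 vector spec; any other string is rejected below.
 */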
1771 static void prop_vext_spec_set(Object *obj, Visitor *v, const char *name,
1772                                void *opaque, Error **errp)
1773 {
1774     RISCVCPU *cpu = RISCV_CPU(obj);
1775     g_autofree char *value = NULL;
1776 
1777     if (!visit_type_str(v, name, &value, errp)) {
             return;
         }
1778 
1779     if (g_strcmp0(value, VEXT_VER_1_00_0_STR) != 0) {
1780         error_setg(errp, "Unsupported vector spec version '%s'", value);
1781         return;
1782     }
1783 
1784     cpu_option_add_user_setting(name, VEXT_VERSION_1_00_0);
1785     cpu->env.vext_ver = VEXT_VERSION_1_00_0;
1786 }
1787 
1788 static void prop_vext_spec_get(Object *obj, Visitor *v, const char *name,
1789                                void *opaque, Error **errp)
1790 {
1791     const char *value = VEXT_VER_1_00_0_STR;
1792 
1793     visit_type_str(v, name, (char **)&value, errp);
1794 }
1795 
1796 static const PropertyInfo prop_vext_spec = {
1797     .name = "vext_spec",
1798     .get = prop_vext_spec_get,
1799     .set = prop_vext_spec_set,
1800 };
1801 
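/*
 * Vector geometry options, e.g. "-cpu rv64,v=true,vlen=256,elen=64".
 * The setters below only check that the values are powers of 2 and
 * that vendor presets are not being overridden; further validation
 * is done when the CPU is realized.
 */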
1802 static void prop_vlen_set(Object *obj, Visitor *v, const char *name,
1803                          void *opaque, Error **errp)
1804 {
1805     RISCVCPU *cpu = RISCV_CPU(obj);
1806     uint16_t value;
1807 
1808     if (!visit_type_uint16(v, name, &value, errp)) {
1809         return;
1810     }
1811 
1812     if (!is_power_of_2(value)) {
1813         error_setg(errp, "Vector extension VLEN must be a power of 2");
1814         return;
1815     }
1816 
1817     if (value != cpu->cfg.vlen && riscv_cpu_is_vendor(obj)) {
1818         cpu_set_prop_err(cpu, name, errp);
1819         error_append_hint(errp, "Current '%s' val: %u\n",
1820                           name, cpu->cfg.vlen);
1821         return;
1822     }
1823 
1824     cpu_option_add_user_setting(name, value);
1825     cpu->cfg.vlen = value;
1826 }
1827 
1828 static void prop_vlen_get(Object *obj, Visitor *v, const char *name,
1829                          void *opaque, Error **errp)
1830 {
1831     uint16_t value = RISCV_CPU(obj)->cfg.vlen;
1832 
1833     visit_type_uint16(v, name, &value, errp);
1834 }
1835 
1836 static const PropertyInfo prop_vlen = {
1837     .name = "vlen",
1838     .get = prop_vlen_get,
1839     .set = prop_vlen_set,
1840 };
1841 
1842 static void prop_elen_set(Object *obj, Visitor *v, const char *name,
1843                          void *opaque, Error **errp)
1844 {
1845     RISCVCPU *cpu = RISCV_CPU(obj);
1846     uint16_t value;
1847 
1848     if (!visit_type_uint16(v, name, &value, errp)) {
1849         return;
1850     }
1851 
1852     if (!is_power_of_2(value)) {
1853         error_setg(errp, "Vector extension ELEN must be a power of 2");
1854         return;
1855     }
1856 
1857     if (value != cpu->cfg.elen && riscv_cpu_is_vendor(obj)) {
1858         cpu_set_prop_err(cpu, name, errp);
1859         error_append_hint(errp, "Current '%s' val: %u\n",
1860                           name, cpu->cfg.elen);
1861         return;
1862     }
1863 
1864     cpu_option_add_user_setting(name, value);
1865     cpu->cfg.elen = value;
1866 }
1867 
1868 static void prop_elen_get(Object *obj, Visitor *v, const char *name,
1869                          void *opaque, Error **errp)
1870 {
1871     uint16_t value = RISCV_CPU(obj)->cfg.elen;
1872 
1873     visit_type_uint16(v, name, &value, errp);
1874 }
1875 
1876 static const PropertyInfo prop_elen = {
1877     .name = "elen",
1878     .get = prop_elen_get,
1879     .set = prop_elen_set,
1880 };
1881 
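/*
 * Cache-block sizes for the Zicbom/Zicbop operations, e.g.
 * "-cpu rv64,cbom_blocksize=64".  As with the other numeric options,
 * vendor CPUs only accept their preset values.
 */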
1882 static void prop_cbom_blksize_set(Object *obj, Visitor *v, const char *name,
1883                                   void *opaque, Error **errp)
1884 {
1885     RISCVCPU *cpu = RISCV_CPU(obj);
1886     uint16_t value;
1887 
1888     if (!visit_type_uint16(v, name, &value, errp)) {
1889         return;
1890     }
1891 
1892     if (value != cpu->cfg.cbom_blocksize && riscv_cpu_is_vendor(obj)) {
1893         cpu_set_prop_err(cpu, name, errp);
1894         error_append_hint(errp, "Current '%s' val: %u\n",
1895                           name, cpu->cfg.cbom_blocksize);
1896         return;
1897     }
1898 
1899     cpu_option_add_user_setting(name, value);
1900     cpu->cfg.cbom_blocksize = value;
1901 }
1902 
1903 static void prop_cbom_blksize_get(Object *obj, Visitor *v, const char *name,
1904                          void *opaque, Error **errp)
1905 {
1906     uint16_t value = RISCV_CPU(obj)->cfg.cbom_blocksize;
1907 
1908     visit_type_uint16(v, name, &value, errp);
1909 }
1910 
1911 static const PropertyInfo prop_cbom_blksize = {
1912     .name = "cbom_blocksize",
1913     .get = prop_cbom_blksize_get,
1914     .set = prop_cbom_blksize_set,
1915 };
1916 
1917 static void prop_cbop_blksize_set(Object *obj, Visitor *v, const char *name,
1918                                   void *opaque, Error **errp)
1919 {
1920     RISCVCPU *cpu = RISCV_CPU(obj);
1921     uint16_t value;
1922 
1923     if (!visit_type_uint16(v, name, &value, errp)) {
1924         return;
1925     }
1926 
1927     if (value != cpu->cfg.cbop_blocksize && riscv_cpu_is_vendor(obj)) {
1928         cpu_set_prop_err(cpu, name, errp);
1929         error_append_hint(errp, "Current '%s' val: %u\n",
1930                           name, cpu->cfg.cbop_blocksize);
1931         return;
1932     }
1933 
1934     cpu_option_add_user_setting(name, value);
1935     cpu->cfg.cbop_blocksize = value;
1936 }
1937 
1938 static void prop_cbop_blksize_get(Object *obj, Visitor *v, const char *name,
1939                          void *opaque, Error **errp)
1940 {
1941     uint16_t value = RISCV_CPU(obj)->cfg.cbop_blocksize;
1942 
1943     visit_type_uint16(v, name, &value, errp);
1944 }
1945 
1946 static const PropertyInfo prop_cbop_blksize = {
1947     .name = "cbop_blocksize",
1948     .get = prop_cbop_blksize_get,
1949     .set = prop_cbop_blksize_set,
1950 };
1951 
1952 Property riscv_cpu_options[] = {
1953     DEFINE_PROP_UINT16("cboz_blocksize", RISCVCPU, cfg.cboz_blocksize, 64),
1954 
1955     DEFINE_PROP_END_OF_LIST(),
1956 };
1957 
1958 /*
1959  * RVA22U64 defines some 'named features' or 'synthetic extensions'
1960  * that are cache related: Za64rs, Zic64b, Ziccif, Ziccrse, Ziccamoa
1961  * and Zicclsm. We do not implement caching in QEMU, so we consider
1962  * all of these named features to be always enabled.
1963  *
1964  * There is no riscv,isa update for them (nor for zic64b, despite it
1965  * having a cfg offset) at this moment.
1966  */
1967 static RISCVCPUProfile RVA22U64 = {
1968     .parent = NULL,
1969     .name = "rva22u64",
1970     .misa_ext = RVI | RVM | RVA | RVF | RVD | RVC | RVU,
1971     .priv_spec = RISCV_PROFILE_ATTR_UNUSED,
1972     .satp_mode = RISCV_PROFILE_ATTR_UNUSED,
1973     .ext_offsets = {
1974         CPU_CFG_OFFSET(ext_zicsr), CPU_CFG_OFFSET(ext_zihintpause),
1975         CPU_CFG_OFFSET(ext_zba), CPU_CFG_OFFSET(ext_zbb),
1976         CPU_CFG_OFFSET(ext_zbs), CPU_CFG_OFFSET(ext_zfhmin),
1977         CPU_CFG_OFFSET(ext_zkt), CPU_CFG_OFFSET(ext_zicntr),
1978         CPU_CFG_OFFSET(ext_zihpm), CPU_CFG_OFFSET(ext_zicbom),
1979         CPU_CFG_OFFSET(ext_zicbop), CPU_CFG_OFFSET(ext_zicboz),
1980 
1981         /* mandatory named features for this profile */
1982         CPU_CFG_OFFSET(zic64b),
1983 
1984         RISCV_PROFILE_EXT_LIST_END
1985     }
1986 };
1987 
1988 /*
1989  * As with RVA22U64, RVA22S64 also defines 'named features'.
1990  *
1991  * Cache-related features that we consider enabled since we do not
1992  * implement caches: Ssccptr
1993  *
1994  * Other named features that we already implement: Sstvecd, Sstvala,
1995  * Sscounterenw
1996  *
1997  * Named features that we need to enable: svade
1998  *
1999  * The remaining features/extensions come from RVA22U64.
2000  */
2001 static RISCVCPUProfile RVA22S64 = {
2002     .parent = &RVA22U64,
2003     .name = "rva22s64",
2004     .misa_ext = RVS,
2005     .priv_spec = PRIV_VERSION_1_12_0,
2006     .satp_mode = VM_1_10_SV39,
2007     .ext_offsets = {
2008         /* rva22s64 exts */
2009         CPU_CFG_OFFSET(ext_zifencei), CPU_CFG_OFFSET(ext_svpbmt),
2010         CPU_CFG_OFFSET(ext_svinval),
2011 
2012         /* rva22s64 named features */
2013         CPU_CFG_OFFSET(svade),
2014 
2015         RISCV_PROFILE_EXT_LIST_END
2016     }
2017 };
2018 
2019 RISCVCPUProfile *riscv_profiles[] = {
2020     &RVA22U64,
2021     &RVA22S64,
2022     NULL,
2023 };
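
/*
 * Profiles are exposed both as the bare rva22u64/rva22s64 CPU types
 * registered at the end of this file and, on the TCG side, as
 * boolean CPU options on top of a bare CPU, e.g. something like
 * "-cpu rv64i,rva22u64=true".
 */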
2024 
2025 static Property riscv_cpu_properties[] = {
2026     DEFINE_PROP_BOOL("debug", RISCVCPU, cfg.debug, true),
2027 
2028     {.name = "pmu-mask", .info = &prop_pmu_mask},
2029     {.name = "pmu-num", .info = &prop_pmu_num}, /* Deprecated */
2030 
2031     {.name = "mmu", .info = &prop_mmu},
2032     {.name = "pmp", .info = &prop_pmp},
2033 
2034     {.name = "priv_spec", .info = &prop_priv_spec},
2035     {.name = "vext_spec", .info = &prop_vext_spec},
2036 
2037     {.name = "vlen", .info = &prop_vlen},
2038     {.name = "elen", .info = &prop_elen},
2039 
2040     {.name = "cbom_blocksize", .info = &prop_cbom_blksize},
2041     {.name = "cbop_blocksize", .info = &prop_cbop_blksize},
2042 
2043 #ifndef CONFIG_USER_ONLY
2044     DEFINE_PROP_UINT64("resetvec", RISCVCPU, env.resetvec, DEFAULT_RSTVEC),
2045 #endif
2046 
2047     DEFINE_PROP_BOOL("short-isa-string", RISCVCPU, cfg.short_isa_string, false),
2048 
2049     DEFINE_PROP_BOOL("rvv_ta_all_1s", RISCVCPU, cfg.rvv_ta_all_1s, false),
2050     DEFINE_PROP_BOOL("rvv_ma_all_1s", RISCVCPU, cfg.rvv_ma_all_1s, false),
2051 
2052     /*
2053      * write_misa() is marked as experimental for now, so mark
2054      * it with an 'x-' prefix and default it to 'false'.
2055      */
2056     DEFINE_PROP_BOOL("x-misa-w", RISCVCPU, cfg.misa_w, false),
2057     DEFINE_PROP_END_OF_LIST(),
2058 };
2059 
2060 #if defined(TARGET_RISCV64)
2061 static void rva22u64_profile_cpu_init(Object *obj)
2062 {
2063     rv64i_bare_cpu_init(obj);
2064 
2065     RVA22U64.enabled = true;
2066 }
2067 
2068 static void rva22s64_profile_cpu_init(Object *obj)
2069 {
2070     rv64i_bare_cpu_init(obj);
2071 
2072     RVA22S64.enabled = true;
2073 }
2074 #endif
2075 
2076 static const gchar *riscv_gdb_arch_name(CPUState *cs)
2077 {
2078     RISCVCPU *cpu = RISCV_CPU(cs);
2079     CPURISCVState *env = &cpu->env;
2080 
2081     switch (riscv_cpu_mxl(env)) {
2082     case MXL_RV32:
2083         return "riscv:rv32";
2084     case MXL_RV64:
2085     case MXL_RV128:
2086         return "riscv:rv64";
2087     default:
2088         g_assert_not_reached();
2089     }
2090 }
2091 
2092 static const char *riscv_gdb_get_dynamic_xml(CPUState *cs, const char *xmlname)
2093 {
2094     RISCVCPU *cpu = RISCV_CPU(cs);
2095 
2096     if (strcmp(xmlname, "riscv-csr.xml") == 0) {
2097         return cpu->dyn_csr_xml;
2098     } else if (strcmp(xmlname, "riscv-vector.xml") == 0) {
2099         return cpu->dyn_vreg_xml;
2100     }
2101 
2102     return NULL;
2103 }
2104 
2105 #ifndef CONFIG_USER_ONLY
2106 static int64_t riscv_get_arch_id(CPUState *cs)
2107 {
2108     RISCVCPU *cpu = RISCV_CPU(cs);
2109 
2110     return cpu->env.mhartid;
2111 }
2112 
2113 #include "hw/core/sysemu-cpu-ops.h"
2114 
2115 static const struct SysemuCPUOps riscv_sysemu_ops = {
2116     .get_phys_page_debug = riscv_cpu_get_phys_page_debug,
2117     .write_elf64_note = riscv_cpu_write_elf64_note,
2118     .write_elf32_note = riscv_cpu_write_elf32_note,
2119     .legacy_vmsd = &vmstate_riscv_cpu,
2120 };
2121 #endif
2122 
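/*
 * The machine ID registers can only be changed on dynamic CPUs, e.g.
 * "-cpu rv64,mvendorid=0x1234" (an arbitrary example value); other
 * CPU types keep whatever their init functions configured.
 */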
2123 static void cpu_set_mvendorid(Object *obj, Visitor *v, const char *name,
2124                               void *opaque, Error **errp)
2125 {
2126     bool dynamic_cpu = riscv_cpu_is_dynamic(obj);
2127     RISCVCPU *cpu = RISCV_CPU(obj);
2128     uint32_t prev_val = cpu->cfg.mvendorid;
2129     uint32_t value;
2130 
2131     if (!visit_type_uint32(v, name, &value, errp)) {
2132         return;
2133     }
2134 
2135     if (!dynamic_cpu && prev_val != value) {
2136         error_setg(errp, "Unable to change %s mvendorid (0x%x)",
2137                    object_get_typename(obj), prev_val);
2138         return;
2139     }
2140 
2141     cpu->cfg.mvendorid = value;
2142 }
2143 
2144 static void cpu_get_mvendorid(Object *obj, Visitor *v, const char *name,
2145                               void *opaque, Error **errp)
2146 {
2147     uint32_t value = RISCV_CPU(obj)->cfg.mvendorid;
2148 
2149     visit_type_uint32(v, name, &value, errp);
2150 }
2151 
2152 static void cpu_set_mimpid(Object *obj, Visitor *v, const char *name,
2153                            void *opaque, Error **errp)
2154 {
2155     bool dynamic_cpu = riscv_cpu_is_dynamic(obj);
2156     RISCVCPU *cpu = RISCV_CPU(obj);
2157     uint64_t prev_val = cpu->cfg.mimpid;
2158     uint64_t value;
2159 
2160     if (!visit_type_uint64(v, name, &value, errp)) {
2161         return;
2162     }
2163 
2164     if (!dynamic_cpu && prev_val != value) {
2165         error_setg(errp, "Unable to change %s mimpid (0x%" PRIx64 ")",
2166                    object_get_typename(obj), prev_val);
2167         return;
2168     }
2169 
2170     cpu->cfg.mimpid = value;
2171 }
2172 
2173 static void cpu_get_mimpid(Object *obj, Visitor *v, const char *name,
2174                            void *opaque, Error **errp)
2175 {
2176     uint64_t value = RISCV_CPU(obj)->cfg.mimpid;
2177 
2178     visit_type_uint64(v, name, &value, errp);
2179 }
2180 
2181 static void cpu_set_marchid(Object *obj, Visitor *v, const char *name,
2182                             void *opaque, Error **errp)
2183 {
2184     bool dynamic_cpu = riscv_cpu_is_dynamic(obj);
2185     RISCVCPU *cpu = RISCV_CPU(obj);
2186     uint64_t prev_val = cpu->cfg.marchid;
2187     uint64_t value, invalid_val;
2188     uint32_t mxlen = 0;
2189 
2190     if (!visit_type_uint64(v, name, &value, errp)) {
2191         return;
2192     }
2193 
2194     if (!dynamic_cpu && prev_val != value) {
2195         error_setg(errp, "Unable to change %s marchid (0x%" PRIx64 ")",
2196                    object_get_typename(obj), prev_val);
2197         return;
2198     }
2199 
2200     switch (riscv_cpu_mxl(&cpu->env)) {
2201     case MXL_RV32:
2202         mxlen = 32;
2203         break;
2204     case MXL_RV64:
2205     case MXL_RV128:
2206         mxlen = 64;
2207         break;
2208     default:
2209         g_assert_not_reached();
2210     }
2211 
2212     invalid_val = 1ULL << (mxlen - 1);
2213 
2214     if (value == invalid_val) {
2215         error_setg(errp, "Unable to set marchid with MSB (%u) bit set "
2216                          "and the remaining bits zero", mxlen);
2217         return;
2218     }
2219 
2220     cpu->cfg.marchid = value;
2221 }
2222 
2223 static void cpu_get_marchid(Object *obj, Visitor *v, const char *name,
2224                            void *opaque, Error **errp)
2225 {
2226     uint64_t value = RISCV_CPU(obj)->cfg.marchid;
2227 
2228     visit_type_uint64(v, name, &value, errp);
2229 }
2230 
2231 static void riscv_cpu_class_init(ObjectClass *c, void *data)
2232 {
2233     RISCVCPUClass *mcc = RISCV_CPU_CLASS(c);
2234     CPUClass *cc = CPU_CLASS(c);
2235     DeviceClass *dc = DEVICE_CLASS(c);
2236     ResettableClass *rc = RESETTABLE_CLASS(c);
2237 
2238     device_class_set_parent_realize(dc, riscv_cpu_realize,
2239                                     &mcc->parent_realize);
2240 
2241     resettable_class_set_parent_phases(rc, NULL, riscv_cpu_reset_hold, NULL,
2242                                        &mcc->parent_phases);
2243 
2244     cc->class_by_name = riscv_cpu_class_by_name;
2245     cc->has_work = riscv_cpu_has_work;
2246     cc->mmu_index = riscv_cpu_mmu_index;
2247     cc->dump_state = riscv_cpu_dump_state;
2248     cc->set_pc = riscv_cpu_set_pc;
2249     cc->get_pc = riscv_cpu_get_pc;
2250     cc->gdb_read_register = riscv_cpu_gdb_read_register;
2251     cc->gdb_write_register = riscv_cpu_gdb_write_register;
2252     cc->gdb_num_core_regs = 33;
2253     cc->gdb_stop_before_watchpoint = true;
2254     cc->disas_set_info = riscv_cpu_disas_set_info;
2255 #ifndef CONFIG_USER_ONLY
2256     cc->sysemu_ops = &riscv_sysemu_ops;
2257     cc->get_arch_id = riscv_get_arch_id;
2258 #endif
2259     cc->gdb_arch_name = riscv_gdb_arch_name;
2260     cc->gdb_get_dynamic_xml = riscv_gdb_get_dynamic_xml;
2261 
2262     object_class_property_add(c, "mvendorid", "uint32", cpu_get_mvendorid,
2263                               cpu_set_mvendorid, NULL, NULL);
2264 
2265     object_class_property_add(c, "mimpid", "uint64", cpu_get_mimpid,
2266                               cpu_set_mimpid, NULL, NULL);
2267 
2268     object_class_property_add(c, "marchid", "uint64", cpu_get_marchid,
2269                               cpu_set_marchid, NULL, NULL);
2270 
2271     device_class_set_props(dc, riscv_cpu_properties);
2272 }
2273 
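/*
 * ISA string generation: riscv_isa_string() emits the base and the
 * enabled single-letter extensions (e.g. "rv64imafdc") and, unless
 * short-isa-string is set, riscv_isa_string_ext() appends every
 * enabled multi-letter extension as "_zicsr_zifencei_...".
 */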
2274 static void riscv_isa_string_ext(RISCVCPU *cpu, char **isa_str,
2275                                  int max_str_len)
2276 {
2277     const RISCVIsaExtData *edata;
2278     char *old = *isa_str;
2279     char *new = *isa_str;
2280 
2281     for (edata = isa_edata_arr; edata && edata->name; edata++) {
2282         if (isa_ext_is_enabled(cpu, edata->ext_enable_offset)) {
2283             new = g_strconcat(old, "_", edata->name, NULL);
2284             g_free(old);
2285             old = new;
2286         }
2287     }
2288 
2289     *isa_str = new;
2290 }
2291 
2292 char *riscv_isa_string(RISCVCPU *cpu)
2293 {
2294     int i;
2295     const size_t maxlen = sizeof("rv128") + sizeof(riscv_single_letter_exts);
2296     char *isa_str = g_new(char, maxlen);
2297     char *p = isa_str + snprintf(isa_str, maxlen, "rv%d", TARGET_LONG_BITS);
2298     for (i = 0; i < sizeof(riscv_single_letter_exts) - 1; i++) {
2299         if (cpu->env.misa_ext & RV(riscv_single_letter_exts[i])) {
2300             *p++ = qemu_tolower(riscv_single_letter_exts[i]);
2301         }
2302     }
2303     *p = '\0';
2304     if (!cpu->cfg.short_isa_string) {
2305         riscv_isa_string_ext(cpu, &isa_str, maxlen);
2306     }
2307     return isa_str;
2308 }
2309 
2310 #define DEFINE_CPU(type_name, initfn)      \
2311     {                                      \
2312         .name = type_name,                 \
2313         .parent = TYPE_RISCV_CPU,          \
2314         .instance_init = initfn            \
2315     }
2316 
2317 #define DEFINE_DYNAMIC_CPU(type_name, initfn) \
2318     {                                         \
2319         .name = type_name,                    \
2320         .parent = TYPE_RISCV_DYNAMIC_CPU,     \
2321         .instance_init = initfn               \
2322     }
2323 
2324 #define DEFINE_VENDOR_CPU(type_name, initfn) \
2325     {                                        \
2326         .name = type_name,                   \
2327         .parent = TYPE_RISCV_VENDOR_CPU,     \
2328         .instance_init = initfn              \
2329     }
2330 
2331 #define DEFINE_BARE_CPU(type_name, initfn) \
2332     {                                      \
2333         .name = type_name,                 \
2334         .parent = TYPE_RISCV_BARE_CPU,     \
2335         .instance_init = initfn            \
2336     }
2337 
2338 #define DEFINE_PROFILE_CPU(type_name, initfn) \
2339     {                                         \
2340         .name = type_name,                    \
2341         .parent = TYPE_RISCV_BARE_CPU,        \
2342         .instance_init = initfn               \
2343     }
2344 
2345 static const TypeInfo riscv_cpu_type_infos[] = {
2346     {
2347         .name = TYPE_RISCV_CPU,
2348         .parent = TYPE_CPU,
2349         .instance_size = sizeof(RISCVCPU),
2350         .instance_align = __alignof(RISCVCPU),
2351         .instance_init = riscv_cpu_init,
2352         .instance_post_init = riscv_cpu_post_init,
2353         .abstract = true,
2354         .class_size = sizeof(RISCVCPUClass),
2355         .class_init = riscv_cpu_class_init,
2356     },
2357     {
2358         .name = TYPE_RISCV_DYNAMIC_CPU,
2359         .parent = TYPE_RISCV_CPU,
2360         .abstract = true,
2361     },
2362     {
2363         .name = TYPE_RISCV_VENDOR_CPU,
2364         .parent = TYPE_RISCV_CPU,
2365         .abstract = true,
2366     },
2367     {
2368         .name = TYPE_RISCV_BARE_CPU,
2369         .parent = TYPE_RISCV_CPU,
2370         .abstract = true,
2371     },
2372     DEFINE_DYNAMIC_CPU(TYPE_RISCV_CPU_ANY,      riscv_any_cpu_init),
2373     DEFINE_DYNAMIC_CPU(TYPE_RISCV_CPU_MAX,      riscv_max_cpu_init),
2374 #if defined(TARGET_RISCV32)
2375     DEFINE_DYNAMIC_CPU(TYPE_RISCV_CPU_BASE32,   rv32_base_cpu_init),
2376     DEFINE_VENDOR_CPU(TYPE_RISCV_CPU_IBEX,        rv32_ibex_cpu_init),
2377     DEFINE_VENDOR_CPU(TYPE_RISCV_CPU_SIFIVE_E31,  rv32_sifive_e_cpu_init),
2378     DEFINE_VENDOR_CPU(TYPE_RISCV_CPU_SIFIVE_E34,  rv32_imafcu_nommu_cpu_init),
2379     DEFINE_VENDOR_CPU(TYPE_RISCV_CPU_SIFIVE_U34,  rv32_sifive_u_cpu_init),
2380 #elif defined(TARGET_RISCV64)
2381     DEFINE_DYNAMIC_CPU(TYPE_RISCV_CPU_BASE64,   rv64_base_cpu_init),
2382     DEFINE_VENDOR_CPU(TYPE_RISCV_CPU_SIFIVE_E51,  rv64_sifive_e_cpu_init),
2383     DEFINE_VENDOR_CPU(TYPE_RISCV_CPU_SIFIVE_U54,  rv64_sifive_u_cpu_init),
2384     DEFINE_VENDOR_CPU(TYPE_RISCV_CPU_SHAKTI_C,    rv64_sifive_u_cpu_init),
2385     DEFINE_VENDOR_CPU(TYPE_RISCV_CPU_THEAD_C906,  rv64_thead_c906_cpu_init),
2386     DEFINE_VENDOR_CPU(TYPE_RISCV_CPU_VEYRON_V1,   rv64_veyron_v1_cpu_init),
2387     DEFINE_DYNAMIC_CPU(TYPE_RISCV_CPU_BASE128,  rv128_base_cpu_init),
2388     DEFINE_BARE_CPU(TYPE_RISCV_CPU_RV64I, rv64i_bare_cpu_init),
2389     DEFINE_PROFILE_CPU(TYPE_RISCV_CPU_RVA22U64, rva22u64_profile_cpu_init),
2390     DEFINE_PROFILE_CPU(TYPE_RISCV_CPU_RVA22S64, rva22s64_profile_cpu_init),
2391 #endif
2392 };
2393 
2394 DEFINE_TYPES(riscv_cpu_type_infos)
2395