xref: /openbmc/qemu/target/riscv/cpu.c (revision c45eff30)
1 /*
2  * QEMU RISC-V CPU
3  *
4  * Copyright (c) 2016-2017 Sagar Karandikar, sagark@eecs.berkeley.edu
5  * Copyright (c) 2017-2018 SiFive, Inc.
6  *
7  * This program is free software; you can redistribute it and/or modify it
8  * under the terms and conditions of the GNU General Public License,
9  * version 2 or later, as published by the Free Software Foundation.
10  *
11  * This program is distributed in the hope it will be useful, but WITHOUT
12  * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
13  * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
14  * more details.
15  *
16  * You should have received a copy of the GNU General Public License along with
17  * this program.  If not, see <http://www.gnu.org/licenses/>.
18  */
19 
20 #include "qemu/osdep.h"
21 #include "qemu/qemu-print.h"
22 #include "qemu/ctype.h"
23 #include "qemu/log.h"
24 #include "cpu.h"
25 #include "cpu_vendorid.h"
26 #include "pmu.h"
27 #include "internals.h"
28 #include "time_helper.h"
29 #include "exec/exec-all.h"
30 #include "qapi/error.h"
31 #include "qapi/visitor.h"
32 #include "qemu/error-report.h"
33 #include "hw/qdev-properties.h"
34 #include "migration/vmstate.h"
35 #include "fpu/softfloat-helpers.h"
36 #include "sysemu/kvm.h"
37 #include "kvm_riscv.h"
38 #include "tcg/tcg.h"
39 
40 /* RISC-V CPU definitions */
41 
42 #define RISCV_CPU_MARCHID   ((QEMU_VERSION_MAJOR << 16) | \
43                              (QEMU_VERSION_MINOR << 8)  | \
44                              (QEMU_VERSION_MICRO))
45 #define RISCV_CPU_MIMPID    RISCV_CPU_MARCHID
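
/*
 * Illustrative arithmetic only (not part of the original file): assuming a
 * hypothetical QEMU version 8.0.2, RISCV_CPU_MARCHID evaluates to
 * (8 << 16) | (0 << 8) | 2 == 0x080002, and RISCV_CPU_MIMPID is the same
 * value by the definition above.
 */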
46 
47 static const char riscv_single_letter_exts[] = "IEMAFDQCPVH";
48 
49 struct isa_ext_data {
50     const char *name;
51     bool multi_letter;
52     int min_version;
53     int ext_enable_offset;
54 };
55 
56 #define ISA_EXT_DATA_ENTRY(_name, _m_letter, _min_ver, _prop) \
57     {#_name, _m_letter, _min_ver, offsetof(struct RISCVCPUConfig, _prop)}
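
/*
 * For illustration, an expansion of the macro above (not an extra table
 * entry):
 *
 *   ISA_EXT_DATA_ENTRY(zawrs, true, PRIV_VERSION_1_12_0, ext_zawrs)
 *
 * becomes
 *
 *   {"zawrs", true, PRIV_VERSION_1_12_0,
 *    offsetof(struct RISCVCPUConfig, ext_zawrs)}
 */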
58 
59 /*
60  * Here are the ordering rules of extension naming defined by the RISC-V
61  * specification:
62  * 1. All multi-letter extensions must be separated from other multi-letter
63  *    extensions by an underscore.
64  * 2. The first letter following the 'Z' conventionally indicates the most
65  *    closely related alphabetical extension category, IMAFDQLCBKJTPVH.
66  *    If multiple 'Z' extensions are named, they should be ordered first
67  *    by category, then alphabetically within a category.
68  * 3. Standard supervisor-level extensions (starting with 'S') should be
69  *    listed after standard unprivileged extensions.  If multiple
70  *    supervisor-level extensions are listed, they should be ordered
71  *    alphabetically.
72  * 4. Non-standard extensions (starting with 'X') must be listed after all
73  *    standard extensions. They must be separated from other multi-letter
74  *    extensions by an underscore.
75  */
76 static const struct isa_ext_data isa_edata_arr[] = {
77     ISA_EXT_DATA_ENTRY(h, false, PRIV_VERSION_1_12_0, ext_h),
78     ISA_EXT_DATA_ENTRY(v, false, PRIV_VERSION_1_10_0, ext_v),
79     ISA_EXT_DATA_ENTRY(zicbom, true, PRIV_VERSION_1_12_0, ext_icbom),
80     ISA_EXT_DATA_ENTRY(zicboz, true, PRIV_VERSION_1_12_0, ext_icboz),
81     ISA_EXT_DATA_ENTRY(zicond, true, PRIV_VERSION_1_12_0, ext_zicond),
82     ISA_EXT_DATA_ENTRY(zicsr, true, PRIV_VERSION_1_10_0, ext_icsr),
83     ISA_EXT_DATA_ENTRY(zifencei, true, PRIV_VERSION_1_10_0, ext_ifencei),
84     ISA_EXT_DATA_ENTRY(zihintpause, true, PRIV_VERSION_1_10_0, ext_zihintpause),
85     ISA_EXT_DATA_ENTRY(zawrs, true, PRIV_VERSION_1_12_0, ext_zawrs),
86     ISA_EXT_DATA_ENTRY(zfh, true, PRIV_VERSION_1_11_0, ext_zfh),
87     ISA_EXT_DATA_ENTRY(zfhmin, true, PRIV_VERSION_1_11_0, ext_zfhmin),
88     ISA_EXT_DATA_ENTRY(zfinx, true, PRIV_VERSION_1_12_0, ext_zfinx),
89     ISA_EXT_DATA_ENTRY(zdinx, true, PRIV_VERSION_1_12_0, ext_zdinx),
90     ISA_EXT_DATA_ENTRY(zca, true, PRIV_VERSION_1_12_0, ext_zca),
91     ISA_EXT_DATA_ENTRY(zcb, true, PRIV_VERSION_1_12_0, ext_zcb),
92     ISA_EXT_DATA_ENTRY(zcf, true, PRIV_VERSION_1_12_0, ext_zcf),
93     ISA_EXT_DATA_ENTRY(zcd, true, PRIV_VERSION_1_12_0, ext_zcd),
94     ISA_EXT_DATA_ENTRY(zce, true, PRIV_VERSION_1_12_0, ext_zce),
95     ISA_EXT_DATA_ENTRY(zcmp, true, PRIV_VERSION_1_12_0, ext_zcmp),
96     ISA_EXT_DATA_ENTRY(zcmt, true, PRIV_VERSION_1_12_0, ext_zcmt),
97     ISA_EXT_DATA_ENTRY(zba, true, PRIV_VERSION_1_12_0, ext_zba),
98     ISA_EXT_DATA_ENTRY(zbb, true, PRIV_VERSION_1_12_0, ext_zbb),
99     ISA_EXT_DATA_ENTRY(zbc, true, PRIV_VERSION_1_12_0, ext_zbc),
100     ISA_EXT_DATA_ENTRY(zbkb, true, PRIV_VERSION_1_12_0, ext_zbkb),
101     ISA_EXT_DATA_ENTRY(zbkc, true, PRIV_VERSION_1_12_0, ext_zbkc),
102     ISA_EXT_DATA_ENTRY(zbkx, true, PRIV_VERSION_1_12_0, ext_zbkx),
103     ISA_EXT_DATA_ENTRY(zbs, true, PRIV_VERSION_1_12_0, ext_zbs),
104     ISA_EXT_DATA_ENTRY(zk, true, PRIV_VERSION_1_12_0, ext_zk),
105     ISA_EXT_DATA_ENTRY(zkn, true, PRIV_VERSION_1_12_0, ext_zkn),
106     ISA_EXT_DATA_ENTRY(zknd, true, PRIV_VERSION_1_12_0, ext_zknd),
107     ISA_EXT_DATA_ENTRY(zkne, true, PRIV_VERSION_1_12_0, ext_zkne),
108     ISA_EXT_DATA_ENTRY(zknh, true, PRIV_VERSION_1_12_0, ext_zknh),
109     ISA_EXT_DATA_ENTRY(zkr, true, PRIV_VERSION_1_12_0, ext_zkr),
110     ISA_EXT_DATA_ENTRY(zks, true, PRIV_VERSION_1_12_0, ext_zks),
111     ISA_EXT_DATA_ENTRY(zksed, true, PRIV_VERSION_1_12_0, ext_zksed),
112     ISA_EXT_DATA_ENTRY(zksh, true, PRIV_VERSION_1_12_0, ext_zksh),
113     ISA_EXT_DATA_ENTRY(zkt, true, PRIV_VERSION_1_12_0, ext_zkt),
114     ISA_EXT_DATA_ENTRY(zve32f, true, PRIV_VERSION_1_10_0, ext_zve32f),
115     ISA_EXT_DATA_ENTRY(zve64f, true, PRIV_VERSION_1_10_0, ext_zve64f),
116     ISA_EXT_DATA_ENTRY(zve64d, true, PRIV_VERSION_1_10_0, ext_zve64d),
117     ISA_EXT_DATA_ENTRY(zvfh, true, PRIV_VERSION_1_12_0, ext_zvfh),
118     ISA_EXT_DATA_ENTRY(zvfhmin, true, PRIV_VERSION_1_12_0, ext_zvfhmin),
119     ISA_EXT_DATA_ENTRY(zhinx, true, PRIV_VERSION_1_12_0, ext_zhinx),
120     ISA_EXT_DATA_ENTRY(zhinxmin, true, PRIV_VERSION_1_12_0, ext_zhinxmin),
121     ISA_EXT_DATA_ENTRY(smaia, true, PRIV_VERSION_1_12_0, ext_smaia),
122     ISA_EXT_DATA_ENTRY(ssaia, true, PRIV_VERSION_1_12_0, ext_ssaia),
123     ISA_EXT_DATA_ENTRY(sscofpmf, true, PRIV_VERSION_1_12_0, ext_sscofpmf),
124     ISA_EXT_DATA_ENTRY(sstc, true, PRIV_VERSION_1_12_0, ext_sstc),
125     ISA_EXT_DATA_ENTRY(svadu, true, PRIV_VERSION_1_12_0, ext_svadu),
126     ISA_EXT_DATA_ENTRY(svinval, true, PRIV_VERSION_1_12_0, ext_svinval),
127     ISA_EXT_DATA_ENTRY(svnapot, true, PRIV_VERSION_1_12_0, ext_svnapot),
128     ISA_EXT_DATA_ENTRY(svpbmt, true, PRIV_VERSION_1_12_0, ext_svpbmt),
129     ISA_EXT_DATA_ENTRY(xtheadba, true, PRIV_VERSION_1_11_0, ext_xtheadba),
130     ISA_EXT_DATA_ENTRY(xtheadbb, true, PRIV_VERSION_1_11_0, ext_xtheadbb),
131     ISA_EXT_DATA_ENTRY(xtheadbs, true, PRIV_VERSION_1_11_0, ext_xtheadbs),
132     ISA_EXT_DATA_ENTRY(xtheadcmo, true, PRIV_VERSION_1_11_0, ext_xtheadcmo),
133     ISA_EXT_DATA_ENTRY(xtheadcondmov, true, PRIV_VERSION_1_11_0, ext_xtheadcondmov),
134     ISA_EXT_DATA_ENTRY(xtheadfmemidx, true, PRIV_VERSION_1_11_0, ext_xtheadfmemidx),
135     ISA_EXT_DATA_ENTRY(xtheadfmv, true, PRIV_VERSION_1_11_0, ext_xtheadfmv),
136     ISA_EXT_DATA_ENTRY(xtheadmac, true, PRIV_VERSION_1_11_0, ext_xtheadmac),
137     ISA_EXT_DATA_ENTRY(xtheadmemidx, true, PRIV_VERSION_1_11_0, ext_xtheadmemidx),
138     ISA_EXT_DATA_ENTRY(xtheadmempair, true, PRIV_VERSION_1_11_0, ext_xtheadmempair),
139     ISA_EXT_DATA_ENTRY(xtheadsync, true, PRIV_VERSION_1_11_0, ext_xtheadsync),
140     ISA_EXT_DATA_ENTRY(xventanacondops, true, PRIV_VERSION_1_12_0, ext_XVentanaCondOps),
141 };
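
/*
 * The array order above follows the naming rules listed before it; the ISA
 * string builder later in this file is expected to walk the array in this
 * order, so a hart with a subset of these extensions would advertise a
 * string along the lines of (hypothetical example):
 *
 *   rv64imafdcvh_zicsr_zifencei_zba_zbb_sstc_svinval_xtheadba
 */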
142 
143 static bool isa_ext_is_enabled(RISCVCPU *cpu,
144                                const struct isa_ext_data *edata)
145 {
146     bool *ext_enabled = (void *)&cpu->cfg + edata->ext_enable_offset;
147 
148     return *ext_enabled;
149 }
150 
151 static void isa_ext_update_enabled(RISCVCPU *cpu,
152                                    const struct isa_ext_data *edata, bool en)
153 {
154     bool *ext_enabled = (void *)&cpu->cfg + edata->ext_enable_offset;
155 
156     *ext_enabled = en;
157 }
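
/*
 * Typical use of the two accessors above, a sketch of the realize-time loop
 * found later in this file:
 *
 *   for (i = 0; i < ARRAY_SIZE(isa_edata_arr); i++) {
 *       if (isa_ext_is_enabled(cpu, &isa_edata_arr[i]) &&
 *           env->priv_ver < isa_edata_arr[i].min_version) {
 *           isa_ext_update_enabled(cpu, &isa_edata_arr[i], false);
 *       }
 *   }
 */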
158 
159 const char * const riscv_int_regnames[] = {
160     "x0/zero", "x1/ra",  "x2/sp",  "x3/gp",  "x4/tp",  "x5/t0",   "x6/t1",
161     "x7/t2",   "x8/s0",  "x9/s1",  "x10/a0", "x11/a1", "x12/a2",  "x13/a3",
162     "x14/a4",  "x15/a5", "x16/a6", "x17/a7", "x18/s2", "x19/s3",  "x20/s4",
163     "x21/s5",  "x22/s6", "x23/s7", "x24/s8", "x25/s9", "x26/s10", "x27/s11",
164     "x28/t3",  "x29/t4", "x30/t5", "x31/t6"
165 };
166 
167 const char * const riscv_int_regnamesh[] = {
168     "x0h/zeroh", "x1h/rah",  "x2h/sph",   "x3h/gph",   "x4h/tph",  "x5h/t0h",
169     "x6h/t1h",   "x7h/t2h",  "x8h/s0h",   "x9h/s1h",   "x10h/a0h", "x11h/a1h",
170     "x12h/a2h",  "x13h/a3h", "x14h/a4h",  "x15h/a5h",  "x16h/a6h", "x17h/a7h",
171     "x18h/s2h",  "x19h/s3h", "x20h/s4h",  "x21h/s5h",  "x22h/s6h", "x23h/s7h",
172     "x24h/s8h",  "x25h/s9h", "x26h/s10h", "x27h/s11h", "x28h/t3h", "x29h/t4h",
173     "x30h/t5h",  "x31h/t6h"
174 };
175 
176 const char * const riscv_fpr_regnames[] = {
177     "f0/ft0",   "f1/ft1",  "f2/ft2",   "f3/ft3",   "f4/ft4",  "f5/ft5",
178     "f6/ft6",   "f7/ft7",  "f8/fs0",   "f9/fs1",   "f10/fa0", "f11/fa1",
179     "f12/fa2",  "f13/fa3", "f14/fa4",  "f15/fa5",  "f16/fa6", "f17/fa7",
180     "f18/fs2",  "f19/fs3", "f20/fs4",  "f21/fs5",  "f22/fs6", "f23/fs7",
181     "f24/fs8",  "f25/fs9", "f26/fs10", "f27/fs11", "f28/ft8", "f29/ft9",
182     "f30/ft10", "f31/ft11"
183 };
184 
185 static const char * const riscv_excp_names[] = {
186     "misaligned_fetch",
187     "fault_fetch",
188     "illegal_instruction",
189     "breakpoint",
190     "misaligned_load",
191     "fault_load",
192     "misaligned_store",
193     "fault_store",
194     "user_ecall",
195     "supervisor_ecall",
196     "hypervisor_ecall",
197     "machine_ecall",
198     "exec_page_fault",
199     "load_page_fault",
200     "reserved",
201     "store_page_fault",
202     "reserved",
203     "reserved",
204     "reserved",
205     "reserved",
206     "guest_exec_page_fault",
207     "guest_load_page_fault",
208     "reserved",
209     "guest_store_page_fault",
210 };
211 
212 static const char * const riscv_intr_names[] = {
213     "u_software",
214     "s_software",
215     "vs_software",
216     "m_software",
217     "u_timer",
218     "s_timer",
219     "vs_timer",
220     "m_timer",
221     "u_external",
222     "s_external",
223     "vs_external",
224     "m_external",
225     "reserved",
226     "reserved",
227     "reserved",
228     "reserved"
229 };
230 
231 static void register_cpu_props(Object *obj);
232 
233 const char *riscv_cpu_get_trap_name(target_ulong cause, bool async)
234 {
235     if (async) {
236         return (cause < ARRAY_SIZE(riscv_intr_names)) ?
237                riscv_intr_names[cause] : "(unknown)";
238     } else {
239         return (cause < ARRAY_SIZE(riscv_excp_names)) ?
240                riscv_excp_names[cause] : "(unknown)";
241     }
242 }
243 
244 static void set_misa(CPURISCVState *env, RISCVMXL mxl, uint32_t ext)
245 {
246     env->misa_mxl_max = env->misa_mxl = mxl;
247     env->misa_ext_mask = env->misa_ext = ext;
248 }
249 
250 static void set_priv_version(CPURISCVState *env, int priv_ver)
251 {
252     env->priv_ver = priv_ver;
253 }
254 
255 static void set_vext_version(CPURISCVState *env, int vext_ver)
256 {
257     env->vext_ver = vext_ver;
258 }
259 
260 #ifndef CONFIG_USER_ONLY
261 static uint8_t satp_mode_from_str(const char *satp_mode_str)
262 {
263     if (!strncmp(satp_mode_str, "mbare", 5)) {
264         return VM_1_10_MBARE;
265     }
266 
267     if (!strncmp(satp_mode_str, "sv32", 4)) {
268         return VM_1_10_SV32;
269     }
270 
271     if (!strncmp(satp_mode_str, "sv39", 4)) {
272         return VM_1_10_SV39;
273     }
274 
275     if (!strncmp(satp_mode_str, "sv48", 4)) {
276         return VM_1_10_SV48;
277     }
278 
279     if (!strncmp(satp_mode_str, "sv57", 4)) {
280         return VM_1_10_SV57;
281     }
282 
283     if (!strncmp(satp_mode_str, "sv64", 4)) {
284         return VM_1_10_SV64;
285     }
286 
287     g_assert_not_reached();
288 }
289 
290 uint8_t satp_mode_max_from_map(uint32_t map)
291 {
292     /* map here has at least one bit set, so no problem with clz */
293     return 31 - __builtin_clz(map);
294 }
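
/*
 * Worked example, assuming the privileged-spec satp.MODE encodings
 * (VM_1_10_SV39 == 8, VM_1_10_SV48 == 9): for
 * map = (1 << VM_1_10_SV39) | (1 << VM_1_10_SV48) == 0x300,
 * __builtin_clz(0x300) is 22, so the function returns 31 - 22 = 9,
 * i.e. VM_1_10_SV48, the largest mode present in the map.
 */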
295 
296 const char *satp_mode_str(uint8_t satp_mode, bool is_32_bit)
297 {
298     if (is_32_bit) {
299         switch (satp_mode) {
300         case VM_1_10_SV32:
301             return "sv32";
302         case VM_1_10_MBARE:
303             return "none";
304         }
305     } else {
306         switch (satp_mode) {
307         case VM_1_10_SV64:
308             return "sv64";
309         case VM_1_10_SV57:
310             return "sv57";
311         case VM_1_10_SV48:
312             return "sv48";
313         case VM_1_10_SV39:
314             return "sv39";
315         case VM_1_10_MBARE:
316             return "none";
317         }
318     }
319 
320     g_assert_not_reached();
321 }
322 
323 static void set_satp_mode_max_supported(RISCVCPU *cpu,
324                                         uint8_t satp_mode)
325 {
326     bool rv32 = riscv_cpu_mxl(&cpu->env) == MXL_RV32;
327     const bool *valid_vm = rv32 ? valid_vm_1_10_32 : valid_vm_1_10_64;
328 
329     for (int i = 0; i <= satp_mode; ++i) {
330         if (valid_vm[i]) {
331             cpu->cfg.satp_mode.supported |= (1 << i);
332         }
333     }
334 }
335 
336 /* Default the satp mode map to every supported mode */
337 static void set_satp_mode_default_map(RISCVCPU *cpu)
338 {
339     cpu->cfg.satp_mode.map = cpu->cfg.satp_mode.supported;
340 }
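
/*
 * Example for the two helpers above (a sketch; the exact contents of the
 * valid_vm_1_10_32/64 tables live elsewhere): for an RV64 hart initialized
 * with set_satp_mode_max_supported(cpu, VM_1_10_SV57), 'supported' ends up
 * with the bits for mbare, sv39, sv48 and sv57 set (assuming those are the
 * modes marked valid for RV64), and set_satp_mode_default_map() then copies
 * that bitmap into 'map', i.e. the default is everything the hart supports.
 */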
341 #endif
342 
343 static void riscv_any_cpu_init(Object *obj)
344 {
345     CPURISCVState *env = &RISCV_CPU(obj)->env;
346 #if defined(TARGET_RISCV32)
347     set_misa(env, MXL_RV32, RVI | RVM | RVA | RVF | RVD | RVC | RVU);
348 #elif defined(TARGET_RISCV64)
349     set_misa(env, MXL_RV64, RVI | RVM | RVA | RVF | RVD | RVC | RVU);
350 #endif
351 
352 #ifndef CONFIG_USER_ONLY
353     set_satp_mode_max_supported(RISCV_CPU(obj),
354         riscv_cpu_mxl(&RISCV_CPU(obj)->env) == MXL_RV32 ?
355         VM_1_10_SV32 : VM_1_10_SV57);
356 #endif
357 
358     set_priv_version(env, PRIV_VERSION_1_12_0);
359     register_cpu_props(obj);
360 }
361 
362 #if defined(TARGET_RISCV64)
363 static void rv64_base_cpu_init(Object *obj)
364 {
365     CPURISCVState *env = &RISCV_CPU(obj)->env;
366     /* We set this in the realize function */
367     set_misa(env, MXL_RV64, 0);
368     register_cpu_props(obj);
369     /* Set latest version of privileged specification */
370     set_priv_version(env, PRIV_VERSION_1_12_0);
371 #ifndef CONFIG_USER_ONLY
372     set_satp_mode_max_supported(RISCV_CPU(obj), VM_1_10_SV57);
373 #endif
374 }
375 
376 static void rv64_sifive_u_cpu_init(Object *obj)
377 {
378     CPURISCVState *env = &RISCV_CPU(obj)->env;
379     set_misa(env, MXL_RV64, RVI | RVM | RVA | RVF | RVD | RVC | RVS | RVU);
380     register_cpu_props(obj);
381     set_priv_version(env, PRIV_VERSION_1_10_0);
382 #ifndef CONFIG_USER_ONLY
383     set_satp_mode_max_supported(RISCV_CPU(obj), VM_1_10_SV39);
384 #endif
385 }
386 
387 static void rv64_sifive_e_cpu_init(Object *obj)
388 {
389     CPURISCVState *env = &RISCV_CPU(obj)->env;
390     RISCVCPU *cpu = RISCV_CPU(obj);
391 
392     set_misa(env, MXL_RV64, RVI | RVM | RVA | RVC | RVU);
393     register_cpu_props(obj);
394     set_priv_version(env, PRIV_VERSION_1_10_0);
395     cpu->cfg.mmu = false;
396 #ifndef CONFIG_USER_ONLY
397     set_satp_mode_max_supported(cpu, VM_1_10_MBARE);
398 #endif
399 }
400 
401 static void rv64_thead_c906_cpu_init(Object *obj)
402 {
403     CPURISCVState *env = &RISCV_CPU(obj)->env;
404     RISCVCPU *cpu = RISCV_CPU(obj);
405 
406     set_misa(env, MXL_RV64, RVI | RVM | RVA | RVF | RVD | RVC | RVS | RVU);
407     set_priv_version(env, PRIV_VERSION_1_11_0);
408 
409     cpu->cfg.ext_g = true;
410     cpu->cfg.ext_c = true;
411     cpu->cfg.ext_u = true;
412     cpu->cfg.ext_s = true;
413     cpu->cfg.ext_icsr = true;
414     cpu->cfg.ext_zfh = true;
415     cpu->cfg.mmu = true;
416     cpu->cfg.ext_xtheadba = true;
417     cpu->cfg.ext_xtheadbb = true;
418     cpu->cfg.ext_xtheadbs = true;
419     cpu->cfg.ext_xtheadcmo = true;
420     cpu->cfg.ext_xtheadcondmov = true;
421     cpu->cfg.ext_xtheadfmemidx = true;
422     cpu->cfg.ext_xtheadmac = true;
423     cpu->cfg.ext_xtheadmemidx = true;
424     cpu->cfg.ext_xtheadmempair = true;
425     cpu->cfg.ext_xtheadsync = true;
426 
427     cpu->cfg.mvendorid = THEAD_VENDOR_ID;
428 #ifndef CONFIG_USER_ONLY
429     set_satp_mode_max_supported(cpu, VM_1_10_SV39);
430 #endif
431 }
432 
433 static void rv128_base_cpu_init(Object *obj)
434 {
435     if (qemu_tcg_mttcg_enabled()) {
436         /* Missing 128-bit aligned atomics */
437         error_report("128-bit RISC-V currently does not work with Multi "
438                      "Threaded TCG. Please use: -accel tcg,thread=single");
439         exit(EXIT_FAILURE);
440     }
441     CPURISCVState *env = &RISCV_CPU(obj)->env;
442     /* We set this in the realize function */
443     set_misa(env, MXL_RV128, 0);
444     register_cpu_props(obj);
445     /* Set latest version of privileged specification */
446     set_priv_version(env, PRIV_VERSION_1_12_0);
447 #ifndef CONFIG_USER_ONLY
448     set_satp_mode_max_supported(RISCV_CPU(obj), VM_1_10_SV57);
449 #endif
450 }
451 #else
452 static void rv32_base_cpu_init(Object *obj)
453 {
454     CPURISCVState *env = &RISCV_CPU(obj)->env;
455     /* We set this in the realize function */
456     set_misa(env, MXL_RV32, 0);
457     register_cpu_props(obj);
458     /* Set latest version of privileged specification */
459     set_priv_version(env, PRIV_VERSION_1_12_0);
460 #ifndef CONFIG_USER_ONLY
461     set_satp_mode_max_supported(RISCV_CPU(obj), VM_1_10_SV32);
462 #endif
463 }
464 
465 static void rv32_sifive_u_cpu_init(Object *obj)
466 {
467     CPURISCVState *env = &RISCV_CPU(obj)->env;
468     set_misa(env, MXL_RV32, RVI | RVM | RVA | RVF | RVD | RVC | RVS | RVU);
469     register_cpu_props(obj);
470     set_priv_version(env, PRIV_VERSION_1_10_0);
471 #ifndef CONFIG_USER_ONLY
472     set_satp_mode_max_supported(RISCV_CPU(obj), VM_1_10_SV32);
473 #endif
474 }
475 
476 static void rv32_sifive_e_cpu_init(Object *obj)
477 {
478     CPURISCVState *env = &RISCV_CPU(obj)->env;
479     RISCVCPU *cpu = RISCV_CPU(obj);
480 
481     set_misa(env, MXL_RV32, RVI | RVM | RVA | RVC | RVU);
482     register_cpu_props(obj);
483     set_priv_version(env, PRIV_VERSION_1_10_0);
484     cpu->cfg.mmu = false;
485 #ifndef CONFIG_USER_ONLY
486     set_satp_mode_max_supported(cpu, VM_1_10_MBARE);
487 #endif
488 }
489 
490 static void rv32_ibex_cpu_init(Object *obj)
491 {
492     CPURISCVState *env = &RISCV_CPU(obj)->env;
493     RISCVCPU *cpu = RISCV_CPU(obj);
494 
495     set_misa(env, MXL_RV32, RVI | RVM | RVC | RVU);
496     register_cpu_props(obj);
497     set_priv_version(env, PRIV_VERSION_1_11_0);
498     cpu->cfg.mmu = false;
499 #ifndef CONFIG_USER_ONLY
500     set_satp_mode_max_supported(cpu, VM_1_10_MBARE);
501 #endif
502     cpu->cfg.epmp = true;
503 }
504 
505 static void rv32_imafcu_nommu_cpu_init(Object *obj)
506 {
507     CPURISCVState *env = &RISCV_CPU(obj)->env;
508     RISCVCPU *cpu = RISCV_CPU(obj);
509 
510     set_misa(env, MXL_RV32, RVI | RVM | RVA | RVF | RVC | RVU);
511     register_cpu_props(obj);
512     set_priv_version(env, PRIV_VERSION_1_10_0);
513     cpu->cfg.mmu = false;
514 #ifndef CONFIG_USER_ONLY
515     set_satp_mode_max_supported(cpu, VM_1_10_MBARE);
516 #endif
517 }
518 #endif
519 
520 #if defined(CONFIG_KVM)
521 static void riscv_host_cpu_init(Object *obj)
522 {
523     CPURISCVState *env = &RISCV_CPU(obj)->env;
524 #if defined(TARGET_RISCV32)
525     set_misa(env, MXL_RV32, 0);
526 #elif defined(TARGET_RISCV64)
527     set_misa(env, MXL_RV64, 0);
528 #endif
529     register_cpu_props(obj);
530 }
531 #endif
532 
533 static ObjectClass *riscv_cpu_class_by_name(const char *cpu_model)
534 {
535     ObjectClass *oc;
536     char *typename;
537     char **cpuname;
538 
539     cpuname = g_strsplit(cpu_model, ",", 1);
540     typename = g_strdup_printf(RISCV_CPU_TYPE_NAME("%s"), cpuname[0]);
541     oc = object_class_by_name(typename);
542     g_strfreev(cpuname);
543     g_free(typename);
544     if (!oc || !object_class_dynamic_cast(oc, TYPE_RISCV_CPU) ||
545         object_class_is_abstract(oc)) {
546         return NULL;
547     }
548     return oc;
549 }
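
/*
 * A minimal sketch of the name translation above: assuming the usual
 * RISCV_CPU_TYPE_NAME() convention of appending a "-riscv-cpu" suffix, a
 * cpu_model of "rv64" is looked up as the QOM type "rv64-riscv-cpu"; NULL is
 * returned if that type does not exist, is not a RISC-V CPU, or is abstract.
 */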
550 
551 static void riscv_cpu_dump_state(CPUState *cs, FILE *f, int flags)
552 {
553     RISCVCPU *cpu = RISCV_CPU(cs);
554     CPURISCVState *env = &cpu->env;
555     int i;
556 
557 #if !defined(CONFIG_USER_ONLY)
558     if (riscv_has_ext(env, RVH)) {
559         qemu_fprintf(f, " %s %d\n", "V      =  ", env->virt_enabled);
560     }
561 #endif
562     qemu_fprintf(f, " %s " TARGET_FMT_lx "\n", "pc      ", env->pc);
563 #ifndef CONFIG_USER_ONLY
564     {
565         static const int dump_csrs[] = {
566             CSR_MHARTID,
567             CSR_MSTATUS,
568             CSR_MSTATUSH,
569             /*
570              * CSR_SSTATUS is intentionally omitted here as its value
571              * can be figured out by looking at CSR_MSTATUS
572              */
573             CSR_HSTATUS,
574             CSR_VSSTATUS,
575             CSR_MIP,
576             CSR_MIE,
577             CSR_MIDELEG,
578             CSR_HIDELEG,
579             CSR_MEDELEG,
580             CSR_HEDELEG,
581             CSR_MTVEC,
582             CSR_STVEC,
583             CSR_VSTVEC,
584             CSR_MEPC,
585             CSR_SEPC,
586             CSR_VSEPC,
587             CSR_MCAUSE,
588             CSR_SCAUSE,
589             CSR_VSCAUSE,
590             CSR_MTVAL,
591             CSR_STVAL,
592             CSR_HTVAL,
593             CSR_MTVAL2,
594             CSR_MSCRATCH,
595             CSR_SSCRATCH,
596             CSR_SATP,
597             CSR_MMTE,
598             CSR_UPMBASE,
599             CSR_UPMMASK,
600             CSR_SPMBASE,
601             CSR_SPMMASK,
602             CSR_MPMBASE,
603             CSR_MPMMASK,
604         };
605 
606         for (int i = 0; i < ARRAY_SIZE(dump_csrs); ++i) {
607             int csrno = dump_csrs[i];
608             target_ulong val = 0;
609             RISCVException res = riscv_csrrw_debug(env, csrno, &val, 0, 0);
610 
611             /*
612              * Rely on the smode, hmode, etc. predicates within csr.c
613              * to do the filtering of the registers that are present.
614              */
615             if (res == RISCV_EXCP_NONE) {
616                 qemu_fprintf(f, " %-8s " TARGET_FMT_lx "\n",
617                              csr_ops[csrno].name, val);
618             }
619         }
620     }
621 #endif
622 
623     for (i = 0; i < 32; i++) {
624         qemu_fprintf(f, " %-8s " TARGET_FMT_lx,
625                      riscv_int_regnames[i], env->gpr[i]);
626         if ((i & 3) == 3) {
627             qemu_fprintf(f, "\n");
628         }
629     }
630     if (flags & CPU_DUMP_FPU) {
631         for (i = 0; i < 32; i++) {
632             qemu_fprintf(f, " %-8s %016" PRIx64,
633                          riscv_fpr_regnames[i], env->fpr[i]);
634             if ((i & 3) == 3) {
635                 qemu_fprintf(f, "\n");
636             }
637         }
638     }
639 }
640 
641 static void riscv_cpu_set_pc(CPUState *cs, vaddr value)
642 {
643     RISCVCPU *cpu = RISCV_CPU(cs);
644     CPURISCVState *env = &cpu->env;
645 
646     if (env->xl == MXL_RV32) {
647         env->pc = (int32_t)value;
648     } else {
649         env->pc = value;
650     }
651 }
652 
653 static vaddr riscv_cpu_get_pc(CPUState *cs)
654 {
655     RISCVCPU *cpu = RISCV_CPU(cs);
656     CPURISCVState *env = &cpu->env;
657 
658     /* Match cpu_get_tb_cpu_state. */
659     if (env->xl == MXL_RV32) {
660         return env->pc & UINT32_MAX;
661     }
662     return env->pc;
663 }
664 
665 static void riscv_cpu_synchronize_from_tb(CPUState *cs,
666                                           const TranslationBlock *tb)
667 {
668     RISCVCPU *cpu = RISCV_CPU(cs);
669     CPURISCVState *env = &cpu->env;
670     RISCVMXL xl = FIELD_EX32(tb->flags, TB_FLAGS, XL);
671 
672     tcg_debug_assert(!(cs->tcg_cflags & CF_PCREL));
673 
674     if (xl == MXL_RV32) {
675         env->pc = (int32_t) tb->pc;
676     } else {
677         env->pc = tb->pc;
678     }
679 }
680 
681 static bool riscv_cpu_has_work(CPUState *cs)
682 {
683 #ifndef CONFIG_USER_ONLY
684     RISCVCPU *cpu = RISCV_CPU(cs);
685     CPURISCVState *env = &cpu->env;
686     /*
687      * Definition of the WFI instruction requires it to ignore the privilege
688      * mode and delegation registers, but respect individual enables
689      */
690     return riscv_cpu_all_pending(env) != 0;
691 #else
692     return true;
693 #endif
694 }
695 
696 static void riscv_restore_state_to_opc(CPUState *cs,
697                                        const TranslationBlock *tb,
698                                        const uint64_t *data)
699 {
700     RISCVCPU *cpu = RISCV_CPU(cs);
701     CPURISCVState *env = &cpu->env;
702     RISCVMXL xl = FIELD_EX32(tb->flags, TB_FLAGS, XL);
703 
704     if (xl == MXL_RV32) {
705         env->pc = (int32_t)data[0];
706     } else {
707         env->pc = data[0];
708     }
709     env->bins = data[1];
710 }
711 
712 static void riscv_cpu_reset_hold(Object *obj)
713 {
714 #ifndef CONFIG_USER_ONLY
715     uint8_t iprio;
716     int i, irq, rdzero;
717 #endif
718     CPUState *cs = CPU(obj);
719     RISCVCPU *cpu = RISCV_CPU(cs);
720     RISCVCPUClass *mcc = RISCV_CPU_GET_CLASS(cpu);
721     CPURISCVState *env = &cpu->env;
722 
723     if (mcc->parent_phases.hold) {
724         mcc->parent_phases.hold(obj);
725     }
726 #ifndef CONFIG_USER_ONLY
727     env->misa_mxl = env->misa_mxl_max;
728     env->priv = PRV_M;
729     env->mstatus &= ~(MSTATUS_MIE | MSTATUS_MPRV);
730     if (env->misa_mxl > MXL_RV32) {
731         /*
732          * The reset status of SXL/UXL is undefined, but mstatus is WARL
733          * and we must ensure that the value after init is valid for read.
734          */
735         env->mstatus = set_field(env->mstatus, MSTATUS64_SXL, env->misa_mxl);
736         env->mstatus = set_field(env->mstatus, MSTATUS64_UXL, env->misa_mxl);
737         if (riscv_has_ext(env, RVH)) {
738             env->vsstatus = set_field(env->vsstatus,
739                                       MSTATUS64_SXL, env->misa_mxl);
740             env->vsstatus = set_field(env->vsstatus,
741                                       MSTATUS64_UXL, env->misa_mxl);
742             env->mstatus_hs = set_field(env->mstatus_hs,
743                                         MSTATUS64_SXL, env->misa_mxl);
744             env->mstatus_hs = set_field(env->mstatus_hs,
745                                         MSTATUS64_UXL, env->misa_mxl);
746         }
747     }
748     env->mcause = 0;
749     env->miclaim = MIP_SGEIP;
750     env->pc = env->resetvec;
751     env->bins = 0;
752     env->two_stage_lookup = false;
753 
754     env->menvcfg = (cpu->cfg.ext_svpbmt ? MENVCFG_PBMTE : 0) |
755                    (cpu->cfg.ext_svadu ? MENVCFG_HADE : 0);
756     env->henvcfg = (cpu->cfg.ext_svpbmt ? HENVCFG_PBMTE : 0) |
757                    (cpu->cfg.ext_svadu ? HENVCFG_HADE : 0);
758 
759     /* Initialize default priorities of local interrupts. */
760     for (i = 0; i < ARRAY_SIZE(env->miprio); i++) {
761         iprio = riscv_cpu_default_priority(i);
762         env->miprio[i] = (i == IRQ_M_EXT) ? 0 : iprio;
763         env->siprio[i] = (i == IRQ_S_EXT) ? 0 : iprio;
764         env->hviprio[i] = 0;
765     }
766     i = 0;
767     while (!riscv_cpu_hviprio_index2irq(i, &irq, &rdzero)) {
768         if (!rdzero) {
769             env->hviprio[irq] = env->miprio[irq];
770         }
771         i++;
772     }
773     /* mmte is supposed to have pm.current hardwired to 1 */
774     env->mmte |= (PM_EXT_INITIAL | MMTE_M_PM_CURRENT);
775 #endif
776     env->xl = riscv_cpu_mxl(env);
777     riscv_cpu_update_mask(env);
778     cs->exception_index = RISCV_EXCP_NONE;
779     env->load_res = -1;
780     set_default_nan_mode(1, &env->fp_status);
781 
782 #ifndef CONFIG_USER_ONLY
783     if (cpu->cfg.debug) {
784         riscv_trigger_init(env);
785     }
786 
787     if (kvm_enabled()) {
788         kvm_riscv_reset_vcpu(cpu);
789     }
790 #endif
791 }
792 
793 static void riscv_cpu_disas_set_info(CPUState *s, disassemble_info *info)
794 {
795     RISCVCPU *cpu = RISCV_CPU(s);
796 
797     switch (riscv_cpu_mxl(&cpu->env)) {
798     case MXL_RV32:
799         info->print_insn = print_insn_riscv32;
800         break;
801     case MXL_RV64:
802         info->print_insn = print_insn_riscv64;
803         break;
804     case MXL_RV128:
805         info->print_insn = print_insn_riscv128;
806         break;
807     default:
808         g_assert_not_reached();
809     }
810 }
811 
812 /*
813  * Check consistency between the chosen extensions while setting
814  * cpu->cfg accordingly, and call set_misa() at the end.
815  */
816 static void riscv_cpu_validate_set_extensions(RISCVCPU *cpu, Error **errp)
817 {
818     CPURISCVState *env = &cpu->env;
819     uint32_t ext = 0;
820 
821     /* Do some ISA extension error checking */
822     if (cpu->cfg.ext_g && !(cpu->cfg.ext_i && cpu->cfg.ext_m &&
823                             cpu->cfg.ext_a && cpu->cfg.ext_f &&
824                             cpu->cfg.ext_d &&
825                             cpu->cfg.ext_icsr && cpu->cfg.ext_ifencei)) {
826         warn_report("Setting G will also set IMAFD_Zicsr_Zifencei");
827         cpu->cfg.ext_i = true;
828         cpu->cfg.ext_m = true;
829         cpu->cfg.ext_a = true;
830         cpu->cfg.ext_f = true;
831         cpu->cfg.ext_d = true;
832         cpu->cfg.ext_icsr = true;
833         cpu->cfg.ext_ifencei = true;
834     }
835 
836     if (cpu->cfg.ext_i && cpu->cfg.ext_e) {
837         error_setg(errp,
838                    "I and E extensions are incompatible");
839         return;
840     }
841 
842     if (!cpu->cfg.ext_i && !cpu->cfg.ext_e) {
843         error_setg(errp,
844                    "Either I or E extension must be set");
845         return;
846     }
847 
848     if (cpu->cfg.ext_s && !cpu->cfg.ext_u) {
849         error_setg(errp,
850                    "Setting S extension without U extension is illegal");
851         return;
852     }
853 
854     if (cpu->cfg.ext_h && !cpu->cfg.ext_i) {
855         error_setg(errp,
856                    "H depends on an I base integer ISA with 32 x registers");
857         return;
858     }
859 
860     if (cpu->cfg.ext_h && !cpu->cfg.ext_s) {
861         error_setg(errp, "H extension implicitly requires S-mode");
862         return;
863     }
864 
865     if (cpu->cfg.ext_f && !cpu->cfg.ext_icsr) {
866         error_setg(errp, "F extension requires Zicsr");
867         return;
868     }
869 
870     if ((cpu->cfg.ext_zawrs) && !cpu->cfg.ext_a) {
871         error_setg(errp, "Zawrs extension requires A extension");
872         return;
873     }
874 
875     if (cpu->cfg.ext_zfh) {
876         cpu->cfg.ext_zfhmin = true;
877     }
878 
879     if (cpu->cfg.ext_zfhmin && !cpu->cfg.ext_f) {
880         error_setg(errp, "Zfh/Zfhmin extensions require F extension");
881         return;
882     }
883 
884     if (cpu->cfg.ext_d && !cpu->cfg.ext_f) {
885         error_setg(errp, "D extension requires F extension");
886         return;
887     }
888 
889     /* The V vector extension depends on the Zve64d extension */
890     if (cpu->cfg.ext_v) {
891         cpu->cfg.ext_zve64d = true;
892     }
893 
894     /* The Zve64d extension depends on the Zve64f extension */
895     if (cpu->cfg.ext_zve64d) {
896         cpu->cfg.ext_zve64f = true;
897     }
898 
899     /* The Zve64f extension depends on the Zve32f extension */
900     if (cpu->cfg.ext_zve64f) {
901         cpu->cfg.ext_zve32f = true;
902     }
903 
904     if (cpu->cfg.ext_zve64d && !cpu->cfg.ext_d) {
905         error_setg(errp, "Zve64d/V extensions require D extension");
906         return;
907     }
908 
909     if (cpu->cfg.ext_zve32f && !cpu->cfg.ext_f) {
910         error_setg(errp, "Zve32f/Zve64f extensions require F extension");
911         return;
912     }
913 
914     if (cpu->cfg.ext_zvfh) {
915         cpu->cfg.ext_zvfhmin = true;
916     }
917 
918     if (cpu->cfg.ext_zvfhmin && !cpu->cfg.ext_zve32f) {
919         error_setg(errp, "Zvfh/Zvfhmin extensions require Zve32f extension");
920         return;
921     }
922 
923     if (cpu->cfg.ext_zvfh && !cpu->cfg.ext_zfhmin) {
924         error_setg(errp, "Zvfh extension requires Zfhmin extension");
925         return;
926     }
927 
928     /* Set the ISA extensions; checks should have happened above */
929     if (cpu->cfg.ext_zhinx) {
930         cpu->cfg.ext_zhinxmin = true;
931     }
932 
933     if (cpu->cfg.ext_zdinx || cpu->cfg.ext_zhinxmin) {
934         cpu->cfg.ext_zfinx = true;
935     }
936 
937     if (cpu->cfg.ext_zfinx) {
938         if (!cpu->cfg.ext_icsr) {
939             error_setg(errp, "Zfinx extension requires Zicsr");
940             return;
941         }
942         if (cpu->cfg.ext_f) {
943             error_setg(errp,
944                        "Zfinx cannot be supported together with F extension");
945             return;
946         }
947     }
948 
949     if (cpu->cfg.ext_zce) {
950         cpu->cfg.ext_zca = true;
951         cpu->cfg.ext_zcb = true;
952         cpu->cfg.ext_zcmp = true;
953         cpu->cfg.ext_zcmt = true;
954         if (cpu->cfg.ext_f && env->misa_mxl_max == MXL_RV32) {
955             cpu->cfg.ext_zcf = true;
956         }
957     }
958 
959     if (cpu->cfg.ext_c) {
960         cpu->cfg.ext_zca = true;
961         if (cpu->cfg.ext_f && env->misa_mxl_max == MXL_RV32) {
962             cpu->cfg.ext_zcf = true;
963         }
964         if (cpu->cfg.ext_d) {
965             cpu->cfg.ext_zcd = true;
966         }
967     }
968 
969     if (env->misa_mxl_max != MXL_RV32 && cpu->cfg.ext_zcf) {
970         error_setg(errp, "Zcf extension is only relevant to RV32");
971         return;
972     }
973 
974     if (!cpu->cfg.ext_f && cpu->cfg.ext_zcf) {
975         error_setg(errp, "Zcf extension requires F extension");
976         return;
977     }
978 
979     if (!cpu->cfg.ext_d && cpu->cfg.ext_zcd) {
980         error_setg(errp, "Zcd extension requires D extension");
981         return;
982     }
983 
984     if ((cpu->cfg.ext_zcf || cpu->cfg.ext_zcd || cpu->cfg.ext_zcb ||
985          cpu->cfg.ext_zcmp || cpu->cfg.ext_zcmt) && !cpu->cfg.ext_zca) {
986         error_setg(errp, "Zcf/Zcd/Zcb/Zcmp/Zcmt extensions require Zca "
987                          "extension");
988         return;
989     }
990 
991     if (cpu->cfg.ext_zcd && (cpu->cfg.ext_zcmp || cpu->cfg.ext_zcmt)) {
992         error_setg(errp, "Zcmp/Zcmt extensions are incompatible with "
993                          "Zcd extension");
994         return;
995     }
996 
997     if (cpu->cfg.ext_zcmt && !cpu->cfg.ext_icsr) {
998         error_setg(errp, "Zcmt extension requires Zicsr extension");
999         return;
1000     }
1001 
1002     if (cpu->cfg.ext_zk) {
1003         cpu->cfg.ext_zkn = true;
1004         cpu->cfg.ext_zkr = true;
1005         cpu->cfg.ext_zkt = true;
1006     }
1007 
1008     if (cpu->cfg.ext_zkn) {
1009         cpu->cfg.ext_zbkb = true;
1010         cpu->cfg.ext_zbkc = true;
1011         cpu->cfg.ext_zbkx = true;
1012         cpu->cfg.ext_zkne = true;
1013         cpu->cfg.ext_zknd = true;
1014         cpu->cfg.ext_zknh = true;
1015     }
1016 
1017     if (cpu->cfg.ext_zks) {
1018         cpu->cfg.ext_zbkb = true;
1019         cpu->cfg.ext_zbkc = true;
1020         cpu->cfg.ext_zbkx = true;
1021         cpu->cfg.ext_zksed = true;
1022         cpu->cfg.ext_zksh = true;
1023     }
1024 
1025     if (cpu->cfg.ext_i) {
1026         ext |= RVI;
1027     }
1028     if (cpu->cfg.ext_e) {
1029         ext |= RVE;
1030     }
1031     if (cpu->cfg.ext_m) {
1032         ext |= RVM;
1033     }
1034     if (cpu->cfg.ext_a) {
1035         ext |= RVA;
1036     }
1037     if (cpu->cfg.ext_f) {
1038         ext |= RVF;
1039     }
1040     if (cpu->cfg.ext_d) {
1041         ext |= RVD;
1042     }
1043     if (cpu->cfg.ext_c) {
1044         ext |= RVC;
1045     }
1046     if (cpu->cfg.ext_s) {
1047         ext |= RVS;
1048     }
1049     if (cpu->cfg.ext_u) {
1050         ext |= RVU;
1051     }
1052     if (cpu->cfg.ext_h) {
1053         ext |= RVH;
1054     }
1055     if (cpu->cfg.ext_v) {
1056         int vext_version = VEXT_VERSION_1_00_0;
1057         ext |= RVV;
1058         if (!is_power_of_2(cpu->cfg.vlen)) {
1059             error_setg(errp,
1060                        "Vector extension VLEN must be a power of 2");
1061             return;
1062         }
1063         if (cpu->cfg.vlen > RV_VLEN_MAX || cpu->cfg.vlen < 128) {
1064             error_setg(errp,
1065                        "Vector extension implementation only supports VLEN "
1066                        "in the range [128, %d]", RV_VLEN_MAX);
1067             return;
1068         }
1069         if (!is_power_of_2(cpu->cfg.elen)) {
1070             error_setg(errp,
1071                        "Vector extension ELEN must be a power of 2");
1072             return;
1073         }
1074         if (cpu->cfg.elen > 64 || cpu->cfg.elen < 8) {
1075             error_setg(errp,
1076                        "Vector extension implementation only supports ELEN "
1077                        "in the range [8, 64]");
1078             return;
1079         }
1080         if (cpu->cfg.vext_spec) {
1081             if (!g_strcmp0(cpu->cfg.vext_spec, "v1.0")) {
1082                 vext_version = VEXT_VERSION_1_00_0;
1083             } else {
1084                 error_setg(errp,
1085                            "Unsupported vector spec version '%s'",
1086                            cpu->cfg.vext_spec);
1087                 return;
1088             }
1089         } else {
1090             qemu_log("vector version is not specified, "
1091                      "using the default value v1.0\n");
1092         }
1093         set_vext_version(env, vext_version);
1094     }
1095     if (cpu->cfg.ext_j) {
1096         ext |= RVJ;
1097     }
1098 
1099     set_misa(env, env->misa_mxl, ext);
1100 }
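
/*
 * Example of the implied-extension expansion performed above (a sketch,
 * using the hypothetical command line "-cpu rv64,v=true"): enabling V pulls
 * in Zve64d, which pulls in Zve64f, which pulls in Zve32f, and the checks
 * then require D and F to be present; similarly, "zk=true" enables
 * Zkn/Zkr/Zkt, and Zkn in turn enables Zbkb/Zbkc/Zbkx/Zkne/Zknd/Zknh.
 */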
1101 
1102 #ifndef CONFIG_USER_ONLY
1103 static void riscv_cpu_satp_mode_finalize(RISCVCPU *cpu, Error **errp)
1104 {
1105     bool rv32 = riscv_cpu_mxl(&cpu->env) == MXL_RV32;
1106     uint8_t satp_mode_map_max;
1107     uint8_t satp_mode_supported_max =
1108                         satp_mode_max_from_map(cpu->cfg.satp_mode.supported);
1109 
1110     if (cpu->cfg.satp_mode.map == 0) {
1111         if (cpu->cfg.satp_mode.init == 0) {
1112             /* If unset by the user, we fall back to the default satp mode. */
1113             set_satp_mode_default_map(cpu);
1114         } else {
1115             /*
1116              * Find the lowest level that was disabled and then enable the
1117              * first valid level below it, as given by
1118              * valid_vm_1_10_32/64.
1119              */
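            /*
             * Worked example (hypothetical command line
             * "-cpu rv64,sv48=off" on a hart whose supported map goes up to
             * sv57): 'init' has the sv48 bit set, so the loop below finds
             * i == sv48 and enables the next lower supported mode, sv39,
             * which becomes the effective maximum.
             */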
1120             for (int i = 1; i < 16; ++i) {
1121                 if ((cpu->cfg.satp_mode.init & (1 << i)) &&
1122                     (cpu->cfg.satp_mode.supported & (1 << i))) {
1123                     for (int j = i - 1; j >= 0; --j) {
1124                         if (cpu->cfg.satp_mode.supported & (1 << j)) {
1125                             cpu->cfg.satp_mode.map |= (1 << j);
1126                             break;
1127                         }
1128                     }
1129                     break;
1130                 }
1131             }
1132         }
1133     }
1134 
1135     satp_mode_map_max = satp_mode_max_from_map(cpu->cfg.satp_mode.map);
1136 
1137     /* Make sure the user asked for a supported configuration (HW and QEMU) */
1138     if (satp_mode_map_max > satp_mode_supported_max) {
1139         error_setg(errp, "satp_mode %s is higher than hw max capability %s",
1140                    satp_mode_str(satp_mode_map_max, rv32),
1141                    satp_mode_str(satp_mode_supported_max, rv32));
1142         return;
1143     }
1144 
1145     /*
1146      * Make sure the user did not ask for an invalid configuration as per
1147      * the specification.
1148      */
1149     if (!rv32) {
1150         for (int i = satp_mode_map_max - 1; i >= 0; --i) {
1151             if (!(cpu->cfg.satp_mode.map & (1 << i)) &&
1152                 (cpu->cfg.satp_mode.init & (1 << i)) &&
1153                 (cpu->cfg.satp_mode.supported & (1 << i))) {
1154                 error_setg(errp, "cannot disable %s satp mode if %s "
1155                            "is enabled", satp_mode_str(i, false),
1156                            satp_mode_str(satp_mode_map_max, false));
1157                 return;
1158             }
1159         }
1160     }
1161 
1162     /* Finally expand the map so that all valid modes are set */
1163     for (int i = satp_mode_map_max - 1; i >= 0; --i) {
1164         if (cpu->cfg.satp_mode.supported & (1 << i)) {
1165             cpu->cfg.satp_mode.map |= (1 << i);
1166         }
1167     }
1168 }
1169 #endif
1170 
1171 static void riscv_cpu_finalize_features(RISCVCPU *cpu, Error **errp)
1172 {
1173 #ifndef CONFIG_USER_ONLY
1174     Error *local_err = NULL;
1175 
1176     riscv_cpu_satp_mode_finalize(cpu, &local_err);
1177     if (local_err != NULL) {
1178         error_propagate(errp, local_err);
1179         return;
1180     }
1181 #endif
1182 }
1183 
1184 static void riscv_cpu_realize(DeviceState *dev, Error **errp)
1185 {
1186     CPUState *cs = CPU(dev);
1187     RISCVCPU *cpu = RISCV_CPU(dev);
1188     CPURISCVState *env = &cpu->env;
1189     RISCVCPUClass *mcc = RISCV_CPU_GET_CLASS(dev);
1190     CPUClass *cc = CPU_CLASS(mcc);
1191     int i, priv_version = -1;
1192     Error *local_err = NULL;
1193 
1194     cpu_exec_realizefn(cs, &local_err);
1195     if (local_err != NULL) {
1196         error_propagate(errp, local_err);
1197         return;
1198     }
1199 
1200     if (cpu->cfg.priv_spec) {
1201         if (!g_strcmp0(cpu->cfg.priv_spec, "v1.12.0")) {
1202             priv_version = PRIV_VERSION_1_12_0;
1203         } else if (!g_strcmp0(cpu->cfg.priv_spec, "v1.11.0")) {
1204             priv_version = PRIV_VERSION_1_11_0;
1205         } else if (!g_strcmp0(cpu->cfg.priv_spec, "v1.10.0")) {
1206             priv_version = PRIV_VERSION_1_10_0;
1207         } else {
1208             error_setg(errp,
1209                        "Unsupported privilege spec version '%s'",
1210                        cpu->cfg.priv_spec);
1211             return;
1212         }
1213     }
1214 
1215     if (priv_version >= PRIV_VERSION_1_10_0) {
1216         set_priv_version(env, priv_version);
1217     }
1218 
1219     /* Force-disable extensions whose minimum privilege spec version is not met */
1220     for (i = 0; i < ARRAY_SIZE(isa_edata_arr); i++) {
1221         if (isa_ext_is_enabled(cpu, &isa_edata_arr[i]) &&
1222             (env->priv_ver < isa_edata_arr[i].min_version)) {
1223             isa_ext_update_enabled(cpu, &isa_edata_arr[i], false);
1224 #ifndef CONFIG_USER_ONLY
1225             warn_report("disabling %s extension for hart 0x" TARGET_FMT_lx
1226                         " because privilege spec version does not match",
1227                         isa_edata_arr[i].name, env->mhartid);
1228 #else
1229             warn_report("disabling %s extension because "
1230                         "privilege spec version does not match",
1231                         isa_edata_arr[i].name);
1232 #endif
1233         }
1234     }
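    /*
     * For instance (hypothetical command line "-cpu rv64,priv_spec=v1.10.0"):
     * extensions in isa_edata_arr whose min_version is PRIV_VERSION_1_11_0 or
     * PRIV_VERSION_1_12_0, such as zicbom or sstc, are switched off by the
     * loop above and a warning is printed for each of them.
     */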
1235 
1236     if (cpu->cfg.epmp && !cpu->cfg.pmp) {
1237         /*
1238          * Enhanced PMP should only be available
1239          * on harts with PMP support
1240          */
1241         error_setg(errp, "Invalid configuration: EPMP requires PMP support");
1242         return;
1243     }
1244 
1245 
1246 #ifndef CONFIG_USER_ONLY
1247     if (cpu->cfg.ext_sstc) {
1248         riscv_timer_init(cpu);
1249     }
1250 #endif /* CONFIG_USER_ONLY */
1251 
1252     /* Validate that MISA_MXL is set properly. */
1253     switch (env->misa_mxl_max) {
1254 #ifdef TARGET_RISCV64
1255     case MXL_RV64:
1256     case MXL_RV128:
1257         cc->gdb_core_xml_file = "riscv-64bit-cpu.xml";
1258         break;
1259 #endif
1260     case MXL_RV32:
1261         cc->gdb_core_xml_file = "riscv-32bit-cpu.xml";
1262         break;
1263     default:
1264         g_assert_not_reached();
1265     }
1266     assert(env->misa_mxl_max == env->misa_mxl);
1267 
1268     riscv_cpu_validate_set_extensions(cpu, &local_err);
1269     if (local_err != NULL) {
1270         error_propagate(errp, local_err);
1271         return;
1272     }
1273 
1274 #ifndef CONFIG_USER_ONLY
1275     if (cpu->cfg.pmu_num) {
1276         if (!riscv_pmu_init(cpu, cpu->cfg.pmu_num) && cpu->cfg.ext_sscofpmf) {
1277             cpu->pmu_timer = timer_new_ns(QEMU_CLOCK_VIRTUAL,
1278                                           riscv_pmu_timer_cb, cpu);
1279         }
1280     }
1281 #endif
1282 
1283     riscv_cpu_finalize_features(cpu, &local_err);
1284     if (local_err != NULL) {
1285         error_propagate(errp, local_err);
1286         return;
1287     }
1288 
1289     riscv_cpu_register_gdb_regs_for_features(cs);
1290 
1291     qemu_init_vcpu(cs);
1292     cpu_reset(cs);
1293 
1294     mcc->parent_realize(dev, errp);
1295 }
1296 
1297 #ifndef CONFIG_USER_ONLY
1298 static void cpu_riscv_get_satp(Object *obj, Visitor *v, const char *name,
1299                                void *opaque, Error **errp)
1300 {
1301     RISCVSATPMap *satp_map = opaque;
1302     uint8_t satp = satp_mode_from_str(name);
1303     bool value;
1304 
1305     value = satp_map->map & (1 << satp);
1306 
1307     visit_type_bool(v, name, &value, errp);
1308 }
1309 
1310 static void cpu_riscv_set_satp(Object *obj, Visitor *v, const char *name,
1311                                void *opaque, Error **errp)
1312 {
1313     RISCVSATPMap *satp_map = opaque;
1314     uint8_t satp = satp_mode_from_str(name);
1315     bool value;
1316 
1317     if (!visit_type_bool(v, name, &value, errp)) {
1318         return;
1319     }
1320 
1321     satp_map->map = deposit32(satp_map->map, satp, 1, value);
1322     satp_map->init |= 1 << satp;
1323 }
1324 
1325 static void riscv_add_satp_mode_properties(Object *obj)
1326 {
1327     RISCVCPU *cpu = RISCV_CPU(obj);
1328 
1329     if (cpu->env.misa_mxl == MXL_RV32) {
1330         object_property_add(obj, "sv32", "bool", cpu_riscv_get_satp,
1331                             cpu_riscv_set_satp, NULL, &cpu->cfg.satp_mode);
1332     } else {
1333         object_property_add(obj, "sv39", "bool", cpu_riscv_get_satp,
1334                             cpu_riscv_set_satp, NULL, &cpu->cfg.satp_mode);
1335         object_property_add(obj, "sv48", "bool", cpu_riscv_get_satp,
1336                             cpu_riscv_set_satp, NULL, &cpu->cfg.satp_mode);
1337         object_property_add(obj, "sv57", "bool", cpu_riscv_get_satp,
1338                             cpu_riscv_set_satp, NULL, &cpu->cfg.satp_mode);
1339         object_property_add(obj, "sv64", "bool", cpu_riscv_get_satp,
1340                             cpu_riscv_set_satp, NULL, &cpu->cfg.satp_mode);
1341     }
1342 }
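
/*
 * The properties added above can be driven from the command line; e.g. the
 * hypothetical invocation "-cpu rv64,sv57=off,sv48=off" limits an RV64 CPU
 * to sv39, while "-cpu rv32,sv32=off" leaves an RV32 CPU with bare
 * translation only. riscv_cpu_satp_mode_finalize() validates and expands the
 * resulting map at realize time.
 */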
1343 
1344 static void riscv_cpu_set_irq(void *opaque, int irq, int level)
1345 {
1346     RISCVCPU *cpu = RISCV_CPU(opaque);
1347     CPURISCVState *env = &cpu->env;
1348 
1349     if (irq < IRQ_LOCAL_MAX) {
1350         switch (irq) {
1351         case IRQ_U_SOFT:
1352         case IRQ_S_SOFT:
1353         case IRQ_VS_SOFT:
1354         case IRQ_M_SOFT:
1355         case IRQ_U_TIMER:
1356         case IRQ_S_TIMER:
1357         case IRQ_VS_TIMER:
1358         case IRQ_M_TIMER:
1359         case IRQ_U_EXT:
1360         case IRQ_VS_EXT:
1361         case IRQ_M_EXT:
1362             if (kvm_enabled()) {
1363                 kvm_riscv_set_irq(cpu, irq, level);
1364             } else {
1365                 riscv_cpu_update_mip(env, 1 << irq, BOOL_TO_MASK(level));
1366             }
1367             break;
1368         case IRQ_S_EXT:
1369             if (kvm_enabled()) {
1370                 kvm_riscv_set_irq(cpu, irq, level);
1371             } else {
1372                 env->external_seip = level;
1373                 riscv_cpu_update_mip(env, 1 << irq,
1374                                      BOOL_TO_MASK(level | env->software_seip));
1375             }
1376             break;
1377         default:
1378             g_assert_not_reached();
1379         }
1380     } else if (irq < (IRQ_LOCAL_MAX + IRQ_LOCAL_GUEST_MAX)) {
1381         /* Require H-extension for handling guest local interrupts */
1382         if (!riscv_has_ext(env, RVH)) {
1383             g_assert_not_reached();
1384         }
1385 
1386         /* Compute bit position in HGEIP CSR */
1387         irq = irq - IRQ_LOCAL_MAX + 1;
1388         if (env->geilen < irq) {
1389             g_assert_not_reached();
1390         }
1391 
1392         /* Update HGEIP CSR */
1393         env->hgeip &= ~((target_ulong)1 << irq);
1394         if (level) {
1395             env->hgeip |= (target_ulong)1 << irq;
1396         }
1397 
1398         /* Update mip.SGEIP bit */
1399         riscv_cpu_update_mip(env, MIP_SGEIP,
1400                              BOOL_TO_MASK(!!(env->hgeie & env->hgeip)));
1401     } else {
1402         g_assert_not_reached();
1403     }
1404 }
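
/*
 * A board model would typically drive these lines through the qdev GPIO API,
 * e.g. (a sketch, not taken from any particular machine):
 *
 *   qemu_irq mtip = qdev_get_gpio_in(DEVICE(cpu), IRQ_M_TIMER);
 *   qemu_set_irq(mtip, 1);
 *
 * which asserts the machine timer interrupt and ends up in
 * riscv_cpu_set_irq() above with irq == IRQ_M_TIMER and level == 1.
 */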
1405 #endif /* CONFIG_USER_ONLY */
1406 
1407 static void riscv_cpu_init(Object *obj)
1408 {
1409     RISCVCPU *cpu = RISCV_CPU(obj);
1410 
1411     cpu->cfg.ext_ifencei = true;
1412     cpu->cfg.ext_icsr = true;
1413     cpu->cfg.mmu = true;
1414     cpu->cfg.pmp = true;
1415 
1416     cpu_set_cpustate_pointers(cpu);
1417 
1418 #ifndef CONFIG_USER_ONLY
1419     qdev_init_gpio_in(DEVICE(cpu), riscv_cpu_set_irq,
1420                       IRQ_LOCAL_MAX + IRQ_LOCAL_GUEST_MAX);
1421 #endif /* CONFIG_USER_ONLY */
1422 }
1423 
1424 static Property riscv_cpu_extensions[] = {
1425     /* Defaults for standard extensions */
1426     DEFINE_PROP_BOOL("i", RISCVCPU, cfg.ext_i, true),
1427     DEFINE_PROP_BOOL("e", RISCVCPU, cfg.ext_e, false),
1428     DEFINE_PROP_BOOL("g", RISCVCPU, cfg.ext_g, false),
1429     DEFINE_PROP_BOOL("m", RISCVCPU, cfg.ext_m, true),
1430     DEFINE_PROP_BOOL("a", RISCVCPU, cfg.ext_a, true),
1431     DEFINE_PROP_BOOL("f", RISCVCPU, cfg.ext_f, true),
1432     DEFINE_PROP_BOOL("d", RISCVCPU, cfg.ext_d, true),
1433     DEFINE_PROP_BOOL("c", RISCVCPU, cfg.ext_c, true),
1434     DEFINE_PROP_BOOL("s", RISCVCPU, cfg.ext_s, true),
1435     DEFINE_PROP_BOOL("u", RISCVCPU, cfg.ext_u, true),
1436     DEFINE_PROP_BOOL("v", RISCVCPU, cfg.ext_v, false),
1437     DEFINE_PROP_BOOL("h", RISCVCPU, cfg.ext_h, true),
1438     DEFINE_PROP_UINT8("pmu-num", RISCVCPU, cfg.pmu_num, 16),
1439     DEFINE_PROP_BOOL("sscofpmf", RISCVCPU, cfg.ext_sscofpmf, false),
1440     DEFINE_PROP_BOOL("Zifencei", RISCVCPU, cfg.ext_ifencei, true),
1441     DEFINE_PROP_BOOL("Zicsr", RISCVCPU, cfg.ext_icsr, true),
1442     DEFINE_PROP_BOOL("Zihintpause", RISCVCPU, cfg.ext_zihintpause, true),
1443     DEFINE_PROP_BOOL("Zawrs", RISCVCPU, cfg.ext_zawrs, true),
1444     DEFINE_PROP_BOOL("Zfh", RISCVCPU, cfg.ext_zfh, false),
1445     DEFINE_PROP_BOOL("Zfhmin", RISCVCPU, cfg.ext_zfhmin, false),
1446     DEFINE_PROP_BOOL("Zve32f", RISCVCPU, cfg.ext_zve32f, false),
1447     DEFINE_PROP_BOOL("Zve64f", RISCVCPU, cfg.ext_zve64f, false),
1448     DEFINE_PROP_BOOL("Zve64d", RISCVCPU, cfg.ext_zve64d, false),
1449     DEFINE_PROP_BOOL("mmu", RISCVCPU, cfg.mmu, true),
1450     DEFINE_PROP_BOOL("pmp", RISCVCPU, cfg.pmp, true),
1451     DEFINE_PROP_BOOL("sstc", RISCVCPU, cfg.ext_sstc, true),
1452 
1453     DEFINE_PROP_STRING("priv_spec", RISCVCPU, cfg.priv_spec),
1454     DEFINE_PROP_STRING("vext_spec", RISCVCPU, cfg.vext_spec),
1455     DEFINE_PROP_UINT16("vlen", RISCVCPU, cfg.vlen, 128),
1456     DEFINE_PROP_UINT16("elen", RISCVCPU, cfg.elen, 64),
1457 
1458     DEFINE_PROP_BOOL("svadu", RISCVCPU, cfg.ext_svadu, true),
1459 
1460     DEFINE_PROP_BOOL("svinval", RISCVCPU, cfg.ext_svinval, false),
1461     DEFINE_PROP_BOOL("svnapot", RISCVCPU, cfg.ext_svnapot, false),
1462     DEFINE_PROP_BOOL("svpbmt", RISCVCPU, cfg.ext_svpbmt, false),
1463 
1464     DEFINE_PROP_BOOL("zba", RISCVCPU, cfg.ext_zba, true),
1465     DEFINE_PROP_BOOL("zbb", RISCVCPU, cfg.ext_zbb, true),
1466     DEFINE_PROP_BOOL("zbc", RISCVCPU, cfg.ext_zbc, true),
1467     DEFINE_PROP_BOOL("zbkb", RISCVCPU, cfg.ext_zbkb, false),
1468     DEFINE_PROP_BOOL("zbkc", RISCVCPU, cfg.ext_zbkc, false),
1469     DEFINE_PROP_BOOL("zbkx", RISCVCPU, cfg.ext_zbkx, false),
1470     DEFINE_PROP_BOOL("zbs", RISCVCPU, cfg.ext_zbs, true),
1471     DEFINE_PROP_BOOL("zk", RISCVCPU, cfg.ext_zk, false),
1472     DEFINE_PROP_BOOL("zkn", RISCVCPU, cfg.ext_zkn, false),
1473     DEFINE_PROP_BOOL("zknd", RISCVCPU, cfg.ext_zknd, false),
1474     DEFINE_PROP_BOOL("zkne", RISCVCPU, cfg.ext_zkne, false),
1475     DEFINE_PROP_BOOL("zknh", RISCVCPU, cfg.ext_zknh, false),
1476     DEFINE_PROP_BOOL("zkr", RISCVCPU, cfg.ext_zkr, false),
1477     DEFINE_PROP_BOOL("zks", RISCVCPU, cfg.ext_zks, false),
1478     DEFINE_PROP_BOOL("zksed", RISCVCPU, cfg.ext_zksed, false),
1479     DEFINE_PROP_BOOL("zksh", RISCVCPU, cfg.ext_zksh, false),
1480     DEFINE_PROP_BOOL("zkt", RISCVCPU, cfg.ext_zkt, false),
1481 
1482     DEFINE_PROP_BOOL("zdinx", RISCVCPU, cfg.ext_zdinx, false),
1483     DEFINE_PROP_BOOL("zfinx", RISCVCPU, cfg.ext_zfinx, false),
1484     DEFINE_PROP_BOOL("zhinx", RISCVCPU, cfg.ext_zhinx, false),
1485     DEFINE_PROP_BOOL("zhinxmin", RISCVCPU, cfg.ext_zhinxmin, false),
1486 
1487     DEFINE_PROP_BOOL("zicbom", RISCVCPU, cfg.ext_icbom, true),
1488     DEFINE_PROP_UINT16("cbom_blocksize", RISCVCPU, cfg.cbom_blocksize, 64),
1489     DEFINE_PROP_BOOL("zicboz", RISCVCPU, cfg.ext_icboz, true),
1490     DEFINE_PROP_UINT16("cboz_blocksize", RISCVCPU, cfg.cboz_blocksize, 64),
1491 
1492     DEFINE_PROP_BOOL("zmmul", RISCVCPU, cfg.ext_zmmul, false),
1493 
1494     /* Vendor-specific custom extensions */
1495     DEFINE_PROP_BOOL("xtheadba", RISCVCPU, cfg.ext_xtheadba, false),
1496     DEFINE_PROP_BOOL("xtheadbb", RISCVCPU, cfg.ext_xtheadbb, false),
1497     DEFINE_PROP_BOOL("xtheadbs", RISCVCPU, cfg.ext_xtheadbs, false),
1498     DEFINE_PROP_BOOL("xtheadcmo", RISCVCPU, cfg.ext_xtheadcmo, false),
1499     DEFINE_PROP_BOOL("xtheadcondmov", RISCVCPU, cfg.ext_xtheadcondmov, false),
1500     DEFINE_PROP_BOOL("xtheadfmemidx", RISCVCPU, cfg.ext_xtheadfmemidx, false),
1501     DEFINE_PROP_BOOL("xtheadfmv", RISCVCPU, cfg.ext_xtheadfmv, false),
1502     DEFINE_PROP_BOOL("xtheadmac", RISCVCPU, cfg.ext_xtheadmac, false),
1503     DEFINE_PROP_BOOL("xtheadmemidx", RISCVCPU, cfg.ext_xtheadmemidx, false),
1504     DEFINE_PROP_BOOL("xtheadmempair", RISCVCPU, cfg.ext_xtheadmempair, false),
1505     DEFINE_PROP_BOOL("xtheadsync", RISCVCPU, cfg.ext_xtheadsync, false),
1506     DEFINE_PROP_BOOL("xventanacondops", RISCVCPU, cfg.ext_XVentanaCondOps, false),
1507 
1508     /* These are experimental, so mark them with 'x-' */
1509     DEFINE_PROP_BOOL("x-zicond", RISCVCPU, cfg.ext_zicond, false),
1510     DEFINE_PROP_BOOL("x-j", RISCVCPU, cfg.ext_j, false),
1511 
1512     DEFINE_PROP_BOOL("x-zca", RISCVCPU, cfg.ext_zca, false),
1513     DEFINE_PROP_BOOL("x-zcb", RISCVCPU, cfg.ext_zcb, false),
1514     DEFINE_PROP_BOOL("x-zcd", RISCVCPU, cfg.ext_zcd, false),
1515     DEFINE_PROP_BOOL("x-zce", RISCVCPU, cfg.ext_zce, false),
1516     DEFINE_PROP_BOOL("x-zcf", RISCVCPU, cfg.ext_zcf, false),
1517     DEFINE_PROP_BOOL("x-zcmp", RISCVCPU, cfg.ext_zcmp, false),
1518     DEFINE_PROP_BOOL("x-zcmt", RISCVCPU, cfg.ext_zcmt, false),
1519 
1520     /* ePMP 0.9.3 */
1521     DEFINE_PROP_BOOL("x-epmp", RISCVCPU, cfg.epmp, false),
1522     DEFINE_PROP_BOOL("x-smaia", RISCVCPU, cfg.ext_smaia, false),
1523     DEFINE_PROP_BOOL("x-ssaia", RISCVCPU, cfg.ext_ssaia, false),
1524 
1525     DEFINE_PROP_BOOL("x-zvfh", RISCVCPU, cfg.ext_zvfh, false),
1526     DEFINE_PROP_BOOL("x-zvfhmin", RISCVCPU, cfg.ext_zvfhmin, false),
1527 
1528     DEFINE_PROP_END_OF_LIST(),
1529 };
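
/*
 * All of the properties above are ordinary qdev properties, so they can be
 * toggled per CPU on the command line, e.g. with the hypothetical invocation:
 *
 *   -cpu rv64,zbb=false,Zfh=true,vlen=256
 *
 * Note that some property names are capitalized ("Zfh", "Zicsr", ...) while
 * others are lower-case; the spelling must match the table above exactly.
 */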
1530 
1531 /*
1532  * Register CPU props based on env.misa_ext. If a non-zero
1533  * value was set, register only the required cpu->cfg.ext_*
1534  * properties and return early. env.misa_ext = 0 means that we want
1535  * all the default properties to be registered.
1536  */
1537 static void register_cpu_props(Object *obj)
1538 {
1539     RISCVCPU *cpu = RISCV_CPU(obj);
1540     uint32_t misa_ext = cpu->env.misa_ext;
1541     Property *prop;
1542     DeviceState *dev = DEVICE(obj);
1543 
1544     /*
1545      * If misa_ext is not zero, set cfg properties now to
1546      * allow them to be read during riscv_cpu_realize()
1547      * later on.
1548      */
1549     if (cpu->env.misa_ext != 0) {
1550         cpu->cfg.ext_i = misa_ext & RVI;
1551         cpu->cfg.ext_e = misa_ext & RVE;
1552         cpu->cfg.ext_m = misa_ext & RVM;
1553         cpu->cfg.ext_a = misa_ext & RVA;
1554         cpu->cfg.ext_f = misa_ext & RVF;
1555         cpu->cfg.ext_d = misa_ext & RVD;
1556         cpu->cfg.ext_v = misa_ext & RVV;
1557         cpu->cfg.ext_c = misa_ext & RVC;
1558         cpu->cfg.ext_s = misa_ext & RVS;
1559         cpu->cfg.ext_u = misa_ext & RVU;
1560         cpu->cfg.ext_h = misa_ext & RVH;
1561         cpu->cfg.ext_j = misa_ext & RVJ;
1562 
1563         /*
1564          * We don't want to register the default riscv_cpu_extensions
1565          * properties in this case.
1566          */
1567         return;
1568     }
1569 
1570     for (prop = riscv_cpu_extensions; prop && prop->name; prop++) {
1571         qdev_property_add_static(dev, prop);
1572     }
1573 
1574 #ifndef CONFIG_USER_ONLY
1575     riscv_add_satp_mode_properties(obj);
1576 #endif
1577 }
1578 
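/*
 * A minimal sketch (not part of the original source) of how an
 * instance_init function is expected to drive register_cpu_props():
 * a fixed CPU model seeds env.misa_ext via set_misa() first, so only
 * the matching cfg.ext_* flags are set, while the base CPUs pass
 * misa_ext = 0 and get the full default property set registered.
 * The model name below is hypothetical.
 *
 *   static void rv64_example_cpu_init(Object *obj)
 *   {
 *       CPURISCVState *env = &RISCV_CPU(obj)->env;
 *
 *       set_misa(env, MXL_RV64,
 *                RVI | RVM | RVA | RVF | RVD | RVC | RVS | RVU);
 *       register_cpu_props(obj);
 *   }
 */
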
1579 static Property riscv_cpu_properties[] = {
1580     DEFINE_PROP_BOOL("debug", RISCVCPU, cfg.debug, true),
1581 
1582     DEFINE_PROP_UINT32("mvendorid", RISCVCPU, cfg.mvendorid, 0),
1583     DEFINE_PROP_UINT64("marchid", RISCVCPU, cfg.marchid, RISCV_CPU_MARCHID),
1584     DEFINE_PROP_UINT64("mimpid", RISCVCPU, cfg.mimpid, RISCV_CPU_MIMPID),
1585 
1586 #ifndef CONFIG_USER_ONLY
1587     DEFINE_PROP_UINT64("resetvec", RISCVCPU, env.resetvec, DEFAULT_RSTVEC),
1588 #endif
1589 
1590     DEFINE_PROP_BOOL("short-isa-string", RISCVCPU, cfg.short_isa_string, false),
1591 
1592     DEFINE_PROP_BOOL("rvv_ta_all_1s", RISCVCPU, cfg.rvv_ta_all_1s, false),
1593     DEFINE_PROP_BOOL("rvv_ma_all_1s", RISCVCPU, cfg.rvv_ma_all_1s, false),
1594 
1595     /*
1596      * write_misa() is marked as experimental for now, so mark
1597      * it with the 'x-' prefix and default it to 'false'.
1598      */
1599     DEFINE_PROP_BOOL("x-misa-w", RISCVCPU, cfg.misa_w, false),
1600     DEFINE_PROP_END_OF_LIST(),
1601 };
1602 
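/*
 * Illustrative usage only (not part of the original source): the
 * machine-level properties above are set the same way as the extension
 * flags, for example:
 *
 *   -cpu rv64,mvendorid=0x489,short-isa-string=true
 *
 * The mvendorid value is just an example JEDEC-style ID, not a
 * recommendation.
 */
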
1603 static gchar *riscv_gdb_arch_name(CPUState *cs)
1604 {
1605     RISCVCPU *cpu = RISCV_CPU(cs);
1606     CPURISCVState *env = &cpu->env;
1607 
1608     switch (riscv_cpu_mxl(env)) {
1609     case MXL_RV32:
1610         return g_strdup("riscv:rv32");
1611     case MXL_RV64:
1612     case MXL_RV128:
1613         return g_strdup("riscv:rv64");
1614     default:
1615         g_assert_not_reached();
1616     }
1617 }
1618 
1619 static const char *riscv_gdb_get_dynamic_xml(CPUState *cs, const char *xmlname)
1620 {
1621     RISCVCPU *cpu = RISCV_CPU(cs);
1622 
1623     if (strcmp(xmlname, "riscv-csr.xml") == 0) {
1624         return cpu->dyn_csr_xml;
1625     } else if (strcmp(xmlname, "riscv-vector.xml") == 0) {
1626         return cpu->dyn_vreg_xml;
1627     }
1628 
1629     return NULL;
1630 }
1631 
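/*
 * For context (not part of the original source): the gdbstub invokes the
 * hook above when the remote debugger requests a feature document, e.g.
 * a packet along the lines of
 *
 *   qXfer:features:read:riscv-csr.xml:0,ffb
 *
 * and receives the dynamically generated CSR or vector register XML
 * built in target/riscv/gdbstub.c.  The offset/length are illustrative.
 */
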
1632 #ifndef CONFIG_USER_ONLY
1633 static int64_t riscv_get_arch_id(CPUState *cs)
1634 {
1635     RISCVCPU *cpu = RISCV_CPU(cs);
1636 
1637     return cpu->env.mhartid;
1638 }
1639 
1640 #include "hw/core/sysemu-cpu-ops.h"
1641 
1642 static const struct SysemuCPUOps riscv_sysemu_ops = {
1643     .get_phys_page_debug = riscv_cpu_get_phys_page_debug,
1644     .write_elf64_note = riscv_cpu_write_elf64_note,
1645     .write_elf32_note = riscv_cpu_write_elf32_note,
1646     .legacy_vmsd = &vmstate_riscv_cpu,
1647 };
1648 #endif
1649 
1650 #include "hw/core/tcg-cpu-ops.h"
1651 
1652 static const struct TCGCPUOps riscv_tcg_ops = {
1653     .initialize = riscv_translate_init,
1654     .synchronize_from_tb = riscv_cpu_synchronize_from_tb,
1655     .restore_state_to_opc = riscv_restore_state_to_opc,
1656 
1657 #ifndef CONFIG_USER_ONLY
1658     .tlb_fill = riscv_cpu_tlb_fill,
1659     .cpu_exec_interrupt = riscv_cpu_exec_interrupt,
1660     .do_interrupt = riscv_cpu_do_interrupt,
1661     .do_transaction_failed = riscv_cpu_do_transaction_failed,
1662     .do_unaligned_access = riscv_cpu_do_unaligned_access,
1663     .debug_excp_handler = riscv_cpu_debug_excp_handler,
1664     .debug_check_breakpoint = riscv_cpu_debug_check_breakpoint,
1665     .debug_check_watchpoint = riscv_cpu_debug_check_watchpoint,
1666 #endif /* !CONFIG_USER_ONLY */
1667 };
1668 
1669 static void riscv_cpu_class_init(ObjectClass *c, void *data)
1670 {
1671     RISCVCPUClass *mcc = RISCV_CPU_CLASS(c);
1672     CPUClass *cc = CPU_CLASS(c);
1673     DeviceClass *dc = DEVICE_CLASS(c);
1674     ResettableClass *rc = RESETTABLE_CLASS(c);
1675 
1676     device_class_set_parent_realize(dc, riscv_cpu_realize,
1677                                     &mcc->parent_realize);
1678 
1679     resettable_class_set_parent_phases(rc, NULL, riscv_cpu_reset_hold, NULL,
1680                                        &mcc->parent_phases);
1681 
1682     cc->class_by_name = riscv_cpu_class_by_name;
1683     cc->has_work = riscv_cpu_has_work;
1684     cc->dump_state = riscv_cpu_dump_state;
1685     cc->set_pc = riscv_cpu_set_pc;
1686     cc->get_pc = riscv_cpu_get_pc;
1687     cc->gdb_read_register = riscv_cpu_gdb_read_register;
1688     cc->gdb_write_register = riscv_cpu_gdb_write_register;
1689     cc->gdb_num_core_regs = 33;
1690     cc->gdb_stop_before_watchpoint = true;
1691     cc->disas_set_info = riscv_cpu_disas_set_info;
1692 #ifndef CONFIG_USER_ONLY
1693     cc->sysemu_ops = &riscv_sysemu_ops;
1694     cc->get_arch_id = riscv_get_arch_id;
1695 #endif
1696     cc->gdb_arch_name = riscv_gdb_arch_name;
1697     cc->gdb_get_dynamic_xml = riscv_gdb_get_dynamic_xml;
1698     cc->tcg_ops = &riscv_tcg_ops;
1699 
1700     device_class_set_props(dc, riscv_cpu_properties);
1701 }
1702 
1703 static void riscv_isa_string_ext(RISCVCPU *cpu, char **isa_str, int max_str_len)
1704 {
1705     char *old = *isa_str;
1706     char *new = *isa_str;
1707     int i;
1708 
1709     for (i = 0; i < ARRAY_SIZE(isa_edata_arr); i++) {
1710         if (isa_edata_arr[i].multi_letter &&
1711             isa_ext_is_enabled(cpu, &isa_edata_arr[i])) {
1712             new = g_strconcat(old, "_", isa_edata_arr[i].name, NULL);
1713             g_free(old);
1714             old = new;
1715         }
1716     }
1717 
1718     *isa_str = new;
1719 }
1720 
1721 char *riscv_isa_string(RISCVCPU *cpu)
1722 {
1723     int i;
1724     const size_t maxlen = sizeof("rv128") + sizeof(riscv_single_letter_exts);
1725     char *isa_str = g_new(char, maxlen);
1726     char *p = isa_str + snprintf(isa_str, maxlen, "rv%d", TARGET_LONG_BITS);
1727     for (i = 0; i < sizeof(riscv_single_letter_exts) - 1; i++) {
1728         if (cpu->env.misa_ext & RV(riscv_single_letter_exts[i])) {
1729             *p++ = qemu_tolower(riscv_single_letter_exts[i]);
1730         }
1731     }
1732     *p = '\0';
1733     if (!cpu->cfg.short_isa_string) {
1734         riscv_isa_string_ext(cpu, &isa_str, maxlen);
1735     }
1736     return isa_str;
1737 }
1738 
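/*
 * Worked example (illustrative, not part of the original source): on a
 * 64-bit target with misa_ext = RVI | RVM | RVA | RVF | RVD | RVC, the
 * single-letter pass above yields "rv64imafdc".  If, say, only the Zicsr
 * and Zifencei entries of isa_edata_arr are additionally enabled, the
 * multi-letter pass extends this to "rv64imafdc_zicsr_zifencei";
 * short-isa-string=true skips that second pass.
 */
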
1739 static gint riscv_cpu_list_compare(gconstpointer a, gconstpointer b)
1740 {
1741     ObjectClass *class_a = (ObjectClass *)a;
1742     ObjectClass *class_b = (ObjectClass *)b;
1743     const char *name_a, *name_b;
1744 
1745     name_a = object_class_get_name(class_a);
1746     name_b = object_class_get_name(class_b);
1747     return strcmp(name_a, name_b);
1748 }
1749 
1750 static void riscv_cpu_list_entry(gpointer data, gpointer user_data)
1751 {
1752     const char *typename = object_class_get_name(OBJECT_CLASS(data));
1753     int len = strlen(typename) - strlen(RISCV_CPU_TYPE_SUFFIX);
1754 
1755     qemu_printf("%.*s\n", len, typename);
1756 }
1757 
1758 void riscv_cpu_list(void)
1759 {
1760     GSList *list;
1761 
1762     list = object_class_get_list(TYPE_RISCV_CPU, false);
1763     list = g_slist_sort(list, riscv_cpu_list_compare);
1764     g_slist_foreach(list, riscv_cpu_list_entry, NULL);
1765     g_slist_free(list);
1766 }
1767 
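/*
 * For context (not part of the original source): this is the handler
 * behind '-cpu help'.  RISCV_CPU_TYPE_SUFFIX is stripped from each type
 * name, so the sorted output looks roughly like:
 *
 *   $ qemu-system-riscv64 -cpu help
 *   any
 *   rv64
 *   shakti-c
 *   sifive-e51
 *   ...
 *
 * The exact list depends on the target and the configured CPU types.
 */
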
1768 #define DEFINE_CPU(type_name, initfn)      \
1769     {                                      \
1770         .name = type_name,                 \
1771         .parent = TYPE_RISCV_CPU,          \
1772         .instance_init = initfn            \
1773     }
1774 
1775 static const TypeInfo riscv_cpu_type_infos[] = {
1776     {
1777         .name = TYPE_RISCV_CPU,
1778         .parent = TYPE_CPU,
1779         .instance_size = sizeof(RISCVCPU),
1780         .instance_align = __alignof__(RISCVCPU),
1781         .instance_init = riscv_cpu_init,
1782         .abstract = true,
1783         .class_size = sizeof(RISCVCPUClass),
1784         .class_init = riscv_cpu_class_init,
1785     },
1786     DEFINE_CPU(TYPE_RISCV_CPU_ANY,              riscv_any_cpu_init),
1787 #if defined(CONFIG_KVM)
1788     DEFINE_CPU(TYPE_RISCV_CPU_HOST,             riscv_host_cpu_init),
1789 #endif
1790 #if defined(TARGET_RISCV32)
1791     DEFINE_CPU(TYPE_RISCV_CPU_BASE32,           rv32_base_cpu_init),
1792     DEFINE_CPU(TYPE_RISCV_CPU_IBEX,             rv32_ibex_cpu_init),
1793     DEFINE_CPU(TYPE_RISCV_CPU_SIFIVE_E31,       rv32_sifive_e_cpu_init),
1794     DEFINE_CPU(TYPE_RISCV_CPU_SIFIVE_E34,       rv32_imafcu_nommu_cpu_init),
1795     DEFINE_CPU(TYPE_RISCV_CPU_SIFIVE_U34,       rv32_sifive_u_cpu_init),
1796 #elif defined(TARGET_RISCV64)
1797     DEFINE_CPU(TYPE_RISCV_CPU_BASE64,           rv64_base_cpu_init),
1798     DEFINE_CPU(TYPE_RISCV_CPU_SIFIVE_E51,       rv64_sifive_e_cpu_init),
1799     DEFINE_CPU(TYPE_RISCV_CPU_SIFIVE_U54,       rv64_sifive_u_cpu_init),
1800     DEFINE_CPU(TYPE_RISCV_CPU_SHAKTI_C,         rv64_sifive_u_cpu_init),
1801     DEFINE_CPU(TYPE_RISCV_CPU_THEAD_C906,       rv64_thead_c906_cpu_init),
1802     DEFINE_CPU(TYPE_RISCV_CPU_BASE128,          rv128_base_cpu_init),
1803 #endif
1804 };
1805 
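/*
 * For context (not part of the original source): DEFINE_TYPES() below
 * registers every entry at module-init time, and riscv_cpu_class_by_name()
 * appends RISCV_CPU_TYPE_SUFFIX to the user-supplied model name, so e.g.
 * '-cpu sifive-u54' resolves to the "sifive-u54-riscv-cpu" type defined
 * through DEFINE_CPU() above.
 */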
1806 DEFINE_TYPES(riscv_cpu_type_infos)
1807