xref: /openbmc/qemu/target/riscv/cpu.c (revision 62a09b9b)
1 /*
2  * QEMU RISC-V CPU
3  *
4  * Copyright (c) 2016-2017 Sagar Karandikar, sagark@eecs.berkeley.edu
5  * Copyright (c) 2017-2018 SiFive, Inc.
6  *
7  * This program is free software; you can redistribute it and/or modify it
8  * under the terms and conditions of the GNU General Public License,
9  * version 2 or later, as published by the Free Software Foundation.
10  *
11  * This program is distributed in the hope it will be useful, but WITHOUT
12  * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
13  * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
14  * more details.
15  *
16  * You should have received a copy of the GNU General Public License along with
17  * this program.  If not, see <http://www.gnu.org/licenses/>.
18  */
19 
20 #include "qemu/osdep.h"
21 #include "qemu/qemu-print.h"
22 #include "qemu/ctype.h"
23 #include "qemu/log.h"
24 #include "cpu.h"
25 #include "internals.h"
26 #include "exec/exec-all.h"
27 #include "qapi/error.h"
28 #include "qemu/error-report.h"
29 #include "hw/qdev-properties.h"
30 #include "migration/vmstate.h"
31 #include "fpu/softfloat-helpers.h"
32 #include "sysemu/kvm.h"
33 #include "kvm_riscv.h"
34 
35 /* RISC-V CPU definitions */
36 
37 #define RISCV_CPU_MARCHID   ((QEMU_VERSION_MAJOR << 16) | \
38                              (QEMU_VERSION_MINOR << 8)  | \
39                              (QEMU_VERSION_MICRO))
40 #define RISCV_CPU_MIMPID    RISCV_CPU_MARCHID
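/*
 * The marchid/mimpid values encode the QEMU version, e.g. a hypothetical
 * QEMU 7.1.0 would yield (7 << 16) | (1 << 8) | 0 = 0x70100.
 */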
41 
42 static const char riscv_single_letter_exts[] = "IEMAFDQCPVH";
43 
44 struct isa_ext_data {
45     const char *name;
46     bool multi_letter;
47     int min_version;
48     int ext_enable_offset;
49 };
50 
51 #define ISA_EXT_DATA_ENTRY(_name, _m_letter, _min_ver, _prop) \
52 {#_name, _m_letter, _min_ver, offsetof(struct RISCVCPUConfig, _prop)}
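/*
 * For illustration, ISA_EXT_DATA_ENTRY(zba, true, PRIV_VERSION_1_12_0, ext_zba)
 * expands to:
 *   {"zba", true, PRIV_VERSION_1_12_0, offsetof(struct RISCVCPUConfig, ext_zba)}
 */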
53 
54 /**
55  * Here are the ordering rules of extension naming defined by the RISC-V
56  * specification:
57  * 1. All extensions should be separated from other multi-letter extensions
58  *    by an underscore.
59  * 2. The first letter following the 'Z' conventionally indicates the most
60  *    closely related alphabetical extension category, IMAFDQLCBKJTPVH.
61  *    If multiple 'Z' extensions are named, they should be ordered first
62  *    by category, then alphabetically within a category.
63  * 3. Standard supervisor-level extensions (starting with 'S') should be
64  *    listed after standard unprivileged extensions.  If multiple
65  *    supervisor-level extensions are listed, they should be ordered
66  *    alphabetically.
67  * 4. Non-standard extensions (starting with 'X') must be listed after all
68  *    standard extensions. They must be separated from other multi-letter
69  *    extensions by an underscore.
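 *
 *    An illustrative string that follows these rules:
 *    rv64imafdc_zicsr_zifencei_zba_svinval_xventanacondops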
70  */
71 static const struct isa_ext_data isa_edata_arr[] = {
72     ISA_EXT_DATA_ENTRY(h, false, PRIV_VERSION_1_12_0, ext_h),
73     ISA_EXT_DATA_ENTRY(v, false, PRIV_VERSION_1_12_0, ext_v),
74     ISA_EXT_DATA_ENTRY(zicsr, true, PRIV_VERSION_1_10_0, ext_icsr),
75     ISA_EXT_DATA_ENTRY(zifencei, true, PRIV_VERSION_1_10_0, ext_ifencei),
76     ISA_EXT_DATA_ENTRY(zfh, true, PRIV_VERSION_1_12_0, ext_zfh),
77     ISA_EXT_DATA_ENTRY(zfhmin, true, PRIV_VERSION_1_12_0, ext_zfhmin),
78     ISA_EXT_DATA_ENTRY(zfinx, true, PRIV_VERSION_1_12_0, ext_zfinx),
79     ISA_EXT_DATA_ENTRY(zdinx, true, PRIV_VERSION_1_12_0, ext_zdinx),
80     ISA_EXT_DATA_ENTRY(zba, true, PRIV_VERSION_1_12_0, ext_zba),
81     ISA_EXT_DATA_ENTRY(zbb, true, PRIV_VERSION_1_12_0, ext_zbb),
82     ISA_EXT_DATA_ENTRY(zbc, true, PRIV_VERSION_1_12_0, ext_zbc),
83     ISA_EXT_DATA_ENTRY(zbkb, true, PRIV_VERSION_1_12_0, ext_zbkb),
84     ISA_EXT_DATA_ENTRY(zbkc, true, PRIV_VERSION_1_12_0, ext_zbkc),
85     ISA_EXT_DATA_ENTRY(zbkx, true, PRIV_VERSION_1_12_0, ext_zbkx),
86     ISA_EXT_DATA_ENTRY(zbs, true, PRIV_VERSION_1_12_0, ext_zbs),
87     ISA_EXT_DATA_ENTRY(zk, true, PRIV_VERSION_1_12_0, ext_zk),
88     ISA_EXT_DATA_ENTRY(zkn, true, PRIV_VERSION_1_12_0, ext_zkn),
89     ISA_EXT_DATA_ENTRY(zknd, true, PRIV_VERSION_1_12_0, ext_zknd),
90     ISA_EXT_DATA_ENTRY(zkne, true, PRIV_VERSION_1_12_0, ext_zkne),
91     ISA_EXT_DATA_ENTRY(zknh, true, PRIV_VERSION_1_12_0, ext_zknh),
92     ISA_EXT_DATA_ENTRY(zkr, true, PRIV_VERSION_1_12_0, ext_zkr),
93     ISA_EXT_DATA_ENTRY(zks, true, PRIV_VERSION_1_12_0, ext_zks),
94     ISA_EXT_DATA_ENTRY(zksed, true, PRIV_VERSION_1_12_0, ext_zksed),
95     ISA_EXT_DATA_ENTRY(zksh, true, PRIV_VERSION_1_12_0, ext_zksh),
96     ISA_EXT_DATA_ENTRY(zkt, true, PRIV_VERSION_1_12_0, ext_zkt),
97     ISA_EXT_DATA_ENTRY(zve32f, true, PRIV_VERSION_1_12_0, ext_zve32f),
98     ISA_EXT_DATA_ENTRY(zve64f, true, PRIV_VERSION_1_12_0, ext_zve64f),
99     ISA_EXT_DATA_ENTRY(zhinx, true, PRIV_VERSION_1_12_0, ext_zhinx),
100     ISA_EXT_DATA_ENTRY(zhinxmin, true, PRIV_VERSION_1_12_0, ext_zhinxmin),
101     ISA_EXT_DATA_ENTRY(svinval, true, PRIV_VERSION_1_12_0, ext_svinval),
102     ISA_EXT_DATA_ENTRY(svnapot, true, PRIV_VERSION_1_12_0, ext_svnapot),
103     ISA_EXT_DATA_ENTRY(svpbmt, true, PRIV_VERSION_1_12_0, ext_svpbmt),
104 };
105 
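/*
 * The helpers below use ext_enable_offset to locate the corresponding bool
 * flag inside RISCVCPUConfig, so the isa_edata_arr table drives both the
 * "is enabled" check and the forced disable done in riscv_cpu_realize().
 */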
106 static bool isa_ext_is_enabled(RISCVCPU *cpu,
107                                const struct isa_ext_data *edata)
108 {
109     bool *ext_enabled = (void *)&cpu->cfg + edata->ext_enable_offset;
110 
111     return *ext_enabled;
112 }
113 
114 static void isa_ext_update_enabled(RISCVCPU *cpu,
115                                    const struct isa_ext_data *edata, bool en)
116 {
117     bool *ext_enabled = (void *)&cpu->cfg + edata->ext_enable_offset;
118 
119     *ext_enabled = en;
120 }
121 
122 const char * const riscv_int_regnames[] = {
123   "x0/zero", "x1/ra",  "x2/sp",  "x3/gp",  "x4/tp",  "x5/t0",   "x6/t1",
124   "x7/t2",   "x8/s0",  "x9/s1",  "x10/a0", "x11/a1", "x12/a2",  "x13/a3",
125   "x14/a4",  "x15/a5", "x16/a6", "x17/a7", "x18/s2", "x19/s3",  "x20/s4",
126   "x21/s5",  "x22/s6", "x23/s7", "x24/s8", "x25/s9", "x26/s10", "x27/s11",
127   "x28/t3",  "x29/t4", "x30/t5", "x31/t6"
128 };
129 
130 const char * const riscv_int_regnamesh[] = {
131   "x0h/zeroh", "x1h/rah",  "x2h/sph",   "x3h/gph",   "x4h/tph",  "x5h/t0h",
132   "x6h/t1h",   "x7h/t2h",  "x8h/s0h",   "x9h/s1h",   "x10h/a0h", "x11h/a1h",
133   "x12h/a2h",  "x13h/a3h", "x14h/a4h",  "x15h/a5h",  "x16h/a6h", "x17h/a7h",
134   "x18h/s2h",  "x19h/s3h", "x20h/s4h",  "x21h/s5h",  "x22h/s6h", "x23h/s7h",
135   "x24h/s8h",  "x25h/s9h", "x26h/s10h", "x27h/s11h", "x28h/t3h", "x29h/t4h",
136   "x30h/t5h",  "x31h/t6h"
137 };
138 
139 const char * const riscv_fpr_regnames[] = {
140   "f0/ft0",   "f1/ft1",  "f2/ft2",   "f3/ft3",   "f4/ft4",  "f5/ft5",
141   "f6/ft6",   "f7/ft7",  "f8/fs0",   "f9/fs1",   "f10/fa0", "f11/fa1",
142   "f12/fa2",  "f13/fa3", "f14/fa4",  "f15/fa5",  "f16/fa6", "f17/fa7",
143   "f18/fs2",  "f19/fs3", "f20/fs4",  "f21/fs5",  "f22/fs6", "f23/fs7",
144   "f24/fs8",  "f25/fs9", "f26/fs10", "f27/fs11", "f28/ft8", "f29/ft9",
145   "f30/ft10", "f31/ft11"
146 };
147 
148 static const char * const riscv_excp_names[] = {
149     "misaligned_fetch",
150     "fault_fetch",
151     "illegal_instruction",
152     "breakpoint",
153     "misaligned_load",
154     "fault_load",
155     "misaligned_store",
156     "fault_store",
157     "user_ecall",
158     "supervisor_ecall",
159     "hypervisor_ecall",
160     "machine_ecall",
161     "exec_page_fault",
162     "load_page_fault",
163     "reserved",
164     "store_page_fault",
165     "reserved",
166     "reserved",
167     "reserved",
168     "reserved",
169     "guest_exec_page_fault",
170     "guest_load_page_fault",
171     "reserved",
172     "guest_store_page_fault",
173 };
174 
175 static const char * const riscv_intr_names[] = {
176     "u_software",
177     "s_software",
178     "vs_software",
179     "m_software",
180     "u_timer",
181     "s_timer",
182     "vs_timer",
183     "m_timer",
184     "u_external",
185     "s_external",
186     "vs_external",
187     "m_external",
188     "reserved",
189     "reserved",
190     "reserved",
191     "reserved"
192 };
193 
194 static void register_cpu_props(DeviceState *dev);
195 
196 const char *riscv_cpu_get_trap_name(target_ulong cause, bool async)
197 {
198     if (async) {
199         return (cause < ARRAY_SIZE(riscv_intr_names)) ?
200                riscv_intr_names[cause] : "(unknown)";
201     } else {
202         return (cause < ARRAY_SIZE(riscv_excp_names)) ?
203                riscv_excp_names[cause] : "(unknown)";
204     }
205 }
206 
207 static void set_misa(CPURISCVState *env, RISCVMXL mxl, uint32_t ext)
208 {
209     env->misa_mxl_max = env->misa_mxl = mxl;
210     env->misa_ext_mask = env->misa_ext = ext;
211 }
212 
213 static void set_priv_version(CPURISCVState *env, int priv_ver)
214 {
215     env->priv_ver = priv_ver;
216 }
217 
218 static void set_vext_version(CPURISCVState *env, int vext_ver)
219 {
220     env->vext_ver = vext_ver;
221 }
222 
223 static void set_resetvec(CPURISCVState *env, target_ulong resetvec)
224 {
225 #ifndef CONFIG_USER_ONLY
226     env->resetvec = resetvec;
227 #endif
228 }
229 
230 static void riscv_any_cpu_init(Object *obj)
231 {
232     CPURISCVState *env = &RISCV_CPU(obj)->env;
233 #if defined(TARGET_RISCV32)
234     set_misa(env, MXL_RV32, RVI | RVM | RVA | RVF | RVD | RVC | RVU);
235 #elif defined(TARGET_RISCV64)
236     set_misa(env, MXL_RV64, RVI | RVM | RVA | RVF | RVD | RVC | RVU);
237 #endif
238     set_priv_version(env, PRIV_VERSION_1_12_0);
239     register_cpu_props(DEVICE(obj));
240 }
241 
242 #if defined(TARGET_RISCV64)
243 static void rv64_base_cpu_init(Object *obj)
244 {
245     CPURISCVState *env = &RISCV_CPU(obj)->env;
246     /* We set this in the realize function */
247     set_misa(env, MXL_RV64, 0);
248     register_cpu_props(DEVICE(obj));
249     /* Set latest version of privileged specification */
250     set_priv_version(env, PRIV_VERSION_1_12_0);
251 }
252 
253 static void rv64_sifive_u_cpu_init(Object *obj)
254 {
255     CPURISCVState *env = &RISCV_CPU(obj)->env;
256     set_misa(env, MXL_RV64, RVI | RVM | RVA | RVF | RVD | RVC | RVS | RVU);
257     set_priv_version(env, PRIV_VERSION_1_10_0);
258 }
259 
260 static void rv64_sifive_e_cpu_init(Object *obj)
261 {
262     CPURISCVState *env = &RISCV_CPU(obj)->env;
263     RISCVCPU *cpu = RISCV_CPU(obj);
264 
265     set_misa(env, MXL_RV64, RVI | RVM | RVA | RVC | RVU);
266     set_priv_version(env, PRIV_VERSION_1_10_0);
267     cpu->cfg.mmu = false;
268 }
269 
270 static void rv128_base_cpu_init(Object *obj)
271 {
272     if (qemu_tcg_mttcg_enabled()) {
273         /* Missing 128-bit aligned atomics */
274         error_report("128-bit RISC-V currently does not work with Multi "
275                      "Threaded TCG. Please use: -accel tcg,thread=single");
276         exit(EXIT_FAILURE);
277     }
278     CPURISCVState *env = &RISCV_CPU(obj)->env;
279     /* We set this in the realize function */
280     set_misa(env, MXL_RV128, 0);
281     register_cpu_props(DEVICE(obj));
282     /* Set latest version of privileged specification */
283     set_priv_version(env, PRIV_VERSION_1_12_0);
284 }
285 #else
286 static void rv32_base_cpu_init(Object *obj)
287 {
288     CPURISCVState *env = &RISCV_CPU(obj)->env;
289     /* We set this in the realize function */
290     set_misa(env, MXL_RV32, 0);
291     register_cpu_props(DEVICE(obj));
292     /* Set latest version of privileged specification */
293     set_priv_version(env, PRIV_VERSION_1_12_0);
294 }
295 
296 static void rv32_sifive_u_cpu_init(Object *obj)
297 {
298     CPURISCVState *env = &RISCV_CPU(obj)->env;
299     set_misa(env, MXL_RV32, RVI | RVM | RVA | RVF | RVD | RVC | RVS | RVU);
300     set_priv_version(env, PRIV_VERSION_1_10_0);
301 }
302 
303 static void rv32_sifive_e_cpu_init(Object *obj)
304 {
305     CPURISCVState *env = &RISCV_CPU(obj)->env;
306     RISCVCPU *cpu = RISCV_CPU(obj);
307 
308     set_misa(env, MXL_RV32, RVI | RVM | RVA | RVC | RVU);
309     set_priv_version(env, PRIV_VERSION_1_10_0);
310     cpu->cfg.mmu = false;
311 }
312 
313 static void rv32_ibex_cpu_init(Object *obj)
314 {
315     CPURISCVState *env = &RISCV_CPU(obj)->env;
316     RISCVCPU *cpu = RISCV_CPU(obj);
317 
318     set_misa(env, MXL_RV32, RVI | RVM | RVC | RVU);
319     set_priv_version(env, PRIV_VERSION_1_11_0);
320     cpu->cfg.mmu = false;
321     cpu->cfg.epmp = true;
322 }
323 
324 static void rv32_imafcu_nommu_cpu_init(Object *obj)
325 {
326     CPURISCVState *env = &RISCV_CPU(obj)->env;
327     RISCVCPU *cpu = RISCV_CPU(obj);
328 
329     set_misa(env, MXL_RV32, RVI | RVM | RVA | RVF | RVC | RVU);
330     set_priv_version(env, PRIV_VERSION_1_10_0);
331     set_resetvec(env, DEFAULT_RSTVEC);
332     cpu->cfg.mmu = false;
333 }
334 #endif
335 
336 #if defined(CONFIG_KVM)
337 static void riscv_host_cpu_init(Object *obj)
338 {
339     CPURISCVState *env = &RISCV_CPU(obj)->env;
340 #if defined(TARGET_RISCV32)
341     set_misa(env, MXL_RV32, 0);
342 #elif defined(TARGET_RISCV64)
343     set_misa(env, MXL_RV64, 0);
344 #endif
345     register_cpu_props(DEVICE(obj));
346 }
347 #endif
348 
349 static ObjectClass *riscv_cpu_class_by_name(const char *cpu_model)
350 {
351     ObjectClass *oc;
352     char *typename;
353     char **cpuname;
354 
355     cpuname = g_strsplit(cpu_model, ",", 1);
356     typename = g_strdup_printf(RISCV_CPU_TYPE_NAME("%s"), cpuname[0]);
357     oc = object_class_by_name(typename);
358     g_strfreev(cpuname);
359     g_free(typename);
360     if (!oc || !object_class_dynamic_cast(oc, TYPE_RISCV_CPU) ||
361         object_class_is_abstract(oc)) {
362         return NULL;
363     }
364     return oc;
365 }
366 
367 static void riscv_cpu_dump_state(CPUState *cs, FILE *f, int flags)
368 {
369     RISCVCPU *cpu = RISCV_CPU(cs);
370     CPURISCVState *env = &cpu->env;
371     int i;
372 
373 #if !defined(CONFIG_USER_ONLY)
374     if (riscv_has_ext(env, RVH)) {
375         qemu_fprintf(f, " %s %d\n", "V      =  ", riscv_cpu_virt_enabled(env));
376     }
377 #endif
378     qemu_fprintf(f, " %s " TARGET_FMT_lx "\n", "pc      ", env->pc);
379 #ifndef CONFIG_USER_ONLY
380     {
381         static const int dump_csrs[] = {
382             CSR_MHARTID,
383             CSR_MSTATUS,
384             CSR_MSTATUSH,
385             CSR_HSTATUS,
386             CSR_VSSTATUS,
387             CSR_MIP,
388             CSR_MIE,
389             CSR_MIDELEG,
390             CSR_HIDELEG,
391             CSR_MEDELEG,
392             CSR_HEDELEG,
393             CSR_MTVEC,
394             CSR_STVEC,
395             CSR_VSTVEC,
396             CSR_MEPC,
397             CSR_SEPC,
398             CSR_VSEPC,
399             CSR_MCAUSE,
400             CSR_SCAUSE,
401             CSR_VSCAUSE,
402             CSR_MTVAL,
403             CSR_STVAL,
404             CSR_HTVAL,
405             CSR_MTVAL2,
406             CSR_MSCRATCH,
407             CSR_SSCRATCH,
408             CSR_SATP,
409             CSR_MMTE,
410             CSR_UPMBASE,
411             CSR_UPMMASK,
412             CSR_SPMBASE,
413             CSR_SPMMASK,
414             CSR_MPMBASE,
415             CSR_MPMMASK,
416         };
417 
418         for (int i = 0; i < ARRAY_SIZE(dump_csrs); ++i) {
419             int csrno = dump_csrs[i];
420             target_ulong val = 0;
421             RISCVException res = riscv_csrrw_debug(env, csrno, &val, 0, 0);
422 
423             /*
424              * Rely on the smode, hmode, etc, predicates within csr.c
425              * to do the filtering of the registers that are present.
426              */
427             if (res == RISCV_EXCP_NONE) {
428                 qemu_fprintf(f, " %-8s " TARGET_FMT_lx "\n",
429                              csr_ops[csrno].name, val);
430             }
431         }
432     }
433 #endif
434 
435     for (i = 0; i < 32; i++) {
436         qemu_fprintf(f, " %-8s " TARGET_FMT_lx,
437                      riscv_int_regnames[i], env->gpr[i]);
438         if ((i & 3) == 3) {
439             qemu_fprintf(f, "\n");
440         }
441     }
442     if (flags & CPU_DUMP_FPU) {
443         for (i = 0; i < 32; i++) {
444             qemu_fprintf(f, " %-8s %016" PRIx64,
445                          riscv_fpr_regnames[i], env->fpr[i]);
446             if ((i & 3) == 3) {
447                 qemu_fprintf(f, "\n");
448             }
449         }
450     }
451 }
452 
453 static void riscv_cpu_set_pc(CPUState *cs, vaddr value)
454 {
455     RISCVCPU *cpu = RISCV_CPU(cs);
456     CPURISCVState *env = &cpu->env;
457 
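    /*
     * Keep the PC sign-extended to the full target_ulong width when the
     * effective XLEN is 32 bits; the int32_t cast below performs the
     * sign extension.
     */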
458     if (env->xl == MXL_RV32) {
459         env->pc = (int32_t)value;
460     } else {
461         env->pc = value;
462     }
463 }
464 
465 static void riscv_cpu_synchronize_from_tb(CPUState *cs,
466                                           const TranslationBlock *tb)
467 {
468     RISCVCPU *cpu = RISCV_CPU(cs);
469     CPURISCVState *env = &cpu->env;
470     RISCVMXL xl = FIELD_EX32(tb->flags, TB_FLAGS, XL);
471 
472     if (xl == MXL_RV32) {
473         env->pc = (int32_t)tb->pc;
474     } else {
475         env->pc = tb->pc;
476     }
477 }
478 
479 static bool riscv_cpu_has_work(CPUState *cs)
480 {
481 #ifndef CONFIG_USER_ONLY
482     RISCVCPU *cpu = RISCV_CPU(cs);
483     CPURISCVState *env = &cpu->env;
484     /*
485      * Definition of the WFI instruction requires it to ignore the privilege
486      * mode and delegation registers, but respect individual enables
487      */
488     return riscv_cpu_all_pending(env) != 0;
489 #else
490     return true;
491 #endif
492 }
493 
494 void restore_state_to_opc(CPURISCVState *env, TranslationBlock *tb,
495                           target_ulong *data)
496 {
497     RISCVMXL xl = FIELD_EX32(tb->flags, TB_FLAGS, XL);
498     if (xl == MXL_RV32) {
499         env->pc = (int32_t)data[0];
500     } else {
501         env->pc = data[0];
502     }
503     env->bins = data[1];
504 }
505 
506 static void riscv_cpu_reset(DeviceState *dev)
507 {
508 #ifndef CONFIG_USER_ONLY
509     uint8_t iprio;
510     int i, irq, rdzero;
511 #endif
512     CPUState *cs = CPU(dev);
513     RISCVCPU *cpu = RISCV_CPU(cs);
514     RISCVCPUClass *mcc = RISCV_CPU_GET_CLASS(cpu);
515     CPURISCVState *env = &cpu->env;
516 
517     mcc->parent_reset(dev);
518 #ifndef CONFIG_USER_ONLY
519     env->misa_mxl = env->misa_mxl_max;
520     env->priv = PRV_M;
521     env->mstatus &= ~(MSTATUS_MIE | MSTATUS_MPRV);
522     if (env->misa_mxl > MXL_RV32) {
523         /*
524          * The reset status of SXL/UXL is undefined, but mstatus is WARL
525          * and we must ensure that the value after init is valid for read.
526          */
527         env->mstatus = set_field(env->mstatus, MSTATUS64_SXL, env->misa_mxl);
528         env->mstatus = set_field(env->mstatus, MSTATUS64_UXL, env->misa_mxl);
529         if (riscv_has_ext(env, RVH)) {
530             env->vsstatus = set_field(env->vsstatus,
531                                       MSTATUS64_SXL, env->misa_mxl);
532             env->vsstatus = set_field(env->vsstatus,
533                                       MSTATUS64_UXL, env->misa_mxl);
534             env->mstatus_hs = set_field(env->mstatus_hs,
535                                         MSTATUS64_SXL, env->misa_mxl);
536             env->mstatus_hs = set_field(env->mstatus_hs,
537                                         MSTATUS64_UXL, env->misa_mxl);
538         }
539     }
540     env->mcause = 0;
541     env->miclaim = MIP_SGEIP;
542     env->pc = env->resetvec;
543     env->bins = 0;
544     env->two_stage_lookup = false;
545 
546     /* Initialize default priorities of local interrupts. */
547     for (i = 0; i < ARRAY_SIZE(env->miprio); i++) {
548         iprio = riscv_cpu_default_priority(i);
549         env->miprio[i] = (i == IRQ_M_EXT) ? 0 : iprio;
550         env->siprio[i] = (i == IRQ_S_EXT) ? 0 : iprio;
551         env->hviprio[i] = 0;
552     }
553     i = 0;
554     while (!riscv_cpu_hviprio_index2irq(i, &irq, &rdzero)) {
555         if (!rdzero) {
556             env->hviprio[irq] = env->miprio[irq];
557         }
558         i++;
559     }
560     /* mmte is supposed to have pm.current hardwired to 1 */
561     env->mmte |= (PM_EXT_INITIAL | MMTE_M_PM_CURRENT);
562 #endif
563     env->xl = riscv_cpu_mxl(env);
564     riscv_cpu_update_mask(env);
565     cs->exception_index = RISCV_EXCP_NONE;
566     env->load_res = -1;
567     set_default_nan_mode(1, &env->fp_status);
568 
569 #ifndef CONFIG_USER_ONLY
570     if (riscv_feature(env, RISCV_FEATURE_DEBUG)) {
571         riscv_trigger_init(env);
572     }
573 
574     if (kvm_enabled()) {
575         kvm_riscv_reset_vcpu(cpu);
576     }
577 #endif
578 }
579 
580 static void riscv_cpu_disas_set_info(CPUState *s, disassemble_info *info)
581 {
582     RISCVCPU *cpu = RISCV_CPU(s);
583 
584     switch (riscv_cpu_mxl(&cpu->env)) {
585     case MXL_RV32:
586         info->print_insn = print_insn_riscv32;
587         break;
588     case MXL_RV64:
589         info->print_insn = print_insn_riscv64;
590         break;
591     case MXL_RV128:
592         info->print_insn = print_insn_riscv128;
593         break;
594     default:
595         g_assert_not_reached();
596     }
597 }
598 
599 static void riscv_cpu_realize(DeviceState *dev, Error **errp)
600 {
601     CPUState *cs = CPU(dev);
602     RISCVCPU *cpu = RISCV_CPU(dev);
603     CPURISCVState *env = &cpu->env;
604     RISCVCPUClass *mcc = RISCV_CPU_GET_CLASS(dev);
605     CPUClass *cc = CPU_CLASS(mcc);
606     int i, priv_version = -1;
607     Error *local_err = NULL;
608 
609     cpu_exec_realizefn(cs, &local_err);
610     if (local_err != NULL) {
611         error_propagate(errp, local_err);
612         return;
613     }
614 
615     if (cpu->cfg.priv_spec) {
616         if (!g_strcmp0(cpu->cfg.priv_spec, "v1.12.0")) {
617             priv_version = PRIV_VERSION_1_12_0;
618         } else if (!g_strcmp0(cpu->cfg.priv_spec, "v1.11.0")) {
619             priv_version = PRIV_VERSION_1_11_0;
620         } else if (!g_strcmp0(cpu->cfg.priv_spec, "v1.10.0")) {
621             priv_version = PRIV_VERSION_1_10_0;
622         } else {
623             error_setg(errp,
624                        "Unsupported privilege spec version '%s'",
625                        cpu->cfg.priv_spec);
626             return;
627         }
628     }
629 
630     if (priv_version >= PRIV_VERSION_1_10_0) {
631         set_priv_version(env, priv_version);
632     }
633 
634     /* Force disable extensions if priv spec version does not match */
635     for (i = 0; i < ARRAY_SIZE(isa_edata_arr); i++) {
636         if (isa_ext_is_enabled(cpu, &isa_edata_arr[i]) &&
637             (env->priv_ver < isa_edata_arr[i].min_version)) {
638             isa_ext_update_enabled(cpu, &isa_edata_arr[i], false);
639 #ifndef CONFIG_USER_ONLY
640             warn_report("disabling %s extension for hart 0x%lx because "
641                         "privilege spec version does not match",
642                         isa_edata_arr[i].name, (unsigned long)env->mhartid);
643 #else
644             warn_report("disabling %s extension because "
645                         "privilege spec version does not match",
646                         isa_edata_arr[i].name);
647 #endif
648         }
649     }
650 
651     if (cpu->cfg.mmu) {
652         riscv_set_feature(env, RISCV_FEATURE_MMU);
653     }
654 
655     if (cpu->cfg.pmp) {
656         riscv_set_feature(env, RISCV_FEATURE_PMP);
657 
658         /*
659          * Enhanced PMP should only be available
660          * on harts with PMP support
661          */
662         if (cpu->cfg.epmp) {
663             riscv_set_feature(env, RISCV_FEATURE_EPMP);
664         }
665     }
666 
667     if (cpu->cfg.aia) {
668         riscv_set_feature(env, RISCV_FEATURE_AIA);
669     }
670 
671     if (cpu->cfg.debug) {
672         riscv_set_feature(env, RISCV_FEATURE_DEBUG);
673     }
674 
675     set_resetvec(env, cpu->cfg.resetvec);
676 
677     /* Validate that MISA_MXL is set properly. */
678     switch (env->misa_mxl_max) {
679 #ifdef TARGET_RISCV64
680     case MXL_RV64:
681     case MXL_RV128:
682         cc->gdb_core_xml_file = "riscv-64bit-cpu.xml";
683         break;
684 #endif
685     case MXL_RV32:
686         cc->gdb_core_xml_file = "riscv-32bit-cpu.xml";
687         break;
688     default:
689         g_assert_not_reached();
690     }
691     assert(env->misa_mxl_max == env->misa_mxl);
692 
693     /* If only MISA_EXT is unset for misa, then set it from properties */
694     if (env->misa_ext == 0) {
695         uint32_t ext = 0;
696 
697         /* Do some ISA extension error checking */
698         if (cpu->cfg.ext_g && !(cpu->cfg.ext_i && cpu->cfg.ext_m &&
699                                 cpu->cfg.ext_a && cpu->cfg.ext_f &&
700                                 cpu->cfg.ext_d &&
701                                 cpu->cfg.ext_icsr && cpu->cfg.ext_ifencei)) {
702             warn_report("Setting G will also set IMAFD_Zicsr_Zifencei");
703             cpu->cfg.ext_i = true;
704             cpu->cfg.ext_m = true;
705             cpu->cfg.ext_a = true;
706             cpu->cfg.ext_f = true;
707             cpu->cfg.ext_d = true;
708             cpu->cfg.ext_icsr = true;
709             cpu->cfg.ext_ifencei = true;
710         }
711 
712         if (cpu->cfg.ext_i && cpu->cfg.ext_e) {
713             error_setg(errp,
714                        "I and E extensions are incompatible");
715             return;
716         }
717 
718         if (!cpu->cfg.ext_i && !cpu->cfg.ext_e) {
719             error_setg(errp,
720                        "Either I or E extension must be set");
721             return;
722         }
723 
724         if (cpu->cfg.ext_s && !cpu->cfg.ext_u) {
725             error_setg(errp,
726                        "Setting S extension without U extension is illegal");
727             return;
728         }
729 
730         if (cpu->cfg.ext_h && !cpu->cfg.ext_i) {
731             error_setg(errp,
732                        "H depends on an I base integer ISA with 32 x registers");
733             return;
734         }
735 
736         if (cpu->cfg.ext_h && !cpu->cfg.ext_s) {
737             error_setg(errp, "H extension implicitly requires S-mode");
738             return;
739         }
740 
741         if (cpu->cfg.ext_f && !cpu->cfg.ext_icsr) {
742             error_setg(errp, "F extension requires Zicsr");
743             return;
744         }
745 
746         if ((cpu->cfg.ext_zfh || cpu->cfg.ext_zfhmin) && !cpu->cfg.ext_f) {
747             error_setg(errp, "Zfh/Zfhmin extensions require F extension");
748             return;
749         }
750 
751         if (cpu->cfg.ext_d && !cpu->cfg.ext_f) {
752             error_setg(errp, "D extension requires F extension");
753             return;
754         }
755 
756         if (cpu->cfg.ext_v && !cpu->cfg.ext_d) {
757             error_setg(errp, "V extension requires D extension");
758             return;
759         }
760 
761         if ((cpu->cfg.ext_zve32f || cpu->cfg.ext_zve64f) && !cpu->cfg.ext_f) {
762             error_setg(errp, "Zve32f/Zve64f extensions require F extension");
763             return;
764         }
765 
766         /* Set the ISA extensions; checks should have happened above */
767         if (cpu->cfg.ext_zdinx || cpu->cfg.ext_zhinx ||
768             cpu->cfg.ext_zhinxmin) {
769             cpu->cfg.ext_zfinx = true;
770         }
771 
772         if (cpu->cfg.ext_zfinx) {
773             if (!cpu->cfg.ext_icsr) {
774                 error_setg(errp, "Zfinx extension requires Zicsr");
775                 return;
776             }
777             if (cpu->cfg.ext_f) {
778                 error_setg(errp,
779                     "Zfinx cannot be supported together with F extension");
780                 return;
781             }
782         }
783 
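        /*
         * Expand the Zk/Zkn/Zks group extensions into their member
         * extensions (per the scalar cryptography extension grouping).
         */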
784         if (cpu->cfg.ext_zk) {
785             cpu->cfg.ext_zkn = true;
786             cpu->cfg.ext_zkr = true;
787             cpu->cfg.ext_zkt = true;
788         }
789 
790         if (cpu->cfg.ext_zkn) {
791             cpu->cfg.ext_zbkb = true;
792             cpu->cfg.ext_zbkc = true;
793             cpu->cfg.ext_zbkx = true;
794             cpu->cfg.ext_zkne = true;
795             cpu->cfg.ext_zknd = true;
796             cpu->cfg.ext_zknh = true;
797         }
798 
799         if (cpu->cfg.ext_zks) {
800             cpu->cfg.ext_zbkb = true;
801             cpu->cfg.ext_zbkc = true;
802             cpu->cfg.ext_zbkx = true;
803             cpu->cfg.ext_zksed = true;
804             cpu->cfg.ext_zksh = true;
805         }
806 
807         if (cpu->cfg.ext_i) {
808             ext |= RVI;
809         }
810         if (cpu->cfg.ext_e) {
811             ext |= RVE;
812         }
813         if (cpu->cfg.ext_m) {
814             ext |= RVM;
815         }
816         if (cpu->cfg.ext_a) {
817             ext |= RVA;
818         }
819         if (cpu->cfg.ext_f) {
820             ext |= RVF;
821         }
822         if (cpu->cfg.ext_d) {
823             ext |= RVD;
824         }
825         if (cpu->cfg.ext_c) {
826             ext |= RVC;
827         }
828         if (cpu->cfg.ext_s) {
829             ext |= RVS;
830         }
831         if (cpu->cfg.ext_u) {
832             ext |= RVU;
833         }
834         if (cpu->cfg.ext_h) {
835             ext |= RVH;
836         }
837         if (cpu->cfg.ext_v) {
838             int vext_version = VEXT_VERSION_1_00_0;
839             ext |= RVV;
840             if (!is_power_of_2(cpu->cfg.vlen)) {
841                 error_setg(errp,
842                         "Vector extension VLEN must be power of 2");
843                 return;
844             }
845             if (cpu->cfg.vlen > RV_VLEN_MAX || cpu->cfg.vlen < 128) {
846                 error_setg(errp,
847                         "Vector extension implementation only supports VLEN "
848                         "in the range [128, %d]", RV_VLEN_MAX);
849                 return;
850             }
851             if (!is_power_of_2(cpu->cfg.elen)) {
852                 error_setg(errp,
853                         "Vector extension ELEN must be power of 2");
854                 return;
855             }
856             if (cpu->cfg.elen > 64 || cpu->cfg.elen < 8) {
857                 error_setg(errp,
858                         "Vector extension implementation only supports ELEN "
859                         "in the range [8, 64]");
860                 return;
861             }
862             if (cpu->cfg.vext_spec) {
863                 if (!g_strcmp0(cpu->cfg.vext_spec, "v1.0")) {
864                     vext_version = VEXT_VERSION_1_00_0;
865                 } else {
866                     error_setg(errp,
867                            "Unsupported vector spec version '%s'",
868                            cpu->cfg.vext_spec);
869                     return;
870                 }
871             } else {
872                 qemu_log("vector version is not specified, "
873                          "use the default value v1.0\n");
874             }
875             set_vext_version(env, vext_version);
876         }
877         if (cpu->cfg.ext_j) {
878             ext |= RVJ;
879         }
880 
881         set_misa(env, env->misa_mxl, ext);
882     }
883 
884     riscv_cpu_register_gdb_regs_for_features(cs);
885 
886     qemu_init_vcpu(cs);
887     cpu_reset(cs);
888 
889     mcc->parent_realize(dev, errp);
890 }
891 
892 #ifndef CONFIG_USER_ONLY
893 static void riscv_cpu_set_irq(void *opaque, int irq, int level)
894 {
895     RISCVCPU *cpu = RISCV_CPU(opaque);
896     CPURISCVState *env = &cpu->env;
897 
898     if (irq < IRQ_LOCAL_MAX) {
899         switch (irq) {
900         case IRQ_U_SOFT:
901         case IRQ_S_SOFT:
902         case IRQ_VS_SOFT:
903         case IRQ_M_SOFT:
904         case IRQ_U_TIMER:
905         case IRQ_S_TIMER:
906         case IRQ_VS_TIMER:
907         case IRQ_M_TIMER:
908         case IRQ_U_EXT:
909         case IRQ_VS_EXT:
910         case IRQ_M_EXT:
911             if (kvm_enabled()) {
912                 kvm_riscv_set_irq(cpu, irq, level);
913             } else {
914                 riscv_cpu_update_mip(cpu, 1 << irq, BOOL_TO_MASK(level));
915             }
916             break;
917         case IRQ_S_EXT:
918             if (kvm_enabled()) {
919                 kvm_riscv_set_irq(cpu, irq, level);
920             } else {
921                 env->external_seip = level;
922                 riscv_cpu_update_mip(cpu, 1 << irq,
923                                      BOOL_TO_MASK(level | env->software_seip));
924             }
925             break;
926         default:
927             g_assert_not_reached();
928         }
929     } else if (irq < (IRQ_LOCAL_MAX + IRQ_LOCAL_GUEST_MAX)) {
930         /* Require H-extension for handling guest local interrupts */
931         if (!riscv_has_ext(env, RVH)) {
932             g_assert_not_reached();
933         }
934 
935         /* Compute bit position in HGEIP CSR */
936         irq = irq - IRQ_LOCAL_MAX + 1;
937         if (env->geilen < irq) {
938             g_assert_not_reached();
939         }
940 
941         /* Update HGEIP CSR */
942         env->hgeip &= ~((target_ulong)1 << irq);
943         if (level) {
944             env->hgeip |= (target_ulong)1 << irq;
945         }
946 
947         /* Update mip.SGEIP bit */
948         riscv_cpu_update_mip(cpu, MIP_SGEIP,
949                              BOOL_TO_MASK(!!(env->hgeie & env->hgeip)));
950     } else {
951         g_assert_not_reached();
952     }
953 }
954 #endif /* CONFIG_USER_ONLY */
955 
956 static void riscv_cpu_init(Object *obj)
957 {
958     RISCVCPU *cpu = RISCV_CPU(obj);
959 
960     cpu->cfg.ext_ifencei = true;
961     cpu->cfg.ext_icsr = true;
962     cpu->cfg.mmu = true;
963     cpu->cfg.pmp = true;
964 
965     cpu_set_cpustate_pointers(cpu);
966 
967 #ifndef CONFIG_USER_ONLY
968     qdev_init_gpio_in(DEVICE(cpu), riscv_cpu_set_irq,
969                       IRQ_LOCAL_MAX + IRQ_LOCAL_GUEST_MAX);
970 #endif /* CONFIG_USER_ONLY */
971 }
972 
973 static Property riscv_cpu_extensions[] = {
974     /* Defaults for standard extensions */
975     DEFINE_PROP_BOOL("i", RISCVCPU, cfg.ext_i, true),
976     DEFINE_PROP_BOOL("e", RISCVCPU, cfg.ext_e, false),
977     DEFINE_PROP_BOOL("g", RISCVCPU, cfg.ext_g, false),
978     DEFINE_PROP_BOOL("m", RISCVCPU, cfg.ext_m, true),
979     DEFINE_PROP_BOOL("a", RISCVCPU, cfg.ext_a, true),
980     DEFINE_PROP_BOOL("f", RISCVCPU, cfg.ext_f, true),
981     DEFINE_PROP_BOOL("d", RISCVCPU, cfg.ext_d, true),
982     DEFINE_PROP_BOOL("c", RISCVCPU, cfg.ext_c, true),
983     DEFINE_PROP_BOOL("s", RISCVCPU, cfg.ext_s, true),
984     DEFINE_PROP_BOOL("u", RISCVCPU, cfg.ext_u, true),
985     DEFINE_PROP_BOOL("v", RISCVCPU, cfg.ext_v, false),
986     DEFINE_PROP_BOOL("h", RISCVCPU, cfg.ext_h, true),
987     DEFINE_PROP_UINT8("pmu-num", RISCVCPU, cfg.pmu_num, 16),
988     DEFINE_PROP_BOOL("Zifencei", RISCVCPU, cfg.ext_ifencei, true),
989     DEFINE_PROP_BOOL("Zicsr", RISCVCPU, cfg.ext_icsr, true),
990     DEFINE_PROP_BOOL("Zfh", RISCVCPU, cfg.ext_zfh, false),
991     DEFINE_PROP_BOOL("Zfhmin", RISCVCPU, cfg.ext_zfhmin, false),
992     DEFINE_PROP_BOOL("Zve32f", RISCVCPU, cfg.ext_zve32f, false),
993     DEFINE_PROP_BOOL("Zve64f", RISCVCPU, cfg.ext_zve64f, false),
994     DEFINE_PROP_BOOL("mmu", RISCVCPU, cfg.mmu, true),
995     DEFINE_PROP_BOOL("pmp", RISCVCPU, cfg.pmp, true),
996 
997     DEFINE_PROP_STRING("priv_spec", RISCVCPU, cfg.priv_spec),
998     DEFINE_PROP_STRING("vext_spec", RISCVCPU, cfg.vext_spec),
999     DEFINE_PROP_UINT16("vlen", RISCVCPU, cfg.vlen, 128),
1000     DEFINE_PROP_UINT16("elen", RISCVCPU, cfg.elen, 64),
1001 
1002     DEFINE_PROP_BOOL("svinval", RISCVCPU, cfg.ext_svinval, false),
1003     DEFINE_PROP_BOOL("svnapot", RISCVCPU, cfg.ext_svnapot, false),
1004     DEFINE_PROP_BOOL("svpbmt", RISCVCPU, cfg.ext_svpbmt, false),
1005 
1006     DEFINE_PROP_BOOL("zba", RISCVCPU, cfg.ext_zba, true),
1007     DEFINE_PROP_BOOL("zbb", RISCVCPU, cfg.ext_zbb, true),
1008     DEFINE_PROP_BOOL("zbc", RISCVCPU, cfg.ext_zbc, true),
1009     DEFINE_PROP_BOOL("zbkb", RISCVCPU, cfg.ext_zbkb, false),
1010     DEFINE_PROP_BOOL("zbkc", RISCVCPU, cfg.ext_zbkc, false),
1011     DEFINE_PROP_BOOL("zbkx", RISCVCPU, cfg.ext_zbkx, false),
1012     DEFINE_PROP_BOOL("zbs", RISCVCPU, cfg.ext_zbs, true),
1013     DEFINE_PROP_BOOL("zk", RISCVCPU, cfg.ext_zk, false),
1014     DEFINE_PROP_BOOL("zkn", RISCVCPU, cfg.ext_zkn, false),
1015     DEFINE_PROP_BOOL("zknd", RISCVCPU, cfg.ext_zknd, false),
1016     DEFINE_PROP_BOOL("zkne", RISCVCPU, cfg.ext_zkne, false),
1017     DEFINE_PROP_BOOL("zknh", RISCVCPU, cfg.ext_zknh, false),
1018     DEFINE_PROP_BOOL("zkr", RISCVCPU, cfg.ext_zkr, false),
1019     DEFINE_PROP_BOOL("zks", RISCVCPU, cfg.ext_zks, false),
1020     DEFINE_PROP_BOOL("zksed", RISCVCPU, cfg.ext_zksed, false),
1021     DEFINE_PROP_BOOL("zksh", RISCVCPU, cfg.ext_zksh, false),
1022     DEFINE_PROP_BOOL("zkt", RISCVCPU, cfg.ext_zkt, false),
1023 
1024     DEFINE_PROP_BOOL("zdinx", RISCVCPU, cfg.ext_zdinx, false),
1025     DEFINE_PROP_BOOL("zfinx", RISCVCPU, cfg.ext_zfinx, false),
1026     DEFINE_PROP_BOOL("zhinx", RISCVCPU, cfg.ext_zhinx, false),
1027     DEFINE_PROP_BOOL("zhinxmin", RISCVCPU, cfg.ext_zhinxmin, false),
1028 
1029     DEFINE_PROP_BOOL("zmmul", RISCVCPU, cfg.ext_zmmul, false),
1030 
1031     /* Vendor-specific custom extensions */
1032     DEFINE_PROP_BOOL("xventanacondops", RISCVCPU, cfg.ext_XVentanaCondOps, false),
1033 
1034     /* These are experimental so mark with 'x-' */
1035     DEFINE_PROP_BOOL("x-j", RISCVCPU, cfg.ext_j, false),
1036     /* ePMP 0.9.3 */
1037     DEFINE_PROP_BOOL("x-epmp", RISCVCPU, cfg.epmp, false),
1038     DEFINE_PROP_BOOL("x-aia", RISCVCPU, cfg.aia, false),
1039 
1040     DEFINE_PROP_END_OF_LIST(),
1041 };
1042 
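/*
 * Expose every entry in riscv_cpu_extensions as a writable qdev property so
 * that extensions can be toggled from the command line, e.g.
 * "-cpu rv64,zba=false" (illustrative).
 */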
1043 static void register_cpu_props(DeviceState *dev)
1044 {
1045     Property *prop;
1046 
1047     for (prop = riscv_cpu_extensions; prop && prop->name; prop++) {
1048         qdev_property_add_static(dev, prop);
1049     }
1050 }
1051 
1052 static Property riscv_cpu_properties[] = {
1053     DEFINE_PROP_BOOL("debug", RISCVCPU, cfg.debug, true),
1054 
1055     DEFINE_PROP_UINT32("mvendorid", RISCVCPU, cfg.mvendorid, 0),
1056     DEFINE_PROP_UINT64("marchid", RISCVCPU, cfg.marchid, RISCV_CPU_MARCHID),
1057     DEFINE_PROP_UINT64("mimpid", RISCVCPU, cfg.mimpid, RISCV_CPU_MIMPID),
1058 
1059     DEFINE_PROP_UINT64("resetvec", RISCVCPU, cfg.resetvec, DEFAULT_RSTVEC),
1060 
1061     DEFINE_PROP_BOOL("short-isa-string", RISCVCPU, cfg.short_isa_string, false),
1062 
1063     DEFINE_PROP_BOOL("rvv_ta_all_1s", RISCVCPU, cfg.rvv_ta_all_1s, false),
1064     DEFINE_PROP_END_OF_LIST(),
1065 };
1066 
1067 static gchar *riscv_gdb_arch_name(CPUState *cs)
1068 {
1069     RISCVCPU *cpu = RISCV_CPU(cs);
1070     CPURISCVState *env = &cpu->env;
1071 
1072     switch (riscv_cpu_mxl(env)) {
1073     case MXL_RV32:
1074         return g_strdup("riscv:rv32");
1075     case MXL_RV64:
1076     case MXL_RV128:
1077         return g_strdup("riscv:rv64");
1078     default:
1079         g_assert_not_reached();
1080     }
1081 }
1082 
1083 static const char *riscv_gdb_get_dynamic_xml(CPUState *cs, const char *xmlname)
1084 {
1085     RISCVCPU *cpu = RISCV_CPU(cs);
1086 
1087     if (strcmp(xmlname, "riscv-csr.xml") == 0) {
1088         return cpu->dyn_csr_xml;
1089     } else if (strcmp(xmlname, "riscv-vector.xml") == 0) {
1090         return cpu->dyn_vreg_xml;
1091     }
1092 
1093     return NULL;
1094 }
1095 
1096 #ifndef CONFIG_USER_ONLY
1097 #include "hw/core/sysemu-cpu-ops.h"
1098 
1099 static const struct SysemuCPUOps riscv_sysemu_ops = {
1100     .get_phys_page_debug = riscv_cpu_get_phys_page_debug,
1101     .write_elf64_note = riscv_cpu_write_elf64_note,
1102     .write_elf32_note = riscv_cpu_write_elf32_note,
1103     .legacy_vmsd = &vmstate_riscv_cpu,
1104 };
1105 #endif
1106 
1107 #include "hw/core/tcg-cpu-ops.h"
1108 
1109 static const struct TCGCPUOps riscv_tcg_ops = {
1110     .initialize = riscv_translate_init,
1111     .synchronize_from_tb = riscv_cpu_synchronize_from_tb,
1112 
1113 #ifndef CONFIG_USER_ONLY
1114     .tlb_fill = riscv_cpu_tlb_fill,
1115     .cpu_exec_interrupt = riscv_cpu_exec_interrupt,
1116     .do_interrupt = riscv_cpu_do_interrupt,
1117     .do_transaction_failed = riscv_cpu_do_transaction_failed,
1118     .do_unaligned_access = riscv_cpu_do_unaligned_access,
1119     .debug_excp_handler = riscv_cpu_debug_excp_handler,
1120     .debug_check_breakpoint = riscv_cpu_debug_check_breakpoint,
1121     .debug_check_watchpoint = riscv_cpu_debug_check_watchpoint,
1122 #endif /* !CONFIG_USER_ONLY */
1123 };
1124 
1125 static void riscv_cpu_class_init(ObjectClass *c, void *data)
1126 {
1127     RISCVCPUClass *mcc = RISCV_CPU_CLASS(c);
1128     CPUClass *cc = CPU_CLASS(c);
1129     DeviceClass *dc = DEVICE_CLASS(c);
1130 
1131     device_class_set_parent_realize(dc, riscv_cpu_realize,
1132                                     &mcc->parent_realize);
1133 
1134     device_class_set_parent_reset(dc, riscv_cpu_reset, &mcc->parent_reset);
1135 
1136     cc->class_by_name = riscv_cpu_class_by_name;
1137     cc->has_work = riscv_cpu_has_work;
1138     cc->dump_state = riscv_cpu_dump_state;
1139     cc->set_pc = riscv_cpu_set_pc;
1140     cc->gdb_read_register = riscv_cpu_gdb_read_register;
1141     cc->gdb_write_register = riscv_cpu_gdb_write_register;
1142     cc->gdb_num_core_regs = 33;
1143     cc->gdb_stop_before_watchpoint = true;
1144     cc->disas_set_info = riscv_cpu_disas_set_info;
1145 #ifndef CONFIG_USER_ONLY
1146     cc->sysemu_ops = &riscv_sysemu_ops;
1147 #endif
1148     cc->gdb_arch_name = riscv_gdb_arch_name;
1149     cc->gdb_get_dynamic_xml = riscv_gdb_get_dynamic_xml;
1150     cc->tcg_ops = &riscv_tcg_ops;
1151 
1152     device_class_set_props(dc, riscv_cpu_properties);
1153 }
1154 
1155 static void riscv_isa_string_ext(RISCVCPU *cpu, char **isa_str, int max_str_len)
1156 {
1157     char *old = *isa_str;
1158     char *new = *isa_str;
1159     int i;
1160 
1161     for (i = 0; i < ARRAY_SIZE(isa_edata_arr); i++) {
1162         if (isa_edata_arr[i].multi_letter &&
1163             isa_ext_is_enabled(cpu, &isa_edata_arr[i])) {
1164             new = g_strconcat(old, "_", isa_edata_arr[i].name, NULL);
1165             g_free(old);
1166             old = new;
1167         }
1168     }
1169 
1170     *isa_str = new;
1171 }
1172 
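/*
 * Example output (assuming only the listed extensions are enabled): an RV64
 * CPU with I, M, A, F, D and C plus Zicsr and Zifencei produces
 * "rv64imafdc_zicsr_zifencei".
 */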
1173 char *riscv_isa_string(RISCVCPU *cpu)
1174 {
1175     int i;
1176     const size_t maxlen = sizeof("rv128") + sizeof(riscv_single_letter_exts);
1177     char *isa_str = g_new(char, maxlen);
1178     char *p = isa_str + snprintf(isa_str, maxlen, "rv%d", TARGET_LONG_BITS);
1179     for (i = 0; i < sizeof(riscv_single_letter_exts) - 1; i++) {
1180         if (cpu->env.misa_ext & RV(riscv_single_letter_exts[i])) {
1181             *p++ = qemu_tolower(riscv_single_letter_exts[i]);
1182         }
1183     }
1184     *p = '\0';
1185     if (!cpu->cfg.short_isa_string) {
1186         riscv_isa_string_ext(cpu, &isa_str, maxlen);
1187     }
1188     return isa_str;
1189 }
1190 
1191 static gint riscv_cpu_list_compare(gconstpointer a, gconstpointer b)
1192 {
1193     ObjectClass *class_a = (ObjectClass *)a;
1194     ObjectClass *class_b = (ObjectClass *)b;
1195     const char *name_a, *name_b;
1196 
1197     name_a = object_class_get_name(class_a);
1198     name_b = object_class_get_name(class_b);
1199     return strcmp(name_a, name_b);
1200 }
1201 
1202 static void riscv_cpu_list_entry(gpointer data, gpointer user_data)
1203 {
1204     const char *typename = object_class_get_name(OBJECT_CLASS(data));
1205     int len = strlen(typename) - strlen(RISCV_CPU_TYPE_SUFFIX);
1206 
1207     qemu_printf("%.*s\n", len, typename);
1208 }
1209 
1210 void riscv_cpu_list(void)
1211 {
1212     GSList *list;
1213 
1214     list = object_class_get_list(TYPE_RISCV_CPU, false);
1215     list = g_slist_sort(list, riscv_cpu_list_compare);
1216     g_slist_foreach(list, riscv_cpu_list_entry, NULL);
1217     g_slist_free(list);
1218 }
1219 
1220 #define DEFINE_CPU(type_name, initfn)      \
1221     {                                      \
1222         .name = type_name,                 \
1223         .parent = TYPE_RISCV_CPU,          \
1224         .instance_init = initfn            \
1225     }
1226 
1227 static const TypeInfo riscv_cpu_type_infos[] = {
1228     {
1229         .name = TYPE_RISCV_CPU,
1230         .parent = TYPE_CPU,
1231         .instance_size = sizeof(RISCVCPU),
1232         .instance_align = __alignof__(RISCVCPU),
1233         .instance_init = riscv_cpu_init,
1234         .abstract = true,
1235         .class_size = sizeof(RISCVCPUClass),
1236         .class_init = riscv_cpu_class_init,
1237     },
1238     DEFINE_CPU(TYPE_RISCV_CPU_ANY,              riscv_any_cpu_init),
1239 #if defined(CONFIG_KVM)
1240     DEFINE_CPU(TYPE_RISCV_CPU_HOST,             riscv_host_cpu_init),
1241 #endif
1242 #if defined(TARGET_RISCV32)
1243     DEFINE_CPU(TYPE_RISCV_CPU_BASE32,           rv32_base_cpu_init),
1244     DEFINE_CPU(TYPE_RISCV_CPU_IBEX,             rv32_ibex_cpu_init),
1245     DEFINE_CPU(TYPE_RISCV_CPU_SIFIVE_E31,       rv32_sifive_e_cpu_init),
1246     DEFINE_CPU(TYPE_RISCV_CPU_SIFIVE_E34,       rv32_imafcu_nommu_cpu_init),
1247     DEFINE_CPU(TYPE_RISCV_CPU_SIFIVE_U34,       rv32_sifive_u_cpu_init),
1248 #elif defined(TARGET_RISCV64)
1249     DEFINE_CPU(TYPE_RISCV_CPU_BASE64,           rv64_base_cpu_init),
1250     DEFINE_CPU(TYPE_RISCV_CPU_SIFIVE_E51,       rv64_sifive_e_cpu_init),
1251     DEFINE_CPU(TYPE_RISCV_CPU_SIFIVE_U54,       rv64_sifive_u_cpu_init),
1252     DEFINE_CPU(TYPE_RISCV_CPU_SHAKTI_C,         rv64_sifive_u_cpu_init),
1253     DEFINE_CPU(TYPE_RISCV_CPU_BASE128,          rv128_base_cpu_init),
1254 #endif
1255 };
1256 
1257 DEFINE_TYPES(riscv_cpu_type_infos)
1258