xref: /openbmc/qemu/target/riscv/cpu.c (revision 2317ba9f)
1 /*
2  * QEMU RISC-V CPU
3  *
4  * Copyright (c) 2016-2017 Sagar Karandikar, sagark@eecs.berkeley.edu
5  * Copyright (c) 2017-2018 SiFive, Inc.
6  *
7  * This program is free software; you can redistribute it and/or modify it
8  * under the terms and conditions of the GNU General Public License,
9  * version 2 or later, as published by the Free Software Foundation.
10  *
11  * This program is distributed in the hope it will be useful, but WITHOUT
12  * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
13  * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
14  * more details.
15  *
16  * You should have received a copy of the GNU General Public License along with
17  * this program.  If not, see <http://www.gnu.org/licenses/>.
18  */
19 
20 #include "qemu/osdep.h"
21 #include "qemu/qemu-print.h"
22 #include "qemu/ctype.h"
23 #include "qemu/log.h"
24 #include "cpu.h"
25 #include "cpu_vendorid.h"
26 #include "internals.h"
27 #include "exec/exec-all.h"
28 #include "qapi/error.h"
29 #include "qapi/visitor.h"
30 #include "qemu/error-report.h"
31 #include "hw/qdev-properties.h"
32 #include "migration/vmstate.h"
33 #include "fpu/softfloat-helpers.h"
34 #include "sysemu/kvm.h"
35 #include "sysemu/tcg.h"
36 #include "kvm/kvm_riscv.h"
37 #include "tcg/tcg-cpu.h"
38 #include "tcg/tcg.h"
39 
40 /* RISC-V CPU definitions */
41 static const char riscv_single_letter_exts[] = "IEMAFDQCBPVH";
42 const uint32_t misa_bits[] = {RVI, RVE, RVM, RVA, RVF, RVD, RVV,
43                               RVC, RVS, RVU, RVH, RVJ, RVG, RVB, 0};
44 
45 /*
46  * From vector_helper.c
47  * Note that vector data is stored in host-endian 64-bit chunks,
48  * so addressing bytes needs a host-endian fixup.
49  */
50 #if HOST_BIG_ENDIAN
51 #define BYTE(x)   ((x) ^ 7)
52 #else
53 #define BYTE(x)   (x)
54 #endif
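
/*
 * Illustration (a sketch, not used by the code): on a big-endian host
 * the low-numbered bytes of an element live at the far end of their
 * 64-bit chunk, so
 *
 *     BYTE(0) == 7, BYTE(1) == 6, ..., BYTE(7) == 0
 *
 * while on a little-endian host BYTE(j) == j and no fixup is needed.
 * riscv_cpu_dump_state() below relies on this when printing vector
 * registers byte by byte.
 */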
55 
56 bool riscv_cpu_is_32bit(RISCVCPU *cpu)
57 {
58     return riscv_cpu_mxl(&cpu->env) == MXL_RV32;
59 }
60 
61 #define ISA_EXT_DATA_ENTRY(_name, _min_ver, _prop) \
62     {#_name, _min_ver, CPU_CFG_OFFSET(_prop)}
63 
64 /*
65  * Here are the ordering rules of extension naming defined by the RISC-V
66  * specification:
67  * 1. All extensions should be separated from other multi-letter extensions
68  *    by an underscore.
69  * 2. The first letter following the 'Z' conventionally indicates the most
70  *    closely related alphabetical extension category, IMAFDQLCBKJTPVH.
71  *    If multiple 'Z' extensions are named, they should be ordered first
72  *    by category, then alphabetically within a category.
73  * 3. Standard supervisor-level extensions (starting with 'S') should be
74  *    listed after standard unprivileged extensions.  If multiple
75  *    supervisor-level extensions are listed, they should be ordered
76  *    alphabetically.
77  * 4. Non-standard extensions (starting with 'X') must be listed after all
78  *    standard extensions. They must be separated from other multi-letter
79  *    extensions by an underscore.
80  *
81  * Single letter extensions are checked in riscv_cpu_validate_misa_priv()
82  * instead.
83  */
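
/*
 * Illustrative example (a hypothetical configuration, not a CPU model
 * shipped by QEMU): applying the rules above, a riscv,isa string would
 * be ordered as
 *
 *     rv64imafdc_zicsr_zifencei_zba_zbb_zbs_sstc_svinval_xtheadba
 *
 * i.e. single-letter extensions first, then 'Z' extensions grouped by
 * category (following IMAFDQLCBKJTPVH) and alphabetically within a
 * category, then 'S' extensions, then vendor 'X' extensions, all
 * separated by underscores.
 */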
84 const RISCVIsaExtData isa_edata_arr[] = {
85     ISA_EXT_DATA_ENTRY(zicbom, PRIV_VERSION_1_12_0, ext_zicbom),
86     ISA_EXT_DATA_ENTRY(zicbop, PRIV_VERSION_1_12_0, ext_zicbop),
87     ISA_EXT_DATA_ENTRY(zicboz, PRIV_VERSION_1_12_0, ext_zicboz),
88     ISA_EXT_DATA_ENTRY(zicond, PRIV_VERSION_1_12_0, ext_zicond),
89     ISA_EXT_DATA_ENTRY(zicntr, PRIV_VERSION_1_12_0, ext_zicntr),
90     ISA_EXT_DATA_ENTRY(zicsr, PRIV_VERSION_1_10_0, ext_zicsr),
91     ISA_EXT_DATA_ENTRY(zifencei, PRIV_VERSION_1_10_0, ext_zifencei),
92     ISA_EXT_DATA_ENTRY(zihintntl, PRIV_VERSION_1_10_0, ext_zihintntl),
93     ISA_EXT_DATA_ENTRY(zihintpause, PRIV_VERSION_1_10_0, ext_zihintpause),
94     ISA_EXT_DATA_ENTRY(zihpm, PRIV_VERSION_1_12_0, ext_zihpm),
95     ISA_EXT_DATA_ENTRY(zmmul, PRIV_VERSION_1_12_0, ext_zmmul),
96     ISA_EXT_DATA_ENTRY(zacas, PRIV_VERSION_1_12_0, ext_zacas),
97     ISA_EXT_DATA_ENTRY(zawrs, PRIV_VERSION_1_12_0, ext_zawrs),
98     ISA_EXT_DATA_ENTRY(zfa, PRIV_VERSION_1_12_0, ext_zfa),
99     ISA_EXT_DATA_ENTRY(zfbfmin, PRIV_VERSION_1_12_0, ext_zfbfmin),
100     ISA_EXT_DATA_ENTRY(zfh, PRIV_VERSION_1_11_0, ext_zfh),
101     ISA_EXT_DATA_ENTRY(zfhmin, PRIV_VERSION_1_11_0, ext_zfhmin),
102     ISA_EXT_DATA_ENTRY(zfinx, PRIV_VERSION_1_12_0, ext_zfinx),
103     ISA_EXT_DATA_ENTRY(zdinx, PRIV_VERSION_1_12_0, ext_zdinx),
104     ISA_EXT_DATA_ENTRY(zca, PRIV_VERSION_1_12_0, ext_zca),
105     ISA_EXT_DATA_ENTRY(zcb, PRIV_VERSION_1_12_0, ext_zcb),
106     ISA_EXT_DATA_ENTRY(zcf, PRIV_VERSION_1_12_0, ext_zcf),
107     ISA_EXT_DATA_ENTRY(zcd, PRIV_VERSION_1_12_0, ext_zcd),
108     ISA_EXT_DATA_ENTRY(zce, PRIV_VERSION_1_12_0, ext_zce),
109     ISA_EXT_DATA_ENTRY(zcmp, PRIV_VERSION_1_12_0, ext_zcmp),
110     ISA_EXT_DATA_ENTRY(zcmt, PRIV_VERSION_1_12_0, ext_zcmt),
111     ISA_EXT_DATA_ENTRY(zba, PRIV_VERSION_1_12_0, ext_zba),
112     ISA_EXT_DATA_ENTRY(zbb, PRIV_VERSION_1_12_0, ext_zbb),
113     ISA_EXT_DATA_ENTRY(zbc, PRIV_VERSION_1_12_0, ext_zbc),
114     ISA_EXT_DATA_ENTRY(zbkb, PRIV_VERSION_1_12_0, ext_zbkb),
115     ISA_EXT_DATA_ENTRY(zbkc, PRIV_VERSION_1_12_0, ext_zbkc),
116     ISA_EXT_DATA_ENTRY(zbkx, PRIV_VERSION_1_12_0, ext_zbkx),
117     ISA_EXT_DATA_ENTRY(zbs, PRIV_VERSION_1_12_0, ext_zbs),
118     ISA_EXT_DATA_ENTRY(zk, PRIV_VERSION_1_12_0, ext_zk),
119     ISA_EXT_DATA_ENTRY(zkn, PRIV_VERSION_1_12_0, ext_zkn),
120     ISA_EXT_DATA_ENTRY(zknd, PRIV_VERSION_1_12_0, ext_zknd),
121     ISA_EXT_DATA_ENTRY(zkne, PRIV_VERSION_1_12_0, ext_zkne),
122     ISA_EXT_DATA_ENTRY(zknh, PRIV_VERSION_1_12_0, ext_zknh),
123     ISA_EXT_DATA_ENTRY(zkr, PRIV_VERSION_1_12_0, ext_zkr),
124     ISA_EXT_DATA_ENTRY(zks, PRIV_VERSION_1_12_0, ext_zks),
125     ISA_EXT_DATA_ENTRY(zksed, PRIV_VERSION_1_12_0, ext_zksed),
126     ISA_EXT_DATA_ENTRY(zksh, PRIV_VERSION_1_12_0, ext_zksh),
127     ISA_EXT_DATA_ENTRY(zkt, PRIV_VERSION_1_12_0, ext_zkt),
128     ISA_EXT_DATA_ENTRY(zvbb, PRIV_VERSION_1_12_0, ext_zvbb),
129     ISA_EXT_DATA_ENTRY(zvbc, PRIV_VERSION_1_12_0, ext_zvbc),
130     ISA_EXT_DATA_ENTRY(zve32f, PRIV_VERSION_1_10_0, ext_zve32f),
131     ISA_EXT_DATA_ENTRY(zve64f, PRIV_VERSION_1_10_0, ext_zve64f),
132     ISA_EXT_DATA_ENTRY(zve64d, PRIV_VERSION_1_10_0, ext_zve64d),
133     ISA_EXT_DATA_ENTRY(zvfbfmin, PRIV_VERSION_1_12_0, ext_zvfbfmin),
134     ISA_EXT_DATA_ENTRY(zvfbfwma, PRIV_VERSION_1_12_0, ext_zvfbfwma),
135     ISA_EXT_DATA_ENTRY(zvfh, PRIV_VERSION_1_12_0, ext_zvfh),
136     ISA_EXT_DATA_ENTRY(zvfhmin, PRIV_VERSION_1_12_0, ext_zvfhmin),
137     ISA_EXT_DATA_ENTRY(zvkb, PRIV_VERSION_1_12_0, ext_zvkb),
138     ISA_EXT_DATA_ENTRY(zvkg, PRIV_VERSION_1_12_0, ext_zvkg),
139     ISA_EXT_DATA_ENTRY(zvkn, PRIV_VERSION_1_12_0, ext_zvkn),
140     ISA_EXT_DATA_ENTRY(zvknc, PRIV_VERSION_1_12_0, ext_zvknc),
141     ISA_EXT_DATA_ENTRY(zvkned, PRIV_VERSION_1_12_0, ext_zvkned),
142     ISA_EXT_DATA_ENTRY(zvkng, PRIV_VERSION_1_12_0, ext_zvkng),
143     ISA_EXT_DATA_ENTRY(zvknha, PRIV_VERSION_1_12_0, ext_zvknha),
144     ISA_EXT_DATA_ENTRY(zvknhb, PRIV_VERSION_1_12_0, ext_zvknhb),
145     ISA_EXT_DATA_ENTRY(zvks, PRIV_VERSION_1_12_0, ext_zvks),
146     ISA_EXT_DATA_ENTRY(zvksc, PRIV_VERSION_1_12_0, ext_zvksc),
147     ISA_EXT_DATA_ENTRY(zvksed, PRIV_VERSION_1_12_0, ext_zvksed),
148     ISA_EXT_DATA_ENTRY(zvksg, PRIV_VERSION_1_12_0, ext_zvksg),
149     ISA_EXT_DATA_ENTRY(zvksh, PRIV_VERSION_1_12_0, ext_zvksh),
150     ISA_EXT_DATA_ENTRY(zvkt, PRIV_VERSION_1_12_0, ext_zvkt),
151     ISA_EXT_DATA_ENTRY(zhinx, PRIV_VERSION_1_12_0, ext_zhinx),
152     ISA_EXT_DATA_ENTRY(zhinxmin, PRIV_VERSION_1_12_0, ext_zhinxmin),
153     ISA_EXT_DATA_ENTRY(smaia, PRIV_VERSION_1_12_0, ext_smaia),
154     ISA_EXT_DATA_ENTRY(smepmp, PRIV_VERSION_1_12_0, ext_smepmp),
155     ISA_EXT_DATA_ENTRY(smstateen, PRIV_VERSION_1_12_0, ext_smstateen),
156     ISA_EXT_DATA_ENTRY(ssaia, PRIV_VERSION_1_12_0, ext_ssaia),
157     ISA_EXT_DATA_ENTRY(sscofpmf, PRIV_VERSION_1_12_0, ext_sscofpmf),
158     ISA_EXT_DATA_ENTRY(sstc, PRIV_VERSION_1_12_0, ext_sstc),
159     ISA_EXT_DATA_ENTRY(svadu, PRIV_VERSION_1_12_0, ext_svadu),
160     ISA_EXT_DATA_ENTRY(svinval, PRIV_VERSION_1_12_0, ext_svinval),
161     ISA_EXT_DATA_ENTRY(svnapot, PRIV_VERSION_1_12_0, ext_svnapot),
162     ISA_EXT_DATA_ENTRY(svpbmt, PRIV_VERSION_1_12_0, ext_svpbmt),
163     ISA_EXT_DATA_ENTRY(xtheadba, PRIV_VERSION_1_11_0, ext_xtheadba),
164     ISA_EXT_DATA_ENTRY(xtheadbb, PRIV_VERSION_1_11_0, ext_xtheadbb),
165     ISA_EXT_DATA_ENTRY(xtheadbs, PRIV_VERSION_1_11_0, ext_xtheadbs),
166     ISA_EXT_DATA_ENTRY(xtheadcmo, PRIV_VERSION_1_11_0, ext_xtheadcmo),
167     ISA_EXT_DATA_ENTRY(xtheadcondmov, PRIV_VERSION_1_11_0, ext_xtheadcondmov),
168     ISA_EXT_DATA_ENTRY(xtheadfmemidx, PRIV_VERSION_1_11_0, ext_xtheadfmemidx),
169     ISA_EXT_DATA_ENTRY(xtheadfmv, PRIV_VERSION_1_11_0, ext_xtheadfmv),
170     ISA_EXT_DATA_ENTRY(xtheadmac, PRIV_VERSION_1_11_0, ext_xtheadmac),
171     ISA_EXT_DATA_ENTRY(xtheadmemidx, PRIV_VERSION_1_11_0, ext_xtheadmemidx),
172     ISA_EXT_DATA_ENTRY(xtheadmempair, PRIV_VERSION_1_11_0, ext_xtheadmempair),
173     ISA_EXT_DATA_ENTRY(xtheadsync, PRIV_VERSION_1_11_0, ext_xtheadsync),
174     ISA_EXT_DATA_ENTRY(xventanacondops, PRIV_VERSION_1_12_0, ext_XVentanaCondOps),
175 
176     DEFINE_PROP_END_OF_LIST(),
177 };
178 
179 bool isa_ext_is_enabled(RISCVCPU *cpu, uint32_t ext_offset)
180 {
181     bool *ext_enabled = (void *)&cpu->cfg + ext_offset;
182 
183     return *ext_enabled;
184 }
185 
186 void isa_ext_update_enabled(RISCVCPU *cpu, uint32_t ext_offset, bool en)
187 {
188     bool *ext_enabled = (void *)&cpu->cfg + ext_offset;
189 
190     *ext_enabled = en;
191 }
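
/*
 * Usage sketch (illustrative only): the two helpers above treat
 * cpu->cfg as raw bytes and index it with the offsetof()-style value
 * stored in each table entry, e.g.
 *
 *     uint32_t off = CPU_CFG_OFFSET(ext_zicsr);
 *
 *     isa_ext_update_enabled(cpu, off, true);
 *     g_assert(isa_ext_is_enabled(cpu, off) == cpu->cfg.ext_zicsr);
 *
 * so a single table such as isa_edata_arr[] can toggle any bool member
 * of RISCVCPUConfig without per-extension accessors.
 */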
192 
193 const char * const riscv_int_regnames[] = {
194     "x0/zero", "x1/ra",  "x2/sp",  "x3/gp",  "x4/tp",  "x5/t0",   "x6/t1",
195     "x7/t2",   "x8/s0",  "x9/s1",  "x10/a0", "x11/a1", "x12/a2",  "x13/a3",
196     "x14/a4",  "x15/a5", "x16/a6", "x17/a7", "x18/s2", "x19/s3",  "x20/s4",
197     "x21/s5",  "x22/s6", "x23/s7", "x24/s8", "x25/s9", "x26/s10", "x27/s11",
198     "x28/t3",  "x29/t4", "x30/t5", "x31/t6"
199 };
200 
201 const char * const riscv_int_regnamesh[] = {
202     "x0h/zeroh", "x1h/rah",  "x2h/sph",   "x3h/gph",   "x4h/tph",  "x5h/t0h",
203     "x6h/t1h",   "x7h/t2h",  "x8h/s0h",   "x9h/s1h",   "x10h/a0h", "x11h/a1h",
204     "x12h/a2h",  "x13h/a3h", "x14h/a4h",  "x15h/a5h",  "x16h/a6h", "x17h/a7h",
205     "x18h/s2h",  "x19h/s3h", "x20h/s4h",  "x21h/s5h",  "x22h/s6h", "x23h/s7h",
206     "x24h/s8h",  "x25h/s9h", "x26h/s10h", "x27h/s11h", "x28h/t3h", "x29h/t4h",
207     "x30h/t5h",  "x31h/t6h"
208 };
209 
210 const char * const riscv_fpr_regnames[] = {
211     "f0/ft0",   "f1/ft1",  "f2/ft2",   "f3/ft3",   "f4/ft4",  "f5/ft5",
212     "f6/ft6",   "f7/ft7",  "f8/fs0",   "f9/fs1",   "f10/fa0", "f11/fa1",
213     "f12/fa2",  "f13/fa3", "f14/fa4",  "f15/fa5",  "f16/fa6", "f17/fa7",
214     "f18/fs2",  "f19/fs3", "f20/fs4",  "f21/fs5",  "f22/fs6", "f23/fs7",
215     "f24/fs8",  "f25/fs9", "f26/fs10", "f27/fs11", "f28/ft8", "f29/ft9",
216     "f30/ft10", "f31/ft11"
217 };
218 
219 const char * const riscv_rvv_regnames[] = {
220   "v0",  "v1",  "v2",  "v3",  "v4",  "v5",  "v6",
221   "v7",  "v8",  "v9",  "v10", "v11", "v12", "v13",
222   "v14", "v15", "v16", "v17", "v18", "v19", "v20",
223   "v21", "v22", "v23", "v24", "v25", "v26", "v27",
224   "v28", "v29", "v30", "v31"
225 };
226 
227 static const char * const riscv_excp_names[] = {
228     "misaligned_fetch",
229     "fault_fetch",
230     "illegal_instruction",
231     "breakpoint",
232     "misaligned_load",
233     "fault_load",
234     "misaligned_store",
235     "fault_store",
236     "user_ecall",
237     "supervisor_ecall",
238     "hypervisor_ecall",
239     "machine_ecall",
240     "exec_page_fault",
241     "load_page_fault",
242     "reserved",
243     "store_page_fault",
244     "reserved",
245     "reserved",
246     "reserved",
247     "reserved",
248     "guest_exec_page_fault",
249     "guest_load_page_fault",
250     "reserved",
251     "guest_store_page_fault",
252 };
253 
254 static const char * const riscv_intr_names[] = {
255     "u_software",
256     "s_software",
257     "vs_software",
258     "m_software",
259     "u_timer",
260     "s_timer",
261     "vs_timer",
262     "m_timer",
263     "u_external",
264     "s_external",
265     "vs_external",
266     "m_external",
267     "reserved",
268     "reserved",
269     "reserved",
270     "reserved"
271 };
272 
273 const char *riscv_cpu_get_trap_name(target_ulong cause, bool async)
274 {
275     if (async) {
276         return (cause < ARRAY_SIZE(riscv_intr_names)) ?
277                riscv_intr_names[cause] : "(unknown)";
278     } else {
279         return (cause < ARRAY_SIZE(riscv_excp_names)) ?
280                riscv_excp_names[cause] : "(unknown)";
281     }
282 }
283 
284 void riscv_cpu_set_misa(CPURISCVState *env, RISCVMXL mxl, uint32_t ext)
285 {
286     env->misa_mxl_max = env->misa_mxl = mxl;
287     env->misa_ext_mask = env->misa_ext = ext;
288 }
289 
290 #ifndef CONFIG_USER_ONLY
291 static uint8_t satp_mode_from_str(const char *satp_mode_str)
292 {
293     if (!strncmp(satp_mode_str, "mbare", 5)) {
294         return VM_1_10_MBARE;
295     }
296 
297     if (!strncmp(satp_mode_str, "sv32", 4)) {
298         return VM_1_10_SV32;
299     }
300 
301     if (!strncmp(satp_mode_str, "sv39", 4)) {
302         return VM_1_10_SV39;
303     }
304 
305     if (!strncmp(satp_mode_str, "sv48", 4)) {
306         return VM_1_10_SV48;
307     }
308 
309     if (!strncmp(satp_mode_str, "sv57", 4)) {
310         return VM_1_10_SV57;
311     }
312 
313     if (!strncmp(satp_mode_str, "sv64", 4)) {
314         return VM_1_10_SV64;
315     }
316 
317     g_assert_not_reached();
318 }
319 
320 uint8_t satp_mode_max_from_map(uint32_t map)
321 {
322     /*
323      * 'map = 0' will make us return (31 - 32), which C will
324      * happily overflow to UINT_MAX. There's no good result to
325      * return if 'map = 0' (e.g. returning 0 will be ambiguous
326      * with the result for 'map = 1').
327      *
328      * Assert out if map = 0. Callers will have to deal with
329      * it outside of this function.
330      */
331     g_assert(map > 0);
332 
333     /* map here has at least one bit set, so no problem with clz */
334     return 31 - __builtin_clz(map);
335 }
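
/*
 * Example (a sketch, assuming the VM_1_10_* constants follow the satp
 * MODE encoding, i.e. SV48 > SV39 numerically): with both modes set,
 *
 *     uint32_t map = (1 << VM_1_10_SV39) | (1 << VM_1_10_SV48);
 *     g_assert(satp_mode_max_from_map(map) == VM_1_10_SV48);
 *
 * the highest set bit wins, which is also why a zero map has no
 * meaningful answer and is asserted against above.
 */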
336 
337 const char *satp_mode_str(uint8_t satp_mode, bool is_32_bit)
338 {
339     if (is_32_bit) {
340         switch (satp_mode) {
341         case VM_1_10_SV32:
342             return "sv32";
343         case VM_1_10_MBARE:
344             return "none";
345         }
346     } else {
347         switch (satp_mode) {
348         case VM_1_10_SV64:
349             return "sv64";
350         case VM_1_10_SV57:
351             return "sv57";
352         case VM_1_10_SV48:
353             return "sv48";
354         case VM_1_10_SV39:
355             return "sv39";
356         case VM_1_10_MBARE:
357             return "none";
358         }
359     }
360 
361     g_assert_not_reached();
362 }
363 
364 static void set_satp_mode_max_supported(RISCVCPU *cpu,
365                                         uint8_t satp_mode)
366 {
367     bool rv32 = riscv_cpu_mxl(&cpu->env) == MXL_RV32;
368     const bool *valid_vm = rv32 ? valid_vm_1_10_32 : valid_vm_1_10_64;
369 
370     for (int i = 0; i <= satp_mode; ++i) {
371         if (valid_vm[i]) {
372             cpu->cfg.satp_mode.supported |= (1 << i);
373         }
374     }
375 }
376 
377 /* Set the satp mode to the max supported */
378 static void set_satp_mode_default_map(RISCVCPU *cpu)
379 {
380     /*
381      * Bare CPUs do not default to the max available.
382      * Users must set a valid satp_mode in the command
383      * line.
384      */
385     if (object_dynamic_cast(OBJECT(cpu), TYPE_RISCV_BARE_CPU) != NULL) {
386         warn_report("No satp mode set. Defaulting to 'bare'");
387         cpu->cfg.satp_mode.map = (1 << VM_1_10_MBARE);
388         return;
389     }
390 
391     cpu->cfg.satp_mode.map = cpu->cfg.satp_mode.supported;
392 }
393 #endif
394 
395 static void riscv_any_cpu_init(Object *obj)
396 {
397     RISCVCPU *cpu = RISCV_CPU(obj);
398     CPURISCVState *env = &cpu->env;
399 #if defined(TARGET_RISCV32)
400     riscv_cpu_set_misa(env, MXL_RV32, RVI | RVM | RVA | RVF | RVD | RVC | RVU);
401 #elif defined(TARGET_RISCV64)
402     riscv_cpu_set_misa(env, MXL_RV64, RVI | RVM | RVA | RVF | RVD | RVC | RVU);
403 #endif
404 
405 #ifndef CONFIG_USER_ONLY
406     set_satp_mode_max_supported(RISCV_CPU(obj),
407         riscv_cpu_mxl(&RISCV_CPU(obj)->env) == MXL_RV32 ?
408         VM_1_10_SV32 : VM_1_10_SV57);
409 #endif
410 
411     env->priv_ver = PRIV_VERSION_LATEST;
412 
413     /* inherited from parent obj via riscv_cpu_init() */
414     cpu->cfg.ext_zifencei = true;
415     cpu->cfg.ext_zicsr = true;
416     cpu->cfg.mmu = true;
417     cpu->cfg.pmp = true;
418 }
419 
420 static void riscv_max_cpu_init(Object *obj)
421 {
422     RISCVCPU *cpu = RISCV_CPU(obj);
423     CPURISCVState *env = &cpu->env;
424     RISCVMXL mlx = MXL_RV64;
425 
426 #ifdef TARGET_RISCV32
427     mlx = MXL_RV32;
428 #endif
429     riscv_cpu_set_misa(env, mlx, 0);
430     env->priv_ver = PRIV_VERSION_LATEST;
431 #ifndef CONFIG_USER_ONLY
432     set_satp_mode_max_supported(RISCV_CPU(obj), mlx == MXL_RV32 ?
433                                 VM_1_10_SV32 : VM_1_10_SV57);
434 #endif
435 }
436 
437 #if defined(TARGET_RISCV64)
438 static void rv64_base_cpu_init(Object *obj)
439 {
440     CPURISCVState *env = &RISCV_CPU(obj)->env;
441     /* We set this in the realize function */
442     riscv_cpu_set_misa(env, MXL_RV64, 0);
443     /* Set latest version of privileged specification */
444     env->priv_ver = PRIV_VERSION_LATEST;
445 #ifndef CONFIG_USER_ONLY
446     set_satp_mode_max_supported(RISCV_CPU(obj), VM_1_10_SV57);
447 #endif
448 }
449 
450 static void rv64_sifive_u_cpu_init(Object *obj)
451 {
452     RISCVCPU *cpu = RISCV_CPU(obj);
453     CPURISCVState *env = &cpu->env;
454     riscv_cpu_set_misa(env, MXL_RV64,
455                        RVI | RVM | RVA | RVF | RVD | RVC | RVS | RVU);
456     env->priv_ver = PRIV_VERSION_1_10_0;
457 #ifndef CONFIG_USER_ONLY
458     set_satp_mode_max_supported(RISCV_CPU(obj), VM_1_10_SV39);
459 #endif
460 
461     /* inherited from parent obj via riscv_cpu_init() */
462     cpu->cfg.ext_zifencei = true;
463     cpu->cfg.ext_zicsr = true;
464     cpu->cfg.mmu = true;
465     cpu->cfg.pmp = true;
466 }
467 
468 static void rv64_sifive_e_cpu_init(Object *obj)
469 {
470     CPURISCVState *env = &RISCV_CPU(obj)->env;
471     RISCVCPU *cpu = RISCV_CPU(obj);
472 
473     riscv_cpu_set_misa(env, MXL_RV64, RVI | RVM | RVA | RVC | RVU);
474     env->priv_ver = PRIV_VERSION_1_10_0;
475 #ifndef CONFIG_USER_ONLY
476     set_satp_mode_max_supported(cpu, VM_1_10_MBARE);
477 #endif
478 
479     /* inherited from parent obj via riscv_cpu_init() */
480     cpu->cfg.ext_zifencei = true;
481     cpu->cfg.ext_zicsr = true;
482     cpu->cfg.pmp = true;
483 }
484 
485 static void rv64_thead_c906_cpu_init(Object *obj)
486 {
487     CPURISCVState *env = &RISCV_CPU(obj)->env;
488     RISCVCPU *cpu = RISCV_CPU(obj);
489 
490     riscv_cpu_set_misa(env, MXL_RV64, RVG | RVC | RVS | RVU);
491     env->priv_ver = PRIV_VERSION_1_11_0;
492 
493     cpu->cfg.ext_zfa = true;
494     cpu->cfg.ext_zfh = true;
495     cpu->cfg.mmu = true;
496     cpu->cfg.ext_xtheadba = true;
497     cpu->cfg.ext_xtheadbb = true;
498     cpu->cfg.ext_xtheadbs = true;
499     cpu->cfg.ext_xtheadcmo = true;
500     cpu->cfg.ext_xtheadcondmov = true;
501     cpu->cfg.ext_xtheadfmemidx = true;
502     cpu->cfg.ext_xtheadmac = true;
503     cpu->cfg.ext_xtheadmemidx = true;
504     cpu->cfg.ext_xtheadmempair = true;
505     cpu->cfg.ext_xtheadsync = true;
506 
507     cpu->cfg.mvendorid = THEAD_VENDOR_ID;
508 #ifndef CONFIG_USER_ONLY
509     set_satp_mode_max_supported(cpu, VM_1_10_SV39);
510 #endif
511 
512     /* inherited from parent obj via riscv_cpu_init() */
513     cpu->cfg.pmp = true;
514 }
515 
516 static void rv64_veyron_v1_cpu_init(Object *obj)
517 {
518     CPURISCVState *env = &RISCV_CPU(obj)->env;
519     RISCVCPU *cpu = RISCV_CPU(obj);
520 
521     riscv_cpu_set_misa(env, MXL_RV64, RVG | RVC | RVS | RVU | RVH);
522     env->priv_ver = PRIV_VERSION_1_12_0;
523 
524     /* Enable ISA extensions */
525     cpu->cfg.mmu = true;
526     cpu->cfg.ext_zifencei = true;
527     cpu->cfg.ext_zicsr = true;
528     cpu->cfg.pmp = true;
529     cpu->cfg.ext_zicbom = true;
530     cpu->cfg.cbom_blocksize = 64;
531     cpu->cfg.cboz_blocksize = 64;
532     cpu->cfg.ext_zicboz = true;
533     cpu->cfg.ext_smaia = true;
534     cpu->cfg.ext_ssaia = true;
535     cpu->cfg.ext_sscofpmf = true;
536     cpu->cfg.ext_sstc = true;
537     cpu->cfg.ext_svinval = true;
538     cpu->cfg.ext_svnapot = true;
539     cpu->cfg.ext_svpbmt = true;
540     cpu->cfg.ext_smstateen = true;
541     cpu->cfg.ext_zba = true;
542     cpu->cfg.ext_zbb = true;
543     cpu->cfg.ext_zbc = true;
544     cpu->cfg.ext_zbs = true;
545     cpu->cfg.ext_XVentanaCondOps = true;
546 
547     cpu->cfg.mvendorid = VEYRON_V1_MVENDORID;
548     cpu->cfg.marchid = VEYRON_V1_MARCHID;
549     cpu->cfg.mimpid = VEYRON_V1_MIMPID;
550 
551 #ifndef CONFIG_USER_ONLY
552     set_satp_mode_max_supported(cpu, VM_1_10_SV48);
553 #endif
554 }
555 
556 static void rv128_base_cpu_init(Object *obj)
557 {
558     if (qemu_tcg_mttcg_enabled()) {
559         /* Missing 128-bit aligned atomics */
560         error_report("128-bit RISC-V currently does not work with Multi "
561                      "Threaded TCG. Please use: -accel tcg,thread=single");
562         exit(EXIT_FAILURE);
563     }
564     CPURISCVState *env = &RISCV_CPU(obj)->env;
565     /* We set this in the realize function */
566     riscv_cpu_set_misa(env, MXL_RV128, 0);
567     /* Set latest version of privileged specification */
568     env->priv_ver = PRIV_VERSION_LATEST;
569 #ifndef CONFIG_USER_ONLY
570     set_satp_mode_max_supported(RISCV_CPU(obj), VM_1_10_SV57);
571 #endif
572 }
573 
574 static void rv64i_bare_cpu_init(Object *obj)
575 {
576     CPURISCVState *env = &RISCV_CPU(obj)->env;
577     riscv_cpu_set_misa(env, MXL_RV64, RVI);
578 
579     /* Remove the defaults from the parent class */
580     RISCV_CPU(obj)->cfg.ext_zicntr = false;
581     RISCV_CPU(obj)->cfg.ext_zihpm = false;
582 
583     /* Set to QEMU's first supported priv version */
584     env->priv_ver = PRIV_VERSION_1_10_0;
585 
586     /*
587      * Support all available satp_mode settings. The default
588      * value will be set to MBARE if the user doesn't set
589      * satp_mode manually (see set_satp_mode_default_map()).
590      */
591 #ifndef CONFIG_USER_ONLY
592     set_satp_mode_max_supported(RISCV_CPU(obj), VM_1_10_SV64);
593 #endif
594 }
595 #else
596 static void rv32_base_cpu_init(Object *obj)
597 {
598     CPURISCVState *env = &RISCV_CPU(obj)->env;
599     /* We set this in the realize function */
600     riscv_cpu_set_misa(env, MXL_RV32, 0);
601     /* Set latest version of privileged specification */
602     env->priv_ver = PRIV_VERSION_LATEST;
603 #ifndef CONFIG_USER_ONLY
604     set_satp_mode_max_supported(RISCV_CPU(obj), VM_1_10_SV32);
605 #endif
606 }
607 
608 static void rv32_sifive_u_cpu_init(Object *obj)
609 {
610     RISCVCPU *cpu = RISCV_CPU(obj);
611     CPURISCVState *env = &cpu->env;
612     riscv_cpu_set_misa(env, MXL_RV32,
613                        RVI | RVM | RVA | RVF | RVD | RVC | RVS | RVU);
614     env->priv_ver = PRIV_VERSION_1_10_0;
615 #ifndef CONFIG_USER_ONLY
616     set_satp_mode_max_supported(RISCV_CPU(obj), VM_1_10_SV32);
617 #endif
618 
619     /* inherited from parent obj via riscv_cpu_init() */
620     cpu->cfg.ext_zifencei = true;
621     cpu->cfg.ext_zicsr = true;
622     cpu->cfg.mmu = true;
623     cpu->cfg.pmp = true;
624 }
625 
626 static void rv32_sifive_e_cpu_init(Object *obj)
627 {
628     CPURISCVState *env = &RISCV_CPU(obj)->env;
629     RISCVCPU *cpu = RISCV_CPU(obj);
630 
631     riscv_cpu_set_misa(env, MXL_RV32, RVI | RVM | RVA | RVC | RVU);
632     env->priv_ver = PRIV_VERSION_1_10_0;
633 #ifndef CONFIG_USER_ONLY
634     set_satp_mode_max_supported(cpu, VM_1_10_MBARE);
635 #endif
636 
637     /* inherited from parent obj via riscv_cpu_init() */
638     cpu->cfg.ext_zifencei = true;
639     cpu->cfg.ext_zicsr = true;
640     cpu->cfg.pmp = true;
641 }
642 
643 static void rv32_ibex_cpu_init(Object *obj)
644 {
645     CPURISCVState *env = &RISCV_CPU(obj)->env;
646     RISCVCPU *cpu = RISCV_CPU(obj);
647 
648     riscv_cpu_set_misa(env, MXL_RV32, RVI | RVM | RVC | RVU);
649     env->priv_ver = PRIV_VERSION_1_12_0;
650 #ifndef CONFIG_USER_ONLY
651     set_satp_mode_max_supported(cpu, VM_1_10_MBARE);
652 #endif
653     /* inherited from parent obj via riscv_cpu_init() */
654     cpu->cfg.ext_zifencei = true;
655     cpu->cfg.ext_zicsr = true;
656     cpu->cfg.pmp = true;
657     cpu->cfg.ext_smepmp = true;
658 }
659 
660 static void rv32_imafcu_nommu_cpu_init(Object *obj)
661 {
662     CPURISCVState *env = &RISCV_CPU(obj)->env;
663     RISCVCPU *cpu = RISCV_CPU(obj);
664 
665     riscv_cpu_set_misa(env, MXL_RV32, RVI | RVM | RVA | RVF | RVC | RVU);
666     env->priv_ver = PRIV_VERSION_1_10_0;
667 #ifndef CONFIG_USER_ONLY
668     set_satp_mode_max_supported(cpu, VM_1_10_MBARE);
669 #endif
670 
671     /* inherited from parent obj via riscv_cpu_init() */
672     cpu->cfg.ext_zifencei = true;
673     cpu->cfg.ext_zicsr = true;
674     cpu->cfg.pmp = true;
675 }
676 #endif
677 
678 static ObjectClass *riscv_cpu_class_by_name(const char *cpu_model)
679 {
680     ObjectClass *oc;
681     char *typename;
682     char **cpuname;
683 
684     cpuname = g_strsplit(cpu_model, ",", 1);
685     typename = g_strdup_printf(RISCV_CPU_TYPE_NAME("%s"), cpuname[0]);
686     oc = object_class_by_name(typename);
687     g_strfreev(cpuname);
688     g_free(typename);
689 
690     return oc;
691 }
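
/*
 * Sketch (assuming RISCV_CPU_TYPE_NAME() expands to the usual
 * "<model>-riscv-cpu" type name): a model string of "rv64" resolves via
 *
 *     oc = object_class_by_name("rv64-riscv-cpu");
 *
 * and a NULL result simply means no such CPU type is registered.
 */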
692 
693 char *riscv_cpu_get_name(RISCVCPU *cpu)
694 {
695     RISCVCPUClass *rcc = RISCV_CPU_GET_CLASS(cpu);
696     const char *typename = object_class_get_name(OBJECT_CLASS(rcc));
697 
698     g_assert(g_str_has_suffix(typename, RISCV_CPU_TYPE_SUFFIX));
699 
700     return cpu_model_from_type(typename);
701 }
702 
703 static void riscv_cpu_dump_state(CPUState *cs, FILE *f, int flags)
704 {
705     RISCVCPU *cpu = RISCV_CPU(cs);
706     CPURISCVState *env = &cpu->env;
707     int i, j;
708     uint8_t *p;
709 
710 #if !defined(CONFIG_USER_ONLY)
711     if (riscv_has_ext(env, RVH)) {
712         qemu_fprintf(f, " %s %d\n", "V      =  ", env->virt_enabled);
713     }
714 #endif
715     qemu_fprintf(f, " %s " TARGET_FMT_lx "\n", "pc      ", env->pc);
716 #ifndef CONFIG_USER_ONLY
717     {
718         static const int dump_csrs[] = {
719             CSR_MHARTID,
720             CSR_MSTATUS,
721             CSR_MSTATUSH,
722             /*
723              * CSR_SSTATUS is intentionally omitted here as its value
724              * can be figured out by looking at CSR_MSTATUS
725              */
726             CSR_HSTATUS,
727             CSR_VSSTATUS,
728             CSR_MIP,
729             CSR_MIE,
730             CSR_MIDELEG,
731             CSR_HIDELEG,
732             CSR_MEDELEG,
733             CSR_HEDELEG,
734             CSR_MTVEC,
735             CSR_STVEC,
736             CSR_VSTVEC,
737             CSR_MEPC,
738             CSR_SEPC,
739             CSR_VSEPC,
740             CSR_MCAUSE,
741             CSR_SCAUSE,
742             CSR_VSCAUSE,
743             CSR_MTVAL,
744             CSR_STVAL,
745             CSR_HTVAL,
746             CSR_MTVAL2,
747             CSR_MSCRATCH,
748             CSR_SSCRATCH,
749             CSR_SATP,
750             CSR_MMTE,
751             CSR_UPMBASE,
752             CSR_UPMMASK,
753             CSR_SPMBASE,
754             CSR_SPMMASK,
755             CSR_MPMBASE,
756             CSR_MPMMASK,
757         };
758 
759         for (i = 0; i < ARRAY_SIZE(dump_csrs); ++i) {
760             int csrno = dump_csrs[i];
761             target_ulong val = 0;
762             RISCVException res = riscv_csrrw_debug(env, csrno, &val, 0, 0);
763 
764             /*
765              * Rely on the smode, hmode, etc, predicates within csr.c
766              * to do the filtering of the registers that are present.
767              */
768             if (res == RISCV_EXCP_NONE) {
769                 qemu_fprintf(f, " %-8s " TARGET_FMT_lx "\n",
770                              csr_ops[csrno].name, val);
771             }
772         }
773     }
774 #endif
775 
776     for (i = 0; i < 32; i++) {
777         qemu_fprintf(f, " %-8s " TARGET_FMT_lx,
778                      riscv_int_regnames[i], env->gpr[i]);
779         if ((i & 3) == 3) {
780             qemu_fprintf(f, "\n");
781         }
782     }
783     if (flags & CPU_DUMP_FPU) {
784         for (i = 0; i < 32; i++) {
785             qemu_fprintf(f, " %-8s %016" PRIx64,
786                          riscv_fpr_regnames[i], env->fpr[i]);
787             if ((i & 3) == 3) {
788                 qemu_fprintf(f, "\n");
789             }
790         }
791     }
792     if (riscv_has_ext(env, RVV) && (flags & CPU_DUMP_VPU)) {
793         static const int dump_rvv_csrs[] = {
794                     CSR_VSTART,
795                     CSR_VXSAT,
796                     CSR_VXRM,
797                     CSR_VCSR,
798                     CSR_VL,
799                     CSR_VTYPE,
800                     CSR_VLENB,
801                 };
802         for (i = 0; i < ARRAY_SIZE(dump_rvv_csrs); ++i) {
803             int csrno = dump_rvv_csrs[i];
804             target_ulong val = 0;
805             RISCVException res = riscv_csrrw_debug(env, csrno, &val, 0, 0);
806 
807             /*
808              * Rely on the smode, hmode, etc, predicates within csr.c
809              * to do the filtering of the registers that are present.
810              */
811             if (res == RISCV_EXCP_NONE) {
812                 qemu_fprintf(f, " %-8s " TARGET_FMT_lx "\n",
813                              csr_ops[csrno].name, val);
814             }
815         }
816         uint16_t vlenb = cpu->cfg.vlen >> 3;
817 
818         for (i = 0; i < 32; i++) {
819             qemu_fprintf(f, " %-8s ", riscv_rvv_regnames[i]);
820             p = (uint8_t *)env->vreg;
821             for (j = vlenb - 1 ; j >= 0; j--) {
822                 qemu_fprintf(f, "%02x", *(p + i * vlenb + BYTE(j)));
823             }
824             qemu_fprintf(f, "\n");
825         }
826     }
827 }
828 
829 static void riscv_cpu_set_pc(CPUState *cs, vaddr value)
830 {
831     RISCVCPU *cpu = RISCV_CPU(cs);
832     CPURISCVState *env = &cpu->env;
833 
834     if (env->xl == MXL_RV32) {
835         env->pc = (int32_t)value;
836     } else {
837         env->pc = value;
838     }
839 }
840 
841 static vaddr riscv_cpu_get_pc(CPUState *cs)
842 {
843     RISCVCPU *cpu = RISCV_CPU(cs);
844     CPURISCVState *env = &cpu->env;
845 
846     /* Match cpu_get_tb_cpu_state. */
847     if (env->xl == MXL_RV32) {
848         return env->pc & UINT32_MAX;
849     }
850     return env->pc;
851 }
852 
853 static bool riscv_cpu_has_work(CPUState *cs)
854 {
855 #ifndef CONFIG_USER_ONLY
856     RISCVCPU *cpu = RISCV_CPU(cs);
857     CPURISCVState *env = &cpu->env;
858     /*
859      * Definition of the WFI instruction requires it to ignore the privilege
860      * mode and delegation registers, but respect individual enables
861      */
862     return riscv_cpu_all_pending(env) != 0 ||
863         riscv_cpu_sirq_pending(env) != RISCV_EXCP_NONE ||
864         riscv_cpu_vsirq_pending(env) != RISCV_EXCP_NONE;
865 #else
866     return true;
867 #endif
868 }
869 
870 static int riscv_cpu_mmu_index(CPUState *cs, bool ifetch)
871 {
872     return riscv_env_mmu_index(cpu_env(cs), ifetch);
873 }
874 
875 static void riscv_cpu_reset_hold(Object *obj)
876 {
877 #ifndef CONFIG_USER_ONLY
878     uint8_t iprio;
879     int i, irq, rdzero;
880 #endif
881     CPUState *cs = CPU(obj);
882     RISCVCPU *cpu = RISCV_CPU(cs);
883     RISCVCPUClass *mcc = RISCV_CPU_GET_CLASS(cpu);
884     CPURISCVState *env = &cpu->env;
885 
886     if (mcc->parent_phases.hold) {
887         mcc->parent_phases.hold(obj);
888     }
889 #ifndef CONFIG_USER_ONLY
890     env->misa_mxl = env->misa_mxl_max;
891     env->priv = PRV_M;
892     env->mstatus &= ~(MSTATUS_MIE | MSTATUS_MPRV);
893     if (env->misa_mxl > MXL_RV32) {
894         /*
895          * The reset status of SXL/UXL is undefined, but mstatus is WARL
896          * and we must ensure that the value after init is valid for read.
897          */
898         env->mstatus = set_field(env->mstatus, MSTATUS64_SXL, env->misa_mxl);
899         env->mstatus = set_field(env->mstatus, MSTATUS64_UXL, env->misa_mxl);
900         if (riscv_has_ext(env, RVH)) {
901             env->vsstatus = set_field(env->vsstatus,
902                                       MSTATUS64_SXL, env->misa_mxl);
903             env->vsstatus = set_field(env->vsstatus,
904                                       MSTATUS64_UXL, env->misa_mxl);
905             env->mstatus_hs = set_field(env->mstatus_hs,
906                                         MSTATUS64_SXL, env->misa_mxl);
907             env->mstatus_hs = set_field(env->mstatus_hs,
908                                         MSTATUS64_UXL, env->misa_mxl);
909         }
910     }
911     env->mcause = 0;
912     env->miclaim = MIP_SGEIP;
913     env->pc = env->resetvec;
914     env->bins = 0;
915     env->two_stage_lookup = false;
916 
917     env->menvcfg = (cpu->cfg.ext_svpbmt ? MENVCFG_PBMTE : 0) |
918                    (cpu->cfg.ext_svadu ? MENVCFG_ADUE : 0);
919     env->henvcfg = (cpu->cfg.ext_svpbmt ? HENVCFG_PBMTE : 0) |
920                    (cpu->cfg.ext_svadu ? HENVCFG_ADUE : 0);
921 
922     /* Initialize default priorities of local interrupts. */
923     for (i = 0; i < ARRAY_SIZE(env->miprio); i++) {
924         iprio = riscv_cpu_default_priority(i);
925         env->miprio[i] = (i == IRQ_M_EXT) ? 0 : iprio;
926         env->siprio[i] = (i == IRQ_S_EXT) ? 0 : iprio;
927         env->hviprio[i] = 0;
928     }
929     i = 0;
930     while (!riscv_cpu_hviprio_index2irq(i, &irq, &rdzero)) {
931         if (!rdzero) {
932             env->hviprio[irq] = env->miprio[irq];
933         }
934         i++;
935     }
936     /* mmte is supposed to have pm.current hardwired to 1 */
937     env->mmte |= (EXT_STATUS_INITIAL | MMTE_M_PM_CURRENT);
938 
939     /*
940      * Bits 10, 6, 2 and 12 of mideleg are read only 1 when the Hypervisor
941      * extension is enabled.
942      */
943     if (riscv_has_ext(env, RVH)) {
944         env->mideleg |= HS_MODE_INTERRUPTS;
945     }
946 
947     /*
948      * Clear mseccfg and unlock all the PMP entries upon reset.
949      * This is allowed as per the priv and smepmp specifications
950      * and is needed to clear stale entries across reboots.
951      */
952     if (riscv_cpu_cfg(env)->ext_smepmp) {
953         env->mseccfg = 0;
954     }
955 
956     pmp_unlock_entries(env);
957 #endif
958     env->xl = riscv_cpu_mxl(env);
959     riscv_cpu_update_mask(env);
960     cs->exception_index = RISCV_EXCP_NONE;
961     env->load_res = -1;
962     set_default_nan_mode(1, &env->fp_status);
963 
964 #ifndef CONFIG_USER_ONLY
965     if (cpu->cfg.debug) {
966         riscv_trigger_reset_hold(env);
967     }
968 
969     if (kvm_enabled()) {
970         kvm_riscv_reset_vcpu(cpu);
971     }
972 #endif
973 }
974 
975 static void riscv_cpu_disas_set_info(CPUState *s, disassemble_info *info)
976 {
977     RISCVCPU *cpu = RISCV_CPU(s);
978     CPURISCVState *env = &cpu->env;
979     info->target_info = &cpu->cfg;
980 
981     switch (env->xl) {
982     case MXL_RV32:
983         info->print_insn = print_insn_riscv32;
984         break;
985     case MXL_RV64:
986         info->print_insn = print_insn_riscv64;
987         break;
988     case MXL_RV128:
989         info->print_insn = print_insn_riscv128;
990         break;
991     default:
992         g_assert_not_reached();
993     }
994 }
995 
996 #ifndef CONFIG_USER_ONLY
997 static void riscv_cpu_satp_mode_finalize(RISCVCPU *cpu, Error **errp)
998 {
999     bool rv32 = riscv_cpu_is_32bit(cpu);
1000     uint8_t satp_mode_map_max, satp_mode_supported_max;
1001 
1002     /* The CPU wants the OS to decide which satp mode to use */
1003     if (cpu->cfg.satp_mode.supported == 0) {
1004         return;
1005     }
1006 
1007     satp_mode_supported_max =
1008                     satp_mode_max_from_map(cpu->cfg.satp_mode.supported);
1009 
1010     if (cpu->cfg.satp_mode.map == 0) {
1011         if (cpu->cfg.satp_mode.init == 0) {
1012             /* If unset by the user, we fall back to the default satp mode. */
1013             set_satp_mode_default_map(cpu);
1014         } else {
1015             /*
1016              * Find the lowest level that was disabled and then enable the
1017              * first valid level below it that can be found in
1018              * valid_vm_1_10_32/64.
1019              */
1020             for (int i = 1; i < 16; ++i) {
1021                 if ((cpu->cfg.satp_mode.init & (1 << i)) &&
1022                     (cpu->cfg.satp_mode.supported & (1 << i))) {
1023                     for (int j = i - 1; j >= 0; --j) {
1024                         if (cpu->cfg.satp_mode.supported & (1 << j)) {
1025                             cpu->cfg.satp_mode.map |= (1 << j);
1026                             break;
1027                         }
1028                     }
1029                     break;
1030                 }
1031             }
1032         }
1033     }
1034 
1035     satp_mode_map_max = satp_mode_max_from_map(cpu->cfg.satp_mode.map);
1036 
1037     /* Make sure the user asked for a supported configuration (HW and QEMU) */
1038     if (satp_mode_map_max > satp_mode_supported_max) {
1039         error_setg(errp, "satp_mode %s is higher than hw max capability %s",
1040                    satp_mode_str(satp_mode_map_max, rv32),
1041                    satp_mode_str(satp_mode_supported_max, rv32));
1042         return;
1043     }
1044 
1045     /*
1046      * Make sure the user did not ask for an invalid configuration as per
1047      * the specification.
1048      */
1049     if (!rv32) {
1050         for (int i = satp_mode_map_max - 1; i >= 0; --i) {
1051             if (!(cpu->cfg.satp_mode.map & (1 << i)) &&
1052                 (cpu->cfg.satp_mode.init & (1 << i)) &&
1053                 (cpu->cfg.satp_mode.supported & (1 << i))) {
1054                 error_setg(errp, "cannot disable %s satp mode if %s "
1055                            "is enabled", satp_mode_str(i, false),
1056                            satp_mode_str(satp_mode_map_max, false));
1057                 return;
1058             }
1059         }
1060     }
1061 
1062     /* Finally expand the map so that all valid modes are set */
1063     for (int i = satp_mode_map_max - 1; i >= 0; --i) {
1064         if (cpu->cfg.satp_mode.supported & (1 << i)) {
1065             cpu->cfg.satp_mode.map |= (1 << i);
1066         }
1067     }
1068 }
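
/*
 * Worked example of the finalization above (a sketch; assumes an rv64
 * CPU whose supported map covers MBARE/SV39/SV48/SV57):
 *
 *     -cpu rv64,sv57=off
 *         map == 0 and only the SV57 bit is set in init,
 *         so the loop enables the next supported mode below it (SV48);
 *         the final expansion then adds SV39 and MBARE, and
 *         satp_mode_max_from_map(map) == VM_1_10_SV48.
 *
 * With "-cpu rv64,sv48=on" the SV48 bit is set directly and the same
 * expansion fills in every supported mode below it.
 */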
1069 #endif
1070 
1071 void riscv_cpu_finalize_features(RISCVCPU *cpu, Error **errp)
1072 {
1073     Error *local_err = NULL;
1074 
1075 #ifndef CONFIG_USER_ONLY
1076     riscv_cpu_satp_mode_finalize(cpu, &local_err);
1077     if (local_err != NULL) {
1078         error_propagate(errp, local_err);
1079         return;
1080     }
1081 #endif
1082 
1083     /*
1084      * KVM accel does not have a specialized finalize()
1085      * callback because its extensions are validated
1086      * in the get()/set() callbacks of each property.
1087      */
1088     if (tcg_enabled()) {
1089         riscv_tcg_cpu_finalize_features(cpu, &local_err);
1090         if (local_err != NULL) {
1091             error_propagate(errp, local_err);
1092             return;
1093         }
1094     }
1095 }
1096 
1097 static void riscv_cpu_realize(DeviceState *dev, Error **errp)
1098 {
1099     CPUState *cs = CPU(dev);
1100     RISCVCPU *cpu = RISCV_CPU(dev);
1101     RISCVCPUClass *mcc = RISCV_CPU_GET_CLASS(dev);
1102     Error *local_err = NULL;
1103 
1104     if (object_dynamic_cast(OBJECT(dev), TYPE_RISCV_CPU_ANY) != NULL) {
1105         warn_report("The 'any' CPU is deprecated and will be "
1106                     "removed in the future.");
1107     }
1108 
1109     cpu_exec_realizefn(cs, &local_err);
1110     if (local_err != NULL) {
1111         error_propagate(errp, local_err);
1112         return;
1113     }
1114 
1115     riscv_cpu_finalize_features(cpu, &local_err);
1116     if (local_err != NULL) {
1117         error_propagate(errp, local_err);
1118         return;
1119     }
1120 
1121     riscv_cpu_register_gdb_regs_for_features(cs);
1122 
1123 #ifndef CONFIG_USER_ONLY
1124     if (cpu->cfg.debug) {
1125         riscv_trigger_realize(&cpu->env);
1126     }
1127 #endif
1128 
1129     qemu_init_vcpu(cs);
1130     cpu_reset(cs);
1131 
1132     mcc->parent_realize(dev, errp);
1133 }
1134 
1135 bool riscv_cpu_accelerator_compatible(RISCVCPU *cpu)
1136 {
1137     if (tcg_enabled()) {
1138         return riscv_cpu_tcg_compatible(cpu);
1139     }
1140 
1141     return true;
1142 }
1143 
1144 #ifndef CONFIG_USER_ONLY
1145 static void cpu_riscv_get_satp(Object *obj, Visitor *v, const char *name,
1146                                void *opaque, Error **errp)
1147 {
1148     RISCVSATPMap *satp_map = opaque;
1149     uint8_t satp = satp_mode_from_str(name);
1150     bool value;
1151 
1152     value = satp_map->map & (1 << satp);
1153 
1154     visit_type_bool(v, name, &value, errp);
1155 }
1156 
1157 static void cpu_riscv_set_satp(Object *obj, Visitor *v, const char *name,
1158                                void *opaque, Error **errp)
1159 {
1160     RISCVSATPMap *satp_map = opaque;
1161     uint8_t satp = satp_mode_from_str(name);
1162     bool value;
1163 
1164     if (!visit_type_bool(v, name, &value, errp)) {
1165         return;
1166     }
1167 
1168     satp_map->map = deposit32(satp_map->map, satp, 1, value);
1169     satp_map->init |= 1 << satp;
1170 }
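
/*
 * Sketch of the setter above: deposit32() writes a 1-bit field at bit
 * position 'satp', so "sv48=on" is effectively
 *
 *     satp_map->map  |= 1 << VM_1_10_SV48;
 *     satp_map->init |= 1 << VM_1_10_SV48;
 *
 * while "sv48=off" clears the map bit but still records the choice in
 * 'init', which is what riscv_cpu_satp_mode_finalize() inspects.
 */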
1171 
1172 void riscv_add_satp_mode_properties(Object *obj)
1173 {
1174     RISCVCPU *cpu = RISCV_CPU(obj);
1175 
1176     if (cpu->env.misa_mxl == MXL_RV32) {
1177         object_property_add(obj, "sv32", "bool", cpu_riscv_get_satp,
1178                             cpu_riscv_set_satp, NULL, &cpu->cfg.satp_mode);
1179     } else {
1180         object_property_add(obj, "sv39", "bool", cpu_riscv_get_satp,
1181                             cpu_riscv_set_satp, NULL, &cpu->cfg.satp_mode);
1182         object_property_add(obj, "sv48", "bool", cpu_riscv_get_satp,
1183                             cpu_riscv_set_satp, NULL, &cpu->cfg.satp_mode);
1184         object_property_add(obj, "sv57", "bool", cpu_riscv_get_satp,
1185                             cpu_riscv_set_satp, NULL, &cpu->cfg.satp_mode);
1186         object_property_add(obj, "sv64", "bool", cpu_riscv_get_satp,
1187                             cpu_riscv_set_satp, NULL, &cpu->cfg.satp_mode);
1188     }
1189 }
1190 
1191 static void riscv_cpu_set_irq(void *opaque, int irq, int level)
1192 {
1193     RISCVCPU *cpu = RISCV_CPU(opaque);
1194     CPURISCVState *env = &cpu->env;
1195 
1196     if (irq < IRQ_LOCAL_MAX) {
1197         switch (irq) {
1198         case IRQ_U_SOFT:
1199         case IRQ_S_SOFT:
1200         case IRQ_VS_SOFT:
1201         case IRQ_M_SOFT:
1202         case IRQ_U_TIMER:
1203         case IRQ_S_TIMER:
1204         case IRQ_VS_TIMER:
1205         case IRQ_M_TIMER:
1206         case IRQ_U_EXT:
1207         case IRQ_VS_EXT:
1208         case IRQ_M_EXT:
1209             if (kvm_enabled()) {
1210                 kvm_riscv_set_irq(cpu, irq, level);
1211             } else {
1212                 riscv_cpu_update_mip(env, 1 << irq, BOOL_TO_MASK(level));
1213             }
1214             break;
1215         case IRQ_S_EXT:
1216             if (kvm_enabled()) {
1217                 kvm_riscv_set_irq(cpu, irq, level);
1218             } else {
1219                 env->external_seip = level;
1220                 riscv_cpu_update_mip(env, 1 << irq,
1221                                      BOOL_TO_MASK(level | env->software_seip));
1222             }
1223             break;
1224         default:
1225             g_assert_not_reached();
1226         }
1227     } else if (irq < (IRQ_LOCAL_MAX + IRQ_LOCAL_GUEST_MAX)) {
1228         /* Require H-extension for handling guest local interrupts */
1229         if (!riscv_has_ext(env, RVH)) {
1230             g_assert_not_reached();
1231         }
1232 
1233         /* Compute bit position in HGEIP CSR */
1234         irq = irq - IRQ_LOCAL_MAX + 1;
1235         if (env->geilen < irq) {
1236             g_assert_not_reached();
1237         }
1238 
1239         /* Update HGEIP CSR */
1240         env->hgeip &= ~((target_ulong)1 << irq);
1241         if (level) {
1242             env->hgeip |= (target_ulong)1 << irq;
1243         }
1244 
1245         /* Update mip.SGEIP bit */
1246         riscv_cpu_update_mip(env, MIP_SGEIP,
1247                              BOOL_TO_MASK(!!(env->hgeie & env->hgeip)));
1248     } else {
1249         g_assert_not_reached();
1250     }
1251 }
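
/*
 * Sketch of the guest-interrupt path above: the first guest external
 * interrupt line (irq == IRQ_LOCAL_MAX) lands in bit 1 of hgeip, since
 * this function never sets bit 0, and anything beyond env->geilen
 * trips the assertion. A board wiring it up would do something like
 *
 *     qemu_set_irq(qdev_get_gpio_in(DEVICE(cpu), IRQ_LOCAL_MAX), 1);
 *
 * after which mip.SGEIP follows hgeie & hgeip.
 */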
1252 #endif /* CONFIG_USER_ONLY */
1253 
1254 static bool riscv_cpu_is_dynamic(Object *cpu_obj)
1255 {
1256     return object_dynamic_cast(cpu_obj, TYPE_RISCV_DYNAMIC_CPU) != NULL;
1257 }
1258 
1259 static void riscv_cpu_post_init(Object *obj)
1260 {
1261     accel_cpu_instance_init(CPU(obj));
1262 }
1263 
1264 static void riscv_cpu_init(Object *obj)
1265 {
1266 #ifndef CONFIG_USER_ONLY
1267     qdev_init_gpio_in(DEVICE(obj), riscv_cpu_set_irq,
1268                       IRQ_LOCAL_MAX + IRQ_LOCAL_GUEST_MAX);
1269 #endif /* CONFIG_USER_ONLY */
1270 
1271     /*
1272      * The timer and performance counters extensions were supported
1273      * in QEMU before they were added as discrete extensions in the
1274      * ISA. To keep compatibility we'll always default them to 'true'
1275      * for all CPUs. Each accelerator will decide what to do when
1276      * users disable them.
1277      */
1278     RISCV_CPU(obj)->cfg.ext_zicntr = true;
1279     RISCV_CPU(obj)->cfg.ext_zihpm = true;
1280 }
1281 
1282 typedef struct misa_ext_info {
1283     const char *name;
1284     const char *description;
1285 } MISAExtInfo;
1286 
1287 #define MISA_INFO_IDX(_bit) \
1288     __builtin_ctz(_bit)
1289 
1290 #define MISA_EXT_INFO(_bit, _propname, _descr) \
1291     [MISA_INFO_IDX(_bit)] = {.name = _propname, .description = _descr}
1292 
1293 static const MISAExtInfo misa_ext_info_arr[] = {
1294     MISA_EXT_INFO(RVA, "a", "Atomic instructions"),
1295     MISA_EXT_INFO(RVC, "c", "Compressed instructions"),
1296     MISA_EXT_INFO(RVD, "d", "Double-precision floating point"),
1297     MISA_EXT_INFO(RVF, "f", "Single-precision floating point"),
1298     MISA_EXT_INFO(RVI, "i", "Base integer instruction set"),
1299     MISA_EXT_INFO(RVE, "e", "Base integer instruction set (embedded)"),
1300     MISA_EXT_INFO(RVM, "m", "Integer multiplication and division"),
1301     MISA_EXT_INFO(RVS, "s", "Supervisor-level instructions"),
1302     MISA_EXT_INFO(RVU, "u", "User-level instructions"),
1303     MISA_EXT_INFO(RVH, "h", "Hypervisor"),
1304     MISA_EXT_INFO(RVJ, "x-j", "Dynamic translated languages"),
1305     MISA_EXT_INFO(RVV, "v", "Vector operations"),
1306     MISA_EXT_INFO(RVG, "g", "General purpose (IMAFD_Zicsr_Zifencei)"),
1307     MISA_EXT_INFO(RVB, "x-b", "Bit manipulation (Zba_Zbb_Zbs)")
1308 };
1309 
1310 static int riscv_validate_misa_info_idx(uint32_t bit)
1311 {
1312     int idx;
1313 
1314     /*
1315      * Our lowest valid input (RVA) is 1 and
1316      * __builtin_ctz() is UB with zero.
1317      */
1318     g_assert(bit != 0);
1319     idx = MISA_INFO_IDX(bit);
1320 
1321     g_assert(idx < ARRAY_SIZE(misa_ext_info_arr));
1322     return idx;
1323 }
1324 
1325 const char *riscv_get_misa_ext_name(uint32_t bit)
1326 {
1327     int idx = riscv_validate_misa_info_idx(bit);
1328     const char *val = misa_ext_info_arr[idx].name;
1329 
1330     g_assert(val != NULL);
1331     return val;
1332 }
1333 
1334 const char *riscv_get_misa_ext_description(uint32_t bit)
1335 {
1336     int idx = riscv_validate_misa_info_idx(bit);
1337     const char *val = misa_ext_info_arr[idx].description;
1338 
1339     g_assert(val != NULL);
1340     return val;
1341 }
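
/*
 * Sketch (assuming the usual RV* definitions where each misa bit is
 * 1 << (letter - 'A')): MISA_INFO_IDX(RVC) is __builtin_ctz(1 << 2) == 2,
 * so the designated initializers in misa_ext_info_arr[] place each
 * entry at the bit position of its extension, and
 *
 *     riscv_get_misa_ext_name(RVV);         returns "v"
 *     riscv_get_misa_ext_description(RVV);  returns "Vector operations"
 */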
1342 
1343 #define MULTI_EXT_CFG_BOOL(_name, _prop, _defval) \
1344     {.name = _name, .offset = CPU_CFG_OFFSET(_prop), \
1345      .enabled = _defval}
1346 
1347 const RISCVCPUMultiExtConfig riscv_cpu_extensions[] = {
1348     /* Defaults for standard extensions */
1349     MULTI_EXT_CFG_BOOL("sscofpmf", ext_sscofpmf, false),
1350     MULTI_EXT_CFG_BOOL("zifencei", ext_zifencei, true),
1351     MULTI_EXT_CFG_BOOL("zicsr", ext_zicsr, true),
1352     MULTI_EXT_CFG_BOOL("zihintntl", ext_zihintntl, true),
1353     MULTI_EXT_CFG_BOOL("zihintpause", ext_zihintpause, true),
1354     MULTI_EXT_CFG_BOOL("zacas", ext_zacas, false),
1355     MULTI_EXT_CFG_BOOL("zawrs", ext_zawrs, true),
1356     MULTI_EXT_CFG_BOOL("zfa", ext_zfa, true),
1357     MULTI_EXT_CFG_BOOL("zfh", ext_zfh, false),
1358     MULTI_EXT_CFG_BOOL("zfhmin", ext_zfhmin, false),
1359     MULTI_EXT_CFG_BOOL("zve32f", ext_zve32f, false),
1360     MULTI_EXT_CFG_BOOL("zve64f", ext_zve64f, false),
1361     MULTI_EXT_CFG_BOOL("zve64d", ext_zve64d, false),
1362     MULTI_EXT_CFG_BOOL("sstc", ext_sstc, true),
1363 
1364     MULTI_EXT_CFG_BOOL("smepmp", ext_smepmp, false),
1365     MULTI_EXT_CFG_BOOL("smstateen", ext_smstateen, false),
1366     MULTI_EXT_CFG_BOOL("svadu", ext_svadu, true),
1367     MULTI_EXT_CFG_BOOL("svinval", ext_svinval, false),
1368     MULTI_EXT_CFG_BOOL("svnapot", ext_svnapot, false),
1369     MULTI_EXT_CFG_BOOL("svpbmt", ext_svpbmt, false),
1370 
1371     MULTI_EXT_CFG_BOOL("zicntr", ext_zicntr, true),
1372     MULTI_EXT_CFG_BOOL("zihpm", ext_zihpm, true),
1373 
1374     MULTI_EXT_CFG_BOOL("zba", ext_zba, true),
1375     MULTI_EXT_CFG_BOOL("zbb", ext_zbb, true),
1376     MULTI_EXT_CFG_BOOL("zbc", ext_zbc, true),
1377     MULTI_EXT_CFG_BOOL("zbkb", ext_zbkb, false),
1378     MULTI_EXT_CFG_BOOL("zbkc", ext_zbkc, false),
1379     MULTI_EXT_CFG_BOOL("zbkx", ext_zbkx, false),
1380     MULTI_EXT_CFG_BOOL("zbs", ext_zbs, true),
1381     MULTI_EXT_CFG_BOOL("zk", ext_zk, false),
1382     MULTI_EXT_CFG_BOOL("zkn", ext_zkn, false),
1383     MULTI_EXT_CFG_BOOL("zknd", ext_zknd, false),
1384     MULTI_EXT_CFG_BOOL("zkne", ext_zkne, false),
1385     MULTI_EXT_CFG_BOOL("zknh", ext_zknh, false),
1386     MULTI_EXT_CFG_BOOL("zkr", ext_zkr, false),
1387     MULTI_EXT_CFG_BOOL("zks", ext_zks, false),
1388     MULTI_EXT_CFG_BOOL("zksed", ext_zksed, false),
1389     MULTI_EXT_CFG_BOOL("zksh", ext_zksh, false),
1390     MULTI_EXT_CFG_BOOL("zkt", ext_zkt, false),
1391 
1392     MULTI_EXT_CFG_BOOL("zdinx", ext_zdinx, false),
1393     MULTI_EXT_CFG_BOOL("zfinx", ext_zfinx, false),
1394     MULTI_EXT_CFG_BOOL("zhinx", ext_zhinx, false),
1395     MULTI_EXT_CFG_BOOL("zhinxmin", ext_zhinxmin, false),
1396 
1397     MULTI_EXT_CFG_BOOL("zicbom", ext_zicbom, true),
1398     MULTI_EXT_CFG_BOOL("zicbop", ext_zicbop, true),
1399     MULTI_EXT_CFG_BOOL("zicboz", ext_zicboz, true),
1400 
1401     MULTI_EXT_CFG_BOOL("zmmul", ext_zmmul, false),
1402 
1403     MULTI_EXT_CFG_BOOL("zca", ext_zca, false),
1404     MULTI_EXT_CFG_BOOL("zcb", ext_zcb, false),
1405     MULTI_EXT_CFG_BOOL("zcd", ext_zcd, false),
1406     MULTI_EXT_CFG_BOOL("zce", ext_zce, false),
1407     MULTI_EXT_CFG_BOOL("zcf", ext_zcf, false),
1408     MULTI_EXT_CFG_BOOL("zcmp", ext_zcmp, false),
1409     MULTI_EXT_CFG_BOOL("zcmt", ext_zcmt, false),
1410     MULTI_EXT_CFG_BOOL("zicond", ext_zicond, false),
1411 
1412     /* Vector cryptography extensions */
1413     MULTI_EXT_CFG_BOOL("zvbb", ext_zvbb, false),
1414     MULTI_EXT_CFG_BOOL("zvbc", ext_zvbc, false),
1415     MULTI_EXT_CFG_BOOL("zvkb", ext_zvkg, false),
1416     MULTI_EXT_CFG_BOOL("zvkg", ext_zvkg, false),
1417     MULTI_EXT_CFG_BOOL("zvkned", ext_zvkned, false),
1418     MULTI_EXT_CFG_BOOL("zvknha", ext_zvknha, false),
1419     MULTI_EXT_CFG_BOOL("zvknhb", ext_zvknhb, false),
1420     MULTI_EXT_CFG_BOOL("zvksed", ext_zvksed, false),
1421     MULTI_EXT_CFG_BOOL("zvksh", ext_zvksh, false),
1422     MULTI_EXT_CFG_BOOL("zvkt", ext_zvkt, false),
1423     MULTI_EXT_CFG_BOOL("zvkn", ext_zvkn, false),
1424     MULTI_EXT_CFG_BOOL("zvknc", ext_zvknc, false),
1425     MULTI_EXT_CFG_BOOL("zvkng", ext_zvkng, false),
1426     MULTI_EXT_CFG_BOOL("zvks", ext_zvks, false),
1427     MULTI_EXT_CFG_BOOL("zvksc", ext_zvksc, false),
1428     MULTI_EXT_CFG_BOOL("zvksg", ext_zvksg, false),
1429 
1430     DEFINE_PROP_END_OF_LIST(),
1431 };
1432 
1433 const RISCVCPUMultiExtConfig riscv_cpu_vendor_exts[] = {
1434     MULTI_EXT_CFG_BOOL("xtheadba", ext_xtheadba, false),
1435     MULTI_EXT_CFG_BOOL("xtheadbb", ext_xtheadbb, false),
1436     MULTI_EXT_CFG_BOOL("xtheadbs", ext_xtheadbs, false),
1437     MULTI_EXT_CFG_BOOL("xtheadcmo", ext_xtheadcmo, false),
1438     MULTI_EXT_CFG_BOOL("xtheadcondmov", ext_xtheadcondmov, false),
1439     MULTI_EXT_CFG_BOOL("xtheadfmemidx", ext_xtheadfmemidx, false),
1440     MULTI_EXT_CFG_BOOL("xtheadfmv", ext_xtheadfmv, false),
1441     MULTI_EXT_CFG_BOOL("xtheadmac", ext_xtheadmac, false),
1442     MULTI_EXT_CFG_BOOL("xtheadmemidx", ext_xtheadmemidx, false),
1443     MULTI_EXT_CFG_BOOL("xtheadmempair", ext_xtheadmempair, false),
1444     MULTI_EXT_CFG_BOOL("xtheadsync", ext_xtheadsync, false),
1445     MULTI_EXT_CFG_BOOL("xventanacondops", ext_XVentanaCondOps, false),
1446 
1447     DEFINE_PROP_END_OF_LIST(),
1448 };
1449 
1450 /* These are experimental so mark with 'x-' */
1451 const RISCVCPUMultiExtConfig riscv_cpu_experimental_exts[] = {
1452     MULTI_EXT_CFG_BOOL("x-smaia", ext_smaia, false),
1453     MULTI_EXT_CFG_BOOL("x-ssaia", ext_ssaia, false),
1454 
1455     MULTI_EXT_CFG_BOOL("x-zvfh", ext_zvfh, false),
1456     MULTI_EXT_CFG_BOOL("x-zvfhmin", ext_zvfhmin, false),
1457 
1458     MULTI_EXT_CFG_BOOL("x-zfbfmin", ext_zfbfmin, false),
1459     MULTI_EXT_CFG_BOOL("x-zvfbfmin", ext_zvfbfmin, false),
1460     MULTI_EXT_CFG_BOOL("x-zvfbfwma", ext_zvfbfwma, false),
1461 
1462     DEFINE_PROP_END_OF_LIST(),
1463 };
1464 
1465 const RISCVCPUMultiExtConfig riscv_cpu_named_features[] = {
1466     MULTI_EXT_CFG_BOOL("svade", svade, true),
1467     MULTI_EXT_CFG_BOOL("zic64b", zic64b, true),
1468 
1469     DEFINE_PROP_END_OF_LIST(),
1470 };
1471 
1472 /* Deprecated entries marked for future removal */
1473 const RISCVCPUMultiExtConfig riscv_cpu_deprecated_exts[] = {
1474     MULTI_EXT_CFG_BOOL("Zifencei", ext_zifencei, true),
1475     MULTI_EXT_CFG_BOOL("Zicsr", ext_zicsr, true),
1476     MULTI_EXT_CFG_BOOL("Zihintntl", ext_zihintntl, true),
1477     MULTI_EXT_CFG_BOOL("Zihintpause", ext_zihintpause, true),
1478     MULTI_EXT_CFG_BOOL("Zawrs", ext_zawrs, true),
1479     MULTI_EXT_CFG_BOOL("Zfa", ext_zfa, true),
1480     MULTI_EXT_CFG_BOOL("Zfh", ext_zfh, false),
1481     MULTI_EXT_CFG_BOOL("Zfhmin", ext_zfhmin, false),
1482     MULTI_EXT_CFG_BOOL("Zve32f", ext_zve32f, false),
1483     MULTI_EXT_CFG_BOOL("Zve64f", ext_zve64f, false),
1484     MULTI_EXT_CFG_BOOL("Zve64d", ext_zve64d, false),
1485 
1486     DEFINE_PROP_END_OF_LIST(),
1487 };
1488 
1489 static void prop_pmu_num_set(Object *obj, Visitor *v, const char *name,
1490                              void *opaque, Error **errp)
1491 {
1492     RISCVCPU *cpu = RISCV_CPU(obj);
1493     uint8_t pmu_num;
1494 
1495     visit_type_uint8(v, name, &pmu_num, errp);
1496 
1497     if (pmu_num > (RV_MAX_MHPMCOUNTERS - 3)) {
1498         error_setg(errp, "Number of counters exceeds maximum available");
1499         return;
1500     }
1501 
1502     if (pmu_num == 0) {
1503         cpu->cfg.pmu_mask = 0;
1504     } else {
1505         cpu->cfg.pmu_mask = MAKE_64BIT_MASK(3, pmu_num);
1506     }
1507 
1508     warn_report("\"pmu-num\" property is deprecated; use \"pmu-mask\"");
1509 }
1510 
1511 static void prop_pmu_num_get(Object *obj, Visitor *v, const char *name,
1512                              void *opaque, Error **errp)
1513 {
1514     RISCVCPU *cpu = RISCV_CPU(obj);
1515     uint8_t pmu_num = ctpop32(cpu->cfg.pmu_mask);
1516 
1517     visit_type_uint8(v, name, &pmu_num, errp);
1518 }
1519 
1520 const PropertyInfo prop_pmu_num = {
1521     .name = "pmu-num",
1522     .get = prop_pmu_num_get,
1523     .set = prop_pmu_num_set,
1524 };
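
/*
 * Example of the legacy mapping above (a sketch): "pmu-num=6" becomes
 *
 *     cpu->cfg.pmu_mask = MAKE_64BIT_MASK(3, 6);   i.e. 0x1f8
 *
 * which selects hpmcounter3..hpmcounter8, the same thing that
 * "pmu-mask=0x1f8" configures directly without the deprecation warning.
 */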
1525 
1526 Property riscv_cpu_options[] = {
1527     DEFINE_PROP_UINT32("pmu-mask", RISCVCPU, cfg.pmu_mask, MAKE_64BIT_MASK(3, 16)),
1528     {.name = "pmu-num", .info = &prop_pmu_num}, /* Deprecated */
1529 
1530     DEFINE_PROP_BOOL("mmu", RISCVCPU, cfg.mmu, true),
1531     DEFINE_PROP_BOOL("pmp", RISCVCPU, cfg.pmp, true),
1532 
1533     DEFINE_PROP_STRING("priv_spec", RISCVCPU, cfg.priv_spec),
1534     DEFINE_PROP_STRING("vext_spec", RISCVCPU, cfg.vext_spec),
1535 
1536     DEFINE_PROP_UINT16("vlen", RISCVCPU, cfg.vlen, 128),
1537     DEFINE_PROP_UINT16("elen", RISCVCPU, cfg.elen, 64),
1538 
1539     DEFINE_PROP_UINT16("cbom_blocksize", RISCVCPU, cfg.cbom_blocksize, 64),
1540     DEFINE_PROP_UINT16("cbop_blocksize", RISCVCPU, cfg.cbop_blocksize, 64),
1541     DEFINE_PROP_UINT16("cboz_blocksize", RISCVCPU, cfg.cboz_blocksize, 64),
1542 
1543     DEFINE_PROP_END_OF_LIST(),
1544 };
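
/*
 * Illustrative (the values are assumed, not mandated by this file): the
 * options above can be overridden per CPU on the command line, e.g.
 * "-cpu rv64,v=true,vlen=256,elen=64" or "-cpu rv64,pmu-mask=0x8".
 */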
1545 
1546 /*
 * RVA22U64 defines some 'named features', or 'synthetic extensions',
 * that are cache-related: Za64rs, Zic64b, Ziccif, Ziccrse, Ziccamoa
 * and Zicclsm. We do not implement caching in QEMU, so we consider
 * all of these named features to be always enabled.
 *
 * There is no riscv,isa update for them (nor for zic64b, despite it
 * having a cfg offset) at the moment.
1554  */
1555 static RISCVCPUProfile RVA22U64 = {
1556     .parent = NULL,
1557     .name = "rva22u64",
1558     .misa_ext = RVI | RVM | RVA | RVF | RVD | RVC | RVU,
1559     .priv_spec = RISCV_PROFILE_ATTR_UNUSED,
1560     .satp_mode = RISCV_PROFILE_ATTR_UNUSED,
1561     .ext_offsets = {
1562         CPU_CFG_OFFSET(ext_zicsr), CPU_CFG_OFFSET(ext_zihintpause),
1563         CPU_CFG_OFFSET(ext_zba), CPU_CFG_OFFSET(ext_zbb),
1564         CPU_CFG_OFFSET(ext_zbs), CPU_CFG_OFFSET(ext_zfhmin),
1565         CPU_CFG_OFFSET(ext_zkt), CPU_CFG_OFFSET(ext_zicntr),
1566         CPU_CFG_OFFSET(ext_zihpm), CPU_CFG_OFFSET(ext_zicbom),
1567         CPU_CFG_OFFSET(ext_zicbop), CPU_CFG_OFFSET(ext_zicboz),
1568 
1569         /* mandatory named features for this profile */
1570         CPU_CFG_OFFSET(zic64b),
1571 
1572         RISCV_PROFILE_EXT_LIST_END
1573     }
1574 };
1575 
1576 /*
1577  * As with RVA22U64, RVA22S64 also defines 'named features'.
1578  *
 * Cache-related features that we consider enabled, since we do not
 * implement caches: Ssccptr.
 *
 * Other named features that we already implement: Sstvecd, Sstvala,
 * Sscounterenw.
 *
 * Named features that we need to enable: svade.
 *
 * The remaining features/extensions come from RVA22U64.
1588  */
1589 static RISCVCPUProfile RVA22S64 = {
1590     .parent = &RVA22U64,
1591     .name = "rva22s64",
1592     .misa_ext = RVS,
1593     .priv_spec = PRIV_VERSION_1_12_0,
1594     .satp_mode = VM_1_10_SV39,
1595     .ext_offsets = {
1596         /* rva22s64 exts */
1597         CPU_CFG_OFFSET(ext_zifencei), CPU_CFG_OFFSET(ext_svpbmt),
1598         CPU_CFG_OFFSET(ext_svinval),
1599 
1600         /* rva22s64 named features */
1601         CPU_CFG_OFFSET(svade),
1602 
1603         RISCV_PROFILE_EXT_LIST_END
1604     }
1605 };
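
/*
 * Note that RVA22S64.parent pointing at RVA22U64 is what the comment above
 * ("the remaining features/extensions come from RVA22U64") relies on:
 * enabling the child profile is expected to also pull in everything listed
 * by the parent profile (the code walking 'parent' lives elsewhere).
 */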
1606 
1607 RISCVCPUProfile *riscv_profiles[] = {
1608     &RVA22U64,
1609     &RVA22S64,
1610     NULL,
1611 };
1612 
1613 static Property riscv_cpu_properties[] = {
1614     DEFINE_PROP_BOOL("debug", RISCVCPU, cfg.debug, true),
1615 
1616 #ifndef CONFIG_USER_ONLY
1617     DEFINE_PROP_UINT64("resetvec", RISCVCPU, env.resetvec, DEFAULT_RSTVEC),
1618 #endif
1619 
1620     DEFINE_PROP_BOOL("short-isa-string", RISCVCPU, cfg.short_isa_string, false),
1621 
1622     DEFINE_PROP_BOOL("rvv_ta_all_1s", RISCVCPU, cfg.rvv_ta_all_1s, false),
1623     DEFINE_PROP_BOOL("rvv_ma_all_1s", RISCVCPU, cfg.rvv_ma_all_1s, false),
1624 
1625     /*
     * write_misa() is marked as experimental for now, so mark
     * it with the 'x-' prefix and default it to 'false'.
1628      */
1629     DEFINE_PROP_BOOL("x-misa-w", RISCVCPU, cfg.misa_w, false),
1630     DEFINE_PROP_END_OF_LIST(),
1631 };
1632 
1633 #if defined(TARGET_RISCV64)
1634 static void rva22u64_profile_cpu_init(Object *obj)
1635 {
1636     rv64i_bare_cpu_init(obj);
1637 
1638     RVA22U64.enabled = true;
1639 }
1640 
1641 static void rva22s64_profile_cpu_init(Object *obj)
1642 {
1643     rv64i_bare_cpu_init(obj);
1644 
1645     RVA22S64.enabled = true;
1646 }
1647 #endif
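
/*
 * The two profile init functions above start from the bare rv64i CPU
 * (rv64i_bare_cpu_init) and then only flag the corresponding profile as
 * enabled; turning that flag into the actual set of extensions is handled
 * elsewhere.
 */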
1648 
1649 static const gchar *riscv_gdb_arch_name(CPUState *cs)
1650 {
1651     RISCVCPU *cpu = RISCV_CPU(cs);
1652     CPURISCVState *env = &cpu->env;
1653 
1654     switch (riscv_cpu_mxl(env)) {
1655     case MXL_RV32:
1656         return "riscv:rv32";
1657     case MXL_RV64:
1658     case MXL_RV128:
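        /* gdb has, to our knowledge, no separate rv128 architecture name */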
1659         return "riscv:rv64";
1660     default:
1661         g_assert_not_reached();
1662     }
1663 }
1664 
1665 static const char *riscv_gdb_get_dynamic_xml(CPUState *cs, const char *xmlname)
1666 {
1667     RISCVCPU *cpu = RISCV_CPU(cs);
1668 
1669     if (strcmp(xmlname, "riscv-csr.xml") == 0) {
1670         return cpu->dyn_csr_xml;
1671     } else if (strcmp(xmlname, "riscv-vector.xml") == 0) {
1672         return cpu->dyn_vreg_xml;
1673     }
1674 
1675     return NULL;
1676 }
1677 
1678 #ifndef CONFIG_USER_ONLY
1679 static int64_t riscv_get_arch_id(CPUState *cs)
1680 {
1681     RISCVCPU *cpu = RISCV_CPU(cs);
1682 
1683     return cpu->env.mhartid;
1684 }
1685 
1686 #include "hw/core/sysemu-cpu-ops.h"
1687 
1688 static const struct SysemuCPUOps riscv_sysemu_ops = {
1689     .get_phys_page_debug = riscv_cpu_get_phys_page_debug,
1690     .write_elf64_note = riscv_cpu_write_elf64_note,
1691     .write_elf32_note = riscv_cpu_write_elf32_note,
1692     .legacy_vmsd = &vmstate_riscv_cpu,
1693 };
1694 #endif
1695 
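/*
 * The mvendorid/mimpid/marchid setters below all follow the same rule: the
 * value can only be changed on dynamic CPUs; on any other CPU type only a
 * write of the already-configured value is accepted, so the IDs effectively
 * stay fixed.
 */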
1696 static void cpu_set_mvendorid(Object *obj, Visitor *v, const char *name,
1697                               void *opaque, Error **errp)
1698 {
1699     bool dynamic_cpu = riscv_cpu_is_dynamic(obj);
1700     RISCVCPU *cpu = RISCV_CPU(obj);
1701     uint32_t prev_val = cpu->cfg.mvendorid;
1702     uint32_t value;
1703 
1704     if (!visit_type_uint32(v, name, &value, errp)) {
1705         return;
1706     }
1707 
1708     if (!dynamic_cpu && prev_val != value) {
1709         error_setg(errp, "Unable to change %s mvendorid (0x%x)",
1710                    object_get_typename(obj), prev_val);
1711         return;
1712     }
1713 
1714     cpu->cfg.mvendorid = value;
1715 }
1716 
1717 static void cpu_get_mvendorid(Object *obj, Visitor *v, const char *name,
1718                               void *opaque, Error **errp)
1719 {
1720     uint32_t value = RISCV_CPU(obj)->cfg.mvendorid;
1721 
1722     visit_type_uint32(v, name, &value, errp);
1723 }
1724 
1725 static void cpu_set_mimpid(Object *obj, Visitor *v, const char *name,
1726                            void *opaque, Error **errp)
1727 {
1728     bool dynamic_cpu = riscv_cpu_is_dynamic(obj);
1729     RISCVCPU *cpu = RISCV_CPU(obj);
1730     uint64_t prev_val = cpu->cfg.mimpid;
1731     uint64_t value;
1732 
1733     if (!visit_type_uint64(v, name, &value, errp)) {
1734         return;
1735     }
1736 
1737     if (!dynamic_cpu && prev_val != value) {
        error_setg(errp, "Unable to change %s mimpid (0x%" PRIx64 ")",
1739                    object_get_typename(obj), prev_val);
1740         return;
1741     }
1742 
1743     cpu->cfg.mimpid = value;
1744 }
1745 
1746 static void cpu_get_mimpid(Object *obj, Visitor *v, const char *name,
1747                            void *opaque, Error **errp)
1748 {
1749     uint64_t value = RISCV_CPU(obj)->cfg.mimpid;
1750 
1751     visit_type_uint64(v, name, &value, errp);
1752 }
1753 
1754 static void cpu_set_marchid(Object *obj, Visitor *v, const char *name,
1755                             void *opaque, Error **errp)
1756 {
1757     bool dynamic_cpu = riscv_cpu_is_dynamic(obj);
1758     RISCVCPU *cpu = RISCV_CPU(obj);
1759     uint64_t prev_val = cpu->cfg.marchid;
1760     uint64_t value, invalid_val;
1761     uint32_t mxlen = 0;
1762 
1763     if (!visit_type_uint64(v, name, &value, errp)) {
1764         return;
1765     }
1766 
1767     if (!dynamic_cpu && prev_val != value) {
        error_setg(errp, "Unable to change %s marchid (0x%" PRIx64 ")",
1769                    object_get_typename(obj), prev_val);
1770         return;
1771     }
1772 
1773     switch (riscv_cpu_mxl(&cpu->env)) {
1774     case MXL_RV32:
1775         mxlen = 32;
1776         break;
1777     case MXL_RV64:
1778     case MXL_RV128:
1779         mxlen = 64;
1780         break;
1781     default:
1782         g_assert_not_reached();
1783     }
1784 
    invalid_val = 1ULL << (mxlen - 1);

    if (value == invalid_val) {
        error_setg(errp, "Unable to set marchid to a value with only the "
                         "MSB of the %u-bit field set", mxlen);
1790         return;
1791     }
1792 
1793     cpu->cfg.marchid = value;
1794 }
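
/*
 * Concrete example of the check above: on an RV64 CPU the single rejected
 * marchid value is 1ULL << 63 (0x8000000000000000); on RV32 it is
 * 0x80000000. Any other value passes this particular check.
 */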
1795 
1796 static void cpu_get_marchid(Object *obj, Visitor *v, const char *name,
1797                            void *opaque, Error **errp)
1798 {
1799     uint64_t value = RISCV_CPU(obj)->cfg.marchid;
1800 
1801     visit_type_uint64(v, name, &value, errp);
1802 }
1803 
1804 static void riscv_cpu_class_init(ObjectClass *c, void *data)
1805 {
1806     RISCVCPUClass *mcc = RISCV_CPU_CLASS(c);
1807     CPUClass *cc = CPU_CLASS(c);
1808     DeviceClass *dc = DEVICE_CLASS(c);
1809     ResettableClass *rc = RESETTABLE_CLASS(c);
1810 
1811     device_class_set_parent_realize(dc, riscv_cpu_realize,
1812                                     &mcc->parent_realize);
1813 
1814     resettable_class_set_parent_phases(rc, NULL, riscv_cpu_reset_hold, NULL,
1815                                        &mcc->parent_phases);
1816 
1817     cc->class_by_name = riscv_cpu_class_by_name;
1818     cc->has_work = riscv_cpu_has_work;
1819     cc->mmu_index = riscv_cpu_mmu_index;
1820     cc->dump_state = riscv_cpu_dump_state;
1821     cc->set_pc = riscv_cpu_set_pc;
1822     cc->get_pc = riscv_cpu_get_pc;
1823     cc->gdb_read_register = riscv_cpu_gdb_read_register;
1824     cc->gdb_write_register = riscv_cpu_gdb_write_register;
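    /* 32 integer registers (x0..x31) plus pc */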
1825     cc->gdb_num_core_regs = 33;
1826     cc->gdb_stop_before_watchpoint = true;
1827     cc->disas_set_info = riscv_cpu_disas_set_info;
1828 #ifndef CONFIG_USER_ONLY
1829     cc->sysemu_ops = &riscv_sysemu_ops;
1830     cc->get_arch_id = riscv_get_arch_id;
1831 #endif
1832     cc->gdb_arch_name = riscv_gdb_arch_name;
1833     cc->gdb_get_dynamic_xml = riscv_gdb_get_dynamic_xml;
1834 
1835     object_class_property_add(c, "mvendorid", "uint32", cpu_get_mvendorid,
1836                               cpu_set_mvendorid, NULL, NULL);
1837 
1838     object_class_property_add(c, "mimpid", "uint64", cpu_get_mimpid,
1839                               cpu_set_mimpid, NULL, NULL);
1840 
1841     object_class_property_add(c, "marchid", "uint64", cpu_get_marchid,
1842                               cpu_set_marchid, NULL, NULL);
1843 
1844     device_class_set_props(dc, riscv_cpu_properties);
1845 }
1846 
1847 static void riscv_isa_string_ext(RISCVCPU *cpu, char **isa_str,
1848                                  int max_str_len)
1849 {
1850     const RISCVIsaExtData *edata;
1851     char *old = *isa_str;
1852     char *new = *isa_str;
1853 
1854     for (edata = isa_edata_arr; edata && edata->name; edata++) {
1855         if (isa_ext_is_enabled(cpu, edata->ext_enable_offset)) {
1856             new = g_strconcat(old, "_", edata->name, NULL);
1857             g_free(old);
1858             old = new;
1859         }
1860     }
1861 
1862     *isa_str = new;
1863 }
1864 
1865 char *riscv_isa_string(RISCVCPU *cpu)
1866 {
1867     int i;
1868     const size_t maxlen = sizeof("rv128") + sizeof(riscv_single_letter_exts);
1869     char *isa_str = g_new(char, maxlen);
1870     char *p = isa_str + snprintf(isa_str, maxlen, "rv%d", TARGET_LONG_BITS);
1871     for (i = 0; i < sizeof(riscv_single_letter_exts) - 1; i++) {
1872         if (cpu->env.misa_ext & RV(riscv_single_letter_exts[i])) {
1873             *p++ = qemu_tolower(riscv_single_letter_exts[i]);
1874         }
1875     }
1876     *p = '\0';
1877     if (!cpu->cfg.short_isa_string) {
1878         riscv_isa_string_ext(cpu, &isa_str, maxlen);
1879     }
1880     return isa_str;
1881 }
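
/*
 * Illustrative output (the exact string depends on the configured CPU): an
 * RV64 CPU with IMAFDC in misa and, say, Zicsr and Zifencei enabled yields
 * something like "rv64imafdc_zicsr_zifencei"; with short-isa-string=true
 * only the "rv64imafdc" part is returned.
 */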
1882 
1883 #define DEFINE_CPU(type_name, initfn)      \
1884     {                                      \
1885         .name = type_name,                 \
1886         .parent = TYPE_RISCV_CPU,          \
1887         .instance_init = initfn            \
1888     }
1889 
1890 #define DEFINE_DYNAMIC_CPU(type_name, initfn) \
1891     {                                         \
1892         .name = type_name,                    \
1893         .parent = TYPE_RISCV_DYNAMIC_CPU,     \
1894         .instance_init = initfn               \
1895     }
1896 
1897 #define DEFINE_VENDOR_CPU(type_name, initfn) \
1898     {                                        \
1899         .name = type_name,                   \
1900         .parent = TYPE_RISCV_VENDOR_CPU,     \
1901         .instance_init = initfn              \
1902     }
1903 
1904 #define DEFINE_BARE_CPU(type_name, initfn) \
1905     {                                      \
1906         .name = type_name,                 \
1907         .parent = TYPE_RISCV_BARE_CPU,     \
1908         .instance_init = initfn            \
1909     }
1910 
1911 #define DEFINE_PROFILE_CPU(type_name, initfn) \
1912     {                                         \
1913         .name = type_name,                    \
1914         .parent = TYPE_RISCV_BARE_CPU,        \
1915         .instance_init = initfn               \
1916     }
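
/*
 * For reference, DEFINE_DYNAMIC_CPU(TYPE_RISCV_CPU_MAX, riscv_max_cpu_init)
 * expands to:
 *
 *     {
 *         .name = TYPE_RISCV_CPU_MAX,
 *         .parent = TYPE_RISCV_DYNAMIC_CPU,
 *         .instance_init = riscv_max_cpu_init
 *     }
 */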
1917 
1918 static const TypeInfo riscv_cpu_type_infos[] = {
1919     {
1920         .name = TYPE_RISCV_CPU,
1921         .parent = TYPE_CPU,
1922         .instance_size = sizeof(RISCVCPU),
1923         .instance_align = __alignof(RISCVCPU),
1924         .instance_init = riscv_cpu_init,
1925         .instance_post_init = riscv_cpu_post_init,
1926         .abstract = true,
1927         .class_size = sizeof(RISCVCPUClass),
1928         .class_init = riscv_cpu_class_init,
1929     },
1930     {
1931         .name = TYPE_RISCV_DYNAMIC_CPU,
1932         .parent = TYPE_RISCV_CPU,
1933         .abstract = true,
1934     },
1935     {
1936         .name = TYPE_RISCV_VENDOR_CPU,
1937         .parent = TYPE_RISCV_CPU,
1938         .abstract = true,
1939     },
1940     {
1941         .name = TYPE_RISCV_BARE_CPU,
1942         .parent = TYPE_RISCV_CPU,
1943         .abstract = true,
1944     },
1945     DEFINE_DYNAMIC_CPU(TYPE_RISCV_CPU_ANY,      riscv_any_cpu_init),
1946     DEFINE_DYNAMIC_CPU(TYPE_RISCV_CPU_MAX,      riscv_max_cpu_init),
1947 #if defined(TARGET_RISCV32)
1948     DEFINE_DYNAMIC_CPU(TYPE_RISCV_CPU_BASE32,   rv32_base_cpu_init),
1949     DEFINE_VENDOR_CPU(TYPE_RISCV_CPU_IBEX,        rv32_ibex_cpu_init),
1950     DEFINE_VENDOR_CPU(TYPE_RISCV_CPU_SIFIVE_E31,  rv32_sifive_e_cpu_init),
1951     DEFINE_VENDOR_CPU(TYPE_RISCV_CPU_SIFIVE_E34,  rv32_imafcu_nommu_cpu_init),
1952     DEFINE_VENDOR_CPU(TYPE_RISCV_CPU_SIFIVE_U34,  rv32_sifive_u_cpu_init),
1953 #elif defined(TARGET_RISCV64)
1954     DEFINE_DYNAMIC_CPU(TYPE_RISCV_CPU_BASE64,   rv64_base_cpu_init),
1955     DEFINE_VENDOR_CPU(TYPE_RISCV_CPU_SIFIVE_E51,  rv64_sifive_e_cpu_init),
1956     DEFINE_VENDOR_CPU(TYPE_RISCV_CPU_SIFIVE_U54,  rv64_sifive_u_cpu_init),
1957     DEFINE_VENDOR_CPU(TYPE_RISCV_CPU_SHAKTI_C,    rv64_sifive_u_cpu_init),
1958     DEFINE_VENDOR_CPU(TYPE_RISCV_CPU_THEAD_C906,  rv64_thead_c906_cpu_init),
1959     DEFINE_VENDOR_CPU(TYPE_RISCV_CPU_VEYRON_V1,   rv64_veyron_v1_cpu_init),
1960     DEFINE_DYNAMIC_CPU(TYPE_RISCV_CPU_BASE128,  rv128_base_cpu_init),
1961     DEFINE_BARE_CPU(TYPE_RISCV_CPU_RV64I, rv64i_bare_cpu_init),
1962     DEFINE_PROFILE_CPU(TYPE_RISCV_CPU_RVA22U64, rva22u64_profile_cpu_init),
1963     DEFINE_PROFILE_CPU(TYPE_RISCV_CPU_RVA22S64, rva22s64_profile_cpu_init),
1964 #endif
1965 };
1966 
1967 DEFINE_TYPES(riscv_cpu_type_infos)
1968