xref: /openbmc/qemu/target/riscv/cpu.c (revision d167a224)
1 /*
2  * QEMU RISC-V CPU
3  *
4  * Copyright (c) 2016-2017 Sagar Karandikar, sagark@eecs.berkeley.edu
5  * Copyright (c) 2017-2018 SiFive, Inc.
6  *
7  * This program is free software; you can redistribute it and/or modify it
8  * under the terms and conditions of the GNU General Public License,
9  * version 2 or later, as published by the Free Software Foundation.
10  *
11  * This program is distributed in the hope it will be useful, but WITHOUT
12  * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
13  * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
14  * more details.
15  *
16  * You should have received a copy of the GNU General Public License along with
17  * this program.  If not, see <http://www.gnu.org/licenses/>.
18  */
19 
20 #include "qemu/osdep.h"
21 #include "qemu/qemu-print.h"
22 #include "qemu/ctype.h"
23 #include "qemu/log.h"
24 #include "cpu.h"
25 #include "cpu_vendorid.h"
26 #include "internals.h"
27 #include "exec/exec-all.h"
28 #include "qapi/error.h"
29 #include "qapi/visitor.h"
30 #include "qemu/error-report.h"
31 #include "hw/qdev-properties.h"
32 #include "migration/vmstate.h"
33 #include "fpu/softfloat-helpers.h"
34 #include "sysemu/kvm.h"
35 #include "sysemu/tcg.h"
36 #include "kvm/kvm_riscv.h"
37 #include "tcg/tcg-cpu.h"
38 #include "tcg/tcg.h"
39 
40 /* RISC-V CPU definitions */
/* Single-letter extensions in canonical ISA-string order (I first, then
 * E, M, A, F, D, Q, C, B, P, V, H) — used when building riscv,isa strings. */
static const char riscv_single_letter_exts[] = "IEMAFDQCBPVH";
/* All MISA extension bits QEMU knows about; zero-terminated. */
const uint32_t misa_bits[] = {RVI, RVE, RVM, RVA, RVF, RVD, RVV,
                              RVC, RVS, RVU, RVH, RVJ, RVG, RVB, 0};

/*
 * From vector_helper.c
 * Note that vector data is stored in host-endian 64-bit chunks,
 * so addressing bytes needs a host-endian fixup.
 */
#if HOST_BIG_ENDIAN
#define BYTE(x)   ((x) ^ 7)
#else
#define BYTE(x)   (x)
#endif
55 
56 bool riscv_cpu_is_32bit(RISCVCPU *cpu)
57 {
58     return riscv_cpu_mxl(&cpu->env) == MXL_RV32;
59 }
60 
/* Hash that stores general user set numeric options */
static GHashTable *general_user_opts;

/*
 * Record that the user explicitly set numeric option 'optname' to 'value'.
 * The key is the string pointer itself (no copy is made), so callers are
 * presumed to pass static-lifetime property names — TODO confirm at call
 * sites outside this view.
 */
static void cpu_option_add_user_setting(const char *optname, uint32_t value)
{
    g_hash_table_insert(general_user_opts, (gpointer)optname,
                        GUINT_TO_POINTER(value));
}
69 
/* One table row: extension name, minimum priv spec version, and the byte
 * offset of its enable flag inside RISCVCPUConfig. */
#define ISA_EXT_DATA_ENTRY(_name, _min_ver, _prop) \
    {#_name, _min_ver, CPU_CFG_OFFSET(_prop)}

/*
 * Here are the ordering rules of extension naming defined by RISC-V
 * specification :
 * 1. All extensions should be separated from other multi-letter extensions
 *    by an underscore.
 * 2. The first letter following the 'Z' conventionally indicates the most
 *    closely related alphabetical extension category, IMAFDQLCBKJTPVH.
 *    If multiple 'Z' extensions are named, they should be ordered first
 *    by category, then alphabetically within a category.
 * 3. Standard supervisor-level extensions (starts with 'S') should be
 *    listed after standard unprivileged extensions.  If multiple
 *    supervisor-level extensions are listed, they should be ordered
 *    alphabetically.
 * 4. Non-standard extensions (starts with 'X') must be listed after all
 *    standard extensions. They must be separated from other multi-letter
 *    extensions by an underscore.
 *
 * Single letter extensions are checked in riscv_cpu_validate_misa_priv()
 * instead.
 */
const RISCVIsaExtData isa_edata_arr[] = {
    ISA_EXT_DATA_ENTRY(zicbom, PRIV_VERSION_1_12_0, ext_zicbom),
    ISA_EXT_DATA_ENTRY(zicbop, PRIV_VERSION_1_12_0, ext_zicbop),
    ISA_EXT_DATA_ENTRY(zicboz, PRIV_VERSION_1_12_0, ext_zicboz),
    ISA_EXT_DATA_ENTRY(zicond, PRIV_VERSION_1_12_0, ext_zicond),
    ISA_EXT_DATA_ENTRY(zicntr, PRIV_VERSION_1_12_0, ext_zicntr),
    ISA_EXT_DATA_ENTRY(zicsr, PRIV_VERSION_1_10_0, ext_zicsr),
    ISA_EXT_DATA_ENTRY(zifencei, PRIV_VERSION_1_10_0, ext_zifencei),
    ISA_EXT_DATA_ENTRY(zihintntl, PRIV_VERSION_1_10_0, ext_zihintntl),
    ISA_EXT_DATA_ENTRY(zihintpause, PRIV_VERSION_1_10_0, ext_zihintpause),
    ISA_EXT_DATA_ENTRY(zihpm, PRIV_VERSION_1_12_0, ext_zihpm),
    ISA_EXT_DATA_ENTRY(zmmul, PRIV_VERSION_1_12_0, ext_zmmul),
    ISA_EXT_DATA_ENTRY(zacas, PRIV_VERSION_1_12_0, ext_zacas),
    ISA_EXT_DATA_ENTRY(zawrs, PRIV_VERSION_1_12_0, ext_zawrs),
    ISA_EXT_DATA_ENTRY(zfa, PRIV_VERSION_1_12_0, ext_zfa),
    ISA_EXT_DATA_ENTRY(zfbfmin, PRIV_VERSION_1_12_0, ext_zfbfmin),
    ISA_EXT_DATA_ENTRY(zfh, PRIV_VERSION_1_11_0, ext_zfh),
    ISA_EXT_DATA_ENTRY(zfhmin, PRIV_VERSION_1_11_0, ext_zfhmin),
    ISA_EXT_DATA_ENTRY(zfinx, PRIV_VERSION_1_12_0, ext_zfinx),
    ISA_EXT_DATA_ENTRY(zdinx, PRIV_VERSION_1_12_0, ext_zdinx),
    ISA_EXT_DATA_ENTRY(zca, PRIV_VERSION_1_12_0, ext_zca),
    ISA_EXT_DATA_ENTRY(zcb, PRIV_VERSION_1_12_0, ext_zcb),
    ISA_EXT_DATA_ENTRY(zcf, PRIV_VERSION_1_12_0, ext_zcf),
    ISA_EXT_DATA_ENTRY(zcd, PRIV_VERSION_1_12_0, ext_zcd),
    ISA_EXT_DATA_ENTRY(zce, PRIV_VERSION_1_12_0, ext_zce),
    ISA_EXT_DATA_ENTRY(zcmp, PRIV_VERSION_1_12_0, ext_zcmp),
    ISA_EXT_DATA_ENTRY(zcmt, PRIV_VERSION_1_12_0, ext_zcmt),
    ISA_EXT_DATA_ENTRY(zba, PRIV_VERSION_1_12_0, ext_zba),
    ISA_EXT_DATA_ENTRY(zbb, PRIV_VERSION_1_12_0, ext_zbb),
    ISA_EXT_DATA_ENTRY(zbc, PRIV_VERSION_1_12_0, ext_zbc),
    ISA_EXT_DATA_ENTRY(zbkb, PRIV_VERSION_1_12_0, ext_zbkb),
    ISA_EXT_DATA_ENTRY(zbkc, PRIV_VERSION_1_12_0, ext_zbkc),
    ISA_EXT_DATA_ENTRY(zbkx, PRIV_VERSION_1_12_0, ext_zbkx),
    ISA_EXT_DATA_ENTRY(zbs, PRIV_VERSION_1_12_0, ext_zbs),
    ISA_EXT_DATA_ENTRY(zk, PRIV_VERSION_1_12_0, ext_zk),
    ISA_EXT_DATA_ENTRY(zkn, PRIV_VERSION_1_12_0, ext_zkn),
    ISA_EXT_DATA_ENTRY(zknd, PRIV_VERSION_1_12_0, ext_zknd),
    ISA_EXT_DATA_ENTRY(zkne, PRIV_VERSION_1_12_0, ext_zkne),
    ISA_EXT_DATA_ENTRY(zknh, PRIV_VERSION_1_12_0, ext_zknh),
    ISA_EXT_DATA_ENTRY(zkr, PRIV_VERSION_1_12_0, ext_zkr),
    ISA_EXT_DATA_ENTRY(zks, PRIV_VERSION_1_12_0, ext_zks),
    ISA_EXT_DATA_ENTRY(zksed, PRIV_VERSION_1_12_0, ext_zksed),
    ISA_EXT_DATA_ENTRY(zksh, PRIV_VERSION_1_12_0, ext_zksh),
    ISA_EXT_DATA_ENTRY(zkt, PRIV_VERSION_1_12_0, ext_zkt),
    ISA_EXT_DATA_ENTRY(zvbb, PRIV_VERSION_1_12_0, ext_zvbb),
    ISA_EXT_DATA_ENTRY(zvbc, PRIV_VERSION_1_12_0, ext_zvbc),
    ISA_EXT_DATA_ENTRY(zve32f, PRIV_VERSION_1_10_0, ext_zve32f),
    ISA_EXT_DATA_ENTRY(zve64f, PRIV_VERSION_1_10_0, ext_zve64f),
    ISA_EXT_DATA_ENTRY(zve64d, PRIV_VERSION_1_10_0, ext_zve64d),
    ISA_EXT_DATA_ENTRY(zvfbfmin, PRIV_VERSION_1_12_0, ext_zvfbfmin),
    ISA_EXT_DATA_ENTRY(zvfbfwma, PRIV_VERSION_1_12_0, ext_zvfbfwma),
    ISA_EXT_DATA_ENTRY(zvfh, PRIV_VERSION_1_12_0, ext_zvfh),
    ISA_EXT_DATA_ENTRY(zvfhmin, PRIV_VERSION_1_12_0, ext_zvfhmin),
    ISA_EXT_DATA_ENTRY(zvkb, PRIV_VERSION_1_12_0, ext_zvkb),
    ISA_EXT_DATA_ENTRY(zvkg, PRIV_VERSION_1_12_0, ext_zvkg),
    ISA_EXT_DATA_ENTRY(zvkn, PRIV_VERSION_1_12_0, ext_zvkn),
    ISA_EXT_DATA_ENTRY(zvknc, PRIV_VERSION_1_12_0, ext_zvknc),
    ISA_EXT_DATA_ENTRY(zvkned, PRIV_VERSION_1_12_0, ext_zvkned),
    ISA_EXT_DATA_ENTRY(zvkng, PRIV_VERSION_1_12_0, ext_zvkng),
    ISA_EXT_DATA_ENTRY(zvknha, PRIV_VERSION_1_12_0, ext_zvknha),
    ISA_EXT_DATA_ENTRY(zvknhb, PRIV_VERSION_1_12_0, ext_zvknhb),
    ISA_EXT_DATA_ENTRY(zvks, PRIV_VERSION_1_12_0, ext_zvks),
    ISA_EXT_DATA_ENTRY(zvksc, PRIV_VERSION_1_12_0, ext_zvksc),
    ISA_EXT_DATA_ENTRY(zvksed, PRIV_VERSION_1_12_0, ext_zvksed),
    ISA_EXT_DATA_ENTRY(zvksg, PRIV_VERSION_1_12_0, ext_zvksg),
    ISA_EXT_DATA_ENTRY(zvksh, PRIV_VERSION_1_12_0, ext_zvksh),
    ISA_EXT_DATA_ENTRY(zvkt, PRIV_VERSION_1_12_0, ext_zvkt),
    ISA_EXT_DATA_ENTRY(zhinx, PRIV_VERSION_1_12_0, ext_zhinx),
    ISA_EXT_DATA_ENTRY(zhinxmin, PRIV_VERSION_1_12_0, ext_zhinxmin),
    ISA_EXT_DATA_ENTRY(smaia, PRIV_VERSION_1_12_0, ext_smaia),
    ISA_EXT_DATA_ENTRY(smepmp, PRIV_VERSION_1_12_0, ext_smepmp),
    ISA_EXT_DATA_ENTRY(smstateen, PRIV_VERSION_1_12_0, ext_smstateen),
    ISA_EXT_DATA_ENTRY(ssaia, PRIV_VERSION_1_12_0, ext_ssaia),
    ISA_EXT_DATA_ENTRY(sscofpmf, PRIV_VERSION_1_12_0, ext_sscofpmf),
    ISA_EXT_DATA_ENTRY(sstc, PRIV_VERSION_1_12_0, ext_sstc),
    ISA_EXT_DATA_ENTRY(svadu, PRIV_VERSION_1_12_0, ext_svadu),
    ISA_EXT_DATA_ENTRY(svinval, PRIV_VERSION_1_12_0, ext_svinval),
    ISA_EXT_DATA_ENTRY(svnapot, PRIV_VERSION_1_12_0, ext_svnapot),
    ISA_EXT_DATA_ENTRY(svpbmt, PRIV_VERSION_1_12_0, ext_svpbmt),
    ISA_EXT_DATA_ENTRY(xtheadba, PRIV_VERSION_1_11_0, ext_xtheadba),
    ISA_EXT_DATA_ENTRY(xtheadbb, PRIV_VERSION_1_11_0, ext_xtheadbb),
    ISA_EXT_DATA_ENTRY(xtheadbs, PRIV_VERSION_1_11_0, ext_xtheadbs),
    ISA_EXT_DATA_ENTRY(xtheadcmo, PRIV_VERSION_1_11_0, ext_xtheadcmo),
    ISA_EXT_DATA_ENTRY(xtheadcondmov, PRIV_VERSION_1_11_0, ext_xtheadcondmov),
    ISA_EXT_DATA_ENTRY(xtheadfmemidx, PRIV_VERSION_1_11_0, ext_xtheadfmemidx),
    ISA_EXT_DATA_ENTRY(xtheadfmv, PRIV_VERSION_1_11_0, ext_xtheadfmv),
    ISA_EXT_DATA_ENTRY(xtheadmac, PRIV_VERSION_1_11_0, ext_xtheadmac),
    ISA_EXT_DATA_ENTRY(xtheadmemidx, PRIV_VERSION_1_11_0, ext_xtheadmemidx),
    ISA_EXT_DATA_ENTRY(xtheadmempair, PRIV_VERSION_1_11_0, ext_xtheadmempair),
    ISA_EXT_DATA_ENTRY(xtheadsync, PRIV_VERSION_1_11_0, ext_xtheadsync),
    ISA_EXT_DATA_ENTRY(xventanacondops, PRIV_VERSION_1_12_0, ext_XVentanaCondOps),

    DEFINE_PROP_END_OF_LIST(),
};
187 
188 bool isa_ext_is_enabled(RISCVCPU *cpu, uint32_t ext_offset)
189 {
190     bool *ext_enabled = (void *)&cpu->cfg + ext_offset;
191 
192     return *ext_enabled;
193 }
194 
195 void isa_ext_update_enabled(RISCVCPU *cpu, uint32_t ext_offset, bool en)
196 {
197     bool *ext_enabled = (void *)&cpu->cfg + ext_offset;
198 
199     *ext_enabled = en;
200 }
201 
202 bool riscv_cpu_is_vendor(Object *cpu_obj)
203 {
204     return object_dynamic_cast(cpu_obj, TYPE_RISCV_VENDOR_CPU) != NULL;
205 }
206 
/* GPR names: "xN/ABI-name" pairs, indexed by register number. */
const char * const riscv_int_regnames[] = {
    "x0/zero", "x1/ra",  "x2/sp",  "x3/gp",  "x4/tp",  "x5/t0",   "x6/t1",
    "x7/t2",   "x8/s0",  "x9/s1",  "x10/a0", "x11/a1", "x12/a2",  "x13/a3",
    "x14/a4",  "x15/a5", "x16/a6", "x17/a7", "x18/s2", "x19/s3",  "x20/s4",
    "x21/s5",  "x22/s6", "x23/s7", "x24/s8", "x25/s9", "x26/s10", "x27/s11",
    "x28/t3",  "x29/t4", "x30/t5", "x31/t6"
};

/* Names for the upper halves of the GPRs (RV128 'h' registers). */
const char * const riscv_int_regnamesh[] = {
    "x0h/zeroh", "x1h/rah",  "x2h/sph",   "x3h/gph",   "x4h/tph",  "x5h/t0h",
    "x6h/t1h",   "x7h/t2h",  "x8h/s0h",   "x9h/s1h",   "x10h/a0h", "x11h/a1h",
    "x12h/a2h",  "x13h/a3h", "x14h/a4h",  "x15h/a5h",  "x16h/a6h", "x17h/a7h",
    "x18h/s2h",  "x19h/s3h", "x20h/s4h",  "x21h/s5h",  "x22h/s6h", "x23h/s7h",
    "x24h/s8h",  "x25h/s9h", "x26h/s10h", "x27h/s11h", "x28h/t3h", "x29h/t4h",
    "x30h/t5h",  "x31h/t6h"
};

/* FPR names: "fN/ABI-name" pairs, indexed by register number. */
const char * const riscv_fpr_regnames[] = {
    "f0/ft0",   "f1/ft1",  "f2/ft2",   "f3/ft3",   "f4/ft4",  "f5/ft5",
    "f6/ft6",   "f7/ft7",  "f8/fs0",   "f9/fs1",   "f10/fa0", "f11/fa1",
    "f12/fa2",  "f13/fa3", "f14/fa4",  "f15/fa5",  "f16/fa6", "f17/fa7",
    "f18/fs2",  "f19/fs3", "f20/fs4",  "f21/fs5",  "f22/fs6", "f23/fs7",
    "f24/fs8",  "f25/fs9", "f26/fs10", "f27/fs11", "f28/ft8", "f29/ft9",
    "f30/ft10", "f31/ft11"
};

/* Vector register names, indexed by register number. */
const char * const riscv_rvv_regnames[] = {
  "v0",  "v1",  "v2",  "v3",  "v4",  "v5",  "v6",
  "v7",  "v8",  "v9",  "v10", "v11", "v12", "v13",
  "v14", "v15", "v16", "v17", "v18", "v19", "v20",
  "v21", "v22", "v23", "v24", "v25", "v26", "v27",
  "v28", "v29", "v30", "v31"
};
240 
/* Synchronous exception names, indexed by mcause exception code. */
static const char * const riscv_excp_names[] = {
    "misaligned_fetch",
    "fault_fetch",
    "illegal_instruction",
    "breakpoint",
    "misaligned_load",
    "fault_load",
    "misaligned_store",
    "fault_store",
    "user_ecall",
    "supervisor_ecall",
    "hypervisor_ecall",
    "machine_ecall",
    "exec_page_fault",
    "load_page_fault",
    "reserved",
    "store_page_fault",
    "reserved",
    "reserved",
    "reserved",
    "reserved",
    "guest_exec_page_fault",
    "guest_load_page_fault",
    "reserved",
    "guest_store_page_fault",
};

/* Interrupt names, indexed by mcause interrupt code. */
static const char * const riscv_intr_names[] = {
    "u_software",
    "s_software",
    "vs_software",
    "m_software",
    "u_timer",
    "s_timer",
    "vs_timer",
    "m_timer",
    "u_external",
    "s_external",
    "vs_external",
    "m_external",
    "reserved",
    "reserved",
    "reserved",
    "reserved"
};
286 
287 const char *riscv_cpu_get_trap_name(target_ulong cause, bool async)
288 {
289     if (async) {
290         return (cause < ARRAY_SIZE(riscv_intr_names)) ?
291                riscv_intr_names[cause] : "(unknown)";
292     } else {
293         return (cause < ARRAY_SIZE(riscv_excp_names)) ?
294                riscv_excp_names[cause] : "(unknown)";
295     }
296 }
297 
298 void riscv_cpu_set_misa(CPURISCVState *env, RISCVMXL mxl, uint32_t ext)
299 {
300     env->misa_mxl_max = env->misa_mxl = mxl;
301     env->misa_ext_mask = env->misa_ext = ext;
302 }
303 
304 #ifndef CONFIG_USER_ONLY
305 static uint8_t satp_mode_from_str(const char *satp_mode_str)
306 {
307     if (!strncmp(satp_mode_str, "mbare", 5)) {
308         return VM_1_10_MBARE;
309     }
310 
311     if (!strncmp(satp_mode_str, "sv32", 4)) {
312         return VM_1_10_SV32;
313     }
314 
315     if (!strncmp(satp_mode_str, "sv39", 4)) {
316         return VM_1_10_SV39;
317     }
318 
319     if (!strncmp(satp_mode_str, "sv48", 4)) {
320         return VM_1_10_SV48;
321     }
322 
323     if (!strncmp(satp_mode_str, "sv57", 4)) {
324         return VM_1_10_SV57;
325     }
326 
327     if (!strncmp(satp_mode_str, "sv64", 4)) {
328         return VM_1_10_SV64;
329     }
330 
331     g_assert_not_reached();
332 }
333 
334 uint8_t satp_mode_max_from_map(uint32_t map)
335 {
336     /*
337      * 'map = 0' will make us return (31 - 32), which C will
338      * happily overflow to UINT_MAX. There's no good result to
339      * return if 'map = 0' (e.g. returning 0 will be ambiguous
340      * with the result for 'map = 1').
341      *
342      * Assert out if map = 0. Callers will have to deal with
343      * it outside of this function.
344      */
345     g_assert(map > 0);
346 
347     /* map here has at least one bit set, so no problem with clz */
348     return 31 - __builtin_clz(map);
349 }
350 
351 const char *satp_mode_str(uint8_t satp_mode, bool is_32_bit)
352 {
353     if (is_32_bit) {
354         switch (satp_mode) {
355         case VM_1_10_SV32:
356             return "sv32";
357         case VM_1_10_MBARE:
358             return "none";
359         }
360     } else {
361         switch (satp_mode) {
362         case VM_1_10_SV64:
363             return "sv64";
364         case VM_1_10_SV57:
365             return "sv57";
366         case VM_1_10_SV48:
367             return "sv48";
368         case VM_1_10_SV39:
369             return "sv39";
370         case VM_1_10_MBARE:
371             return "none";
372         }
373     }
374 
375     g_assert_not_reached();
376 }
377 
378 static void set_satp_mode_max_supported(RISCVCPU *cpu,
379                                         uint8_t satp_mode)
380 {
381     bool rv32 = riscv_cpu_mxl(&cpu->env) == MXL_RV32;
382     const bool *valid_vm = rv32 ? valid_vm_1_10_32 : valid_vm_1_10_64;
383 
384     for (int i = 0; i <= satp_mode; ++i) {
385         if (valid_vm[i]) {
386             cpu->cfg.satp_mode.supported |= (1 << i);
387         }
388     }
389 }
390 
/* Default the satp mode map to everything the CPU supports. */
static void set_satp_mode_default_map(RISCVCPU *cpu)
{
    /*
     * Bare CPUs do not default to the max available.
     * Users must set a valid satp_mode in the command
     * line.
     */
    if (object_dynamic_cast(OBJECT(cpu), TYPE_RISCV_BARE_CPU) != NULL) {
        warn_report("No satp mode set. Defaulting to 'bare'");
        cpu->cfg.satp_mode.map = (1 << VM_1_10_MBARE);
        return;
    }

    /* Non-bare CPUs enable every mode in the supported bitmap. */
    cpu->cfg.satp_mode.map = cpu->cfg.satp_mode.supported;
}
407 #endif
408 
/*
 * Instance init for the generic "any" CPU: IMAFDCU at the build's
 * native XLEN, latest priv spec, MMU and PMP enabled.
 */
static void riscv_any_cpu_init(Object *obj)
{
    RISCVCPU *cpu = RISCV_CPU(obj);
    CPURISCVState *env = &cpu->env;
#if defined(TARGET_RISCV32)
    riscv_cpu_set_misa(env, MXL_RV32, RVI | RVM | RVA | RVF | RVD | RVC | RVU);
#elif defined(TARGET_RISCV64)
    riscv_cpu_set_misa(env, MXL_RV64, RVI | RVM | RVA | RVF | RVD | RVC | RVU);
#endif

#ifndef CONFIG_USER_ONLY
    /* Max satp mode: sv32 on RV32, sv57 on RV64. */
    set_satp_mode_max_supported(RISCV_CPU(obj),
        riscv_cpu_mxl(&RISCV_CPU(obj)->env) == MXL_RV32 ?
        VM_1_10_SV32 : VM_1_10_SV57);
#endif

    env->priv_ver = PRIV_VERSION_LATEST;

    /* inherited from parent obj via riscv_cpu_init() */
    cpu->cfg.ext_zifencei = true;
    cpu->cfg.ext_zicsr = true;
    cpu->cfg.mmu = true;
    cpu->cfg.pmp = true;
}
433 
434 static void riscv_max_cpu_init(Object *obj)
435 {
436     RISCVCPU *cpu = RISCV_CPU(obj);
437     CPURISCVState *env = &cpu->env;
438     RISCVMXL mlx = MXL_RV64;
439 
440 #ifdef TARGET_RISCV32
441     mlx = MXL_RV32;
442 #endif
443     riscv_cpu_set_misa(env, mlx, 0);
444     env->priv_ver = PRIV_VERSION_LATEST;
445 #ifndef CONFIG_USER_ONLY
446     set_satp_mode_max_supported(RISCV_CPU(obj), mlx == MXL_RV32 ?
447                                 VM_1_10_SV32 : VM_1_10_SV57);
448 #endif
449 }
450 
451 #if defined(TARGET_RISCV64)
452 static void rv64_base_cpu_init(Object *obj)
453 {
454     CPURISCVState *env = &RISCV_CPU(obj)->env;
455     /* We set this in the realise function */
456     riscv_cpu_set_misa(env, MXL_RV64, 0);
457     /* Set latest version of privileged specification */
458     env->priv_ver = PRIV_VERSION_LATEST;
459 #ifndef CONFIG_USER_ONLY
460     set_satp_mode_max_supported(RISCV_CPU(obj), VM_1_10_SV57);
461 #endif
462 }
463 
/* Instance init for the SiFive U54-style rv64 CPU (RV64GC, priv 1.10). */
static void rv64_sifive_u_cpu_init(Object *obj)
{
    RISCVCPU *cpu = RISCV_CPU(obj);
    CPURISCVState *env = &cpu->env;
    riscv_cpu_set_misa(env, MXL_RV64,
                       RVI | RVM | RVA | RVF | RVD | RVC | RVS | RVU);
    env->priv_ver = PRIV_VERSION_1_10_0;
#ifndef CONFIG_USER_ONLY
    set_satp_mode_max_supported(RISCV_CPU(obj), VM_1_10_SV39);
#endif

    /* inherited from parent obj via riscv_cpu_init() */
    cpu->cfg.ext_zifencei = true;
    cpu->cfg.ext_zicsr = true;
    cpu->cfg.mmu = true;
    cpu->cfg.pmp = true;
}
481 
/* Instance init for the SiFive E-series rv64 CPU: no MMU (bare satp). */
static void rv64_sifive_e_cpu_init(Object *obj)
{
    CPURISCVState *env = &RISCV_CPU(obj)->env;
    RISCVCPU *cpu = RISCV_CPU(obj);

    riscv_cpu_set_misa(env, MXL_RV64, RVI | RVM | RVA | RVC | RVU);
    env->priv_ver = PRIV_VERSION_1_10_0;
#ifndef CONFIG_USER_ONLY
    set_satp_mode_max_supported(cpu, VM_1_10_MBARE);
#endif

    /* inherited from parent obj via riscv_cpu_init() */
    cpu->cfg.ext_zifencei = true;
    cpu->cfg.ext_zicsr = true;
    cpu->cfg.pmp = true;
}
498 
/* Instance init for the T-Head C906: RV64GCSU plus the XThead* vendor
 * extensions, priv spec 1.11, T-Head vendor id. */
static void rv64_thead_c906_cpu_init(Object *obj)
{
    CPURISCVState *env = &RISCV_CPU(obj)->env;
    RISCVCPU *cpu = RISCV_CPU(obj);

    riscv_cpu_set_misa(env, MXL_RV64, RVG | RVC | RVS | RVU);
    env->priv_ver = PRIV_VERSION_1_11_0;

    cpu->cfg.ext_zfa = true;
    cpu->cfg.ext_zfh = true;
    cpu->cfg.mmu = true;
    /* T-Head custom (X) extensions implemented by this core */
    cpu->cfg.ext_xtheadba = true;
    cpu->cfg.ext_xtheadbb = true;
    cpu->cfg.ext_xtheadbs = true;
    cpu->cfg.ext_xtheadcmo = true;
    cpu->cfg.ext_xtheadcondmov = true;
    cpu->cfg.ext_xtheadfmemidx = true;
    cpu->cfg.ext_xtheadmac = true;
    cpu->cfg.ext_xtheadmemidx = true;
    cpu->cfg.ext_xtheadmempair = true;
    cpu->cfg.ext_xtheadsync = true;

    cpu->cfg.mvendorid = THEAD_VENDOR_ID;
#ifndef CONFIG_USER_ONLY
    set_satp_mode_max_supported(cpu, VM_1_10_SV39);
#endif

    /* inherited from parent obj via riscv_cpu_init() */
    cpu->cfg.pmp = true;
}
529 
/* Instance init for the Ventana Veyron V1: RV64GCH + S/U, priv 1.12,
 * cache-block ops, AIA, and the XVentanaCondOps vendor extension. */
static void rv64_veyron_v1_cpu_init(Object *obj)
{
    CPURISCVState *env = &RISCV_CPU(obj)->env;
    RISCVCPU *cpu = RISCV_CPU(obj);

    riscv_cpu_set_misa(env, MXL_RV64, RVG | RVC | RVS | RVU | RVH);
    env->priv_ver = PRIV_VERSION_1_12_0;

    /* Enable ISA extensions */
    cpu->cfg.mmu = true;
    cpu->cfg.ext_zifencei = true;
    cpu->cfg.ext_zicsr = true;
    cpu->cfg.pmp = true;
    cpu->cfg.ext_zicbom = true;
    cpu->cfg.cbom_blocksize = 64;
    cpu->cfg.cboz_blocksize = 64;
    cpu->cfg.ext_zicboz = true;
    cpu->cfg.ext_smaia = true;
    cpu->cfg.ext_ssaia = true;
    cpu->cfg.ext_sscofpmf = true;
    cpu->cfg.ext_sstc = true;
    cpu->cfg.ext_svinval = true;
    cpu->cfg.ext_svnapot = true;
    cpu->cfg.ext_svpbmt = true;
    cpu->cfg.ext_smstateen = true;
    cpu->cfg.ext_zba = true;
    cpu->cfg.ext_zbb = true;
    cpu->cfg.ext_zbc = true;
    cpu->cfg.ext_zbs = true;
    cpu->cfg.ext_XVentanaCondOps = true;

    cpu->cfg.mvendorid = VEYRON_V1_MVENDORID;
    cpu->cfg.marchid = VEYRON_V1_MARCHID;
    cpu->cfg.mimpid = VEYRON_V1_MIMPID;

#ifndef CONFIG_USER_ONLY
    set_satp_mode_max_supported(cpu, VM_1_10_SV48);
#endif
}
569 
/* Instance init for the experimental rv128 base CPU.  Refuses to run
 * under MTTCG because 128-bit aligned atomics are not implemented. */
static void rv128_base_cpu_init(Object *obj)
{
    if (qemu_tcg_mttcg_enabled()) {
        /* Missing 128-bit aligned atomics */
        error_report("128-bit RISC-V currently does not work with Multi "
                     "Threaded TCG. Please use: -accel tcg,thread=single");
        exit(EXIT_FAILURE);
    }
    CPURISCVState *env = &RISCV_CPU(obj)->env;
    /* We set this in the realise function */
    riscv_cpu_set_misa(env, MXL_RV128, 0);
    /* Set latest version of privileged specification */
    env->priv_ver = PRIV_VERSION_LATEST;
#ifndef CONFIG_USER_ONLY
    set_satp_mode_max_supported(RISCV_CPU(obj), VM_1_10_SV57);
#endif
}
587 
/* Instance init for the bare rv64i CPU: only RVI, oldest priv spec,
 * no default counters — users opt in to everything else. */
static void rv64i_bare_cpu_init(Object *obj)
{
    CPURISCVState *env = &RISCV_CPU(obj)->env;
    riscv_cpu_set_misa(env, MXL_RV64, RVI);

    /* Remove the defaults from the parent class */
    RISCV_CPU(obj)->cfg.ext_zicntr = false;
    RISCV_CPU(obj)->cfg.ext_zihpm = false;

    /* Set to QEMU's first supported priv version */
    env->priv_ver = PRIV_VERSION_1_10_0;

    /*
     * Support all available satp_mode settings. The default
     * value will be set to MBARE if the user doesn't set
     * satp_mode manually (see set_satp_mode_default_map()).
     */
#ifndef CONFIG_USER_ONLY
    set_satp_mode_max_supported(RISCV_CPU(obj), VM_1_10_SV64);
#endif
}
609 #else
610 static void rv32_base_cpu_init(Object *obj)
611 {
612     CPURISCVState *env = &RISCV_CPU(obj)->env;
613     /* We set this in the realise function */
614     riscv_cpu_set_misa(env, MXL_RV32, 0);
615     /* Set latest version of privileged specification */
616     env->priv_ver = PRIV_VERSION_LATEST;
617 #ifndef CONFIG_USER_ONLY
618     set_satp_mode_max_supported(RISCV_CPU(obj), VM_1_10_SV32);
619 #endif
620 }
621 
/* Instance init for the SiFive U-series rv32 CPU (RV32GC, priv 1.10). */
static void rv32_sifive_u_cpu_init(Object *obj)
{
    RISCVCPU *cpu = RISCV_CPU(obj);
    CPURISCVState *env = &cpu->env;
    riscv_cpu_set_misa(env, MXL_RV32,
                       RVI | RVM | RVA | RVF | RVD | RVC | RVS | RVU);
    env->priv_ver = PRIV_VERSION_1_10_0;
#ifndef CONFIG_USER_ONLY
    set_satp_mode_max_supported(RISCV_CPU(obj), VM_1_10_SV32);
#endif

    /* inherited from parent obj via riscv_cpu_init() */
    cpu->cfg.ext_zifencei = true;
    cpu->cfg.ext_zicsr = true;
    cpu->cfg.mmu = true;
    cpu->cfg.pmp = true;
}
639 
/* Instance init for the SiFive E-series rv32 CPU: no MMU (bare satp). */
static void rv32_sifive_e_cpu_init(Object *obj)
{
    CPURISCVState *env = &RISCV_CPU(obj)->env;
    RISCVCPU *cpu = RISCV_CPU(obj);

    riscv_cpu_set_misa(env, MXL_RV32, RVI | RVM | RVA | RVC | RVU);
    env->priv_ver = PRIV_VERSION_1_10_0;
#ifndef CONFIG_USER_ONLY
    set_satp_mode_max_supported(cpu, VM_1_10_MBARE);
#endif

    /* inherited from parent obj via riscv_cpu_init() */
    cpu->cfg.ext_zifencei = true;
    cpu->cfg.ext_zicsr = true;
    cpu->cfg.pmp = true;
}
656 
/* Instance init for the lowRISC Ibex core: RV32IMCU, priv 1.12,
 * ePMP (Smepmp) enabled, no MMU. */
static void rv32_ibex_cpu_init(Object *obj)
{
    CPURISCVState *env = &RISCV_CPU(obj)->env;
    RISCVCPU *cpu = RISCV_CPU(obj);

    riscv_cpu_set_misa(env, MXL_RV32, RVI | RVM | RVC | RVU);
    env->priv_ver = PRIV_VERSION_1_12_0;
#ifndef CONFIG_USER_ONLY
    set_satp_mode_max_supported(cpu, VM_1_10_MBARE);
#endif
    /* inherited from parent obj via riscv_cpu_init() */
    cpu->cfg.ext_zifencei = true;
    cpu->cfg.ext_zicsr = true;
    cpu->cfg.pmp = true;
    cpu->cfg.ext_smepmp = true;
}
673 
/* Instance init for a generic MMU-less RV32IMAFCU CPU (priv 1.10). */
static void rv32_imafcu_nommu_cpu_init(Object *obj)
{
    CPURISCVState *env = &RISCV_CPU(obj)->env;
    RISCVCPU *cpu = RISCV_CPU(obj);

    riscv_cpu_set_misa(env, MXL_RV32, RVI | RVM | RVA | RVF | RVC | RVU);
    env->priv_ver = PRIV_VERSION_1_10_0;
#ifndef CONFIG_USER_ONLY
    set_satp_mode_max_supported(cpu, VM_1_10_MBARE);
#endif

    /* inherited from parent obj via riscv_cpu_init() */
    cpu->cfg.ext_zifencei = true;
    cpu->cfg.ext_zicsr = true;
    cpu->cfg.pmp = true;
}
690 #endif
691 
692 static ObjectClass *riscv_cpu_class_by_name(const char *cpu_model)
693 {
694     ObjectClass *oc;
695     char *typename;
696     char **cpuname;
697 
698     cpuname = g_strsplit(cpu_model, ",", 1);
699     typename = g_strdup_printf(RISCV_CPU_TYPE_NAME("%s"), cpuname[0]);
700     oc = object_class_by_name(typename);
701     g_strfreev(cpuname);
702     g_free(typename);
703 
704     return oc;
705 }
706 
/*
 * Return the user-visible model name for 'cpu', derived from its QOM
 * type name with the "-riscv-cpu" suffix removed.  The caller owns the
 * returned string and must g_free() it.
 */
char *riscv_cpu_get_name(RISCVCPU *cpu)
{
    RISCVCPUClass *rcc = RISCV_CPU_GET_CLASS(cpu);
    const char *typename = object_class_get_name(OBJECT_CLASS(rcc));

    /* Every RISC-V CPU type name must carry the expected suffix. */
    g_assert(g_str_has_suffix(typename, RISCV_CPU_TYPE_SUFFIX));

    return cpu_model_from_type(typename);
}
716 
/*
 * CPUClass::dump_state hook: print PC, a fixed set of CSRs (system
 * emulation only), the GPRs, and — depending on 'flags' — the FPRs
 * and vector state to stream 'f'.
 */
static void riscv_cpu_dump_state(CPUState *cs, FILE *f, int flags)
{
    RISCVCPU *cpu = RISCV_CPU(cs);
    CPURISCVState *env = &cpu->env;
    int i, j;
    uint8_t *p;

#if !defined(CONFIG_USER_ONLY)
    /* With the hypervisor extension, show whether V=1 (guest mode). */
    if (riscv_has_ext(env, RVH)) {
        qemu_fprintf(f, " %s %d\n", "V      =  ", env->virt_enabled);
    }
#endif
    qemu_fprintf(f, " %s " TARGET_FMT_lx "\n", "pc      ", env->pc);
#ifndef CONFIG_USER_ONLY
    {
        static const int dump_csrs[] = {
            CSR_MHARTID,
            CSR_MSTATUS,
            CSR_MSTATUSH,
            /*
             * CSR_SSTATUS is intentionally omitted here as its value
             * can be figured out by looking at CSR_MSTATUS
             */
            CSR_HSTATUS,
            CSR_VSSTATUS,
            CSR_MIP,
            CSR_MIE,
            CSR_MIDELEG,
            CSR_HIDELEG,
            CSR_MEDELEG,
            CSR_HEDELEG,
            CSR_MTVEC,
            CSR_STVEC,
            CSR_VSTVEC,
            CSR_MEPC,
            CSR_SEPC,
            CSR_VSEPC,
            CSR_MCAUSE,
            CSR_SCAUSE,
            CSR_VSCAUSE,
            CSR_MTVAL,
            CSR_STVAL,
            CSR_HTVAL,
            CSR_MTVAL2,
            CSR_MSCRATCH,
            CSR_SSCRATCH,
            CSR_SATP,
            CSR_MMTE,
            CSR_UPMBASE,
            CSR_UPMMASK,
            CSR_SPMBASE,
            CSR_SPMMASK,
            CSR_MPMBASE,
            CSR_MPMMASK,
        };

        for (i = 0; i < ARRAY_SIZE(dump_csrs); ++i) {
            int csrno = dump_csrs[i];
            target_ulong val = 0;
            RISCVException res = riscv_csrrw_debug(env, csrno, &val, 0, 0);

            /*
             * Rely on the smode, hmode, etc, predicates within csr.c
             * to do the filtering of the registers that are present.
             */
            if (res == RISCV_EXCP_NONE) {
                qemu_fprintf(f, " %-8s " TARGET_FMT_lx "\n",
                             csr_ops[csrno].name, val);
            }
        }
    }
#endif

    /* GPRs, four per output line. */
    for (i = 0; i < 32; i++) {
        qemu_fprintf(f, " %-8s " TARGET_FMT_lx,
                     riscv_int_regnames[i], env->gpr[i]);
        if ((i & 3) == 3) {
            qemu_fprintf(f, "\n");
        }
    }
    if (flags & CPU_DUMP_FPU) {
        /* FPRs, four per output line. */
        for (i = 0; i < 32; i++) {
            qemu_fprintf(f, " %-8s %016" PRIx64,
                         riscv_fpr_regnames[i], env->fpr[i]);
            if ((i & 3) == 3) {
                qemu_fprintf(f, "\n");
            }
        }
    }
    if (riscv_has_ext(env, RVV) && (flags & CPU_DUMP_VPU)) {
        static const int dump_rvv_csrs[] = {
                    CSR_VSTART,
                    CSR_VXSAT,
                    CSR_VXRM,
                    CSR_VCSR,
                    CSR_VL,
                    CSR_VTYPE,
                    CSR_VLENB,
                };
        for (i = 0; i < ARRAY_SIZE(dump_rvv_csrs); ++i) {
            int csrno = dump_rvv_csrs[i];
            target_ulong val = 0;
            RISCVException res = riscv_csrrw_debug(env, csrno, &val, 0, 0);

            /*
             * Rely on the smode, hmode, etc, predicates within csr.c
             * to do the filtering of the registers that are present.
             */
            if (res == RISCV_EXCP_NONE) {
                qemu_fprintf(f, " %-8s " TARGET_FMT_lx "\n",
                             csr_ops[csrno].name, val);
            }
        }
        uint16_t vlenb = cpu->cfg.vlen >> 3;

        /*
         * Vector registers, printed most-significant byte first.
         * BYTE() compensates for host endianness of the 64-bit chunks.
         */
        for (i = 0; i < 32; i++) {
            qemu_fprintf(f, " %-8s ", riscv_rvv_regnames[i]);
            p = (uint8_t *)env->vreg;
            for (j = vlenb - 1 ; j >= 0; j--) {
                qemu_fprintf(f, "%02x", *(p + i * vlenb + BYTE(j)));
            }
            qemu_fprintf(f, "\n");
        }
    }
}
842 
843 static void riscv_cpu_set_pc(CPUState *cs, vaddr value)
844 {
845     RISCVCPU *cpu = RISCV_CPU(cs);
846     CPURISCVState *env = &cpu->env;
847 
848     if (env->xl == MXL_RV32) {
849         env->pc = (int32_t)value;
850     } else {
851         env->pc = value;
852     }
853 }
854 
855 static vaddr riscv_cpu_get_pc(CPUState *cs)
856 {
857     RISCVCPU *cpu = RISCV_CPU(cs);
858     CPURISCVState *env = &cpu->env;
859 
860     /* Match cpu_get_tb_cpu_state. */
861     if (env->xl == MXL_RV32) {
862         return env->pc & UINT32_MAX;
863     }
864     return env->pc;
865 }
866 
/*
 * CPUClass::has_work hook, used to decide whether a halted (WFI) CPU
 * should wake up.  In user mode there is always work to do.
 */
static bool riscv_cpu_has_work(CPUState *cs)
{
#ifndef CONFIG_USER_ONLY
    RISCVCPU *cpu = RISCV_CPU(cs);
    CPURISCVState *env = &cpu->env;
    /*
     * Definition of the WFI instruction requires it to ignore the privilege
     * mode and delegation registers, but respect individual enables
     */
    return riscv_cpu_all_pending(env) != 0 ||
        riscv_cpu_sirq_pending(env) != RISCV_EXCP_NONE ||
        riscv_cpu_vsirq_pending(env) != RISCV_EXCP_NONE;
#else
    return true;
#endif
}
883 
884 static int riscv_cpu_mmu_index(CPUState *cs, bool ifetch)
885 {
886     return riscv_env_mmu_index(cpu_env(cs), ifetch);
887 }
888 
/*
 * Resettable "hold" phase handler: bring the CPU to its architectural
 * reset state — M-mode, global interrupts disabled, PC at the reset
 * vector — after chaining up to the parent class' handler.
 */
static void riscv_cpu_reset_hold(Object *obj)
{
#ifndef CONFIG_USER_ONLY
    uint8_t iprio;
    int i, irq, rdzero;
#endif
    CPUState *cs = CPU(obj);
    RISCVCPU *cpu = RISCV_CPU(cs);
    RISCVCPUClass *mcc = RISCV_CPU_GET_CLASS(cpu);
    CPURISCVState *env = &cpu->env;

    /* Chain up to the parent class' hold phase first. */
    if (mcc->parent_phases.hold) {
        mcc->parent_phases.hold(obj);
    }
#ifndef CONFIG_USER_ONLY
    env->misa_mxl = env->misa_mxl_max;
    env->priv = PRV_M;
    env->mstatus &= ~(MSTATUS_MIE | MSTATUS_MPRV);
    if (env->misa_mxl > MXL_RV32) {
        /*
         * The reset status of SXL/UXL is undefined, but mstatus is WARL
         * and we must ensure that the value after init is valid for read.
         */
        env->mstatus = set_field(env->mstatus, MSTATUS64_SXL, env->misa_mxl);
        env->mstatus = set_field(env->mstatus, MSTATUS64_UXL, env->misa_mxl);
        if (riscv_has_ext(env, RVH)) {
            env->vsstatus = set_field(env->vsstatus,
                                      MSTATUS64_SXL, env->misa_mxl);
            env->vsstatus = set_field(env->vsstatus,
                                      MSTATUS64_UXL, env->misa_mxl);
            env->mstatus_hs = set_field(env->mstatus_hs,
                                        MSTATUS64_SXL, env->misa_mxl);
            env->mstatus_hs = set_field(env->mstatus_hs,
                                        MSTATUS64_UXL, env->misa_mxl);
        }
    }
    env->mcause = 0;
    env->miclaim = MIP_SGEIP;
    env->pc = env->resetvec;
    env->bins = 0;
    env->two_stage_lookup = false;

    /* PBMTE/ADUE reset high only when the matching extension is present. */
    env->menvcfg = (cpu->cfg.ext_svpbmt ? MENVCFG_PBMTE : 0) |
                   (cpu->cfg.ext_svadu ? MENVCFG_ADUE : 0);
    env->henvcfg = (cpu->cfg.ext_svpbmt ? HENVCFG_PBMTE : 0) |
                   (cpu->cfg.ext_svadu ? HENVCFG_ADUE : 0);

    /* Initialize default priorities of local interrupts. */
    for (i = 0; i < ARRAY_SIZE(env->miprio); i++) {
        iprio = riscv_cpu_default_priority(i);
        env->miprio[i] = (i == IRQ_M_EXT) ? 0 : iprio;
        env->siprio[i] = (i == IRQ_S_EXT) ? 0 : iprio;
        env->hviprio[i] = 0;
    }
    /* Seed hviprio from miprio for every index that is not read-only-zero. */
    i = 0;
    while (!riscv_cpu_hviprio_index2irq(i, &irq, &rdzero)) {
        if (!rdzero) {
            env->hviprio[irq] = env->miprio[irq];
        }
        i++;
    }
    /* mmte is supposed to have pm.current hardwired to 1 */
    env->mmte |= (EXT_STATUS_INITIAL | MMTE_M_PM_CURRENT);

    /*
     * Bits 10, 6, 2 and 12 of mideleg are read only 1 when the Hypervisor
     * extension is enabled.
     */
    if (riscv_has_ext(env, RVH)) {
        env->mideleg |= HS_MODE_INTERRUPTS;
    }

    /*
     * Clear mseccfg and unlock all the PMP entries upon reset.
     * This is allowed as per the priv and smepmp specifications
     * and is needed to clear stale entries across reboots.
     */
    if (riscv_cpu_cfg(env)->ext_smepmp) {
        env->mseccfg = 0;
    }

    pmp_unlock_entries(env);
#endif
    env->xl = riscv_cpu_mxl(env);
    riscv_cpu_update_mask(env);
    cs->exception_index = RISCV_EXCP_NONE;
    env->load_res = -1;
    set_default_nan_mode(1, &env->fp_status);

#ifndef CONFIG_USER_ONLY
    if (cpu->cfg.debug) {
        riscv_trigger_reset_hold(env);
    }

    if (kvm_enabled()) {
        kvm_riscv_reset_vcpu(cpu);
    }
#endif
}
988 
989 static void riscv_cpu_disas_set_info(CPUState *s, disassemble_info *info)
990 {
991     RISCVCPU *cpu = RISCV_CPU(s);
992     CPURISCVState *env = &cpu->env;
993     info->target_info = &cpu->cfg;
994 
995     switch (env->xl) {
996     case MXL_RV32:
997         info->print_insn = print_insn_riscv32;
998         break;
999     case MXL_RV64:
1000         info->print_insn = print_insn_riscv64;
1001         break;
1002     case MXL_RV128:
1003         info->print_insn = print_insn_riscv128;
1004         break;
1005     default:
1006         g_assert_not_reached();
1007     }
1008 }
1009 
1010 #ifndef CONFIG_USER_ONLY
/*
 * Finalize cfg.satp_mode: reconcile the user-requested satp modes
 * (satp_mode.init records what the user touched, satp_mode.map the
 * requested values) against what the CPU model supports, rejecting
 * impossible combinations and expanding the map to all valid modes.
 */
static void riscv_cpu_satp_mode_finalize(RISCVCPU *cpu, Error **errp)
{
    bool rv32 = riscv_cpu_is_32bit(cpu);
    uint8_t satp_mode_map_max, satp_mode_supported_max;

    /* The CPU wants the OS to decide which satp mode to use */
    if (cpu->cfg.satp_mode.supported == 0) {
        return;
    }

    satp_mode_supported_max =
                    satp_mode_max_from_map(cpu->cfg.satp_mode.supported);

    if (cpu->cfg.satp_mode.map == 0) {
        if (cpu->cfg.satp_mode.init == 0) {
            /* If unset by the user, we fallback to the default satp mode. */
            set_satp_mode_default_map(cpu);
        } else {
            /*
             * Find the lowest level that was disabled and then enable the
             * first valid level below which can be found in
             * valid_vm_1_10_32/64.
             */
            for (int i = 1; i < 16; ++i) {
                if ((cpu->cfg.satp_mode.init & (1 << i)) &&
                    (cpu->cfg.satp_mode.supported & (1 << i))) {
                    for (int j = i - 1; j >= 0; --j) {
                        if (cpu->cfg.satp_mode.supported & (1 << j)) {
                            cpu->cfg.satp_mode.map |= (1 << j);
                            break;
                        }
                    }
                    break;
                }
            }
        }
    }

    satp_mode_map_max = satp_mode_max_from_map(cpu->cfg.satp_mode.map);

    /* Make sure the user asked for a supported configuration (HW and qemu) */
    if (satp_mode_map_max > satp_mode_supported_max) {
        error_setg(errp, "satp_mode %s is higher than hw max capability %s",
                   satp_mode_str(satp_mode_map_max, rv32),
                   satp_mode_str(satp_mode_supported_max, rv32));
        return;
    }

    /*
     * Make sure the user did not ask for an invalid configuration as per
     * the specification.
     */
    if (!rv32) {
        /* A mode below the max cannot be explicitly disabled on RV64. */
        for (int i = satp_mode_map_max - 1; i >= 0; --i) {
            if (!(cpu->cfg.satp_mode.map & (1 << i)) &&
                (cpu->cfg.satp_mode.init & (1 << i)) &&
                (cpu->cfg.satp_mode.supported & (1 << i))) {
                error_setg(errp, "cannot disable %s satp mode if %s "
                           "is enabled", satp_mode_str(i, false),
                           satp_mode_str(satp_mode_map_max, false));
                return;
            }
        }
    }

    /* Finally expand the map so that all valid modes are set */
    for (int i = satp_mode_map_max - 1; i >= 0; --i) {
        if (cpu->cfg.satp_mode.supported & (1 << i)) {
            cpu->cfg.satp_mode.map |= (1 << i);
        }
    }
}
1083 #endif
1084 
1085 void riscv_cpu_finalize_features(RISCVCPU *cpu, Error **errp)
1086 {
1087     Error *local_err = NULL;
1088 
1089 #ifndef CONFIG_USER_ONLY
1090     riscv_cpu_satp_mode_finalize(cpu, &local_err);
1091     if (local_err != NULL) {
1092         error_propagate(errp, local_err);
1093         return;
1094     }
1095 #endif
1096 
1097     /*
1098      * KVM accel does not have a specialized finalize()
1099      * callback because its extensions are validated
1100      * in the get()/set() callbacks of each property.
1101      */
1102     if (tcg_enabled()) {
1103         riscv_tcg_cpu_finalize_features(cpu, &local_err);
1104         if (local_err != NULL) {
1105             error_propagate(errp, local_err);
1106             return;
1107         }
1108     }
1109 }
1110 
/*
 * DeviceClass::realize handler. The ordering here matters: the common
 * cpu_exec_realizefn() must run before feature finalization, and the
 * vCPU must be initialized and reset before chaining to the parent
 * realize.
 */
static void riscv_cpu_realize(DeviceState *dev, Error **errp)
{
    CPUState *cs = CPU(dev);
    RISCVCPU *cpu = RISCV_CPU(dev);
    RISCVCPUClass *mcc = RISCV_CPU_GET_CLASS(dev);
    Error *local_err = NULL;

    if (object_dynamic_cast(OBJECT(dev), TYPE_RISCV_CPU_ANY) != NULL) {
        warn_report("The 'any' CPU is deprecated and will be "
                    "removed in the future.");
    }

    cpu_exec_realizefn(cs, &local_err);
    if (local_err != NULL) {
        error_propagate(errp, local_err);
        return;
    }

    riscv_cpu_finalize_features(cpu, &local_err);
    if (local_err != NULL) {
        error_propagate(errp, local_err);
        return;
    }

    riscv_cpu_register_gdb_regs_for_features(cs);

#ifndef CONFIG_USER_ONLY
    /* Debug triggers are only realized when the 'debug' property is set. */
    if (cpu->cfg.debug) {
        riscv_trigger_realize(&cpu->env);
    }
#endif

    qemu_init_vcpu(cs);
    cpu_reset(cs);

    mcc->parent_realize(dev, errp);
}
1148 
1149 bool riscv_cpu_accelerator_compatible(RISCVCPU *cpu)
1150 {
1151     if (tcg_enabled()) {
1152         return riscv_cpu_tcg_compatible(cpu);
1153     }
1154 
1155     return true;
1156 }
1157 
1158 #ifndef CONFIG_USER_ONLY
1159 static void cpu_riscv_get_satp(Object *obj, Visitor *v, const char *name,
1160                                void *opaque, Error **errp)
1161 {
1162     RISCVSATPMap *satp_map = opaque;
1163     uint8_t satp = satp_mode_from_str(name);
1164     bool value;
1165 
1166     value = satp_map->map & (1 << satp);
1167 
1168     visit_type_bool(v, name, &value, errp);
1169 }
1170 
1171 static void cpu_riscv_set_satp(Object *obj, Visitor *v, const char *name,
1172                                void *opaque, Error **errp)
1173 {
1174     RISCVSATPMap *satp_map = opaque;
1175     uint8_t satp = satp_mode_from_str(name);
1176     bool value;
1177 
1178     if (!visit_type_bool(v, name, &value, errp)) {
1179         return;
1180     }
1181 
1182     satp_map->map = deposit32(satp_map->map, satp, 1, value);
1183     satp_map->init |= 1 << satp;
1184 }
1185 
1186 void riscv_add_satp_mode_properties(Object *obj)
1187 {
1188     RISCVCPU *cpu = RISCV_CPU(obj);
1189 
1190     if (cpu->env.misa_mxl == MXL_RV32) {
1191         object_property_add(obj, "sv32", "bool", cpu_riscv_get_satp,
1192                             cpu_riscv_set_satp, NULL, &cpu->cfg.satp_mode);
1193     } else {
1194         object_property_add(obj, "sv39", "bool", cpu_riscv_get_satp,
1195                             cpu_riscv_set_satp, NULL, &cpu->cfg.satp_mode);
1196         object_property_add(obj, "sv48", "bool", cpu_riscv_get_satp,
1197                             cpu_riscv_set_satp, NULL, &cpu->cfg.satp_mode);
1198         object_property_add(obj, "sv57", "bool", cpu_riscv_get_satp,
1199                             cpu_riscv_set_satp, NULL, &cpu->cfg.satp_mode);
1200         object_property_add(obj, "sv64", "bool", cpu_riscv_get_satp,
1201                             cpu_riscv_set_satp, NULL, &cpu->cfg.satp_mode);
1202     }
1203 }
1204 
/*
 * GPIO input handler for CPU interrupt lines. Lines below
 * IRQ_LOCAL_MAX are standard local interrupts routed into mip (or to
 * KVM); lines in [IRQ_LOCAL_MAX, IRQ_LOCAL_MAX + IRQ_LOCAL_GUEST_MAX)
 * are guest external interrupts reflected into hgeip/mip.SGEIP and
 * require the H-extension.
 */
static void riscv_cpu_set_irq(void *opaque, int irq, int level)
{
    RISCVCPU *cpu = RISCV_CPU(opaque);
    CPURISCVState *env = &cpu->env;

    if (irq < IRQ_LOCAL_MAX) {
        switch (irq) {
        case IRQ_U_SOFT:
        case IRQ_S_SOFT:
        case IRQ_VS_SOFT:
        case IRQ_M_SOFT:
        case IRQ_U_TIMER:
        case IRQ_S_TIMER:
        case IRQ_VS_TIMER:
        case IRQ_M_TIMER:
        case IRQ_U_EXT:
        case IRQ_VS_EXT:
        case IRQ_M_EXT:
            if (kvm_enabled()) {
                kvm_riscv_set_irq(cpu, irq, level);
            } else {
                riscv_cpu_update_mip(env, 1 << irq, BOOL_TO_MASK(level));
            }
             break;
        case IRQ_S_EXT:
            if (kvm_enabled()) {
                kvm_riscv_set_irq(cpu, irq, level);
            } else {
                /*
                 * SEIP can also be raised by software (CSR write); mip.SEIP
                 * stays pending while either source is asserted.
                 */
                env->external_seip = level;
                riscv_cpu_update_mip(env, 1 << irq,
                                     BOOL_TO_MASK(level | env->software_seip));
            }
            break;
        default:
            g_assert_not_reached();
        }
    } else if (irq < (IRQ_LOCAL_MAX + IRQ_LOCAL_GUEST_MAX)) {
        /* Require H-extension for handling guest local interrupts */
        if (!riscv_has_ext(env, RVH)) {
            g_assert_not_reached();
        }

        /* Compute bit position in HGEIP CSR */
        irq = irq - IRQ_LOCAL_MAX + 1;
        if (env->geilen < irq) {
            g_assert_not_reached();
        }

        /* Update HGEIP CSR */
        env->hgeip &= ~((target_ulong)1 << irq);
        if (level) {
            env->hgeip |= (target_ulong)1 << irq;
        }

        /* Update mip.SGEIP bit */
        riscv_cpu_update_mip(env, MIP_SGEIP,
                             BOOL_TO_MASK(!!(env->hgeie & env->hgeip)));
    } else {
        g_assert_not_reached();
    }
}
1266 #endif /* CONFIG_USER_ONLY */
1267 
1268 static bool riscv_cpu_is_dynamic(Object *cpu_obj)
1269 {
1270     return object_dynamic_cast(cpu_obj, TYPE_RISCV_DYNAMIC_CPU) != NULL;
1271 }
1272 
1273 static void riscv_cpu_post_init(Object *obj)
1274 {
1275     accel_cpu_instance_init(CPU(obj));
1276 }
1277 
1278 static void riscv_cpu_init(Object *obj)
1279 {
1280     RISCVCPU *cpu = RISCV_CPU(obj);
1281 
1282 #ifndef CONFIG_USER_ONLY
1283     qdev_init_gpio_in(DEVICE(obj), riscv_cpu_set_irq,
1284                       IRQ_LOCAL_MAX + IRQ_LOCAL_GUEST_MAX);
1285 #endif /* CONFIG_USER_ONLY */
1286 
1287     general_user_opts = g_hash_table_new(g_str_hash, g_str_equal);
1288 
1289     /*
1290      * The timer and performance counters extensions were supported
1291      * in QEMU before they were added as discrete extensions in the
1292      * ISA. To keep compatibility we'll always default them to 'true'
1293      * for all CPUs. Each accelerator will decide what to do when
1294      * users disable them.
1295      */
1296     RISCV_CPU(obj)->cfg.ext_zicntr = true;
1297     RISCV_CPU(obj)->cfg.ext_zihpm = true;
1298 
1299     /* Default values for non-bool cpu properties */
1300     cpu->cfg.pmu_mask = MAKE_64BIT_MASK(3, 16);
1301 }
1302 
/* Human-readable metadata for a single-letter (misa) extension. */
typedef struct misa_ext_info {
    const char *name;        /* QOM property name, e.g. "a" or "x-j" */
    const char *description; /* one-line description for help output */
} MISAExtInfo;

/* Map a single misa bit to its array index (position of its only set bit). */
#define MISA_INFO_IDX(_bit) \
    __builtin_ctz(_bit)

#define MISA_EXT_INFO(_bit, _propname, _descr) \
    [MISA_INFO_IDX(_bit)] = {.name = _propname, .description = _descr}

/* Indexed by MISA_INFO_IDX(bit); unused slots remain zero-initialized. */
static const MISAExtInfo misa_ext_info_arr[] = {
    MISA_EXT_INFO(RVA, "a", "Atomic instructions"),
    MISA_EXT_INFO(RVC, "c", "Compressed instructions"),
    MISA_EXT_INFO(RVD, "d", "Double-precision float point"),
    MISA_EXT_INFO(RVF, "f", "Single-precision float point"),
    MISA_EXT_INFO(RVI, "i", "Base integer instruction set"),
    MISA_EXT_INFO(RVE, "e", "Base integer instruction set (embedded)"),
    MISA_EXT_INFO(RVM, "m", "Integer multiplication and division"),
    MISA_EXT_INFO(RVS, "s", "Supervisor-level instructions"),
    MISA_EXT_INFO(RVU, "u", "User-level instructions"),
    MISA_EXT_INFO(RVH, "h", "Hypervisor"),
    MISA_EXT_INFO(RVJ, "x-j", "Dynamic translated languages"),
    MISA_EXT_INFO(RVV, "v", "Vector operations"),
    MISA_EXT_INFO(RVG, "g", "General purpose (IMAFD_Zicsr_Zifencei)"),
    MISA_EXT_INFO(RVB, "x-b", "Bit manipulation (Zba_Zbb_Zbs)")
};
1330 
1331 static int riscv_validate_misa_info_idx(uint32_t bit)
1332 {
1333     int idx;
1334 
1335     /*
1336      * Our lowest valid input (RVA) is 1 and
1337      * __builtin_ctz() is UB with zero.
1338      */
1339     g_assert(bit != 0);
1340     idx = MISA_INFO_IDX(bit);
1341 
1342     g_assert(idx < ARRAY_SIZE(misa_ext_info_arr));
1343     return idx;
1344 }
1345 
1346 const char *riscv_get_misa_ext_name(uint32_t bit)
1347 {
1348     int idx = riscv_validate_misa_info_idx(bit);
1349     const char *val = misa_ext_info_arr[idx].name;
1350 
1351     g_assert(val != NULL);
1352     return val;
1353 }
1354 
1355 const char *riscv_get_misa_ext_description(uint32_t bit)
1356 {
1357     int idx = riscv_validate_misa_info_idx(bit);
1358     const char *val = misa_ext_info_arr[idx].description;
1359 
1360     g_assert(val != NULL);
1361     return val;
1362 }
1363 
/*
 * Shorthand for a boolean multi-extension config entry: the property
 * name, the offset of its flag inside RISCVCPUConfig, and its default.
 */
#define MULTI_EXT_CFG_BOOL(_name, _prop, _defval) \
    {.name = _name, .offset = CPU_CFG_OFFSET(_prop), \
     .enabled = _defval}
1367 
1368 const RISCVCPUMultiExtConfig riscv_cpu_extensions[] = {
1369     /* Defaults for standard extensions */
1370     MULTI_EXT_CFG_BOOL("sscofpmf", ext_sscofpmf, false),
1371     MULTI_EXT_CFG_BOOL("zifencei", ext_zifencei, true),
1372     MULTI_EXT_CFG_BOOL("zicsr", ext_zicsr, true),
1373     MULTI_EXT_CFG_BOOL("zihintntl", ext_zihintntl, true),
1374     MULTI_EXT_CFG_BOOL("zihintpause", ext_zihintpause, true),
1375     MULTI_EXT_CFG_BOOL("zacas", ext_zacas, false),
1376     MULTI_EXT_CFG_BOOL("zawrs", ext_zawrs, true),
1377     MULTI_EXT_CFG_BOOL("zfa", ext_zfa, true),
1378     MULTI_EXT_CFG_BOOL("zfh", ext_zfh, false),
1379     MULTI_EXT_CFG_BOOL("zfhmin", ext_zfhmin, false),
1380     MULTI_EXT_CFG_BOOL("zve32f", ext_zve32f, false),
1381     MULTI_EXT_CFG_BOOL("zve64f", ext_zve64f, false),
1382     MULTI_EXT_CFG_BOOL("zve64d", ext_zve64d, false),
1383     MULTI_EXT_CFG_BOOL("sstc", ext_sstc, true),
1384 
1385     MULTI_EXT_CFG_BOOL("smepmp", ext_smepmp, false),
1386     MULTI_EXT_CFG_BOOL("smstateen", ext_smstateen, false),
1387     MULTI_EXT_CFG_BOOL("svadu", ext_svadu, true),
1388     MULTI_EXT_CFG_BOOL("svinval", ext_svinval, false),
1389     MULTI_EXT_CFG_BOOL("svnapot", ext_svnapot, false),
1390     MULTI_EXT_CFG_BOOL("svpbmt", ext_svpbmt, false),
1391 
1392     MULTI_EXT_CFG_BOOL("zicntr", ext_zicntr, true),
1393     MULTI_EXT_CFG_BOOL("zihpm", ext_zihpm, true),
1394 
1395     MULTI_EXT_CFG_BOOL("zba", ext_zba, true),
1396     MULTI_EXT_CFG_BOOL("zbb", ext_zbb, true),
1397     MULTI_EXT_CFG_BOOL("zbc", ext_zbc, true),
1398     MULTI_EXT_CFG_BOOL("zbkb", ext_zbkb, false),
1399     MULTI_EXT_CFG_BOOL("zbkc", ext_zbkc, false),
1400     MULTI_EXT_CFG_BOOL("zbkx", ext_zbkx, false),
1401     MULTI_EXT_CFG_BOOL("zbs", ext_zbs, true),
1402     MULTI_EXT_CFG_BOOL("zk", ext_zk, false),
1403     MULTI_EXT_CFG_BOOL("zkn", ext_zkn, false),
1404     MULTI_EXT_CFG_BOOL("zknd", ext_zknd, false),
1405     MULTI_EXT_CFG_BOOL("zkne", ext_zkne, false),
1406     MULTI_EXT_CFG_BOOL("zknh", ext_zknh, false),
1407     MULTI_EXT_CFG_BOOL("zkr", ext_zkr, false),
1408     MULTI_EXT_CFG_BOOL("zks", ext_zks, false),
1409     MULTI_EXT_CFG_BOOL("zksed", ext_zksed, false),
1410     MULTI_EXT_CFG_BOOL("zksh", ext_zksh, false),
1411     MULTI_EXT_CFG_BOOL("zkt", ext_zkt, false),
1412 
1413     MULTI_EXT_CFG_BOOL("zdinx", ext_zdinx, false),
1414     MULTI_EXT_CFG_BOOL("zfinx", ext_zfinx, false),
1415     MULTI_EXT_CFG_BOOL("zhinx", ext_zhinx, false),
1416     MULTI_EXT_CFG_BOOL("zhinxmin", ext_zhinxmin, false),
1417 
1418     MULTI_EXT_CFG_BOOL("zicbom", ext_zicbom, true),
1419     MULTI_EXT_CFG_BOOL("zicbop", ext_zicbop, true),
1420     MULTI_EXT_CFG_BOOL("zicboz", ext_zicboz, true),
1421 
1422     MULTI_EXT_CFG_BOOL("zmmul", ext_zmmul, false),
1423 
1424     MULTI_EXT_CFG_BOOL("zca", ext_zca, false),
1425     MULTI_EXT_CFG_BOOL("zcb", ext_zcb, false),
1426     MULTI_EXT_CFG_BOOL("zcd", ext_zcd, false),
1427     MULTI_EXT_CFG_BOOL("zce", ext_zce, false),
1428     MULTI_EXT_CFG_BOOL("zcf", ext_zcf, false),
1429     MULTI_EXT_CFG_BOOL("zcmp", ext_zcmp, false),
1430     MULTI_EXT_CFG_BOOL("zcmt", ext_zcmt, false),
1431     MULTI_EXT_CFG_BOOL("zicond", ext_zicond, false),
1432 
1433     /* Vector cryptography extensions */
1434     MULTI_EXT_CFG_BOOL("zvbb", ext_zvbb, false),
1435     MULTI_EXT_CFG_BOOL("zvbc", ext_zvbc, false),
1436     MULTI_EXT_CFG_BOOL("zvkb", ext_zvkg, false),
1437     MULTI_EXT_CFG_BOOL("zvkg", ext_zvkg, false),
1438     MULTI_EXT_CFG_BOOL("zvkned", ext_zvkned, false),
1439     MULTI_EXT_CFG_BOOL("zvknha", ext_zvknha, false),
1440     MULTI_EXT_CFG_BOOL("zvknhb", ext_zvknhb, false),
1441     MULTI_EXT_CFG_BOOL("zvksed", ext_zvksed, false),
1442     MULTI_EXT_CFG_BOOL("zvksh", ext_zvksh, false),
1443     MULTI_EXT_CFG_BOOL("zvkt", ext_zvkt, false),
1444     MULTI_EXT_CFG_BOOL("zvkn", ext_zvkn, false),
1445     MULTI_EXT_CFG_BOOL("zvknc", ext_zvknc, false),
1446     MULTI_EXT_CFG_BOOL("zvkng", ext_zvkng, false),
1447     MULTI_EXT_CFG_BOOL("zvks", ext_zvks, false),
1448     MULTI_EXT_CFG_BOOL("zvksc", ext_zvksc, false),
1449     MULTI_EXT_CFG_BOOL("zvksg", ext_zvksg, false),
1450 
1451     DEFINE_PROP_END_OF_LIST(),
1452 };
1453 
/* Vendor-specific (X*) extensions; all disabled unless explicitly enabled. */
const RISCVCPUMultiExtConfig riscv_cpu_vendor_exts[] = {
    MULTI_EXT_CFG_BOOL("xtheadba", ext_xtheadba, false),
    MULTI_EXT_CFG_BOOL("xtheadbb", ext_xtheadbb, false),
    MULTI_EXT_CFG_BOOL("xtheadbs", ext_xtheadbs, false),
    MULTI_EXT_CFG_BOOL("xtheadcmo", ext_xtheadcmo, false),
    MULTI_EXT_CFG_BOOL("xtheadcondmov", ext_xtheadcondmov, false),
    MULTI_EXT_CFG_BOOL("xtheadfmemidx", ext_xtheadfmemidx, false),
    MULTI_EXT_CFG_BOOL("xtheadfmv", ext_xtheadfmv, false),
    MULTI_EXT_CFG_BOOL("xtheadmac", ext_xtheadmac, false),
    MULTI_EXT_CFG_BOOL("xtheadmemidx", ext_xtheadmemidx, false),
    MULTI_EXT_CFG_BOOL("xtheadmempair", ext_xtheadmempair, false),
    MULTI_EXT_CFG_BOOL("xtheadsync", ext_xtheadsync, false),
    MULTI_EXT_CFG_BOOL("xventanacondops", ext_XVentanaCondOps, false),

    DEFINE_PROP_END_OF_LIST(),
};
1470 
/* These are experimental so mark with 'x-' */
const RISCVCPUMultiExtConfig riscv_cpu_experimental_exts[] = {
    /* AIA (Advanced Interrupt Architecture) machine/supervisor subsets */
    MULTI_EXT_CFG_BOOL("x-smaia", ext_smaia, false),
    MULTI_EXT_CFG_BOOL("x-ssaia", ext_ssaia, false),

    /* Half-precision vector floating point */
    MULTI_EXT_CFG_BOOL("x-zvfh", ext_zvfh, false),
    MULTI_EXT_CFG_BOOL("x-zvfhmin", ext_zvfhmin, false),

    /* bfloat16 scalar/vector support */
    MULTI_EXT_CFG_BOOL("x-zfbfmin", ext_zfbfmin, false),
    MULTI_EXT_CFG_BOOL("x-zvfbfmin", ext_zvfbfmin, false),
    MULTI_EXT_CFG_BOOL("x-zvfbfwma", ext_zvfbfwma, false),

    DEFINE_PROP_END_OF_LIST(),
};
1485 
/* Profile 'named features' (synthetic extensions), enabled by default. */
const RISCVCPUMultiExtConfig riscv_cpu_named_features[] = {
    MULTI_EXT_CFG_BOOL("svade", svade, true),
    MULTI_EXT_CFG_BOOL("zic64b", zic64b, true),

    DEFINE_PROP_END_OF_LIST(),
};
1492 
/* Deprecated entries marked for future removal */
/* Capitalized aliases kept for backward compatibility with old cmdlines. */
const RISCVCPUMultiExtConfig riscv_cpu_deprecated_exts[] = {
    MULTI_EXT_CFG_BOOL("Zifencei", ext_zifencei, true),
    MULTI_EXT_CFG_BOOL("Zicsr", ext_zicsr, true),
    MULTI_EXT_CFG_BOOL("Zihintntl", ext_zihintntl, true),
    MULTI_EXT_CFG_BOOL("Zihintpause", ext_zihintpause, true),
    MULTI_EXT_CFG_BOOL("Zawrs", ext_zawrs, true),
    MULTI_EXT_CFG_BOOL("Zfa", ext_zfa, true),
    MULTI_EXT_CFG_BOOL("Zfh", ext_zfh, false),
    MULTI_EXT_CFG_BOOL("Zfhmin", ext_zfhmin, false),
    MULTI_EXT_CFG_BOOL("Zve32f", ext_zve32f, false),
    MULTI_EXT_CFG_BOOL("Zve64f", ext_zve64f, false),
    MULTI_EXT_CFG_BOOL("Zve64d", ext_zve64d, false),

    DEFINE_PROP_END_OF_LIST(),
};
1509 
1510 static void cpu_set_prop_err(RISCVCPU *cpu, const char *propname,
1511                              Error **errp)
1512 {
1513     g_autofree char *cpuname = riscv_cpu_get_name(cpu);
1514     error_setg(errp, "CPU '%s' does not allow changing the value of '%s'",
1515                cpuname, propname);
1516 }
1517 
1518 static void prop_pmu_num_set(Object *obj, Visitor *v, const char *name,
1519                              void *opaque, Error **errp)
1520 {
1521     RISCVCPU *cpu = RISCV_CPU(obj);
1522     uint8_t pmu_num, curr_pmu_num;
1523     uint32_t pmu_mask;
1524 
1525     visit_type_uint8(v, name, &pmu_num, errp);
1526 
1527     curr_pmu_num = ctpop32(cpu->cfg.pmu_mask);
1528 
1529     if (pmu_num != curr_pmu_num && riscv_cpu_is_vendor(obj)) {
1530         cpu_set_prop_err(cpu, name, errp);
1531         error_append_hint(errp, "Current '%s' val: %u\n",
1532                           name, curr_pmu_num);
1533         return;
1534     }
1535 
1536     if (pmu_num > (RV_MAX_MHPMCOUNTERS - 3)) {
1537         error_setg(errp, "Number of counters exceeds maximum available");
1538         return;
1539     }
1540 
1541     if (pmu_num == 0) {
1542         pmu_mask = 0;
1543     } else {
1544         pmu_mask = MAKE_64BIT_MASK(3, pmu_num);
1545     }
1546 
1547     warn_report("\"pmu-num\" property is deprecated; use \"pmu-mask\"");
1548     cpu->cfg.pmu_mask = pmu_mask;
1549     cpu_option_add_user_setting("pmu-mask", pmu_mask);
1550 }
1551 
1552 static void prop_pmu_num_get(Object *obj, Visitor *v, const char *name,
1553                              void *opaque, Error **errp)
1554 {
1555     RISCVCPU *cpu = RISCV_CPU(obj);
1556     uint8_t pmu_num = ctpop32(cpu->cfg.pmu_mask);
1557 
1558     visit_type_uint8(v, name, &pmu_num, errp);
1559 }
1560 
/* QOM property backing the deprecated "pmu-num" option. */
static const PropertyInfo prop_pmu_num = {
    .name = "pmu-num",
    .get = prop_pmu_num_get,
    .set = prop_pmu_num_set,
};
1566 
1567 static void prop_pmu_mask_set(Object *obj, Visitor *v, const char *name,
1568                              void *opaque, Error **errp)
1569 {
1570     RISCVCPU *cpu = RISCV_CPU(obj);
1571     uint32_t value;
1572     uint8_t pmu_num;
1573 
1574     visit_type_uint32(v, name, &value, errp);
1575 
1576     if (value != cpu->cfg.pmu_mask && riscv_cpu_is_vendor(obj)) {
1577         cpu_set_prop_err(cpu, name, errp);
1578         error_append_hint(errp, "Current '%s' val: %x\n",
1579                           name, cpu->cfg.pmu_mask);
1580         return;
1581     }
1582 
1583     pmu_num = ctpop32(value);
1584 
1585     if (pmu_num > (RV_MAX_MHPMCOUNTERS - 3)) {
1586         error_setg(errp, "Number of counters exceeds maximum available");
1587         return;
1588     }
1589 
1590     cpu_option_add_user_setting(name, value);
1591     cpu->cfg.pmu_mask = value;
1592 }
1593 
1594 static void prop_pmu_mask_get(Object *obj, Visitor *v, const char *name,
1595                              void *opaque, Error **errp)
1596 {
1597     uint8_t pmu_mask = RISCV_CPU(obj)->cfg.pmu_mask;
1598 
1599     visit_type_uint8(v, name, &pmu_mask, errp);
1600 }
1601 
/* QOM property backing the "pmu-mask" option. */
static const PropertyInfo prop_pmu_mask = {
    .name = "pmu-mask",
    .get = prop_pmu_mask_get,
    .set = prop_pmu_mask_set,
};
1607 
/* Non-boolean (value) CPU options shared by all CPU types. */
Property riscv_cpu_options[] = {
    DEFINE_PROP_BOOL("mmu", RISCVCPU, cfg.mmu, true),
    DEFINE_PROP_BOOL("pmp", RISCVCPU, cfg.pmp, true),

    DEFINE_PROP_STRING("priv_spec", RISCVCPU, cfg.priv_spec),
    DEFINE_PROP_STRING("vext_spec", RISCVCPU, cfg.vext_spec),

    /* Vector register length and max element width, in bits */
    DEFINE_PROP_UINT16("vlen", RISCVCPU, cfg.vlen, 128),
    DEFINE_PROP_UINT16("elen", RISCVCPU, cfg.elen, 64),

    /* Cache-block sizes (bytes) for the Zicbom/Zicbop/Zicboz operations */
    DEFINE_PROP_UINT16("cbom_blocksize", RISCVCPU, cfg.cbom_blocksize, 64),
    DEFINE_PROP_UINT16("cbop_blocksize", RISCVCPU, cfg.cbop_blocksize, 64),
    DEFINE_PROP_UINT16("cboz_blocksize", RISCVCPU, cfg.cboz_blocksize, 64),

    DEFINE_PROP_END_OF_LIST(),
};
1624 
1625 /*
1626  * RVA22U64 defines some 'named features' or 'synthetic extensions'
1627  * that are cache related: Za64rs, Zic64b, Ziccif, Ziccrse, Ziccamoa
1628  * and Zicclsm. We do not implement caching in QEMU so we'll consider
1629  * all these named features as always enabled.
1630  *
1631  * There's no riscv,isa update for them (nor for zic64b, despite it
1632  * having a cfg offset) at this moment.
1633  */
static RISCVCPUProfile RVA22U64 = {
    .parent = NULL,
    .name = "rva22u64",
    /* Mandatory single-letter extensions of the RVA22U64 profile */
    .misa_ext = RVI | RVM | RVA | RVF | RVD | RVC | RVU,
    .priv_spec = RISCV_PROFILE_ATTR_UNUSED,
    .satp_mode = RISCV_PROFILE_ATTR_UNUSED,
    .ext_offsets = {
        /* Mandatory multi-letter extensions, as RISCVCPUConfig offsets */
        CPU_CFG_OFFSET(ext_zicsr), CPU_CFG_OFFSET(ext_zihintpause),
        CPU_CFG_OFFSET(ext_zba), CPU_CFG_OFFSET(ext_zbb),
        CPU_CFG_OFFSET(ext_zbs), CPU_CFG_OFFSET(ext_zfhmin),
        CPU_CFG_OFFSET(ext_zkt), CPU_CFG_OFFSET(ext_zicntr),
        CPU_CFG_OFFSET(ext_zihpm), CPU_CFG_OFFSET(ext_zicbom),
        CPU_CFG_OFFSET(ext_zicbop), CPU_CFG_OFFSET(ext_zicboz),

        /* mandatory named features for this profile */
        CPU_CFG_OFFSET(zic64b),

        RISCV_PROFILE_EXT_LIST_END
    }
};
1654 
1655 /*
1656  * As with RVA22U64, RVA22S64 also defines 'named features'.
1657  *
1658  * Cache related features that we consider enabled since we don't
1659  * implement cache: Ssccptr
1660  *
1661  * Other named features that we already implement: Sstvecd, Sstvala,
1662  * Sscounterenw
1663  *
1664  * Named features that we need to enable: svade
1665  *
1666  * The remaining features/extensions comes from RVA22U64.
1667  */
static RISCVCPUProfile RVA22S64 = {
    /* Everything in RVA22U64 is inherited through the parent link. */
    .parent = &RVA22U64,
    .name = "rva22s64",
    .misa_ext = RVS,
    .priv_spec = PRIV_VERSION_1_12_0,
    .satp_mode = VM_1_10_SV39,
    .ext_offsets = {
        /* rva22s64 exts */
        CPU_CFG_OFFSET(ext_zifencei), CPU_CFG_OFFSET(ext_svpbmt),
        CPU_CFG_OFFSET(ext_svinval),

        /* rva22s64 named features */
        CPU_CFG_OFFSET(svade),

        RISCV_PROFILE_EXT_LIST_END
    }
};
1685 
/* NULL-terminated list of supported profiles, parents before children. */
RISCVCPUProfile *riscv_profiles[] = {
    &RVA22U64,
    &RVA22S64,
    NULL,
};
1691 
/* Base qdev properties registered on every RISC-V CPU type. */
static Property riscv_cpu_properties[] = {
    DEFINE_PROP_BOOL("debug", RISCVCPU, cfg.debug, true),

    /* Custom PropertyInfo: see prop_pmu_mask_get/set above. */
    {.name = "pmu-mask", .info = &prop_pmu_mask},
    {.name = "pmu-num", .info = &prop_pmu_num}, /* Deprecated */

#ifndef CONFIG_USER_ONLY
    DEFINE_PROP_UINT64("resetvec", RISCVCPU, env.resetvec, DEFAULT_RSTVEC),
#endif

    DEFINE_PROP_BOOL("short-isa-string", RISCVCPU, cfg.short_isa_string, false),

    /* Vector tail-agnostic / mask-agnostic: fill with all-1s when set */
    DEFINE_PROP_BOOL("rvv_ta_all_1s", RISCVCPU, cfg.rvv_ta_all_1s, false),
    DEFINE_PROP_BOOL("rvv_ma_all_1s", RISCVCPU, cfg.rvv_ma_all_1s, false),

    /*
     * write_misa() is marked as experimental for now so mark
     * it with -x and default to 'false'.
     */
    DEFINE_PROP_BOOL("x-misa-w", RISCVCPU, cfg.misa_w, false),
    DEFINE_PROP_END_OF_LIST(),
};
1714 
1715 #if defined(TARGET_RISCV64)
/* A profile CPU is a bare rv64i CPU with the profile flagged as enabled. */
static void rva22u64_profile_cpu_init(Object *obj)
{
    rv64i_bare_cpu_init(obj);

    RVA22U64.enabled = true;
}

static void rva22s64_profile_cpu_init(Object *obj)
{
    rv64i_bare_cpu_init(obj);

    RVA22S64.enabled = true;
}
1729 #endif
1730 
1731 static const gchar *riscv_gdb_arch_name(CPUState *cs)
1732 {
1733     RISCVCPU *cpu = RISCV_CPU(cs);
1734     CPURISCVState *env = &cpu->env;
1735 
1736     switch (riscv_cpu_mxl(env)) {
1737     case MXL_RV32:
1738         return "riscv:rv32";
1739     case MXL_RV64:
1740     case MXL_RV128:
1741         return "riscv:rv64";
1742     default:
1743         g_assert_not_reached();
1744     }
1745 }
1746 
1747 static const char *riscv_gdb_get_dynamic_xml(CPUState *cs, const char *xmlname)
1748 {
1749     RISCVCPU *cpu = RISCV_CPU(cs);
1750 
1751     if (strcmp(xmlname, "riscv-csr.xml") == 0) {
1752         return cpu->dyn_csr_xml;
1753     } else if (strcmp(xmlname, "riscv-vector.xml") == 0) {
1754         return cpu->dyn_vreg_xml;
1755     }
1756 
1757     return NULL;
1758 }
1759 
1760 #ifndef CONFIG_USER_ONLY
1761 static int64_t riscv_get_arch_id(CPUState *cs)
1762 {
1763     RISCVCPU *cpu = RISCV_CPU(cs);
1764 
1765     return cpu->env.mhartid;
1766 }
1767 
#include "hw/core/sysemu-cpu-ops.h"

/*
 * System-emulation-only CPU hooks: debug physical address translation,
 * ELF core-dump notes and the legacy migration state description.
 */
static const struct SysemuCPUOps riscv_sysemu_ops = {
    .get_phys_page_debug = riscv_cpu_get_phys_page_debug,
    .write_elf64_note = riscv_cpu_write_elf64_note,
    .write_elf32_note = riscv_cpu_write_elf32_note,
    .legacy_vmsd = &vmstate_riscv_cpu,
};
1776 #endif
1777 
1778 static void cpu_set_mvendorid(Object *obj, Visitor *v, const char *name,
1779                               void *opaque, Error **errp)
1780 {
1781     bool dynamic_cpu = riscv_cpu_is_dynamic(obj);
1782     RISCVCPU *cpu = RISCV_CPU(obj);
1783     uint32_t prev_val = cpu->cfg.mvendorid;
1784     uint32_t value;
1785 
1786     if (!visit_type_uint32(v, name, &value, errp)) {
1787         return;
1788     }
1789 
1790     if (!dynamic_cpu && prev_val != value) {
1791         error_setg(errp, "Unable to change %s mvendorid (0x%x)",
1792                    object_get_typename(obj), prev_val);
1793         return;
1794     }
1795 
1796     cpu->cfg.mvendorid = value;
1797 }
1798 
1799 static void cpu_get_mvendorid(Object *obj, Visitor *v, const char *name,
1800                               void *opaque, Error **errp)
1801 {
1802     uint32_t value = RISCV_CPU(obj)->cfg.mvendorid;
1803 
1804     visit_type_uint32(v, name, &value, errp);
1805 }
1806 
1807 static void cpu_set_mimpid(Object *obj, Visitor *v, const char *name,
1808                            void *opaque, Error **errp)
1809 {
1810     bool dynamic_cpu = riscv_cpu_is_dynamic(obj);
1811     RISCVCPU *cpu = RISCV_CPU(obj);
1812     uint64_t prev_val = cpu->cfg.mimpid;
1813     uint64_t value;
1814 
1815     if (!visit_type_uint64(v, name, &value, errp)) {
1816         return;
1817     }
1818 
1819     if (!dynamic_cpu && prev_val != value) {
1820         error_setg(errp, "Unable to change %s mimpid (0x%" PRIu64 ")",
1821                    object_get_typename(obj), prev_val);
1822         return;
1823     }
1824 
1825     cpu->cfg.mimpid = value;
1826 }
1827 
1828 static void cpu_get_mimpid(Object *obj, Visitor *v, const char *name,
1829                            void *opaque, Error **errp)
1830 {
1831     uint64_t value = RISCV_CPU(obj)->cfg.mimpid;
1832 
1833     visit_type_uint64(v, name, &value, errp);
1834 }
1835 
1836 static void cpu_set_marchid(Object *obj, Visitor *v, const char *name,
1837                             void *opaque, Error **errp)
1838 {
1839     bool dynamic_cpu = riscv_cpu_is_dynamic(obj);
1840     RISCVCPU *cpu = RISCV_CPU(obj);
1841     uint64_t prev_val = cpu->cfg.marchid;
1842     uint64_t value, invalid_val;
1843     uint32_t mxlen = 0;
1844 
1845     if (!visit_type_uint64(v, name, &value, errp)) {
1846         return;
1847     }
1848 
1849     if (!dynamic_cpu && prev_val != value) {
1850         error_setg(errp, "Unable to change %s marchid (0x%" PRIu64 ")",
1851                    object_get_typename(obj), prev_val);
1852         return;
1853     }
1854 
1855     switch (riscv_cpu_mxl(&cpu->env)) {
1856     case MXL_RV32:
1857         mxlen = 32;
1858         break;
1859     case MXL_RV64:
1860     case MXL_RV128:
1861         mxlen = 64;
1862         break;
1863     default:
1864         g_assert_not_reached();
1865     }
1866 
1867     invalid_val = 1LL << (mxlen - 1);
1868 
1869     if (value == invalid_val) {
1870         error_setg(errp, "Unable to set marchid with MSB (%u) bit set "
1871                          "and the remaining bits zero", mxlen);
1872         return;
1873     }
1874 
1875     cpu->cfg.marchid = value;
1876 }
1877 
1878 static void cpu_get_marchid(Object *obj, Visitor *v, const char *name,
1879                            void *opaque, Error **errp)
1880 {
1881     uint64_t value = RISCV_CPU(obj)->cfg.marchid;
1882 
1883     visit_type_uint64(v, name, &value, errp);
1884 }
1885 
/*
 * Class init for the abstract TYPE_RISCV_CPU: chains up realize and
 * reset, installs the common CPUClass hooks (gdbstub, disassembly,
 * state dump), registers the machine-id class properties and the
 * device property list.
 */
static void riscv_cpu_class_init(ObjectClass *c, void *data)
{
    RISCVCPUClass *mcc = RISCV_CPU_CLASS(c);
    CPUClass *cc = CPU_CLASS(c);
    DeviceClass *dc = DEVICE_CLASS(c);
    ResettableClass *rc = RESETTABLE_CLASS(c);

    device_class_set_parent_realize(dc, riscv_cpu_realize,
                                    &mcc->parent_realize);

    resettable_class_set_parent_phases(rc, NULL, riscv_cpu_reset_hold, NULL,
                                       &mcc->parent_phases);

    cc->class_by_name = riscv_cpu_class_by_name;
    cc->has_work = riscv_cpu_has_work;
    cc->mmu_index = riscv_cpu_mmu_index;
    cc->dump_state = riscv_cpu_dump_state;
    cc->set_pc = riscv_cpu_set_pc;
    cc->get_pc = riscv_cpu_get_pc;
    cc->gdb_read_register = riscv_cpu_gdb_read_register;
    cc->gdb_write_register = riscv_cpu_gdb_write_register;
    cc->gdb_num_core_regs = 33; /* x0-x31 plus pc */
    cc->gdb_stop_before_watchpoint = true;
    cc->disas_set_info = riscv_cpu_disas_set_info;
#ifndef CONFIG_USER_ONLY
    cc->sysemu_ops = &riscv_sysemu_ops;
    cc->get_arch_id = riscv_get_arch_id;
#endif
    cc->gdb_arch_name = riscv_gdb_arch_name;
    cc->gdb_get_dynamic_xml = riscv_gdb_get_dynamic_xml;

    /*
     * Machine-id properties use custom setters that refuse changes on
     * non-dynamic (named) CPU models.
     */
    object_class_property_add(c, "mvendorid", "uint32", cpu_get_mvendorid,
                              cpu_set_mvendorid, NULL, NULL);

    object_class_property_add(c, "mimpid", "uint64", cpu_get_mimpid,
                              cpu_set_mimpid, NULL, NULL);

    object_class_property_add(c, "marchid", "uint64", cpu_get_marchid,
                              cpu_set_marchid, NULL, NULL);

    device_class_set_props(dc, riscv_cpu_properties);
}
1928 
1929 static void riscv_isa_string_ext(RISCVCPU *cpu, char **isa_str,
1930                                  int max_str_len)
1931 {
1932     const RISCVIsaExtData *edata;
1933     char *old = *isa_str;
1934     char *new = *isa_str;
1935 
1936     for (edata = isa_edata_arr; edata && edata->name; edata++) {
1937         if (isa_ext_is_enabled(cpu, edata->ext_enable_offset)) {
1938             new = g_strconcat(old, "_", edata->name, NULL);
1939             g_free(old);
1940             old = new;
1941         }
1942     }
1943 
1944     *isa_str = new;
1945 }
1946 
1947 char *riscv_isa_string(RISCVCPU *cpu)
1948 {
1949     int i;
1950     const size_t maxlen = sizeof("rv128") + sizeof(riscv_single_letter_exts);
1951     char *isa_str = g_new(char, maxlen);
1952     char *p = isa_str + snprintf(isa_str, maxlen, "rv%d", TARGET_LONG_BITS);
1953     for (i = 0; i < sizeof(riscv_single_letter_exts) - 1; i++) {
1954         if (cpu->env.misa_ext & RV(riscv_single_letter_exts[i])) {
1955             *p++ = qemu_tolower(riscv_single_letter_exts[i]);
1956         }
1957     }
1958     *p = '\0';
1959     if (!cpu->cfg.short_isa_string) {
1960         riscv_isa_string_ext(cpu, &isa_str, maxlen);
1961     }
1962     return isa_str;
1963 }
1964 
/*
 * TypeInfo helpers for registering CPU models. They differ only in the
 * QOM parent type the model is attached to.
 */
#define DEFINE_CPU(type_name, initfn)      \
    {                                      \
        .name = type_name,                 \
        .parent = TYPE_RISCV_CPU,          \
        .instance_init = initfn            \
    }

/* Dynamic CPUs: extension set can be tuned by the user. */
#define DEFINE_DYNAMIC_CPU(type_name, initfn) \
    {                                         \
        .name = type_name,                    \
        .parent = TYPE_RISCV_DYNAMIC_CPU,     \
        .instance_init = initfn               \
    }

/* Vendor CPUs: model a specific existing hart. */
#define DEFINE_VENDOR_CPU(type_name, initfn) \
    {                                        \
        .name = type_name,                   \
        .parent = TYPE_RISCV_VENDOR_CPU,     \
        .instance_init = initfn              \
    }

/* Bare CPUs: minimal base ISA only. */
#define DEFINE_BARE_CPU(type_name, initfn) \
    {                                      \
        .name = type_name,                 \
        .parent = TYPE_RISCV_BARE_CPU,     \
        .instance_init = initfn            \
    }

/* Profile CPUs: bare CPUs whose init enables a RISC-V profile. */
#define DEFINE_PROFILE_CPU(type_name, initfn) \
    {                                         \
        .name = type_name,                    \
        .parent = TYPE_RISCV_BARE_CPU,        \
        .instance_init = initfn               \
    }
1999 
/*
 * Type table: the abstract base types first, then the concrete CPU
 * models available for the current target (rv32 or rv64 build).
 */
static const TypeInfo riscv_cpu_type_infos[] = {
    {
        .name = TYPE_RISCV_CPU,
        .parent = TYPE_CPU,
        .instance_size = sizeof(RISCVCPU),
        .instance_align = __alignof(RISCVCPU),
        .instance_init = riscv_cpu_init,
        .instance_post_init = riscv_cpu_post_init,
        .abstract = true,
        .class_size = sizeof(RISCVCPUClass),
        .class_init = riscv_cpu_class_init,
    },
    {
        .name = TYPE_RISCV_DYNAMIC_CPU,
        .parent = TYPE_RISCV_CPU,
        .abstract = true,
    },
    {
        .name = TYPE_RISCV_VENDOR_CPU,
        .parent = TYPE_RISCV_CPU,
        .abstract = true,
    },
    {
        .name = TYPE_RISCV_BARE_CPU,
        .parent = TYPE_RISCV_CPU,
        .abstract = true,
    },
    DEFINE_DYNAMIC_CPU(TYPE_RISCV_CPU_ANY,      riscv_any_cpu_init),
    DEFINE_DYNAMIC_CPU(TYPE_RISCV_CPU_MAX,      riscv_max_cpu_init),
#if defined(TARGET_RISCV32)
    DEFINE_DYNAMIC_CPU(TYPE_RISCV_CPU_BASE32,   rv32_base_cpu_init),
    DEFINE_VENDOR_CPU(TYPE_RISCV_CPU_IBEX,        rv32_ibex_cpu_init),
    DEFINE_VENDOR_CPU(TYPE_RISCV_CPU_SIFIVE_E31,  rv32_sifive_e_cpu_init),
    DEFINE_VENDOR_CPU(TYPE_RISCV_CPU_SIFIVE_E34,  rv32_imafcu_nommu_cpu_init),
    DEFINE_VENDOR_CPU(TYPE_RISCV_CPU_SIFIVE_U34,  rv32_sifive_u_cpu_init),
#elif defined(TARGET_RISCV64)
    DEFINE_DYNAMIC_CPU(TYPE_RISCV_CPU_BASE64,   rv64_base_cpu_init),
    DEFINE_VENDOR_CPU(TYPE_RISCV_CPU_SIFIVE_E51,  rv64_sifive_e_cpu_init),
    DEFINE_VENDOR_CPU(TYPE_RISCV_CPU_SIFIVE_U54,  rv64_sifive_u_cpu_init),
    /* NOTE(review): Shakti C reuses the SiFive U init — confirm intended. */
    DEFINE_VENDOR_CPU(TYPE_RISCV_CPU_SHAKTI_C,    rv64_sifive_u_cpu_init),
    DEFINE_VENDOR_CPU(TYPE_RISCV_CPU_THEAD_C906,  rv64_thead_c906_cpu_init),
    DEFINE_VENDOR_CPU(TYPE_RISCV_CPU_VEYRON_V1,   rv64_veyron_v1_cpu_init),
    DEFINE_DYNAMIC_CPU(TYPE_RISCV_CPU_BASE128,  rv128_base_cpu_init),
    DEFINE_BARE_CPU(TYPE_RISCV_CPU_RV64I, rv64i_bare_cpu_init),
    DEFINE_PROFILE_CPU(TYPE_RISCV_CPU_RVA22U64, rva22u64_profile_cpu_init),
    DEFINE_PROFILE_CPU(TYPE_RISCV_CPU_RVA22S64, rva22s64_profile_cpu_init),
#endif
};

DEFINE_TYPES(riscv_cpu_type_infos)
2050