xref: /openbmc/qemu/target/riscv/cpu.c (revision 1fc0a58a982c821100a353edf2e5fca76effb1a2)
1 /*
2  * QEMU RISC-V CPU
3  *
4  * Copyright (c) 2016-2017 Sagar Karandikar, sagark@eecs.berkeley.edu
5  * Copyright (c) 2017-2018 SiFive, Inc.
6  *
7  * This program is free software; you can redistribute it and/or modify it
8  * under the terms and conditions of the GNU General Public License,
9  * version 2 or later, as published by the Free Software Foundation.
10  *
11  * This program is distributed in the hope it will be useful, but WITHOUT
12  * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
13  * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
14  * more details.
15  *
16  * You should have received a copy of the GNU General Public License along with
17  * this program.  If not, see <http://www.gnu.org/licenses/>.
18  */
19 
20 #include "qemu/osdep.h"
21 #include "qemu/qemu-print.h"
22 #include "qemu/ctype.h"
23 #include "qemu/log.h"
24 #include "cpu.h"
25 #include "cpu_vendorid.h"
26 #include "internals.h"
27 #include "exec/exec-all.h"
28 #include "qapi/error.h"
29 #include "qapi/visitor.h"
30 #include "qemu/error-report.h"
31 #include "hw/qdev-properties.h"
32 #include "hw/core/qdev-prop-internal.h"
33 #include "migration/vmstate.h"
34 #include "fpu/softfloat-helpers.h"
35 #include "sysemu/device_tree.h"
36 #include "sysemu/kvm.h"
37 #include "sysemu/tcg.h"
38 #include "kvm/kvm_riscv.h"
39 #include "tcg/tcg-cpu.h"
40 #include "tcg/tcg.h"
41 
42 /* RISC-V CPU definitions */
43 static const char riscv_single_letter_exts[] = "IEMAFDQCBPVH";
44 const uint32_t misa_bits[] = {RVI, RVE, RVM, RVA, RVF, RVD, RVV,
45                               RVC, RVS, RVU, RVH, RVJ, RVG, RVB, 0};
46 
47 /*
48  * From vector_helper.c
49  * Note that vector data is stored in host-endian 64-bit chunks,
50  * so addressing bytes needs a host-endian fixup.
51  */
52 #if HOST_BIG_ENDIAN
53 #define BYTE(x)   ((x) ^ 7)
54 #else
55 #define BYTE(x)   (x)
56 #endif
57 
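/*
 * For illustration: the fixup swaps byte order within each 64-bit chunk,
 * so on a big-endian host BYTE(0) == 7, BYTE(1) == 6, ... BYTE(7) == 0,
 * while on a little-endian host BYTE() is the identity.  This matches the
 * access pattern used in riscv_cpu_dump_state() below:
 *
 *     *((uint8_t *)env->vreg + i * vlenb + BYTE(j))
 */
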
58 bool riscv_cpu_is_32bit(RISCVCPU *cpu)
59 {
60     return riscv_cpu_mxl(&cpu->env) == MXL_RV32;
61 }
62 
63 /* Hash that stores general user set numeric options */
64 static GHashTable *general_user_opts;
65 
66 static void cpu_option_add_user_setting(const char *optname, uint32_t value)
67 {
68     g_hash_table_insert(general_user_opts, (gpointer)optname,
69                         GUINT_TO_POINTER(value));
70 }
71 
72 bool riscv_cpu_option_set(const char *optname)
73 {
74     return g_hash_table_contains(general_user_opts, optname);
75 }
76 
77 #define ISA_EXT_DATA_ENTRY(_name, _min_ver, _prop) \
78     {#_name, _min_ver, CPU_CFG_OFFSET(_prop)}
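
/*
 * For illustration, an entry such as
 *     ISA_EXT_DATA_ENTRY(zicsr, PRIV_VERSION_1_10_0, ext_zicsr)
 * expands to
 *     {"zicsr", PRIV_VERSION_1_10_0, CPU_CFG_OFFSET(ext_zicsr)}
 * i.e. the extension name string, the minimum privileged spec version that
 * defines it, and the CPU_CFG_OFFSET() of the matching RISCVCPUConfig flag.
 */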
79 
80 /*
81  * Here are the ordering rules of extension naming defined by the RISC-V
82  * specification:
83  * 1. All extensions should be separated from other multi-letter extensions
84  *    by an underscore.
85  * 2. The first letter following the 'Z' conventionally indicates the most
86  *    closely related alphabetical extension category, IMAFDQLCBKJTPVH.
87  *    If multiple 'Z' extensions are named, they should be ordered first
88  *    by category, then alphabetically within a category.
89  * 3. Standard supervisor-level extensions (starting with 'S') should be
90  *    listed after standard unprivileged extensions.  If multiple
91  *    supervisor-level extensions are listed, they should be ordered
92  *    alphabetically.
93  * 4. Non-standard extensions (starting with 'X') must be listed after all
94  *    standard extensions. They must be separated from other multi-letter
95  *    extensions by an underscore.
96  *
97  * Single letter extensions are checked in riscv_cpu_validate_misa_priv()
98  * instead.
99  */
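/*
 * For example, an ISA string that follows these rules could look like:
 *     rv64imafdc_zicsr_zifencei_zba_zbb_sstc_svinval_xtheadba
 * (single-letter extensions, then 'Z' extensions grouped by category,
 * then 'S' extensions, then vendor 'X' extensions).
 */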
100 const RISCVIsaExtData isa_edata_arr[] = {
101     ISA_EXT_DATA_ENTRY(zic64b, PRIV_VERSION_1_12_0, ext_zic64b),
102     ISA_EXT_DATA_ENTRY(zicbom, PRIV_VERSION_1_12_0, ext_zicbom),
103     ISA_EXT_DATA_ENTRY(zicbop, PRIV_VERSION_1_12_0, ext_zicbop),
104     ISA_EXT_DATA_ENTRY(zicboz, PRIV_VERSION_1_12_0, ext_zicboz),
105     ISA_EXT_DATA_ENTRY(ziccamoa, PRIV_VERSION_1_11_0, has_priv_1_11),
106     ISA_EXT_DATA_ENTRY(ziccif, PRIV_VERSION_1_11_0, has_priv_1_11),
107     ISA_EXT_DATA_ENTRY(zicclsm, PRIV_VERSION_1_11_0, has_priv_1_11),
108     ISA_EXT_DATA_ENTRY(ziccrse, PRIV_VERSION_1_11_0, has_priv_1_11),
109     ISA_EXT_DATA_ENTRY(zicfilp, PRIV_VERSION_1_12_0, ext_zicfilp),
110     ISA_EXT_DATA_ENTRY(zicfiss, PRIV_VERSION_1_13_0, ext_zicfiss),
111     ISA_EXT_DATA_ENTRY(zicond, PRIV_VERSION_1_12_0, ext_zicond),
112     ISA_EXT_DATA_ENTRY(zicntr, PRIV_VERSION_1_12_0, ext_zicntr),
113     ISA_EXT_DATA_ENTRY(zicsr, PRIV_VERSION_1_10_0, ext_zicsr),
114     ISA_EXT_DATA_ENTRY(zifencei, PRIV_VERSION_1_10_0, ext_zifencei),
115     ISA_EXT_DATA_ENTRY(zihintntl, PRIV_VERSION_1_10_0, ext_zihintntl),
116     ISA_EXT_DATA_ENTRY(zihintpause, PRIV_VERSION_1_10_0, ext_zihintpause),
117     ISA_EXT_DATA_ENTRY(zihpm, PRIV_VERSION_1_12_0, ext_zihpm),
118     ISA_EXT_DATA_ENTRY(zimop, PRIV_VERSION_1_13_0, ext_zimop),
119     ISA_EXT_DATA_ENTRY(zmmul, PRIV_VERSION_1_12_0, ext_zmmul),
120     ISA_EXT_DATA_ENTRY(za64rs, PRIV_VERSION_1_12_0, has_priv_1_12),
121     ISA_EXT_DATA_ENTRY(zaamo, PRIV_VERSION_1_12_0, ext_zaamo),
122     ISA_EXT_DATA_ENTRY(zabha, PRIV_VERSION_1_13_0, ext_zabha),
123     ISA_EXT_DATA_ENTRY(zacas, PRIV_VERSION_1_12_0, ext_zacas),
124     ISA_EXT_DATA_ENTRY(zama16b, PRIV_VERSION_1_13_0, ext_zama16b),
125     ISA_EXT_DATA_ENTRY(zalrsc, PRIV_VERSION_1_12_0, ext_zalrsc),
126     ISA_EXT_DATA_ENTRY(zawrs, PRIV_VERSION_1_12_0, ext_zawrs),
127     ISA_EXT_DATA_ENTRY(zfa, PRIV_VERSION_1_12_0, ext_zfa),
128     ISA_EXT_DATA_ENTRY(zfbfmin, PRIV_VERSION_1_12_0, ext_zfbfmin),
129     ISA_EXT_DATA_ENTRY(zfh, PRIV_VERSION_1_11_0, ext_zfh),
130     ISA_EXT_DATA_ENTRY(zfhmin, PRIV_VERSION_1_11_0, ext_zfhmin),
131     ISA_EXT_DATA_ENTRY(zfinx, PRIV_VERSION_1_12_0, ext_zfinx),
132     ISA_EXT_DATA_ENTRY(zdinx, PRIV_VERSION_1_12_0, ext_zdinx),
133     ISA_EXT_DATA_ENTRY(zca, PRIV_VERSION_1_12_0, ext_zca),
134     ISA_EXT_DATA_ENTRY(zcb, PRIV_VERSION_1_12_0, ext_zcb),
135     ISA_EXT_DATA_ENTRY(zcf, PRIV_VERSION_1_12_0, ext_zcf),
136     ISA_EXT_DATA_ENTRY(zcd, PRIV_VERSION_1_12_0, ext_zcd),
137     ISA_EXT_DATA_ENTRY(zce, PRIV_VERSION_1_12_0, ext_zce),
138     ISA_EXT_DATA_ENTRY(zcmop, PRIV_VERSION_1_13_0, ext_zcmop),
139     ISA_EXT_DATA_ENTRY(zcmp, PRIV_VERSION_1_12_0, ext_zcmp),
140     ISA_EXT_DATA_ENTRY(zcmt, PRIV_VERSION_1_12_0, ext_zcmt),
141     ISA_EXT_DATA_ENTRY(zba, PRIV_VERSION_1_12_0, ext_zba),
142     ISA_EXT_DATA_ENTRY(zbb, PRIV_VERSION_1_12_0, ext_zbb),
143     ISA_EXT_DATA_ENTRY(zbc, PRIV_VERSION_1_12_0, ext_zbc),
144     ISA_EXT_DATA_ENTRY(zbkb, PRIV_VERSION_1_12_0, ext_zbkb),
145     ISA_EXT_DATA_ENTRY(zbkc, PRIV_VERSION_1_12_0, ext_zbkc),
146     ISA_EXT_DATA_ENTRY(zbkx, PRIV_VERSION_1_12_0, ext_zbkx),
147     ISA_EXT_DATA_ENTRY(zbs, PRIV_VERSION_1_12_0, ext_zbs),
148     ISA_EXT_DATA_ENTRY(zk, PRIV_VERSION_1_12_0, ext_zk),
149     ISA_EXT_DATA_ENTRY(zkn, PRIV_VERSION_1_12_0, ext_zkn),
150     ISA_EXT_DATA_ENTRY(zknd, PRIV_VERSION_1_12_0, ext_zknd),
151     ISA_EXT_DATA_ENTRY(zkne, PRIV_VERSION_1_12_0, ext_zkne),
152     ISA_EXT_DATA_ENTRY(zknh, PRIV_VERSION_1_12_0, ext_zknh),
153     ISA_EXT_DATA_ENTRY(zkr, PRIV_VERSION_1_12_0, ext_zkr),
154     ISA_EXT_DATA_ENTRY(zks, PRIV_VERSION_1_12_0, ext_zks),
155     ISA_EXT_DATA_ENTRY(zksed, PRIV_VERSION_1_12_0, ext_zksed),
156     ISA_EXT_DATA_ENTRY(zksh, PRIV_VERSION_1_12_0, ext_zksh),
157     ISA_EXT_DATA_ENTRY(zkt, PRIV_VERSION_1_12_0, ext_zkt),
158     ISA_EXT_DATA_ENTRY(ztso, PRIV_VERSION_1_12_0, ext_ztso),
159     ISA_EXT_DATA_ENTRY(zvbb, PRIV_VERSION_1_12_0, ext_zvbb),
160     ISA_EXT_DATA_ENTRY(zvbc, PRIV_VERSION_1_12_0, ext_zvbc),
161     ISA_EXT_DATA_ENTRY(zve32f, PRIV_VERSION_1_10_0, ext_zve32f),
162     ISA_EXT_DATA_ENTRY(zve32x, PRIV_VERSION_1_10_0, ext_zve32x),
163     ISA_EXT_DATA_ENTRY(zve64f, PRIV_VERSION_1_10_0, ext_zve64f),
164     ISA_EXT_DATA_ENTRY(zve64d, PRIV_VERSION_1_10_0, ext_zve64d),
165     ISA_EXT_DATA_ENTRY(zve64x, PRIV_VERSION_1_10_0, ext_zve64x),
166     ISA_EXT_DATA_ENTRY(zvfbfmin, PRIV_VERSION_1_12_0, ext_zvfbfmin),
167     ISA_EXT_DATA_ENTRY(zvfbfwma, PRIV_VERSION_1_12_0, ext_zvfbfwma),
168     ISA_EXT_DATA_ENTRY(zvfh, PRIV_VERSION_1_12_0, ext_zvfh),
169     ISA_EXT_DATA_ENTRY(zvfhmin, PRIV_VERSION_1_12_0, ext_zvfhmin),
170     ISA_EXT_DATA_ENTRY(zvkb, PRIV_VERSION_1_12_0, ext_zvkb),
171     ISA_EXT_DATA_ENTRY(zvkg, PRIV_VERSION_1_12_0, ext_zvkg),
172     ISA_EXT_DATA_ENTRY(zvkn, PRIV_VERSION_1_12_0, ext_zvkn),
173     ISA_EXT_DATA_ENTRY(zvknc, PRIV_VERSION_1_12_0, ext_zvknc),
174     ISA_EXT_DATA_ENTRY(zvkned, PRIV_VERSION_1_12_0, ext_zvkned),
175     ISA_EXT_DATA_ENTRY(zvkng, PRIV_VERSION_1_12_0, ext_zvkng),
176     ISA_EXT_DATA_ENTRY(zvknha, PRIV_VERSION_1_12_0, ext_zvknha),
177     ISA_EXT_DATA_ENTRY(zvknhb, PRIV_VERSION_1_12_0, ext_zvknhb),
178     ISA_EXT_DATA_ENTRY(zvks, PRIV_VERSION_1_12_0, ext_zvks),
179     ISA_EXT_DATA_ENTRY(zvksc, PRIV_VERSION_1_12_0, ext_zvksc),
180     ISA_EXT_DATA_ENTRY(zvksed, PRIV_VERSION_1_12_0, ext_zvksed),
181     ISA_EXT_DATA_ENTRY(zvksg, PRIV_VERSION_1_12_0, ext_zvksg),
182     ISA_EXT_DATA_ENTRY(zvksh, PRIV_VERSION_1_12_0, ext_zvksh),
183     ISA_EXT_DATA_ENTRY(zvkt, PRIV_VERSION_1_12_0, ext_zvkt),
184     ISA_EXT_DATA_ENTRY(zhinx, PRIV_VERSION_1_12_0, ext_zhinx),
185     ISA_EXT_DATA_ENTRY(zhinxmin, PRIV_VERSION_1_12_0, ext_zhinxmin),
186     ISA_EXT_DATA_ENTRY(smaia, PRIV_VERSION_1_12_0, ext_smaia),
187     ISA_EXT_DATA_ENTRY(smcntrpmf, PRIV_VERSION_1_12_0, ext_smcntrpmf),
188     ISA_EXT_DATA_ENTRY(smepmp, PRIV_VERSION_1_12_0, ext_smepmp),
189     ISA_EXT_DATA_ENTRY(smstateen, PRIV_VERSION_1_12_0, ext_smstateen),
190     ISA_EXT_DATA_ENTRY(ssaia, PRIV_VERSION_1_12_0, ext_ssaia),
191     ISA_EXT_DATA_ENTRY(ssccptr, PRIV_VERSION_1_11_0, has_priv_1_11),
192     ISA_EXT_DATA_ENTRY(sscofpmf, PRIV_VERSION_1_12_0, ext_sscofpmf),
193     ISA_EXT_DATA_ENTRY(sscounterenw, PRIV_VERSION_1_12_0, has_priv_1_12),
194     ISA_EXT_DATA_ENTRY(sstc, PRIV_VERSION_1_12_0, ext_sstc),
195     ISA_EXT_DATA_ENTRY(sstvala, PRIV_VERSION_1_12_0, has_priv_1_12),
196     ISA_EXT_DATA_ENTRY(sstvecd, PRIV_VERSION_1_12_0, has_priv_1_12),
197     ISA_EXT_DATA_ENTRY(svade, PRIV_VERSION_1_11_0, ext_svade),
198     ISA_EXT_DATA_ENTRY(svadu, PRIV_VERSION_1_12_0, ext_svadu),
199     ISA_EXT_DATA_ENTRY(svinval, PRIV_VERSION_1_12_0, ext_svinval),
200     ISA_EXT_DATA_ENTRY(svnapot, PRIV_VERSION_1_12_0, ext_svnapot),
201     ISA_EXT_DATA_ENTRY(svpbmt, PRIV_VERSION_1_12_0, ext_svpbmt),
202     ISA_EXT_DATA_ENTRY(svvptc, PRIV_VERSION_1_13_0, ext_svvptc),
203     ISA_EXT_DATA_ENTRY(xtheadba, PRIV_VERSION_1_11_0, ext_xtheadba),
204     ISA_EXT_DATA_ENTRY(xtheadbb, PRIV_VERSION_1_11_0, ext_xtheadbb),
205     ISA_EXT_DATA_ENTRY(xtheadbs, PRIV_VERSION_1_11_0, ext_xtheadbs),
206     ISA_EXT_DATA_ENTRY(xtheadcmo, PRIV_VERSION_1_11_0, ext_xtheadcmo),
207     ISA_EXT_DATA_ENTRY(xtheadcondmov, PRIV_VERSION_1_11_0, ext_xtheadcondmov),
208     ISA_EXT_DATA_ENTRY(xtheadfmemidx, PRIV_VERSION_1_11_0, ext_xtheadfmemidx),
209     ISA_EXT_DATA_ENTRY(xtheadfmv, PRIV_VERSION_1_11_0, ext_xtheadfmv),
210     ISA_EXT_DATA_ENTRY(xtheadmac, PRIV_VERSION_1_11_0, ext_xtheadmac),
211     ISA_EXT_DATA_ENTRY(xtheadmemidx, PRIV_VERSION_1_11_0, ext_xtheadmemidx),
212     ISA_EXT_DATA_ENTRY(xtheadmempair, PRIV_VERSION_1_11_0, ext_xtheadmempair),
213     ISA_EXT_DATA_ENTRY(xtheadsync, PRIV_VERSION_1_11_0, ext_xtheadsync),
214     ISA_EXT_DATA_ENTRY(xventanacondops, PRIV_VERSION_1_12_0, ext_XVentanaCondOps),
215 
216     DEFINE_PROP_END_OF_LIST(),
217 };
218 
219 bool isa_ext_is_enabled(RISCVCPU *cpu, uint32_t ext_offset)
220 {
221     bool *ext_enabled = (void *)&cpu->cfg + ext_offset;
222 
223     return *ext_enabled;
224 }
225 
226 void isa_ext_update_enabled(RISCVCPU *cpu, uint32_t ext_offset, bool en)
227 {
228     bool *ext_enabled = (void *)&cpu->cfg + ext_offset;
229 
230     *ext_enabled = en;
231 }
232 
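/*
 * For illustration, the two helpers above treat cpu->cfg as a byte array
 * indexed by CPU_CFG_OFFSET():
 *
 *     isa_ext_is_enabled(cpu, CPU_CFG_OFFSET(ext_zba))
 *
 * reads the same flag as cpu->cfg.ext_zba, which lets generic code walk
 * tables like isa_edata_arr[] without naming each field.
 */
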
233 bool riscv_cpu_is_vendor(Object *cpu_obj)
234 {
235     return object_dynamic_cast(cpu_obj, TYPE_RISCV_VENDOR_CPU) != NULL;
236 }
237 
238 const char * const riscv_int_regnames[] = {
239     "x0/zero", "x1/ra",  "x2/sp",  "x3/gp",  "x4/tp",  "x5/t0",   "x6/t1",
240     "x7/t2",   "x8/s0",  "x9/s1",  "x10/a0", "x11/a1", "x12/a2",  "x13/a3",
241     "x14/a4",  "x15/a5", "x16/a6", "x17/a7", "x18/s2", "x19/s3",  "x20/s4",
242     "x21/s5",  "x22/s6", "x23/s7", "x24/s8", "x25/s9", "x26/s10", "x27/s11",
243     "x28/t3",  "x29/t4", "x30/t5", "x31/t6"
244 };
245 
246 const char * const riscv_int_regnamesh[] = {
247     "x0h/zeroh", "x1h/rah",  "x2h/sph",   "x3h/gph",   "x4h/tph",  "x5h/t0h",
248     "x6h/t1h",   "x7h/t2h",  "x8h/s0h",   "x9h/s1h",   "x10h/a0h", "x11h/a1h",
249     "x12h/a2h",  "x13h/a3h", "x14h/a4h",  "x15h/a5h",  "x16h/a6h", "x17h/a7h",
250     "x18h/s2h",  "x19h/s3h", "x20h/s4h",  "x21h/s5h",  "x22h/s6h", "x23h/s7h",
251     "x24h/s8h",  "x25h/s9h", "x26h/s10h", "x27h/s11h", "x28h/t3h", "x29h/t4h",
252     "x30h/t5h",  "x31h/t6h"
253 };
254 
255 const char * const riscv_fpr_regnames[] = {
256     "f0/ft0",   "f1/ft1",  "f2/ft2",   "f3/ft3",   "f4/ft4",  "f5/ft5",
257     "f6/ft6",   "f7/ft7",  "f8/fs0",   "f9/fs1",   "f10/fa0", "f11/fa1",
258     "f12/fa2",  "f13/fa3", "f14/fa4",  "f15/fa5",  "f16/fa6", "f17/fa7",
259     "f18/fs2",  "f19/fs3", "f20/fs4",  "f21/fs5",  "f22/fs6", "f23/fs7",
260     "f24/fs8",  "f25/fs9", "f26/fs10", "f27/fs11", "f28/ft8", "f29/ft9",
261     "f30/ft10", "f31/ft11"
262 };
263 
264 const char * const riscv_rvv_regnames[] = {
265   "v0",  "v1",  "v2",  "v3",  "v4",  "v5",  "v6",
266   "v7",  "v8",  "v9",  "v10", "v11", "v12", "v13",
267   "v14", "v15", "v16", "v17", "v18", "v19", "v20",
268   "v21", "v22", "v23", "v24", "v25", "v26", "v27",
269   "v28", "v29", "v30", "v31"
270 };
271 
272 static const char * const riscv_excp_names[] = {
273     "misaligned_fetch",
274     "fault_fetch",
275     "illegal_instruction",
276     "breakpoint",
277     "misaligned_load",
278     "fault_load",
279     "misaligned_store",
280     "fault_store",
281     "user_ecall",
282     "supervisor_ecall",
283     "hypervisor_ecall",
284     "machine_ecall",
285     "exec_page_fault",
286     "load_page_fault",
287     "reserved",
288     "store_page_fault",
289     "reserved",
290     "reserved",
291     "reserved",
292     "reserved",
293     "guest_exec_page_fault",
294     "guest_load_page_fault",
295     "reserved",
296     "guest_store_page_fault",
297 };
298 
299 static const char * const riscv_intr_names[] = {
300     "u_software",
301     "s_software",
302     "vs_software",
303     "m_software",
304     "u_timer",
305     "s_timer",
306     "vs_timer",
307     "m_timer",
308     "u_external",
309     "s_external",
310     "vs_external",
311     "m_external",
312     "reserved",
313     "reserved",
314     "reserved",
315     "reserved"
316 };
317 
318 const char *riscv_cpu_get_trap_name(target_ulong cause, bool async)
319 {
320     if (async) {
321         return (cause < ARRAY_SIZE(riscv_intr_names)) ?
322                riscv_intr_names[cause] : "(unknown)";
323     } else {
324         return (cause < ARRAY_SIZE(riscv_excp_names)) ?
325                riscv_excp_names[cause] : "(unknown)";
326     }
327 }
328 
329 void riscv_cpu_set_misa_ext(CPURISCVState *env, uint32_t ext)
330 {
331     env->misa_ext_mask = env->misa_ext = ext;
332 }
333 
334 int riscv_cpu_max_xlen(RISCVCPUClass *mcc)
335 {
336     return 16 << mcc->misa_mxl_max;
337 }
338 
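/*
 * For illustration: with the misa.MXL encoding (MXL_RV32 == 1, MXL_RV64 == 2,
 * MXL_RV128 == 3), riscv_cpu_max_xlen() yields 16 << 1 = 32, 16 << 2 = 64 and
 * 16 << 3 = 128 respectively.
 */
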
339 #ifndef CONFIG_USER_ONLY
340 static uint8_t satp_mode_from_str(const char *satp_mode_str)
341 {
342     if (!strncmp(satp_mode_str, "mbare", 5)) {
343         return VM_1_10_MBARE;
344     }
345 
346     if (!strncmp(satp_mode_str, "sv32", 4)) {
347         return VM_1_10_SV32;
348     }
349 
350     if (!strncmp(satp_mode_str, "sv39", 4)) {
351         return VM_1_10_SV39;
352     }
353 
354     if (!strncmp(satp_mode_str, "sv48", 4)) {
355         return VM_1_10_SV48;
356     }
357 
358     if (!strncmp(satp_mode_str, "sv57", 4)) {
359         return VM_1_10_SV57;
360     }
361 
362     if (!strncmp(satp_mode_str, "sv64", 4)) {
363         return VM_1_10_SV64;
364     }
365 
366     g_assert_not_reached();
367 }
368 
369 uint8_t satp_mode_max_from_map(uint32_t map)
370 {
371     /*
372      * 'map = 0' will make us return (31 - 32), which C will
373      * happily overflow to UINT_MAX. There's no good result to
374      * return if 'map = 0' (e.g. returning 0 will be ambiguous
375      * with the result for 'map = 1').
376      *
377      * Assert out if map = 0. Callers will have to deal with
378      * it outside of this function.
379      */
380     g_assert(map > 0);
381 
382     /* map here has at least one bit set, so no problem with clz */
383     return 31 - __builtin_clz(map);
384 }
385 
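/*
 * For illustration, the helper simply returns the index of the highest set
 * bit, e.g.:
 *
 *     satp_mode_max_from_map((1 << VM_1_10_SV39) | (1 << VM_1_10_SV48));
 *
 * returns VM_1_10_SV48, since SV48 is the numerically larger mode.
 */
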
386 const char *satp_mode_str(uint8_t satp_mode, bool is_32_bit)
387 {
388     if (is_32_bit) {
389         switch (satp_mode) {
390         case VM_1_10_SV32:
391             return "sv32";
392         case VM_1_10_MBARE:
393             return "none";
394         }
395     } else {
396         switch (satp_mode) {
397         case VM_1_10_SV64:
398             return "sv64";
399         case VM_1_10_SV57:
400             return "sv57";
401         case VM_1_10_SV48:
402             return "sv48";
403         case VM_1_10_SV39:
404             return "sv39";
405         case VM_1_10_MBARE:
406             return "none";
407         }
408     }
409 
410     g_assert_not_reached();
411 }
412 
413 static void set_satp_mode_max_supported(RISCVCPU *cpu,
414                                         uint8_t satp_mode)
415 {
416     bool rv32 = riscv_cpu_mxl(&cpu->env) == MXL_RV32;
417     const bool *valid_vm = rv32 ? valid_vm_1_10_32 : valid_vm_1_10_64;
418 
419     for (int i = 0; i <= satp_mode; ++i) {
420         if (valid_vm[i]) {
421             cpu->cfg.satp_mode.supported |= (1 << i);
422         }
423     }
424 }
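
/*
 * For illustration (assuming valid_vm_1_10_64 marks mbare/sv39/sv48/sv57 as
 * valid): calling set_satp_mode_max_supported(cpu, VM_1_10_SV57) on a 64-bit
 * CPU sets the 'supported' bits for mbare, sv39, sv48 and sv57, while
 * VM_1_10_MBARE leaves only the bare mode supported.
 */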
425 
426 /* Set the satp mode to the max supported */
427 static void set_satp_mode_default_map(RISCVCPU *cpu)
428 {
429     /*
430      * Bare CPUs do not default to the max available.
431      * Users must set a valid satp_mode in the command
432      * line.
433      */
434     if (object_dynamic_cast(OBJECT(cpu), TYPE_RISCV_BARE_CPU) != NULL) {
435         warn_report("No satp mode set. Defaulting to 'bare'");
436         cpu->cfg.satp_mode.map = (1 << VM_1_10_MBARE);
437         return;
438     }
439 
440     cpu->cfg.satp_mode.map = cpu->cfg.satp_mode.supported;
441 }
442 #endif
443 
444 static void riscv_max_cpu_init(Object *obj)
445 {
446     RISCVCPU *cpu = RISCV_CPU(obj);
447     CPURISCVState *env = &cpu->env;
448 
449     cpu->cfg.mmu = true;
450     cpu->cfg.pmp = true;
451 
452     env->priv_ver = PRIV_VERSION_LATEST;
453 #ifndef CONFIG_USER_ONLY
454     set_satp_mode_max_supported(RISCV_CPU(obj),
455         riscv_cpu_mxl(&RISCV_CPU(obj)->env) == MXL_RV32 ?
456         VM_1_10_SV32 : VM_1_10_SV57);
457 #endif
458 }
459 
460 #if defined(TARGET_RISCV64)
461 static void rv64_base_cpu_init(Object *obj)
462 {
463     RISCVCPU *cpu = RISCV_CPU(obj);
464     CPURISCVState *env = &cpu->env;
465 
466     cpu->cfg.mmu = true;
467     cpu->cfg.pmp = true;
468 
469     /* Set latest version of privileged specification */
470     env->priv_ver = PRIV_VERSION_LATEST;
471 #ifndef CONFIG_USER_ONLY
472     set_satp_mode_max_supported(RISCV_CPU(obj), VM_1_10_SV57);
473 #endif
474 }
475 
476 static void rv64_sifive_u_cpu_init(Object *obj)
477 {
478     RISCVCPU *cpu = RISCV_CPU(obj);
479     CPURISCVState *env = &cpu->env;
480     riscv_cpu_set_misa_ext(env, RVI | RVM | RVA | RVF | RVD | RVC | RVS | RVU);
481     env->priv_ver = PRIV_VERSION_1_10_0;
482 #ifndef CONFIG_USER_ONLY
483     set_satp_mode_max_supported(RISCV_CPU(obj), VM_1_10_SV39);
484 #endif
485 
486     /* inherited from parent obj via riscv_cpu_init() */
487     cpu->cfg.ext_zifencei = true;
488     cpu->cfg.ext_zicsr = true;
489     cpu->cfg.mmu = true;
490     cpu->cfg.pmp = true;
491 }
492 
493 static void rv64_sifive_e_cpu_init(Object *obj)
494 {
495     CPURISCVState *env = &RISCV_CPU(obj)->env;
496     RISCVCPU *cpu = RISCV_CPU(obj);
497 
498     riscv_cpu_set_misa_ext(env, RVI | RVM | RVA | RVC | RVU);
499     env->priv_ver = PRIV_VERSION_1_10_0;
500 #ifndef CONFIG_USER_ONLY
501     set_satp_mode_max_supported(cpu, VM_1_10_MBARE);
502 #endif
503 
504     /* inherited from parent obj via riscv_cpu_init() */
505     cpu->cfg.ext_zifencei = true;
506     cpu->cfg.ext_zicsr = true;
507     cpu->cfg.pmp = true;
508 }
509 
510 static void rv64_thead_c906_cpu_init(Object *obj)
511 {
512     CPURISCVState *env = &RISCV_CPU(obj)->env;
513     RISCVCPU *cpu = RISCV_CPU(obj);
514 
515     riscv_cpu_set_misa_ext(env, RVG | RVC | RVS | RVU);
516     env->priv_ver = PRIV_VERSION_1_11_0;
517 
518     cpu->cfg.ext_zfa = true;
519     cpu->cfg.ext_zfh = true;
520     cpu->cfg.mmu = true;
521     cpu->cfg.ext_xtheadba = true;
522     cpu->cfg.ext_xtheadbb = true;
523     cpu->cfg.ext_xtheadbs = true;
524     cpu->cfg.ext_xtheadcmo = true;
525     cpu->cfg.ext_xtheadcondmov = true;
526     cpu->cfg.ext_xtheadfmemidx = true;
527     cpu->cfg.ext_xtheadmac = true;
528     cpu->cfg.ext_xtheadmemidx = true;
529     cpu->cfg.ext_xtheadmempair = true;
530     cpu->cfg.ext_xtheadsync = true;
531 
532     cpu->cfg.mvendorid = THEAD_VENDOR_ID;
533 #ifndef CONFIG_USER_ONLY
534     set_satp_mode_max_supported(cpu, VM_1_10_SV39);
535     th_register_custom_csrs(cpu);
536 #endif
537 
538     /* inherited from parent obj via riscv_cpu_init() */
539     cpu->cfg.pmp = true;
540 }
541 
542 static void rv64_veyron_v1_cpu_init(Object *obj)
543 {
544     CPURISCVState *env = &RISCV_CPU(obj)->env;
545     RISCVCPU *cpu = RISCV_CPU(obj);
546 
547     riscv_cpu_set_misa_ext(env, RVG | RVC | RVS | RVU | RVH);
548     env->priv_ver = PRIV_VERSION_1_12_0;
549 
550     /* Enable ISA extensions */
551     cpu->cfg.mmu = true;
552     cpu->cfg.ext_zifencei = true;
553     cpu->cfg.ext_zicsr = true;
554     cpu->cfg.pmp = true;
555     cpu->cfg.ext_zicbom = true;
556     cpu->cfg.cbom_blocksize = 64;
557     cpu->cfg.cboz_blocksize = 64;
558     cpu->cfg.ext_zicboz = true;
559     cpu->cfg.ext_smaia = true;
560     cpu->cfg.ext_ssaia = true;
561     cpu->cfg.ext_sscofpmf = true;
562     cpu->cfg.ext_sstc = true;
563     cpu->cfg.ext_svinval = true;
564     cpu->cfg.ext_svnapot = true;
565     cpu->cfg.ext_svpbmt = true;
566     cpu->cfg.ext_smstateen = true;
567     cpu->cfg.ext_zba = true;
568     cpu->cfg.ext_zbb = true;
569     cpu->cfg.ext_zbc = true;
570     cpu->cfg.ext_zbs = true;
571     cpu->cfg.ext_XVentanaCondOps = true;
572 
573     cpu->cfg.mvendorid = VEYRON_V1_MVENDORID;
574     cpu->cfg.marchid = VEYRON_V1_MARCHID;
575     cpu->cfg.mimpid = VEYRON_V1_MIMPID;
576 
577 #ifndef CONFIG_USER_ONLY
578     set_satp_mode_max_supported(cpu, VM_1_10_SV48);
579 #endif
580 }
581 
582 #ifdef CONFIG_TCG
583 static void rv128_base_cpu_init(Object *obj)
584 {
585     RISCVCPU *cpu = RISCV_CPU(obj);
586     CPURISCVState *env = &cpu->env;
587 
588     if (qemu_tcg_mttcg_enabled()) {
589         /* Missing 128-bit aligned atomics */
590         error_report("128-bit RISC-V currently does not work with Multi "
591                      "Threaded TCG. Please use: -accel tcg,thread=single");
592         exit(EXIT_FAILURE);
593     }
594 
595     cpu->cfg.mmu = true;
596     cpu->cfg.pmp = true;
597 
598     /* Set latest version of privileged specification */
599     env->priv_ver = PRIV_VERSION_LATEST;
600 #ifndef CONFIG_USER_ONLY
601     set_satp_mode_max_supported(RISCV_CPU(obj), VM_1_10_SV57);
602 #endif
603 }
604 #endif /* CONFIG_TCG */
605 
606 static void rv64i_bare_cpu_init(Object *obj)
607 {
608     CPURISCVState *env = &RISCV_CPU(obj)->env;
609     riscv_cpu_set_misa_ext(env, RVI);
610 }
611 
612 static void rv64e_bare_cpu_init(Object *obj)
613 {
614     CPURISCVState *env = &RISCV_CPU(obj)->env;
615     riscv_cpu_set_misa_ext(env, RVE);
616 }
617 
618 #endif /* TARGET_RISCV64 */
619 
620 #if defined(TARGET_RISCV32) || \
621     (defined(TARGET_RISCV64) && !defined(CONFIG_USER_ONLY))
622 
623 static void rv32_base_cpu_init(Object *obj)
624 {
625     RISCVCPU *cpu = RISCV_CPU(obj);
626     CPURISCVState *env = &cpu->env;
627 
628     cpu->cfg.mmu = true;
629     cpu->cfg.pmp = true;
630 
631     /* Set latest version of privileged specification */
632     env->priv_ver = PRIV_VERSION_LATEST;
633 #ifndef CONFIG_USER_ONLY
634     set_satp_mode_max_supported(RISCV_CPU(obj), VM_1_10_SV32);
635 #endif
636 }
637 
638 static void rv32_sifive_u_cpu_init(Object *obj)
639 {
640     RISCVCPU *cpu = RISCV_CPU(obj);
641     CPURISCVState *env = &cpu->env;
642     riscv_cpu_set_misa_ext(env, RVI | RVM | RVA | RVF | RVD | RVC | RVS | RVU);
643     env->priv_ver = PRIV_VERSION_1_10_0;
644 #ifndef CONFIG_USER_ONLY
645     set_satp_mode_max_supported(RISCV_CPU(obj), VM_1_10_SV32);
646 #endif
647 
648     /* inherited from parent obj via riscv_cpu_init() */
649     cpu->cfg.ext_zifencei = true;
650     cpu->cfg.ext_zicsr = true;
651     cpu->cfg.mmu = true;
652     cpu->cfg.pmp = true;
653 }
654 
655 static void rv32_sifive_e_cpu_init(Object *obj)
656 {
657     CPURISCVState *env = &RISCV_CPU(obj)->env;
658     RISCVCPU *cpu = RISCV_CPU(obj);
659 
660     riscv_cpu_set_misa_ext(env, RVI | RVM | RVA | RVC | RVU);
661     env->priv_ver = PRIV_VERSION_1_10_0;
662 #ifndef CONFIG_USER_ONLY
663     set_satp_mode_max_supported(cpu, VM_1_10_MBARE);
664 #endif
665 
666     /* inherited from parent obj via riscv_cpu_init() */
667     cpu->cfg.ext_zifencei = true;
668     cpu->cfg.ext_zicsr = true;
669     cpu->cfg.pmp = true;
670 }
671 
672 static void rv32_ibex_cpu_init(Object *obj)
673 {
674     CPURISCVState *env = &RISCV_CPU(obj)->env;
675     RISCVCPU *cpu = RISCV_CPU(obj);
676 
677     riscv_cpu_set_misa_ext(env, RVI | RVM | RVC | RVU);
678     env->priv_ver = PRIV_VERSION_1_12_0;
679 #ifndef CONFIG_USER_ONLY
680     set_satp_mode_max_supported(cpu, VM_1_10_MBARE);
681 #endif
682     /* inherited from parent obj via riscv_cpu_init() */
683     cpu->cfg.ext_zifencei = true;
684     cpu->cfg.ext_zicsr = true;
685     cpu->cfg.pmp = true;
686     cpu->cfg.ext_smepmp = true;
687 
688     cpu->cfg.ext_zba = true;
689     cpu->cfg.ext_zbb = true;
690     cpu->cfg.ext_zbc = true;
691     cpu->cfg.ext_zbs = true;
692 }
693 
694 static void rv32_imafcu_nommu_cpu_init(Object *obj)
695 {
696     CPURISCVState *env = &RISCV_CPU(obj)->env;
697     RISCVCPU *cpu = RISCV_CPU(obj);
698 
699     riscv_cpu_set_misa_ext(env, RVI | RVM | RVA | RVF | RVC | RVU);
700     env->priv_ver = PRIV_VERSION_1_10_0;
701 #ifndef CONFIG_USER_ONLY
702     set_satp_mode_max_supported(cpu, VM_1_10_MBARE);
703 #endif
704 
705     /* inherited from parent obj via riscv_cpu_init() */
706     cpu->cfg.ext_zifencei = true;
707     cpu->cfg.ext_zicsr = true;
708     cpu->cfg.pmp = true;
709 }
710 
711 static void rv32i_bare_cpu_init(Object *obj)
712 {
713     CPURISCVState *env = &RISCV_CPU(obj)->env;
714     riscv_cpu_set_misa_ext(env, RVI);
715 }
716 
717 static void rv32e_bare_cpu_init(Object *obj)
718 {
719     CPURISCVState *env = &RISCV_CPU(obj)->env;
720     riscv_cpu_set_misa_ext(env, RVE);
721 }
722 #endif
723 
724 static ObjectClass *riscv_cpu_class_by_name(const char *cpu_model)
725 {
726     ObjectClass *oc;
727     char *typename;
728     char **cpuname;
729 
730     cpuname = g_strsplit(cpu_model, ",", 1);
731     typename = g_strdup_printf(RISCV_CPU_TYPE_NAME("%s"), cpuname[0]);
732     oc = object_class_by_name(typename);
733     g_strfreev(cpuname);
734     g_free(typename);
735 
736     return oc;
737 }
738 
739 char *riscv_cpu_get_name(RISCVCPU *cpu)
740 {
741     RISCVCPUClass *rcc = RISCV_CPU_GET_CLASS(cpu);
742     const char *typename = object_class_get_name(OBJECT_CLASS(rcc));
743 
744     g_assert(g_str_has_suffix(typename, RISCV_CPU_TYPE_SUFFIX));
745 
746     return cpu_model_from_type(typename);
747 }
748 
749 static void riscv_cpu_dump_state(CPUState *cs, FILE *f, int flags)
750 {
751     RISCVCPU *cpu = RISCV_CPU(cs);
752     CPURISCVState *env = &cpu->env;
753     int i, j;
754     uint8_t *p;
755 
756 #if !defined(CONFIG_USER_ONLY)
757     if (riscv_has_ext(env, RVH)) {
758         qemu_fprintf(f, " %s %d\n", "V      =  ", env->virt_enabled);
759     }
760 #endif
761     qemu_fprintf(f, " %s " TARGET_FMT_lx "\n", "pc      ", env->pc);
762 #ifndef CONFIG_USER_ONLY
763     {
764         static const int dump_csrs[] = {
765             CSR_MHARTID,
766             CSR_MSTATUS,
767             CSR_MSTATUSH,
768             /*
769              * CSR_SSTATUS is intentionally omitted here as its value
770              * can be figured out by looking at CSR_MSTATUS
771              */
772             CSR_HSTATUS,
773             CSR_VSSTATUS,
774             CSR_MIP,
775             CSR_MIE,
776             CSR_MIDELEG,
777             CSR_HIDELEG,
778             CSR_MEDELEG,
779             CSR_HEDELEG,
780             CSR_MTVEC,
781             CSR_STVEC,
782             CSR_VSTVEC,
783             CSR_MEPC,
784             CSR_SEPC,
785             CSR_VSEPC,
786             CSR_MCAUSE,
787             CSR_SCAUSE,
788             CSR_VSCAUSE,
789             CSR_MTVAL,
790             CSR_STVAL,
791             CSR_HTVAL,
792             CSR_MTVAL2,
793             CSR_MSCRATCH,
794             CSR_SSCRATCH,
795             CSR_SATP,
796             CSR_MMTE,
797             CSR_UPMBASE,
798             CSR_UPMMASK,
799             CSR_SPMBASE,
800             CSR_SPMMASK,
801             CSR_MPMBASE,
802             CSR_MPMMASK,
803         };
804 
805         for (i = 0; i < ARRAY_SIZE(dump_csrs); ++i) {
806             int csrno = dump_csrs[i];
807             target_ulong val = 0;
808             RISCVException res = riscv_csrrw_debug(env, csrno, &val, 0, 0);
809 
810             /*
811              * Rely on the smode, hmode, etc, predicates within csr.c
812              * to do the filtering of the registers that are present.
813              */
814             if (res == RISCV_EXCP_NONE) {
815                 qemu_fprintf(f, " %-8s " TARGET_FMT_lx "\n",
816                              csr_ops[csrno].name, val);
817             }
818         }
819     }
820 #endif
821 
822     for (i = 0; i < 32; i++) {
823         qemu_fprintf(f, " %-8s " TARGET_FMT_lx,
824                      riscv_int_regnames[i], env->gpr[i]);
825         if ((i & 3) == 3) {
826             qemu_fprintf(f, "\n");
827         }
828     }
829     if (flags & CPU_DUMP_FPU) {
830         target_ulong val = 0;
831         RISCVException res = riscv_csrrw_debug(env, CSR_FCSR, &val, 0, 0);
832         if (res == RISCV_EXCP_NONE) {
833             qemu_fprintf(f, " %-8s " TARGET_FMT_lx "\n",
834                     csr_ops[CSR_FCSR].name, val);
835         }
836         for (i = 0; i < 32; i++) {
837             qemu_fprintf(f, " %-8s %016" PRIx64,
838                          riscv_fpr_regnames[i], env->fpr[i]);
839             if ((i & 3) == 3) {
840                 qemu_fprintf(f, "\n");
841             }
842         }
843     }
844     if (riscv_has_ext(env, RVV) && (flags & CPU_DUMP_VPU)) {
845         static const int dump_rvv_csrs[] = {
846                     CSR_VSTART,
847                     CSR_VXSAT,
848                     CSR_VXRM,
849                     CSR_VCSR,
850                     CSR_VL,
851                     CSR_VTYPE,
852                     CSR_VLENB,
853                 };
854         for (i = 0; i < ARRAY_SIZE(dump_rvv_csrs); ++i) {
855             int csrno = dump_rvv_csrs[i];
856             target_ulong val = 0;
857             RISCVException res = riscv_csrrw_debug(env, csrno, &val, 0, 0);
858 
859             /*
860              * Rely on the smode, hmode, etc, predicates within csr.c
861              * to do the filtering of the registers that are present.
862              */
863             if (res == RISCV_EXCP_NONE) {
864                 qemu_fprintf(f, " %-8s " TARGET_FMT_lx "\n",
865                              csr_ops[csrno].name, val);
866             }
867         }
868         uint16_t vlenb = cpu->cfg.vlenb;
869 
870         for (i = 0; i < 32; i++) {
871             qemu_fprintf(f, " %-8s ", riscv_rvv_regnames[i]);
872             p = (uint8_t *)env->vreg;
873             for (j = vlenb - 1 ; j >= 0; j--) {
874                 qemu_fprintf(f, "%02x", *(p + i * vlenb + BYTE(j)));
875             }
876             qemu_fprintf(f, "\n");
877         }
878     }
879 }
880 
881 static void riscv_cpu_set_pc(CPUState *cs, vaddr value)
882 {
883     RISCVCPU *cpu = RISCV_CPU(cs);
884     CPURISCVState *env = &cpu->env;
885 
886     if (env->xl == MXL_RV32) {
887         env->pc = (int32_t)value;
888     } else {
889         env->pc = value;
890     }
891 }
892 
893 static vaddr riscv_cpu_get_pc(CPUState *cs)
894 {
895     RISCVCPU *cpu = RISCV_CPU(cs);
896     CPURISCVState *env = &cpu->env;
897 
898     /* Match cpu_get_tb_cpu_state. */
899     if (env->xl == MXL_RV32) {
900         return env->pc & UINT32_MAX;
901     }
902     return env->pc;
903 }
904 
905 bool riscv_cpu_has_work(CPUState *cs)
906 {
907 #ifndef CONFIG_USER_ONLY
908     RISCVCPU *cpu = RISCV_CPU(cs);
909     CPURISCVState *env = &cpu->env;
910     /*
911      * Definition of the WFI instruction requires it to ignore the privilege
912      * mode and delegation registers, but respect individual enables
913      */
914     return riscv_cpu_all_pending(env) != 0 ||
915         riscv_cpu_sirq_pending(env) != RISCV_EXCP_NONE ||
916         riscv_cpu_vsirq_pending(env) != RISCV_EXCP_NONE;
917 #else
918     return true;
919 #endif
920 }
921 
922 static int riscv_cpu_mmu_index(CPUState *cs, bool ifetch)
923 {
924     return riscv_env_mmu_index(cpu_env(cs), ifetch);
925 }
926 
927 static void riscv_cpu_reset_hold(Object *obj, ResetType type)
928 {
929 #ifndef CONFIG_USER_ONLY
930     uint8_t iprio;
931     int i, irq, rdzero;
932 #endif
933     CPUState *cs = CPU(obj);
934     RISCVCPU *cpu = RISCV_CPU(cs);
935     RISCVCPUClass *mcc = RISCV_CPU_GET_CLASS(obj);
936     CPURISCVState *env = &cpu->env;
937 
938     if (mcc->parent_phases.hold) {
939         mcc->parent_phases.hold(obj, type);
940     }
941 #ifndef CONFIG_USER_ONLY
942     env->misa_mxl = mcc->misa_mxl_max;
943     env->priv = PRV_M;
944     env->mstatus &= ~(MSTATUS_MIE | MSTATUS_MPRV);
945     if (env->misa_mxl > MXL_RV32) {
946         /*
947          * The reset status of SXL/UXL is undefined, but mstatus is WARL
948          * and we must ensure that the value after init is valid for read.
949          */
950         env->mstatus = set_field(env->mstatus, MSTATUS64_SXL, env->misa_mxl);
951         env->mstatus = set_field(env->mstatus, MSTATUS64_UXL, env->misa_mxl);
952         if (riscv_has_ext(env, RVH)) {
953             env->vsstatus = set_field(env->vsstatus,
954                                       MSTATUS64_SXL, env->misa_mxl);
955             env->vsstatus = set_field(env->vsstatus,
956                                       MSTATUS64_UXL, env->misa_mxl);
957             env->mstatus_hs = set_field(env->mstatus_hs,
958                                         MSTATUS64_SXL, env->misa_mxl);
959             env->mstatus_hs = set_field(env->mstatus_hs,
960                                         MSTATUS64_UXL, env->misa_mxl);
961         }
962     }
963     env->mcause = 0;
964     env->miclaim = MIP_SGEIP;
965     env->pc = env->resetvec;
966     env->bins = 0;
967     env->two_stage_lookup = false;
968 
969     env->menvcfg = (cpu->cfg.ext_svpbmt ? MENVCFG_PBMTE : 0) |
970                    (!cpu->cfg.ext_svade && cpu->cfg.ext_svadu ?
971                     MENVCFG_ADUE : 0);
972     env->henvcfg = 0;
973 
974     /* Initialize default priorities of local interrupts. */
975     for (i = 0; i < ARRAY_SIZE(env->miprio); i++) {
976         iprio = riscv_cpu_default_priority(i);
977         env->miprio[i] = (i == IRQ_M_EXT) ? 0 : iprio;
978         env->siprio[i] = (i == IRQ_S_EXT) ? 0 : iprio;
979         env->hviprio[i] = 0;
980     }
981     i = 0;
982     while (!riscv_cpu_hviprio_index2irq(i, &irq, &rdzero)) {
983         if (!rdzero) {
984             env->hviprio[irq] = env->miprio[irq];
985         }
986         i++;
987     }
988     /* mmte is supposed to have pm.current hardwired to 1 */
989     env->mmte |= (EXT_STATUS_INITIAL | MMTE_M_PM_CURRENT);
990 
991     /*
992      * Bits 10, 6, 2 and 12 of mideleg are read only 1 when the Hypervisor
993      * extension is enabled.
994      */
995     if (riscv_has_ext(env, RVH)) {
996         env->mideleg |= HS_MODE_INTERRUPTS;
997     }
998 
999     /*
1000      * Clear mseccfg and unlock all the PMP entries upon reset.
1001      * This is allowed as per the priv and smepmp specifications
1002      * and is needed to clear stale entries across reboots.
1003      */
1004     if (riscv_cpu_cfg(env)->ext_smepmp) {
1005         env->mseccfg = 0;
1006     }
1007 
1008     pmp_unlock_entries(env);
1009 #else
1010     env->priv = PRV_U;
1011     env->senvcfg = 0;
1012     env->menvcfg = 0;
1013 #endif
1014 
1015     /* on reset elp is clear */
1016     env->elp = false;
1017     /* on reset ssp is set to 0 */
1018     env->ssp = 0;
1019 
1020     env->xl = riscv_cpu_mxl(env);
1021     riscv_cpu_update_mask(env);
1022     cs->exception_index = RISCV_EXCP_NONE;
1023     env->load_res = -1;
1024     set_default_nan_mode(1, &env->fp_status);
1025     env->vill = true;
1026 
1027 #ifndef CONFIG_USER_ONLY
1028     if (cpu->cfg.debug) {
1029         riscv_trigger_reset_hold(env);
1030     }
1031 
1032     if (kvm_enabled()) {
1033         kvm_riscv_reset_vcpu(cpu);
1034     }
1035 #endif
1036 }
1037 
1038 static void riscv_cpu_disas_set_info(CPUState *s, disassemble_info *info)
1039 {
1040     RISCVCPU *cpu = RISCV_CPU(s);
1041     CPURISCVState *env = &cpu->env;
1042     info->target_info = &cpu->cfg;
1043 
1044     switch (env->xl) {
1045     case MXL_RV32:
1046         info->print_insn = print_insn_riscv32;
1047         break;
1048     case MXL_RV64:
1049         info->print_insn = print_insn_riscv64;
1050         break;
1051     case MXL_RV128:
1052         info->print_insn = print_insn_riscv128;
1053         break;
1054     default:
1055         g_assert_not_reached();
1056     }
1057 }
1058 
1059 #ifndef CONFIG_USER_ONLY
1060 static void riscv_cpu_satp_mode_finalize(RISCVCPU *cpu, Error **errp)
1061 {
1062     bool rv32 = riscv_cpu_is_32bit(cpu);
1063     uint8_t satp_mode_map_max, satp_mode_supported_max;
1064 
1065     /* The CPU wants the OS to decide which satp mode to use */
1066     if (cpu->cfg.satp_mode.supported == 0) {
1067         return;
1068     }
1069 
1070     satp_mode_supported_max =
1071                     satp_mode_max_from_map(cpu->cfg.satp_mode.supported);
1072 
1073     if (cpu->cfg.satp_mode.map == 0) {
1074         if (cpu->cfg.satp_mode.init == 0) {
1075             /* If unset by the user, we fall back to the default satp mode. */
1076             set_satp_mode_default_map(cpu);
1077         } else {
1078             /*
1079              * Find the lowest level that was disabled and then enable the
1080              * first valid level below it that can be found in
1081              * valid_vm_1_10_32/64.
1082              */
1083             for (int i = 1; i < 16; ++i) {
1084                 if ((cpu->cfg.satp_mode.init & (1 << i)) &&
1085                     (cpu->cfg.satp_mode.supported & (1 << i))) {
1086                     for (int j = i - 1; j >= 0; --j) {
1087                         if (cpu->cfg.satp_mode.supported & (1 << j)) {
1088                             cpu->cfg.satp_mode.map |= (1 << j);
1089                             break;
1090                         }
1091                     }
1092                     break;
1093                 }
1094             }
1095         }
1096     }
1097 
1098     satp_mode_map_max = satp_mode_max_from_map(cpu->cfg.satp_mode.map);
1099 
1100     /* Make sure the user asked for a supported configuration (HW and QEMU) */
1101     if (satp_mode_map_max > satp_mode_supported_max) {
1102         error_setg(errp, "satp_mode %s is higher than hw max capability %s",
1103                    satp_mode_str(satp_mode_map_max, rv32),
1104                    satp_mode_str(satp_mode_supported_max, rv32));
1105         return;
1106     }
1107 
1108     /*
1109      * Make sure the user did not ask for an invalid configuration as per
1110      * the specification.
1111      */
1112     if (!rv32) {
1113         for (int i = satp_mode_map_max - 1; i >= 0; --i) {
1114             if (!(cpu->cfg.satp_mode.map & (1 << i)) &&
1115                 (cpu->cfg.satp_mode.init & (1 << i)) &&
1116                 (cpu->cfg.satp_mode.supported & (1 << i))) {
1117                 error_setg(errp, "cannot disable %s satp mode if %s "
1118                            "is enabled", satp_mode_str(i, false),
1119                            satp_mode_str(satp_mode_map_max, false));
1120                 return;
1121             }
1122         }
1123     }
1124 
1125     /* Finally expand the map so that all valid modes are set */
1126     for (int i = satp_mode_map_max - 1; i >= 0; --i) {
1127         if (cpu->cfg.satp_mode.supported & (1 << i)) {
1128             cpu->cfg.satp_mode.map |= (1 << i);
1129         }
1130     }
1131 }
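
/*
 * Worked example: for a CPU whose supported map is {mbare, sv39, sv48, sv57}
 * and a user who only passes sv48=on, satp_mode_map_max becomes sv48 (legal,
 * since it does not exceed the supported max of sv57) and the final loop
 * also sets sv39 and mbare, so every valid mode below the chosen maximum
 * stays available to the guest.
 */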
1132 #endif
1133 
1134 void riscv_cpu_finalize_features(RISCVCPU *cpu, Error **errp)
1135 {
1136     Error *local_err = NULL;
1137 
1138 #ifndef CONFIG_USER_ONLY
1139     riscv_cpu_satp_mode_finalize(cpu, &local_err);
1140     if (local_err != NULL) {
1141         error_propagate(errp, local_err);
1142         return;
1143     }
1144 #endif
1145 
1146     if (tcg_enabled()) {
1147         riscv_tcg_cpu_finalize_features(cpu, &local_err);
1148         if (local_err != NULL) {
1149             error_propagate(errp, local_err);
1150             return;
1151         }
1152         riscv_tcg_cpu_finalize_dynamic_decoder(cpu);
1153     } else if (kvm_enabled()) {
1154         riscv_kvm_cpu_finalize_features(cpu, &local_err);
1155         if (local_err != NULL) {
1156             error_propagate(errp, local_err);
1157             return;
1158         }
1159     }
1160 }
1161 
1162 static void riscv_cpu_realize(DeviceState *dev, Error **errp)
1163 {
1164     CPUState *cs = CPU(dev);
1165     RISCVCPU *cpu = RISCV_CPU(dev);
1166     RISCVCPUClass *mcc = RISCV_CPU_GET_CLASS(dev);
1167     Error *local_err = NULL;
1168 
1169     cpu_exec_realizefn(cs, &local_err);
1170     if (local_err != NULL) {
1171         error_propagate(errp, local_err);
1172         return;
1173     }
1174 
1175     riscv_cpu_finalize_features(cpu, &local_err);
1176     if (local_err != NULL) {
1177         error_propagate(errp, local_err);
1178         return;
1179     }
1180 
1181     riscv_cpu_register_gdb_regs_for_features(cs);
1182 
1183 #ifndef CONFIG_USER_ONLY
1184     if (cpu->cfg.debug) {
1185         riscv_trigger_realize(&cpu->env);
1186     }
1187 #endif
1188 
1189     qemu_init_vcpu(cs);
1190     cpu_reset(cs);
1191 
1192     mcc->parent_realize(dev, errp);
1193 }
1194 
1195 bool riscv_cpu_accelerator_compatible(RISCVCPU *cpu)
1196 {
1197     if (tcg_enabled()) {
1198         return riscv_cpu_tcg_compatible(cpu);
1199     }
1200 
1201     return true;
1202 }
1203 
1204 #ifndef CONFIG_USER_ONLY
1205 static void cpu_riscv_get_satp(Object *obj, Visitor *v, const char *name,
1206                                void *opaque, Error **errp)
1207 {
1208     RISCVSATPMap *satp_map = opaque;
1209     uint8_t satp = satp_mode_from_str(name);
1210     bool value;
1211 
1212     value = satp_map->map & (1 << satp);
1213 
1214     visit_type_bool(v, name, &value, errp);
1215 }
1216 
1217 static void cpu_riscv_set_satp(Object *obj, Visitor *v, const char *name,
1218                                void *opaque, Error **errp)
1219 {
1220     RISCVSATPMap *satp_map = opaque;
1221     uint8_t satp = satp_mode_from_str(name);
1222     bool value;
1223 
1224     if (!visit_type_bool(v, name, &value, errp)) {
1225         return;
1226     }
1227 
1228     satp_map->map = deposit32(satp_map->map, satp, 1, value);
1229     satp_map->init |= 1 << satp;
1230 }
1231 
1232 void riscv_add_satp_mode_properties(Object *obj)
1233 {
1234     RISCVCPU *cpu = RISCV_CPU(obj);
1235 
1236     if (cpu->env.misa_mxl == MXL_RV32) {
1237         object_property_add(obj, "sv32", "bool", cpu_riscv_get_satp,
1238                             cpu_riscv_set_satp, NULL, &cpu->cfg.satp_mode);
1239     } else {
1240         object_property_add(obj, "sv39", "bool", cpu_riscv_get_satp,
1241                             cpu_riscv_set_satp, NULL, &cpu->cfg.satp_mode);
1242         object_property_add(obj, "sv48", "bool", cpu_riscv_get_satp,
1243                             cpu_riscv_set_satp, NULL, &cpu->cfg.satp_mode);
1244         object_property_add(obj, "sv57", "bool", cpu_riscv_get_satp,
1245                             cpu_riscv_set_satp, NULL, &cpu->cfg.satp_mode);
1246         object_property_add(obj, "sv64", "bool", cpu_riscv_get_satp,
1247                             cpu_riscv_set_satp, NULL, &cpu->cfg.satp_mode);
1248     }
1249 }
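
/*
 * For illustration, these properties back command-line settings such as
 *
 *     -cpu rv64,sv48=on      or      -cpu rv64,sv57=off
 *
 * The setter above records the request in cfg.satp_mode.map/init and
 * riscv_cpu_satp_mode_finalize() later validates and expands it.
 */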
1250 
1251 static void riscv_cpu_set_irq(void *opaque, int irq, int level)
1252 {
1253     RISCVCPU *cpu = RISCV_CPU(opaque);
1254     CPURISCVState *env = &cpu->env;
1255 
1256     if (irq < IRQ_LOCAL_MAX) {
1257         switch (irq) {
1258         case IRQ_U_SOFT:
1259         case IRQ_S_SOFT:
1260         case IRQ_VS_SOFT:
1261         case IRQ_M_SOFT:
1262         case IRQ_U_TIMER:
1263         case IRQ_S_TIMER:
1264         case IRQ_VS_TIMER:
1265         case IRQ_M_TIMER:
1266         case IRQ_U_EXT:
1267         case IRQ_VS_EXT:
1268         case IRQ_M_EXT:
1269             if (kvm_enabled()) {
1270                 kvm_riscv_set_irq(cpu, irq, level);
1271             } else {
1272                 riscv_cpu_update_mip(env, 1 << irq, BOOL_TO_MASK(level));
1273             }
1274              break;
1275         case IRQ_S_EXT:
1276             if (kvm_enabled()) {
1277                 kvm_riscv_set_irq(cpu, irq, level);
1278             } else {
1279                 env->external_seip = level;
1280                 riscv_cpu_update_mip(env, 1 << irq,
1281                                      BOOL_TO_MASK(level | env->software_seip));
1282             }
1283             break;
1284         default:
1285             g_assert_not_reached();
1286         }
1287     } else if (irq < (IRQ_LOCAL_MAX + IRQ_LOCAL_GUEST_MAX)) {
1288         /* Require H-extension for handling guest local interrupts */
1289         if (!riscv_has_ext(env, RVH)) {
1290             g_assert_not_reached();
1291         }
1292 
1293         /* Compute bit position in HGEIP CSR */
1294         irq = irq - IRQ_LOCAL_MAX + 1;
1295         if (env->geilen < irq) {
1296             g_assert_not_reached();
1297         }
1298 
1299         /* Update HGEIP CSR */
1300         env->hgeip &= ~((target_ulong)1 << irq);
1301         if (level) {
1302             env->hgeip |= (target_ulong)1 << irq;
1303         }
1304 
1305         /* Update mip.SGEIP bit */
1306         riscv_cpu_update_mip(env, MIP_SGEIP,
1307                              BOOL_TO_MASK(!!(env->hgeie & env->hgeip)));
1308     } else {
1309         g_assert_not_reached();
1310     }
1311 }
1312 #endif /* CONFIG_USER_ONLY */
1313 
1314 static bool riscv_cpu_is_dynamic(Object *cpu_obj)
1315 {
1316     return object_dynamic_cast(cpu_obj, TYPE_RISCV_DYNAMIC_CPU) != NULL;
1317 }
1318 
1319 static void riscv_cpu_post_init(Object *obj)
1320 {
1321     accel_cpu_instance_init(CPU(obj));
1322 }
1323 
1324 static void riscv_cpu_init(Object *obj)
1325 {
1326     RISCVCPUClass *mcc = RISCV_CPU_GET_CLASS(obj);
1327     RISCVCPU *cpu = RISCV_CPU(obj);
1328     CPURISCVState *env = &cpu->env;
1329 
1330     env->misa_mxl = mcc->misa_mxl_max;
1331 
1332 #ifndef CONFIG_USER_ONLY
1333     qdev_init_gpio_in(DEVICE(obj), riscv_cpu_set_irq,
1334                       IRQ_LOCAL_MAX + IRQ_LOCAL_GUEST_MAX);
1335 #endif /* CONFIG_USER_ONLY */
1336 
1337     general_user_opts = g_hash_table_new(g_str_hash, g_str_equal);
1338 
1339     /*
1340      * The timer and performance counters extensions were supported
1341      * in QEMU before they were added as discrete extensions in the
1342      * ISA. To keep compatibility we'll always default them to 'true'
1343      * for all CPUs. Each accelerator will decide what to do when
1344      * users disable them.
1345      */
1346     RISCV_CPU(obj)->cfg.ext_zicntr = true;
1347     RISCV_CPU(obj)->cfg.ext_zihpm = true;
1348 
1349     /* Default values for non-bool cpu properties */
1350     cpu->cfg.pmu_mask = MAKE_64BIT_MASK(3, 16);
1351     cpu->cfg.vlenb = 128 >> 3;
1352     cpu->cfg.elen = 64;
1353     cpu->cfg.cbom_blocksize = 64;
1354     cpu->cfg.cbop_blocksize = 64;
1355     cpu->cfg.cboz_blocksize = 64;
1356     cpu->env.vext_ver = VEXT_VERSION_1_00_0;
1357 }
1358 
1359 static void riscv_bare_cpu_init(Object *obj)
1360 {
1361     RISCVCPU *cpu = RISCV_CPU(obj);
1362 
1363     /*
1364      * Bare CPUs do not inherit the timer and performance
1365      * counters from the parent class (see riscv_cpu_init()
1366      * for info on why the parent enables them).
1367      *
1368      * Users have to explicitly enable these counters for
1369      * bare CPUs.
1370      */
1371     cpu->cfg.ext_zicntr = false;
1372     cpu->cfg.ext_zihpm = false;
1373 
1374     /* Set to QEMU's first supported priv version */
1375     cpu->env.priv_ver = PRIV_VERSION_1_10_0;
1376 
1377     /*
1378      * Support all available satp_mode settings. The default
1379      * value will be set to MBARE if the user doesn't set
1380      * satp_mode manually (see set_satp_mode_default_map()).
1381      */
1382 #ifndef CONFIG_USER_ONLY
1383     set_satp_mode_max_supported(cpu, VM_1_10_SV64);
1384 #endif
1385 }
1386 
1387 typedef struct misa_ext_info {
1388     const char *name;
1389     const char *description;
1390 } MISAExtInfo;
1391 
1392 #define MISA_INFO_IDX(_bit) \
1393     __builtin_ctz(_bit)
1394 
1395 #define MISA_EXT_INFO(_bit, _propname, _descr) \
1396     [MISA_INFO_IDX(_bit)] = {.name = _propname, .description = _descr}
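
/*
 * For illustration: misa extension bits follow the 'A'..'Z' layout, so RVA
 * is bit 0 and MISA_INFO_IDX(RVA) == __builtin_ctz(1 << 0) == 0, which
 * places MISA_EXT_INFO(RVA, "a", ...) in slot [0] of the array below.
 */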
1397 
1398 static const MISAExtInfo misa_ext_info_arr[] = {
1399     MISA_EXT_INFO(RVA, "a", "Atomic instructions"),
1400     MISA_EXT_INFO(RVC, "c", "Compressed instructions"),
1401     MISA_EXT_INFO(RVD, "d", "Double-precision float point"),
1402     MISA_EXT_INFO(RVF, "f", "Single-precision float point"),
1403     MISA_EXT_INFO(RVI, "i", "Base integer instruction set"),
1404     MISA_EXT_INFO(RVE, "e", "Base integer instruction set (embedded)"),
1405     MISA_EXT_INFO(RVM, "m", "Integer multiplication and division"),
1406     MISA_EXT_INFO(RVS, "s", "Supervisor-level instructions"),
1407     MISA_EXT_INFO(RVU, "u", "User-level instructions"),
1408     MISA_EXT_INFO(RVH, "h", "Hypervisor"),
1409     MISA_EXT_INFO(RVJ, "x-j", "Dynamic translated languages"),
1410     MISA_EXT_INFO(RVV, "v", "Vector operations"),
1411     MISA_EXT_INFO(RVG, "g", "General purpose (IMAFD_Zicsr_Zifencei)"),
1412     MISA_EXT_INFO(RVB, "b", "Bit manipulation (Zba_Zbb_Zbs)")
1413 };
1414 
1415 static void riscv_cpu_validate_misa_mxl(RISCVCPUClass *mcc)
1416 {
1417     CPUClass *cc = CPU_CLASS(mcc);
1418 
1419     /* Validate that MISA_MXL is set properly. */
1420     switch (mcc->misa_mxl_max) {
1421 #ifdef TARGET_RISCV64
1422     case MXL_RV64:
1423     case MXL_RV128:
1424         cc->gdb_core_xml_file = "riscv-64bit-cpu.xml";
1425         break;
1426 #endif
1427     case MXL_RV32:
1428         cc->gdb_core_xml_file = "riscv-32bit-cpu.xml";
1429         break;
1430     default:
1431         g_assert_not_reached();
1432     }
1433 }
1434 
1435 static int riscv_validate_misa_info_idx(uint32_t bit)
1436 {
1437     int idx;
1438 
1439     /*
1440      * Our lowest valid input (RVA) is 1 and
1441      * __builtin_ctz() is UB with zero.
1442      */
1443     g_assert(bit != 0);
1444     idx = MISA_INFO_IDX(bit);
1445 
1446     g_assert(idx < ARRAY_SIZE(misa_ext_info_arr));
1447     return idx;
1448 }
1449 
1450 const char *riscv_get_misa_ext_name(uint32_t bit)
1451 {
1452     int idx = riscv_validate_misa_info_idx(bit);
1453     const char *val = misa_ext_info_arr[idx].name;
1454 
1455     g_assert(val != NULL);
1456     return val;
1457 }
1458 
1459 const char *riscv_get_misa_ext_description(uint32_t bit)
1460 {
1461     int idx = riscv_validate_misa_info_idx(bit);
1462     const char *val = misa_ext_info_arr[idx].description;
1463 
1464     g_assert(val != NULL);
1465     return val;
1466 }
1467 
1468 #define MULTI_EXT_CFG_BOOL(_name, _prop, _defval) \
1469     {.name = _name, .offset = CPU_CFG_OFFSET(_prop), \
1470      .enabled = _defval}
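
/*
 * For illustration,
 *     MULTI_EXT_CFG_BOOL("zba", ext_zba, true)
 * expands to
 *     {.name = "zba", .offset = CPU_CFG_OFFSET(ext_zba), .enabled = true}
 * tying the user-visible property name to its RISCVCPUConfig flag and its
 * default value.
 */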
1471 
1472 const RISCVCPUMultiExtConfig riscv_cpu_extensions[] = {
1473     /* Defaults for standard extensions */
1474     MULTI_EXT_CFG_BOOL("sscofpmf", ext_sscofpmf, false),
1475     MULTI_EXT_CFG_BOOL("smcntrpmf", ext_smcntrpmf, false),
1476     MULTI_EXT_CFG_BOOL("zifencei", ext_zifencei, true),
1477     MULTI_EXT_CFG_BOOL("zicfilp", ext_zicfilp, false),
1478     MULTI_EXT_CFG_BOOL("zicfiss", ext_zicfiss, false),
1479     MULTI_EXT_CFG_BOOL("zicsr", ext_zicsr, true),
1480     MULTI_EXT_CFG_BOOL("zihintntl", ext_zihintntl, true),
1481     MULTI_EXT_CFG_BOOL("zihintpause", ext_zihintpause, true),
1482     MULTI_EXT_CFG_BOOL("zimop", ext_zimop, false),
1483     MULTI_EXT_CFG_BOOL("zcmop", ext_zcmop, false),
1484     MULTI_EXT_CFG_BOOL("zacas", ext_zacas, false),
1485     MULTI_EXT_CFG_BOOL("zama16b", ext_zama16b, false),
1486     MULTI_EXT_CFG_BOOL("zabha", ext_zabha, false),
1487     MULTI_EXT_CFG_BOOL("zaamo", ext_zaamo, false),
1488     MULTI_EXT_CFG_BOOL("zalrsc", ext_zalrsc, false),
1489     MULTI_EXT_CFG_BOOL("zawrs", ext_zawrs, true),
1490     MULTI_EXT_CFG_BOOL("zfa", ext_zfa, true),
1491     MULTI_EXT_CFG_BOOL("zfbfmin", ext_zfbfmin, false),
1492     MULTI_EXT_CFG_BOOL("zfh", ext_zfh, false),
1493     MULTI_EXT_CFG_BOOL("zfhmin", ext_zfhmin, false),
1494     MULTI_EXT_CFG_BOOL("zve32f", ext_zve32f, false),
1495     MULTI_EXT_CFG_BOOL("zve32x", ext_zve32x, false),
1496     MULTI_EXT_CFG_BOOL("zve64f", ext_zve64f, false),
1497     MULTI_EXT_CFG_BOOL("zve64d", ext_zve64d, false),
1498     MULTI_EXT_CFG_BOOL("zve64x", ext_zve64x, false),
1499     MULTI_EXT_CFG_BOOL("zvfbfmin", ext_zvfbfmin, false),
1500     MULTI_EXT_CFG_BOOL("zvfbfwma", ext_zvfbfwma, false),
1501     MULTI_EXT_CFG_BOOL("zvfh", ext_zvfh, false),
1502     MULTI_EXT_CFG_BOOL("zvfhmin", ext_zvfhmin, false),
1503     MULTI_EXT_CFG_BOOL("sstc", ext_sstc, true),
1504 
1505     MULTI_EXT_CFG_BOOL("smaia", ext_smaia, false),
1506     MULTI_EXT_CFG_BOOL("smepmp", ext_smepmp, false),
1507     MULTI_EXT_CFG_BOOL("smstateen", ext_smstateen, false),
1508     MULTI_EXT_CFG_BOOL("ssaia", ext_ssaia, false),
1509     MULTI_EXT_CFG_BOOL("svade", ext_svade, false),
1510     MULTI_EXT_CFG_BOOL("svadu", ext_svadu, true),
1511     MULTI_EXT_CFG_BOOL("svinval", ext_svinval, false),
1512     MULTI_EXT_CFG_BOOL("svnapot", ext_svnapot, false),
1513     MULTI_EXT_CFG_BOOL("svpbmt", ext_svpbmt, false),
1514     MULTI_EXT_CFG_BOOL("svvptc", ext_svvptc, true),
1515 
1516     MULTI_EXT_CFG_BOOL("zicntr", ext_zicntr, true),
1517     MULTI_EXT_CFG_BOOL("zihpm", ext_zihpm, true),
1518 
1519     MULTI_EXT_CFG_BOOL("zba", ext_zba, true),
1520     MULTI_EXT_CFG_BOOL("zbb", ext_zbb, true),
1521     MULTI_EXT_CFG_BOOL("zbc", ext_zbc, true),
1522     MULTI_EXT_CFG_BOOL("zbkb", ext_zbkb, false),
1523     MULTI_EXT_CFG_BOOL("zbkc", ext_zbkc, false),
1524     MULTI_EXT_CFG_BOOL("zbkx", ext_zbkx, false),
1525     MULTI_EXT_CFG_BOOL("zbs", ext_zbs, true),
1526     MULTI_EXT_CFG_BOOL("zk", ext_zk, false),
1527     MULTI_EXT_CFG_BOOL("zkn", ext_zkn, false),
1528     MULTI_EXT_CFG_BOOL("zknd", ext_zknd, false),
1529     MULTI_EXT_CFG_BOOL("zkne", ext_zkne, false),
1530     MULTI_EXT_CFG_BOOL("zknh", ext_zknh, false),
1531     MULTI_EXT_CFG_BOOL("zkr", ext_zkr, false),
1532     MULTI_EXT_CFG_BOOL("zks", ext_zks, false),
1533     MULTI_EXT_CFG_BOOL("zksed", ext_zksed, false),
1534     MULTI_EXT_CFG_BOOL("zksh", ext_zksh, false),
1535     MULTI_EXT_CFG_BOOL("zkt", ext_zkt, false),
1536     MULTI_EXT_CFG_BOOL("ztso", ext_ztso, false),
1537 
1538     MULTI_EXT_CFG_BOOL("zdinx", ext_zdinx, false),
1539     MULTI_EXT_CFG_BOOL("zfinx", ext_zfinx, false),
1540     MULTI_EXT_CFG_BOOL("zhinx", ext_zhinx, false),
1541     MULTI_EXT_CFG_BOOL("zhinxmin", ext_zhinxmin, false),
1542 
1543     MULTI_EXT_CFG_BOOL("zicbom", ext_zicbom, true),
1544     MULTI_EXT_CFG_BOOL("zicbop", ext_zicbop, true),
1545     MULTI_EXT_CFG_BOOL("zicboz", ext_zicboz, true),
1546 
1547     MULTI_EXT_CFG_BOOL("zmmul", ext_zmmul, false),
1548 
1549     MULTI_EXT_CFG_BOOL("zca", ext_zca, false),
1550     MULTI_EXT_CFG_BOOL("zcb", ext_zcb, false),
1551     MULTI_EXT_CFG_BOOL("zcd", ext_zcd, false),
1552     MULTI_EXT_CFG_BOOL("zce", ext_zce, false),
1553     MULTI_EXT_CFG_BOOL("zcf", ext_zcf, false),
1554     MULTI_EXT_CFG_BOOL("zcmp", ext_zcmp, false),
1555     MULTI_EXT_CFG_BOOL("zcmt", ext_zcmt, false),
1556     MULTI_EXT_CFG_BOOL("zicond", ext_zicond, false),
1557 
1558     /* Vector cryptography extensions */
1559     MULTI_EXT_CFG_BOOL("zvbb", ext_zvbb, false),
1560     MULTI_EXT_CFG_BOOL("zvbc", ext_zvbc, false),
1561     MULTI_EXT_CFG_BOOL("zvkb", ext_zvkb, false),
1562     MULTI_EXT_CFG_BOOL("zvkg", ext_zvkg, false),
1563     MULTI_EXT_CFG_BOOL("zvkned", ext_zvkned, false),
1564     MULTI_EXT_CFG_BOOL("zvknha", ext_zvknha, false),
1565     MULTI_EXT_CFG_BOOL("zvknhb", ext_zvknhb, false),
1566     MULTI_EXT_CFG_BOOL("zvksed", ext_zvksed, false),
1567     MULTI_EXT_CFG_BOOL("zvksh", ext_zvksh, false),
1568     MULTI_EXT_CFG_BOOL("zvkt", ext_zvkt, false),
1569     MULTI_EXT_CFG_BOOL("zvkn", ext_zvkn, false),
1570     MULTI_EXT_CFG_BOOL("zvknc", ext_zvknc, false),
1571     MULTI_EXT_CFG_BOOL("zvkng", ext_zvkng, false),
1572     MULTI_EXT_CFG_BOOL("zvks", ext_zvks, false),
1573     MULTI_EXT_CFG_BOOL("zvksc", ext_zvksc, false),
1574     MULTI_EXT_CFG_BOOL("zvksg", ext_zvksg, false),
1575 
1576     DEFINE_PROP_END_OF_LIST(),
1577 };
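/*
 * Illustrative note, not part of the upstream file: each entry above is
 * exposed as a boolean CPU property whose default is the .enabled field,
 * so the defaults can be overridden from the command line, e.g.:
 *
 *     -cpu rv64,zba=false,zicond=true,zawrs=false
 *
 * Whether an enabled extension actually survives depends on the
 * realize-time validation done elsewhere (TCG/KVM specific code), not
 * on this table alone.
 */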
1578 
1579 const RISCVCPUMultiExtConfig riscv_cpu_vendor_exts[] = {
1580     MULTI_EXT_CFG_BOOL("xtheadba", ext_xtheadba, false),
1581     MULTI_EXT_CFG_BOOL("xtheadbb", ext_xtheadbb, false),
1582     MULTI_EXT_CFG_BOOL("xtheadbs", ext_xtheadbs, false),
1583     MULTI_EXT_CFG_BOOL("xtheadcmo", ext_xtheadcmo, false),
1584     MULTI_EXT_CFG_BOOL("xtheadcondmov", ext_xtheadcondmov, false),
1585     MULTI_EXT_CFG_BOOL("xtheadfmemidx", ext_xtheadfmemidx, false),
1586     MULTI_EXT_CFG_BOOL("xtheadfmv", ext_xtheadfmv, false),
1587     MULTI_EXT_CFG_BOOL("xtheadmac", ext_xtheadmac, false),
1588     MULTI_EXT_CFG_BOOL("xtheadmemidx", ext_xtheadmemidx, false),
1589     MULTI_EXT_CFG_BOOL("xtheadmempair", ext_xtheadmempair, false),
1590     MULTI_EXT_CFG_BOOL("xtheadsync", ext_xtheadsync, false),
1591     MULTI_EXT_CFG_BOOL("xventanacondops", ext_XVentanaCondOps, false),
1592 
1593     DEFINE_PROP_END_OF_LIST(),
1594 };
1595 
1596 /* These are experimental so mark with 'x-' */
1597 const RISCVCPUMultiExtConfig riscv_cpu_experimental_exts[] = {
1598     DEFINE_PROP_END_OF_LIST(),
1599 };
1600 
1601 /*
1602  * 'Named features' is the name we give to extensions that we
1603  * don't want to expose to users. They are either immutable
1604  * (always enabled/disabled) or their value varies depending on
1605  * the resulting CPU state. They have riscv,isa strings and
1606  * priv_ver values like regular extensions.
1607  */
1608 const RISCVCPUMultiExtConfig riscv_cpu_named_features[] = {
1609     MULTI_EXT_CFG_BOOL("zic64b", ext_zic64b, true),
1610 
1611     DEFINE_PROP_END_OF_LIST(),
1612 };
1613 
1614 /* Deprecated entries marked for future removal */
1615 const RISCVCPUMultiExtConfig riscv_cpu_deprecated_exts[] = {
1616     MULTI_EXT_CFG_BOOL("Zifencei", ext_zifencei, true),
1617     MULTI_EXT_CFG_BOOL("Zicsr", ext_zicsr, true),
1618     MULTI_EXT_CFG_BOOL("Zihintntl", ext_zihintntl, true),
1619     MULTI_EXT_CFG_BOOL("Zihintpause", ext_zihintpause, true),
1620     MULTI_EXT_CFG_BOOL("Zawrs", ext_zawrs, true),
1621     MULTI_EXT_CFG_BOOL("Zfa", ext_zfa, true),
1622     MULTI_EXT_CFG_BOOL("Zfh", ext_zfh, false),
1623     MULTI_EXT_CFG_BOOL("Zfhmin", ext_zfhmin, false),
1624     MULTI_EXT_CFG_BOOL("Zve32f", ext_zve32f, false),
1625     MULTI_EXT_CFG_BOOL("Zve64f", ext_zve64f, false),
1626     MULTI_EXT_CFG_BOOL("Zve64d", ext_zve64d, false),
1627 
1628     DEFINE_PROP_END_OF_LIST(),
1629 };
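/*
 * Illustrative note, not part of the upstream file: the capitalized
 * spellings above ("Zifencei", "Zve32f", ...) mirror lowercase entries
 * in riscv_cpu_extensions[] and exist only for backward compatibility;
 * new command lines should prefer e.g. "zifencei=true" over
 * "Zifencei=true".
 */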
1630 
1631 static void cpu_set_prop_err(RISCVCPU *cpu, const char *propname,
1632                              Error **errp)
1633 {
1634     g_autofree char *cpuname = riscv_cpu_get_name(cpu);
1635     error_setg(errp, "CPU '%s' does not allow changing the value of '%s'",
1636                cpuname, propname);
1637 }
1638 
1639 static void prop_pmu_num_set(Object *obj, Visitor *v, const char *name,
1640                              void *opaque, Error **errp)
1641 {
1642     RISCVCPU *cpu = RISCV_CPU(obj);
1643     uint8_t pmu_num, curr_pmu_num;
1644     uint32_t pmu_mask;
1645 
1646     visit_type_uint8(v, name, &pmu_num, errp);
1647 
1648     curr_pmu_num = ctpop32(cpu->cfg.pmu_mask);
1649 
1650     if (pmu_num != curr_pmu_num && riscv_cpu_is_vendor(obj)) {
1651         cpu_set_prop_err(cpu, name, errp);
1652         error_append_hint(errp, "Current '%s' val: %u\n",
1653                           name, curr_pmu_num);
1654         return;
1655     }
1656 
1657     if (pmu_num > (RV_MAX_MHPMCOUNTERS - 3)) {
1658         error_setg(errp, "Number of counters exceeds maximum available");
1659         return;
1660     }
1661 
1662     if (pmu_num == 0) {
1663         pmu_mask = 0;
1664     } else {
1665         pmu_mask = MAKE_64BIT_MASK(3, pmu_num);
1666     }
1667 
1668     warn_report("\"pmu-num\" property is deprecated; use \"pmu-mask\"");
1669     cpu->cfg.pmu_mask = pmu_mask;
1670     cpu_option_add_user_setting("pmu-mask", pmu_mask);
1671 }
1672 
1673 static void prop_pmu_num_get(Object *obj, Visitor *v, const char *name,
1674                              void *opaque, Error **errp)
1675 {
1676     RISCVCPU *cpu = RISCV_CPU(obj);
1677     uint8_t pmu_num = ctpop32(cpu->cfg.pmu_mask);
1678 
1679     visit_type_uint8(v, name, &pmu_num, errp);
1680 }
1681 
1682 static const PropertyInfo prop_pmu_num = {
1683     .name = "pmu-num",
1684     .get = prop_pmu_num_get,
1685     .set = prop_pmu_num_set,
1686 };
1687 
1688 static void prop_pmu_mask_set(Object *obj, Visitor *v, const char *name,
1689                              void *opaque, Error **errp)
1690 {
1691     RISCVCPU *cpu = RISCV_CPU(obj);
1692     uint32_t value;
1693     uint8_t pmu_num;
1694 
1695     visit_type_uint32(v, name, &value, errp);
1696 
1697     if (value != cpu->cfg.pmu_mask && riscv_cpu_is_vendor(obj)) {
1698         cpu_set_prop_err(cpu, name, errp);
1699         error_append_hint(errp, "Current '%s' val: %x\n",
1700                           name, cpu->cfg.pmu_mask);
1701         return;
1702     }
1703 
1704     pmu_num = ctpop32(value);
1705 
1706     if (pmu_num > (RV_MAX_MHPMCOUNTERS - 3)) {
1707         error_setg(errp, "Number of counters exceeds maximum available");
1708         return;
1709     }
1710 
1711     cpu_option_add_user_setting(name, value);
1712     cpu->cfg.pmu_mask = value;
1713 }
1714 
1715 static void prop_pmu_mask_get(Object *obj, Visitor *v, const char *name,
1716                              void *opaque, Error **errp)
1717 {
1718     uint32_t pmu_mask = RISCV_CPU(obj)->cfg.pmu_mask;
1719 
1720     visit_type_uint32(v, name, &pmu_mask, errp);
1721 }
1722 
1723 static const PropertyInfo prop_pmu_mask = {
1724     .name = "pmu-mask",
1725     .get = prop_pmu_mask_get,
1726     .set = prop_pmu_mask_set,
1727 };
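/*
 * Worked example, not part of the upstream file: the deprecated
 * "pmu-num" property is translated into "pmu-mask" by prop_pmu_num_set()
 * using MAKE_64BIT_MASK(3, pmu_num), i.e. pmu_num contiguous counters
 * starting at mhpmcounter3:
 *
 *     -cpu rv64,pmu-num=4      ->  cfg.pmu_mask = 0x78 (counters 3..6)
 *     -cpu rv64,pmu-mask=0x78  ->  same configuration, preferred form
 */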
1728 
1729 static void prop_mmu_set(Object *obj, Visitor *v, const char *name,
1730                          void *opaque, Error **errp)
1731 {
1732     RISCVCPU *cpu = RISCV_CPU(obj);
1733     bool value;
1734 
1735     visit_type_bool(v, name, &value, errp);
1736 
1737     if (cpu->cfg.mmu != value && riscv_cpu_is_vendor(obj)) {
1738         cpu_set_prop_err(cpu, "mmu", errp);
1739         return;
1740     }
1741 
1742     cpu_option_add_user_setting(name, value);
1743     cpu->cfg.mmu = value;
1744 }
1745 
1746 static void prop_mmu_get(Object *obj, Visitor *v, const char *name,
1747                          void *opaque, Error **errp)
1748 {
1749     bool value = RISCV_CPU(obj)->cfg.mmu;
1750 
1751     visit_type_bool(v, name, &value, errp);
1752 }
1753 
1754 static const PropertyInfo prop_mmu = {
1755     .name = "mmu",
1756     .get = prop_mmu_get,
1757     .set = prop_mmu_set,
1758 };
1759 
1760 static void prop_pmp_set(Object *obj, Visitor *v, const char *name,
1761                          void *opaque, Error **errp)
1762 {
1763     RISCVCPU *cpu = RISCV_CPU(obj);
1764     bool value;
1765 
1766     visit_type_bool(v, name, &value, errp);
1767 
1768     if (cpu->cfg.pmp != value && riscv_cpu_is_vendor(obj)) {
1769         cpu_set_prop_err(cpu, name, errp);
1770         return;
1771     }
1772 
1773     cpu_option_add_user_setting(name, value);
1774     cpu->cfg.pmp = value;
1775 }
1776 
1777 static void prop_pmp_get(Object *obj, Visitor *v, const char *name,
1778                          void *opaque, Error **errp)
1779 {
1780     bool value = RISCV_CPU(obj)->cfg.pmp;
1781 
1782     visit_type_bool(v, name, &value, errp);
1783 }
1784 
1785 static const PropertyInfo prop_pmp = {
1786     .name = "pmp",
1787     .get = prop_pmp_get,
1788     .set = prop_pmp_set,
1789 };
1790 
1791 static int priv_spec_from_str(const char *priv_spec_str)
1792 {
1793     int priv_version = -1;
1794 
1795     if (!g_strcmp0(priv_spec_str, PRIV_VER_1_13_0_STR)) {
1796         priv_version = PRIV_VERSION_1_13_0;
1797     } else if (!g_strcmp0(priv_spec_str, PRIV_VER_1_12_0_STR)) {
1798         priv_version = PRIV_VERSION_1_12_0;
1799     } else if (!g_strcmp0(priv_spec_str, PRIV_VER_1_11_0_STR)) {
1800         priv_version = PRIV_VERSION_1_11_0;
1801     } else if (!g_strcmp0(priv_spec_str, PRIV_VER_1_10_0_STR)) {
1802         priv_version = PRIV_VERSION_1_10_0;
1803     }
1804 
1805     return priv_version;
1806 }
1807 
1808 const char *priv_spec_to_str(int priv_version)
1809 {
1810     switch (priv_version) {
1811     case PRIV_VERSION_1_10_0:
1812         return PRIV_VER_1_10_0_STR;
1813     case PRIV_VERSION_1_11_0:
1814         return PRIV_VER_1_11_0_STR;
1815     case PRIV_VERSION_1_12_0:
1816         return PRIV_VER_1_12_0_STR;
1817     case PRIV_VERSION_1_13_0:
1818         return PRIV_VER_1_13_0_STR;
1819     default:
1820         return NULL;
1821     }
1822 }
1823 
1824 static void prop_priv_spec_set(Object *obj, Visitor *v, const char *name,
1825                                void *opaque, Error **errp)
1826 {
1827     RISCVCPU *cpu = RISCV_CPU(obj);
1828     g_autofree char *value = NULL;
1829     int priv_version = -1;
1830 
1831     visit_type_str(v, name, &value, errp);
1832 
1833     priv_version = priv_spec_from_str(value);
1834     if (priv_version < 0) {
1835         error_setg(errp, "Unsupported privilege spec version '%s'", value);
1836         return;
1837     }
1838 
1839     if (priv_version != cpu->env.priv_ver && riscv_cpu_is_vendor(obj)) {
1840         cpu_set_prop_err(cpu, name, errp);
1841         error_append_hint(errp, "Current '%s' val: %s\n", name,
1842                           object_property_get_str(obj, name, NULL));
1843         return;
1844     }
1845 
1846     cpu_option_add_user_setting(name, priv_version);
1847     cpu->env.priv_ver = priv_version;
1848 }
1849 
1850 static void prop_priv_spec_get(Object *obj, Visitor *v, const char *name,
1851                                void *opaque, Error **errp)
1852 {
1853     RISCVCPU *cpu = RISCV_CPU(obj);
1854     const char *value = priv_spec_to_str(cpu->env.priv_ver);
1855 
1856     visit_type_str(v, name, (char **)&value, errp);
1857 }
1858 
1859 static const PropertyInfo prop_priv_spec = {
1860     .name = "priv_spec",
1861     .get = prop_priv_spec_get,
1862     .set = prop_priv_spec_set,
1863 };
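/*
 * Illustrative note, not part of the upstream file: "priv_spec" only
 * accepts the strings matched in priv_spec_from_str(), i.e. the
 * PRIV_VER_*_STR spellings (conventionally of the form "v1.12.0"), e.g.:
 *
 *     -cpu rv64,priv_spec=v1.12.0
 *
 * Anything else fails with "Unsupported privilege spec version", and
 * vendor CPUs additionally refuse a value different from their
 * built-in one.
 */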
1864 
1865 static void prop_vext_spec_set(Object *obj, Visitor *v, const char *name,
1866                                void *opaque, Error **errp)
1867 {
1868     RISCVCPU *cpu = RISCV_CPU(obj);
1869     g_autofree char *value = NULL;
1870 
1871     visit_type_str(v, name, &value, errp);
1872 
1873     if (g_strcmp0(value, VEXT_VER_1_00_0_STR) != 0) {
1874         error_setg(errp, "Unsupported vector spec version '%s'", value);
1875         return;
1876     }
1877 
1878     cpu_option_add_user_setting(name, VEXT_VERSION_1_00_0);
1879     cpu->env.vext_ver = VEXT_VERSION_1_00_0;
1880 }
1881 
1882 static void prop_vext_spec_get(Object *obj, Visitor *v, const char *name,
1883                                void *opaque, Error **errp)
1884 {
1885     const char *value = VEXT_VER_1_00_0_STR;
1886 
1887     visit_type_str(v, name, (char **)&value, errp);
1888 }
1889 
1890 static const PropertyInfo prop_vext_spec = {
1891     .name = "vext_spec",
1892     .get = prop_vext_spec_get,
1893     .set = prop_vext_spec_set,
1894 };
1895 
1896 static void prop_vlen_set(Object *obj, Visitor *v, const char *name,
1897                          void *opaque, Error **errp)
1898 {
1899     RISCVCPU *cpu = RISCV_CPU(obj);
1900     uint16_t cpu_vlen = cpu->cfg.vlenb << 3;
1901     uint16_t value;
1902 
1903     if (!visit_type_uint16(v, name, &value, errp)) {
1904         return;
1905     }
1906 
1907     if (!is_power_of_2(value)) {
1908         error_setg(errp, "Vector extension VLEN must be power of 2");
1909         return;
1910     }
1911 
1912     if (value != cpu_vlen && riscv_cpu_is_vendor(obj)) {
1913         cpu_set_prop_err(cpu, name, errp);
1914         error_append_hint(errp, "Current '%s' val: %u\n",
1915                           name, cpu_vlen);
1916         return;
1917     }
1918 
1919     cpu_option_add_user_setting(name, value);
1920     cpu->cfg.vlenb = value >> 3;
1921 }
1922 
1923 static void prop_vlen_get(Object *obj, Visitor *v, const char *name,
1924                          void *opaque, Error **errp)
1925 {
1926     uint16_t value = RISCV_CPU(obj)->cfg.vlenb << 3;
1927 
1928     visit_type_uint16(v, name, &value, errp);
1929 }
1930 
1931 static const PropertyInfo prop_vlen = {
1932     .name = "vlen",
1933     .get = prop_vlen_get,
1934     .set = prop_vlen_set,
1935 };
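/*
 * Worked example, not part of the upstream file: "vlen" is exposed in
 * bits but stored in bytes (cfg.vlenb = vlen >> 3), so for instance:
 *
 *     -cpu rv64,v=true,vlen=256   ->  cfg.vlenb = 32
 *
 * The value must be a power of two, and vendor CPUs reject any vlen
 * that differs from their built-in vlenb << 3.
 */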
1936 
1937 static void prop_elen_set(Object *obj, Visitor *v, const char *name,
1938                          void *opaque, Error **errp)
1939 {
1940     RISCVCPU *cpu = RISCV_CPU(obj);
1941     uint16_t value;
1942 
1943     if (!visit_type_uint16(v, name, &value, errp)) {
1944         return;
1945     }
1946 
1947     if (!is_power_of_2(value)) {
1948         error_setg(errp, "Vector extension ELEN must be power of 2");
1949         return;
1950     }
1951 
1952     if (value != cpu->cfg.elen && riscv_cpu_is_vendor(obj)) {
1953         cpu_set_prop_err(cpu, name, errp);
1954         error_append_hint(errp, "Current '%s' val: %u\n",
1955                           name, cpu->cfg.elen);
1956         return;
1957     }
1958 
1959     cpu_option_add_user_setting(name, value);
1960     cpu->cfg.elen = value;
1961 }
1962 
1963 static void prop_elen_get(Object *obj, Visitor *v, const char *name,
1964                          void *opaque, Error **errp)
1965 {
1966     uint16_t value = RISCV_CPU(obj)->cfg.elen;
1967 
1968     visit_type_uint16(v, name, &value, errp);
1969 }
1970 
1971 static const PropertyInfo prop_elen = {
1972     .name = "elen",
1973     .get = prop_elen_get,
1974     .set = prop_elen_set,
1975 };
1976 
1977 static void prop_cbom_blksize_set(Object *obj, Visitor *v, const char *name,
1978                                   void *opaque, Error **errp)
1979 {
1980     RISCVCPU *cpu = RISCV_CPU(obj);
1981     uint16_t value;
1982 
1983     if (!visit_type_uint16(v, name, &value, errp)) {
1984         return;
1985     }
1986 
1987     if (value != cpu->cfg.cbom_blocksize && riscv_cpu_is_vendor(obj)) {
1988         cpu_set_prop_err(cpu, name, errp);
1989         error_append_hint(errp, "Current '%s' val: %u\n",
1990                           name, cpu->cfg.cbom_blocksize);
1991         return;
1992     }
1993 
1994     cpu_option_add_user_setting(name, value);
1995     cpu->cfg.cbom_blocksize = value;
1996 }
1997 
1998 static void prop_cbom_blksize_get(Object *obj, Visitor *v, const char *name,
1999                          void *opaque, Error **errp)
2000 {
2001     uint16_t value = RISCV_CPU(obj)->cfg.cbom_blocksize;
2002 
2003     visit_type_uint16(v, name, &value, errp);
2004 }
2005 
2006 static const PropertyInfo prop_cbom_blksize = {
2007     .name = "cbom_blocksize",
2008     .get = prop_cbom_blksize_get,
2009     .set = prop_cbom_blksize_set,
2010 };
2011 
2012 static void prop_cbop_blksize_set(Object *obj, Visitor *v, const char *name,
2013                                   void *opaque, Error **errp)
2014 {
2015     RISCVCPU *cpu = RISCV_CPU(obj);
2016     uint16_t value;
2017 
2018     if (!visit_type_uint16(v, name, &value, errp)) {
2019         return;
2020     }
2021 
2022     if (value != cpu->cfg.cbop_blocksize && riscv_cpu_is_vendor(obj)) {
2023         cpu_set_prop_err(cpu, name, errp);
2024         error_append_hint(errp, "Current '%s' val: %u\n",
2025                           name, cpu->cfg.cbop_blocksize);
2026         return;
2027     }
2028 
2029     cpu_option_add_user_setting(name, value);
2030     cpu->cfg.cbop_blocksize = value;
2031 }
2032 
2033 static void prop_cbop_blksize_get(Object *obj, Visitor *v, const char *name,
2034                          void *opaque, Error **errp)
2035 {
2036     uint16_t value = RISCV_CPU(obj)->cfg.cbop_blocksize;
2037 
2038     visit_type_uint16(v, name, &value, errp);
2039 }
2040 
2041 static const PropertyInfo prop_cbop_blksize = {
2042     .name = "cbop_blocksize",
2043     .get = prop_cbop_blksize_get,
2044     .set = prop_cbop_blksize_set,
2045 };
2046 
2047 static void prop_cboz_blksize_set(Object *obj, Visitor *v, const char *name,
2048                                   void *opaque, Error **errp)
2049 {
2050     RISCVCPU *cpu = RISCV_CPU(obj);
2051     uint16_t value;
2052 
2053     if (!visit_type_uint16(v, name, &value, errp)) {
2054         return;
2055     }
2056 
2057     if (value != cpu->cfg.cboz_blocksize && riscv_cpu_is_vendor(obj)) {
2058         cpu_set_prop_err(cpu, name, errp);
2059         error_append_hint(errp, "Current '%s' val: %u\n",
2060                           name, cpu->cfg.cboz_blocksize);
2061         return;
2062     }
2063 
2064     cpu_option_add_user_setting(name, value);
2065     cpu->cfg.cboz_blocksize = value;
2066 }
2067 
2068 static void prop_cboz_blksize_get(Object *obj, Visitor *v, const char *name,
2069                          void *opaque, Error **errp)
2070 {
2071     uint16_t value = RISCV_CPU(obj)->cfg.cboz_blocksize;
2072 
2073     visit_type_uint16(v, name, &value, errp);
2074 }
2075 
2076 static const PropertyInfo prop_cboz_blksize = {
2077     .name = "cboz_blocksize",
2078     .get = prop_cboz_blksize_get,
2079     .set = prop_cboz_blksize_set,
2080 };
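/*
 * Illustrative note, not part of the upstream file: the three Zicbo*
 * block size properties above share the same shape: a plain uint16
 * setter that records the user's choice and rejects changes on vendor
 * CPUs, e.g.:
 *
 *     -cpu rv64,zicboz=true,cboz_blocksize=64
 *
 * Note that the setters themselves do no range or power-of-two
 * checking; any such validation presumably happens later at realize
 * time.
 */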
2081 
2082 static void prop_mvendorid_set(Object *obj, Visitor *v, const char *name,
2083                                void *opaque, Error **errp)
2084 {
2085     bool dynamic_cpu = riscv_cpu_is_dynamic(obj);
2086     RISCVCPU *cpu = RISCV_CPU(obj);
2087     uint32_t prev_val = cpu->cfg.mvendorid;
2088     uint32_t value;
2089 
2090     if (!visit_type_uint32(v, name, &value, errp)) {
2091         return;
2092     }
2093 
2094     if (!dynamic_cpu && prev_val != value) {
2095         error_setg(errp, "Unable to change %s mvendorid (0x%x)",
2096                    object_get_typename(obj), prev_val);
2097         return;
2098     }
2099 
2100     cpu->cfg.mvendorid = value;
2101 }
2102 
2103 static void prop_mvendorid_get(Object *obj, Visitor *v, const char *name,
2104                                void *opaque, Error **errp)
2105 {
2106     uint32_t value = RISCV_CPU(obj)->cfg.mvendorid;
2107 
2108     visit_type_uint32(v, name, &value, errp);
2109 }
2110 
2111 static const PropertyInfo prop_mvendorid = {
2112     .name = "mvendorid",
2113     .get = prop_mvendorid_get,
2114     .set = prop_mvendorid_set,
2115 };
2116 
2117 static void prop_mimpid_set(Object *obj, Visitor *v, const char *name,
2118                             void *opaque, Error **errp)
2119 {
2120     bool dynamic_cpu = riscv_cpu_is_dynamic(obj);
2121     RISCVCPU *cpu = RISCV_CPU(obj);
2122     uint64_t prev_val = cpu->cfg.mimpid;
2123     uint64_t value;
2124 
2125     if (!visit_type_uint64(v, name, &value, errp)) {
2126         return;
2127     }
2128 
2129     if (!dynamic_cpu && prev_val != value) {
2130         error_setg(errp, "Unable to change %s mimpid (0x%" PRIx64 ")",
2131                    object_get_typename(obj), prev_val);
2132         return;
2133     }
2134 
2135     cpu->cfg.mimpid = value;
2136 }
2137 
2138 static void prop_mimpid_get(Object *obj, Visitor *v, const char *name,
2139                             void *opaque, Error **errp)
2140 {
2141     uint64_t value = RISCV_CPU(obj)->cfg.mimpid;
2142 
2143     visit_type_uint64(v, name, &value, errp);
2144 }
2145 
2146 static const PropertyInfo prop_mimpid = {
2147     .name = "mimpid",
2148     .get = prop_mimpid_get,
2149     .set = prop_mimpid_set,
2150 };
2151 
2152 static void prop_marchid_set(Object *obj, Visitor *v, const char *name,
2153                              void *opaque, Error **errp)
2154 {
2155     bool dynamic_cpu = riscv_cpu_is_dynamic(obj);
2156     RISCVCPU *cpu = RISCV_CPU(obj);
2157     uint64_t prev_val = cpu->cfg.marchid;
2158     uint64_t value, invalid_val;
2159     uint32_t mxlen = 0;
2160 
2161     if (!visit_type_uint64(v, name, &value, errp)) {
2162         return;
2163     }
2164 
2165     if (!dynamic_cpu && prev_val != value) {
2166         error_setg(errp, "Unable to change %s marchid (0x%" PRIx64 ")",
2167                    object_get_typename(obj), prev_val);
2168         return;
2169     }
2170 
2171     switch (riscv_cpu_mxl(&cpu->env)) {
2172     case MXL_RV32:
2173         mxlen = 32;
2174         break;
2175     case MXL_RV64:
2176     case MXL_RV128:
2177         mxlen = 64;
2178         break;
2179     default:
2180         g_assert_not_reached();
2181     }
2182 
2183     invalid_val = 1LL << (mxlen - 1);
2184 
2185     if (value == invalid_val) {
2186         error_setg(errp, "Unable to set marchid with MSB (%u) bit set "
2187                          "and the remaining bits zero", mxlen);
2188         return;
2189     }
2190 
2191     cpu->cfg.marchid = value;
2192 }
2193 
2194 static void prop_marchid_get(Object *obj, Visitor *v, const char *name,
2195                              void *opaque, Error **errp)
2196 {
2197     uint64_t value = RISCV_CPU(obj)->cfg.marchid;
2198 
2199     visit_type_uint64(v, name, &value, errp);
2200 }
2201 
2202 static const PropertyInfo prop_marchid = {
2203     .name = "marchid",
2204     .get = prop_marchid_get,
2205     .set = prop_marchid_set,
2206 };
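/*
 * Worked example, not part of the upstream file: prop_marchid_set()
 * rejects exactly one value per XLEN, the one with only the MSB set.
 * On a dynamic rv64 CPU:
 *
 *     marchid = 0x8000000000000000   ->  rejected (1LL << 63)
 *     marchid = 0x8000000000000001   ->  accepted
 *
 * As with mvendorid and mimpid, non-dynamic CPUs cannot override the
 * built-in value at all.
 */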
2207 
2208 /*
2209  * RVA22U64 defines some 'named features' that are cache
2210  * related: Za64rs, Zic64b, Ziccif, Ziccrse, Ziccamoa
2211  * and Zicclsm. They are always implemented in TCG and
2212  * don't need to be manually enabled by the profile.
2213  */
2214 static RISCVCPUProfile RVA22U64 = {
2215     .parent = NULL,
2216     .name = "rva22u64",
2217     .misa_ext = RVI | RVM | RVA | RVF | RVD | RVC | RVU,
2218     .priv_spec = RISCV_PROFILE_ATTR_UNUSED,
2219     .satp_mode = RISCV_PROFILE_ATTR_UNUSED,
2220     .ext_offsets = {
2221         CPU_CFG_OFFSET(ext_zicsr), CPU_CFG_OFFSET(ext_zihintpause),
2222         CPU_CFG_OFFSET(ext_zba), CPU_CFG_OFFSET(ext_zbb),
2223         CPU_CFG_OFFSET(ext_zbs), CPU_CFG_OFFSET(ext_zfhmin),
2224         CPU_CFG_OFFSET(ext_zkt), CPU_CFG_OFFSET(ext_zicntr),
2225         CPU_CFG_OFFSET(ext_zihpm), CPU_CFG_OFFSET(ext_zicbom),
2226         CPU_CFG_OFFSET(ext_zicbop), CPU_CFG_OFFSET(ext_zicboz),
2227 
2228         /* mandatory named features for this profile */
2229         CPU_CFG_OFFSET(ext_zic64b),
2230 
2231         RISCV_PROFILE_EXT_LIST_END
2232     }
2233 };
2234 
2235 /*
2236  * As with RVA22U64, RVA22S64 also defines 'named features'.
2237  *
2238  * Cache related features that we consider enabled since we don't
2239  * Cache-related features that we consider enabled since we don't
2240  * implement caches: Ssccptr
2241  *
2242  * Other named features that we already implement: Sstvecd, Sstvala,
2243  * Sscounterenw
2244  * The remaining features/extensions come from RVA22U64.
2245  */
2246 static RISCVCPUProfile RVA22S64 = {
2247     .parent = &RVA22U64,
2248     .name = "rva22s64",
2249     .misa_ext = RVS,
2250     .priv_spec = PRIV_VERSION_1_12_0,
2251     .satp_mode = VM_1_10_SV39,
2252     .ext_offsets = {
2253         /* rva22s64 exts */
2254         CPU_CFG_OFFSET(ext_zifencei), CPU_CFG_OFFSET(ext_svpbmt),
2255         CPU_CFG_OFFSET(ext_svinval), CPU_CFG_OFFSET(ext_svade),
2256 
2257         RISCV_PROFILE_EXT_LIST_END
2258     }
2259 };
2260 
2261 RISCVCPUProfile *riscv_profiles[] = {
2262     &RVA22U64,
2263     &RVA22S64,
2264     NULL,
2265 };
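/*
 * Illustrative note, not part of the upstream file: RVA22S64 chains to
 * RVA22U64 through .parent, so code applying the s64 profile is
 * expected to pull in the whole u64 extension list above in addition
 * to its own entries (Zifencei, Svpbmt, Svinval, Svade), RVS, Sv39 and
 * priv spec 1.12.  The profiles surface as the CPU models registered
 * by DEFINE_PROFILE_CPU() at the end of this file (e.g.
 * "-cpu rva22s64") and, presumably, as boolean options on other CPUs
 * (e.g. "-cpu rv64,rva22u64=true"), with that handling living outside
 * this file.
 */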
2266 
2267 static RISCVCPUImpliedExtsRule RVA_IMPLIED = {
2268     .is_misa = true,
2269     .ext = RVA,
2270     .implied_multi_exts = {
2271         CPU_CFG_OFFSET(ext_zalrsc), CPU_CFG_OFFSET(ext_zaamo),
2272 
2273         RISCV_IMPLIED_EXTS_RULE_END
2274     },
2275 };
2276 
2277 static RISCVCPUImpliedExtsRule RVD_IMPLIED = {
2278     .is_misa = true,
2279     .ext = RVD,
2280     .implied_misa_exts = RVF,
2281     .implied_multi_exts = { RISCV_IMPLIED_EXTS_RULE_END },
2282 };
2283 
2284 static RISCVCPUImpliedExtsRule RVF_IMPLIED = {
2285     .is_misa = true,
2286     .ext = RVF,
2287     .implied_multi_exts = {
2288         CPU_CFG_OFFSET(ext_zicsr),
2289 
2290         RISCV_IMPLIED_EXTS_RULE_END
2291     },
2292 };
2293 
2294 static RISCVCPUImpliedExtsRule RVM_IMPLIED = {
2295     .is_misa = true,
2296     .ext = RVM,
2297     .implied_multi_exts = {
2298         CPU_CFG_OFFSET(ext_zmmul),
2299 
2300         RISCV_IMPLIED_EXTS_RULE_END
2301     },
2302 };
2303 
2304 static RISCVCPUImpliedExtsRule RVV_IMPLIED = {
2305     .is_misa = true,
2306     .ext = RVV,
2307     .implied_multi_exts = {
2308         CPU_CFG_OFFSET(ext_zve64d),
2309 
2310         RISCV_IMPLIED_EXTS_RULE_END
2311     },
2312 };
2313 
2314 static RISCVCPUImpliedExtsRule ZCB_IMPLIED = {
2315     .ext = CPU_CFG_OFFSET(ext_zcb),
2316     .implied_multi_exts = {
2317         CPU_CFG_OFFSET(ext_zca),
2318 
2319         RISCV_IMPLIED_EXTS_RULE_END
2320     },
2321 };
2322 
2323 static RISCVCPUImpliedExtsRule ZCD_IMPLIED = {
2324     .ext = CPU_CFG_OFFSET(ext_zcd),
2325     .implied_misa_exts = RVD,
2326     .implied_multi_exts = {
2327         CPU_CFG_OFFSET(ext_zca),
2328 
2329         RISCV_IMPLIED_EXTS_RULE_END
2330     },
2331 };
2332 
2333 static RISCVCPUImpliedExtsRule ZCE_IMPLIED = {
2334     .ext = CPU_CFG_OFFSET(ext_zce),
2335     .implied_multi_exts = {
2336         CPU_CFG_OFFSET(ext_zcb), CPU_CFG_OFFSET(ext_zcmp),
2337         CPU_CFG_OFFSET(ext_zcmt),
2338 
2339         RISCV_IMPLIED_EXTS_RULE_END
2340     },
2341 };
2342 
2343 static RISCVCPUImpliedExtsRule ZCF_IMPLIED = {
2344     .ext = CPU_CFG_OFFSET(ext_zcf),
2345     .implied_misa_exts = RVF,
2346     .implied_multi_exts = {
2347         CPU_CFG_OFFSET(ext_zca),
2348 
2349         RISCV_IMPLIED_EXTS_RULE_END
2350     },
2351 };
2352 
2353 static RISCVCPUImpliedExtsRule ZCMP_IMPLIED = {
2354     .ext = CPU_CFG_OFFSET(ext_zcmp),
2355     .implied_multi_exts = {
2356         CPU_CFG_OFFSET(ext_zca),
2357 
2358         RISCV_IMPLIED_EXTS_RULE_END
2359     },
2360 };
2361 
2362 static RISCVCPUImpliedExtsRule ZCMT_IMPLIED = {
2363     .ext = CPU_CFG_OFFSET(ext_zcmt),
2364     .implied_multi_exts = {
2365         CPU_CFG_OFFSET(ext_zca), CPU_CFG_OFFSET(ext_zicsr),
2366 
2367         RISCV_IMPLIED_EXTS_RULE_END
2368     },
2369 };
2370 
2371 static RISCVCPUImpliedExtsRule ZDINX_IMPLIED = {
2372     .ext = CPU_CFG_OFFSET(ext_zdinx),
2373     .implied_multi_exts = {
2374         CPU_CFG_OFFSET(ext_zfinx),
2375 
2376         RISCV_IMPLIED_EXTS_RULE_END
2377     },
2378 };
2379 
2380 static RISCVCPUImpliedExtsRule ZFA_IMPLIED = {
2381     .ext = CPU_CFG_OFFSET(ext_zfa),
2382     .implied_misa_exts = RVF,
2383     .implied_multi_exts = { RISCV_IMPLIED_EXTS_RULE_END },
2384 };
2385 
2386 static RISCVCPUImpliedExtsRule ZFBFMIN_IMPLIED = {
2387     .ext = CPU_CFG_OFFSET(ext_zfbfmin),
2388     .implied_misa_exts = RVF,
2389     .implied_multi_exts = { RISCV_IMPLIED_EXTS_RULE_END },
2390 };
2391 
2392 static RISCVCPUImpliedExtsRule ZFH_IMPLIED = {
2393     .ext = CPU_CFG_OFFSET(ext_zfh),
2394     .implied_multi_exts = {
2395         CPU_CFG_OFFSET(ext_zfhmin),
2396 
2397         RISCV_IMPLIED_EXTS_RULE_END
2398     },
2399 };
2400 
2401 static RISCVCPUImpliedExtsRule ZFHMIN_IMPLIED = {
2402     .ext = CPU_CFG_OFFSET(ext_zfhmin),
2403     .implied_misa_exts = RVF,
2404     .implied_multi_exts = { RISCV_IMPLIED_EXTS_RULE_END },
2405 };
2406 
2407 static RISCVCPUImpliedExtsRule ZFINX_IMPLIED = {
2408     .ext = CPU_CFG_OFFSET(ext_zfinx),
2409     .implied_multi_exts = {
2410         CPU_CFG_OFFSET(ext_zicsr),
2411 
2412         RISCV_IMPLIED_EXTS_RULE_END
2413     },
2414 };
2415 
2416 static RISCVCPUImpliedExtsRule ZHINX_IMPLIED = {
2417     .ext = CPU_CFG_OFFSET(ext_zhinx),
2418     .implied_multi_exts = {
2419         CPU_CFG_OFFSET(ext_zhinxmin),
2420 
2421         RISCV_IMPLIED_EXTS_RULE_END
2422     },
2423 };
2424 
2425 static RISCVCPUImpliedExtsRule ZHINXMIN_IMPLIED = {
2426     .ext = CPU_CFG_OFFSET(ext_zhinxmin),
2427     .implied_multi_exts = {
2428         CPU_CFG_OFFSET(ext_zfinx),
2429 
2430         RISCV_IMPLIED_EXTS_RULE_END
2431     },
2432 };
2433 
2434 static RISCVCPUImpliedExtsRule ZICNTR_IMPLIED = {
2435     .ext = CPU_CFG_OFFSET(ext_zicntr),
2436     .implied_multi_exts = {
2437         CPU_CFG_OFFSET(ext_zicsr),
2438 
2439         RISCV_IMPLIED_EXTS_RULE_END
2440     },
2441 };
2442 
2443 static RISCVCPUImpliedExtsRule ZIHPM_IMPLIED = {
2444     .ext = CPU_CFG_OFFSET(ext_zihpm),
2445     .implied_multi_exts = {
2446         CPU_CFG_OFFSET(ext_zicsr),
2447 
2448         RISCV_IMPLIED_EXTS_RULE_END
2449     },
2450 };
2451 
2452 static RISCVCPUImpliedExtsRule ZK_IMPLIED = {
2453     .ext = CPU_CFG_OFFSET(ext_zk),
2454     .implied_multi_exts = {
2455         CPU_CFG_OFFSET(ext_zkn), CPU_CFG_OFFSET(ext_zkr),
2456         CPU_CFG_OFFSET(ext_zkt),
2457 
2458         RISCV_IMPLIED_EXTS_RULE_END
2459     },
2460 };
2461 
2462 static RISCVCPUImpliedExtsRule ZKN_IMPLIED = {
2463     .ext = CPU_CFG_OFFSET(ext_zkn),
2464     .implied_multi_exts = {
2465         CPU_CFG_OFFSET(ext_zbkb), CPU_CFG_OFFSET(ext_zbkc),
2466         CPU_CFG_OFFSET(ext_zbkx), CPU_CFG_OFFSET(ext_zkne),
2467         CPU_CFG_OFFSET(ext_zknd), CPU_CFG_OFFSET(ext_zknh),
2468 
2469         RISCV_IMPLIED_EXTS_RULE_END
2470     },
2471 };
2472 
2473 static RISCVCPUImpliedExtsRule ZKS_IMPLIED = {
2474     .ext = CPU_CFG_OFFSET(ext_zks),
2475     .implied_multi_exts = {
2476         CPU_CFG_OFFSET(ext_zbkb), CPU_CFG_OFFSET(ext_zbkc),
2477         CPU_CFG_OFFSET(ext_zbkx), CPU_CFG_OFFSET(ext_zksed),
2478         CPU_CFG_OFFSET(ext_zksh),
2479 
2480         RISCV_IMPLIED_EXTS_RULE_END
2481     },
2482 };
2483 
2484 static RISCVCPUImpliedExtsRule ZVBB_IMPLIED = {
2485     .ext = CPU_CFG_OFFSET(ext_zvbb),
2486     .implied_multi_exts = {
2487         CPU_CFG_OFFSET(ext_zvkb),
2488 
2489         RISCV_IMPLIED_EXTS_RULE_END
2490     },
2491 };
2492 
2493 static RISCVCPUImpliedExtsRule ZVE32F_IMPLIED = {
2494     .ext = CPU_CFG_OFFSET(ext_zve32f),
2495     .implied_misa_exts = RVF,
2496     .implied_multi_exts = {
2497         CPU_CFG_OFFSET(ext_zve32x),
2498 
2499         RISCV_IMPLIED_EXTS_RULE_END
2500     },
2501 };
2502 
2503 static RISCVCPUImpliedExtsRule ZVE32X_IMPLIED = {
2504     .ext = CPU_CFG_OFFSET(ext_zve32x),
2505     .implied_multi_exts = {
2506         CPU_CFG_OFFSET(ext_zicsr),
2507 
2508         RISCV_IMPLIED_EXTS_RULE_END
2509     },
2510 };
2511 
2512 static RISCVCPUImpliedExtsRule ZVE64D_IMPLIED = {
2513     .ext = CPU_CFG_OFFSET(ext_zve64d),
2514     .implied_misa_exts = RVD,
2515     .implied_multi_exts = {
2516         CPU_CFG_OFFSET(ext_zve64f),
2517 
2518         RISCV_IMPLIED_EXTS_RULE_END
2519     },
2520 };
2521 
2522 static RISCVCPUImpliedExtsRule ZVE64F_IMPLIED = {
2523     .ext = CPU_CFG_OFFSET(ext_zve64f),
2524     .implied_misa_exts = RVF,
2525     .implied_multi_exts = {
2526         CPU_CFG_OFFSET(ext_zve32f), CPU_CFG_OFFSET(ext_zve64x),
2527 
2528         RISCV_IMPLIED_EXTS_RULE_END
2529     },
2530 };
2531 
2532 static RISCVCPUImpliedExtsRule ZVE64X_IMPLIED = {
2533     .ext = CPU_CFG_OFFSET(ext_zve64x),
2534     .implied_multi_exts = {
2535         CPU_CFG_OFFSET(ext_zve32x),
2536 
2537         RISCV_IMPLIED_EXTS_RULE_END
2538     },
2539 };
2540 
2541 static RISCVCPUImpliedExtsRule ZVFBFMIN_IMPLIED = {
2542     .ext = CPU_CFG_OFFSET(ext_zvfbfmin),
2543     .implied_multi_exts = {
2544         CPU_CFG_OFFSET(ext_zve32f),
2545 
2546         RISCV_IMPLIED_EXTS_RULE_END
2547     },
2548 };
2549 
2550 static RISCVCPUImpliedExtsRule ZVFBFWMA_IMPLIED = {
2551     .ext = CPU_CFG_OFFSET(ext_zvfbfwma),
2552     .implied_multi_exts = {
2553         CPU_CFG_OFFSET(ext_zvfbfmin), CPU_CFG_OFFSET(ext_zfbfmin),
2554 
2555         RISCV_IMPLIED_EXTS_RULE_END
2556     },
2557 };
2558 
2559 static RISCVCPUImpliedExtsRule ZVFH_IMPLIED = {
2560     .ext = CPU_CFG_OFFSET(ext_zvfh),
2561     .implied_multi_exts = {
2562         CPU_CFG_OFFSET(ext_zvfhmin), CPU_CFG_OFFSET(ext_zfhmin),
2563 
2564         RISCV_IMPLIED_EXTS_RULE_END
2565     },
2566 };
2567 
2568 static RISCVCPUImpliedExtsRule ZVFHMIN_IMPLIED = {
2569     .ext = CPU_CFG_OFFSET(ext_zvfhmin),
2570     .implied_multi_exts = {
2571         CPU_CFG_OFFSET(ext_zve32f),
2572 
2573         RISCV_IMPLIED_EXTS_RULE_END
2574     },
2575 };
2576 
2577 static RISCVCPUImpliedExtsRule ZVKN_IMPLIED = {
2578     .ext = CPU_CFG_OFFSET(ext_zvkn),
2579     .implied_multi_exts = {
2580         CPU_CFG_OFFSET(ext_zvkned), CPU_CFG_OFFSET(ext_zvknhb),
2581         CPU_CFG_OFFSET(ext_zvkb), CPU_CFG_OFFSET(ext_zvkt),
2582 
2583         RISCV_IMPLIED_EXTS_RULE_END
2584     },
2585 };
2586 
2587 static RISCVCPUImpliedExtsRule ZVKNC_IMPLIED = {
2588     .ext = CPU_CFG_OFFSET(ext_zvknc),
2589     .implied_multi_exts = {
2590         CPU_CFG_OFFSET(ext_zvkn), CPU_CFG_OFFSET(ext_zvbc),
2591 
2592         RISCV_IMPLIED_EXTS_RULE_END
2593     },
2594 };
2595 
2596 static RISCVCPUImpliedExtsRule ZVKNG_IMPLIED = {
2597     .ext = CPU_CFG_OFFSET(ext_zvkng),
2598     .implied_multi_exts = {
2599         CPU_CFG_OFFSET(ext_zvkn), CPU_CFG_OFFSET(ext_zvkg),
2600 
2601         RISCV_IMPLIED_EXTS_RULE_END
2602     },
2603 };
2604 
2605 static RISCVCPUImpliedExtsRule ZVKNHB_IMPLIED = {
2606     .ext = CPU_CFG_OFFSET(ext_zvknhb),
2607     .implied_multi_exts = {
2608         CPU_CFG_OFFSET(ext_zve64x),
2609 
2610         RISCV_IMPLIED_EXTS_RULE_END
2611     },
2612 };
2613 
2614 static RISCVCPUImpliedExtsRule ZVKS_IMPLIED = {
2615     .ext = CPU_CFG_OFFSET(ext_zvks),
2616     .implied_multi_exts = {
2617         CPU_CFG_OFFSET(ext_zvksed), CPU_CFG_OFFSET(ext_zvksh),
2618         CPU_CFG_OFFSET(ext_zvkb), CPU_CFG_OFFSET(ext_zvkt),
2619 
2620         RISCV_IMPLIED_EXTS_RULE_END
2621     },
2622 };
2623 
2624 static RISCVCPUImpliedExtsRule ZVKSC_IMPLIED = {
2625     .ext = CPU_CFG_OFFSET(ext_zvksc),
2626     .implied_multi_exts = {
2627         CPU_CFG_OFFSET(ext_zvks), CPU_CFG_OFFSET(ext_zvbc),
2628 
2629         RISCV_IMPLIED_EXTS_RULE_END
2630     },
2631 };
2632 
2633 static RISCVCPUImpliedExtsRule ZVKSG_IMPLIED = {
2634     .ext = CPU_CFG_OFFSET(ext_zvksg),
2635     .implied_multi_exts = {
2636         CPU_CFG_OFFSET(ext_zvks), CPU_CFG_OFFSET(ext_zvkg),
2637 
2638         RISCV_IMPLIED_EXTS_RULE_END
2639     },
2640 };
2641 
2642 RISCVCPUImpliedExtsRule *riscv_misa_ext_implied_rules[] = {
2643     &RVA_IMPLIED, &RVD_IMPLIED, &RVF_IMPLIED,
2644     &RVM_IMPLIED, &RVV_IMPLIED, NULL
2645 };
2646 
2647 RISCVCPUImpliedExtsRule *riscv_multi_ext_implied_rules[] = {
2648     &ZCB_IMPLIED, &ZCD_IMPLIED, &ZCE_IMPLIED,
2649     &ZCF_IMPLIED, &ZCMP_IMPLIED, &ZCMT_IMPLIED,
2650     &ZDINX_IMPLIED, &ZFA_IMPLIED, &ZFBFMIN_IMPLIED,
2651     &ZFH_IMPLIED, &ZFHMIN_IMPLIED, &ZFINX_IMPLIED,
2652     &ZHINX_IMPLIED, &ZHINXMIN_IMPLIED, &ZICNTR_IMPLIED,
2653     &ZIHPM_IMPLIED, &ZK_IMPLIED, &ZKN_IMPLIED,
2654     &ZKS_IMPLIED, &ZVBB_IMPLIED, &ZVE32F_IMPLIED,
2655     &ZVE32X_IMPLIED, &ZVE64D_IMPLIED, &ZVE64F_IMPLIED,
2656     &ZVE64X_IMPLIED, &ZVFBFMIN_IMPLIED, &ZVFBFWMA_IMPLIED,
2657     &ZVFH_IMPLIED, &ZVFHMIN_IMPLIED, &ZVKN_IMPLIED,
2658     &ZVKNC_IMPLIED, &ZVKNG_IMPLIED, &ZVKNHB_IMPLIED,
2659     &ZVKS_IMPLIED,  &ZVKSC_IMPLIED, &ZVKSG_IMPLIED,
2660     NULL
2661 };
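/*
 * Worked example, not part of the upstream file: the implied-extension
 * rules are expected to be applied transitively, so a single umbrella
 * extension can fan out quite far.  Taking zvknc from the tables above:
 *
 *     zvknc  -> zvkn, zvbc
 *     zvkn   -> zvkned, zvknhb, zvkb, zvkt
 *     zvknhb -> zve64x
 *     zve64x -> zve32x
 *     zve32x -> zicsr
 *
 * so "-cpu rv64,zvknc=true" ends up enabling the whole chain.
 */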
2662 
2663 static Property riscv_cpu_properties[] = {
2664     DEFINE_PROP_BOOL("debug", RISCVCPU, cfg.debug, true),
2665 
2666     {.name = "pmu-mask", .info = &prop_pmu_mask},
2667     {.name = "pmu-num", .info = &prop_pmu_num}, /* Deprecated */
2668 
2669     {.name = "mmu", .info = &prop_mmu},
2670     {.name = "pmp", .info = &prop_pmp},
2671 
2672     {.name = "priv_spec", .info = &prop_priv_spec},
2673     {.name = "vext_spec", .info = &prop_vext_spec},
2674 
2675     {.name = "vlen", .info = &prop_vlen},
2676     {.name = "elen", .info = &prop_elen},
2677 
2678     {.name = "cbom_blocksize", .info = &prop_cbom_blksize},
2679     {.name = "cbop_blocksize", .info = &prop_cbop_blksize},
2680     {.name = "cboz_blocksize", .info = &prop_cboz_blksize},
2681 
2682     {.name = "mvendorid", .info = &prop_mvendorid},
2683     {.name = "mimpid", .info = &prop_mimpid},
2684     {.name = "marchid", .info = &prop_marchid},
2685 
2686 #ifndef CONFIG_USER_ONLY
2687     DEFINE_PROP_UINT64("resetvec", RISCVCPU, env.resetvec, DEFAULT_RSTVEC),
2688 #endif
2689 
2690     DEFINE_PROP_BOOL("short-isa-string", RISCVCPU, cfg.short_isa_string, false),
2691 
2692     DEFINE_PROP_BOOL("rvv_ta_all_1s", RISCVCPU, cfg.rvv_ta_all_1s, false),
2693     DEFINE_PROP_BOOL("rvv_ma_all_1s", RISCVCPU, cfg.rvv_ma_all_1s, false),
2694     DEFINE_PROP_BOOL("rvv_vl_half_avl", RISCVCPU, cfg.rvv_vl_half_avl, false),
2695 
2696     /*
2697      * write_misa() is marked as experimental for now so mark
2698      * it with 'x-' and default to 'false'.
2699      */
2700     DEFINE_PROP_BOOL("x-misa-w", RISCVCPU, cfg.misa_w, false),
2701     DEFINE_PROP_END_OF_LIST(),
2702 };
2703 
2704 #if defined(TARGET_RISCV64)
2705 static void rva22u64_profile_cpu_init(Object *obj)
2706 {
2707     rv64i_bare_cpu_init(obj);
2708 
2709     RVA22U64.enabled = true;
2710 }
2711 
2712 static void rva22s64_profile_cpu_init(Object *obj)
2713 {
2714     rv64i_bare_cpu_init(obj);
2715 
2716     RVA22S64.enabled = true;
2717 }
2718 #endif
2719 
2720 static const gchar *riscv_gdb_arch_name(CPUState *cs)
2721 {
2722     RISCVCPU *cpu = RISCV_CPU(cs);
2723     CPURISCVState *env = &cpu->env;
2724 
2725     switch (riscv_cpu_mxl(env)) {
2726     case MXL_RV32:
2727         return "riscv:rv32";
2728     case MXL_RV64:
2729     case MXL_RV128:
2730         return "riscv:rv64";
2731     default:
2732         g_assert_not_reached();
2733     }
2734 }
2735 
2736 #ifndef CONFIG_USER_ONLY
2737 static int64_t riscv_get_arch_id(CPUState *cs)
2738 {
2739     RISCVCPU *cpu = RISCV_CPU(cs);
2740 
2741     return cpu->env.mhartid;
2742 }
2743 
2744 #include "hw/core/sysemu-cpu-ops.h"
2745 
2746 static const struct SysemuCPUOps riscv_sysemu_ops = {
2747     .get_phys_page_debug = riscv_cpu_get_phys_page_debug,
2748     .write_elf64_note = riscv_cpu_write_elf64_note,
2749     .write_elf32_note = riscv_cpu_write_elf32_note,
2750     .legacy_vmsd = &vmstate_riscv_cpu,
2751 };
2752 #endif
2753 
2754 static void riscv_cpu_common_class_init(ObjectClass *c, void *data)
2755 {
2756     RISCVCPUClass *mcc = RISCV_CPU_CLASS(c);
2757     CPUClass *cc = CPU_CLASS(c);
2758     DeviceClass *dc = DEVICE_CLASS(c);
2759     ResettableClass *rc = RESETTABLE_CLASS(c);
2760 
2761     device_class_set_parent_realize(dc, riscv_cpu_realize,
2762                                     &mcc->parent_realize);
2763 
2764     resettable_class_set_parent_phases(rc, NULL, riscv_cpu_reset_hold, NULL,
2765                                        &mcc->parent_phases);
2766 
2767     cc->class_by_name = riscv_cpu_class_by_name;
2768     cc->has_work = riscv_cpu_has_work;
2769     cc->mmu_index = riscv_cpu_mmu_index;
2770     cc->dump_state = riscv_cpu_dump_state;
2771     cc->set_pc = riscv_cpu_set_pc;
2772     cc->get_pc = riscv_cpu_get_pc;
2773     cc->gdb_read_register = riscv_cpu_gdb_read_register;
2774     cc->gdb_write_register = riscv_cpu_gdb_write_register;
2775     cc->gdb_stop_before_watchpoint = true;
2776     cc->disas_set_info = riscv_cpu_disas_set_info;
2777 #ifndef CONFIG_USER_ONLY
2778     cc->sysemu_ops = &riscv_sysemu_ops;
2779     cc->get_arch_id = riscv_get_arch_id;
2780 #endif
2781     cc->gdb_arch_name = riscv_gdb_arch_name;
2782 
2783     device_class_set_props(dc, riscv_cpu_properties);
2784 }
2785 
2786 static void riscv_cpu_class_init(ObjectClass *c, void *data)
2787 {
2788     RISCVCPUClass *mcc = RISCV_CPU_CLASS(c);
2789 
2790     mcc->misa_mxl_max = (uint32_t)(uintptr_t)data;
2791     riscv_cpu_validate_misa_mxl(mcc);
2792 }
2793 
2794 static void riscv_isa_string_ext(RISCVCPU *cpu, char **isa_str,
2795                                  int max_str_len)
2796 {
2797     const RISCVIsaExtData *edata;
2798     char *old = *isa_str;
2799     char *new = *isa_str;
2800 
2801     for (edata = isa_edata_arr; edata && edata->name; edata++) {
2802         if (isa_ext_is_enabled(cpu, edata->ext_enable_offset)) {
2803             new = g_strconcat(old, "_", edata->name, NULL);
2804             g_free(old);
2805             old = new;
2806         }
2807     }
2808 
2809     *isa_str = new;
2810 }
2811 
2812 char *riscv_isa_string(RISCVCPU *cpu)
2813 {
2814     RISCVCPUClass *mcc = RISCV_CPU_GET_CLASS(cpu);
2815     int i;
2816     const size_t maxlen = sizeof("rv128") + sizeof(riscv_single_letter_exts);
2817     char *isa_str = g_new(char, maxlen);
2818     int xlen = riscv_cpu_max_xlen(mcc);
2819     char *p = isa_str + snprintf(isa_str, maxlen, "rv%d", xlen);
2820 
2821     for (i = 0; i < sizeof(riscv_single_letter_exts) - 1; i++) {
2822         if (cpu->env.misa_ext & RV(riscv_single_letter_exts[i])) {
2823             *p++ = qemu_tolower(riscv_single_letter_exts[i]);
2824         }
2825     }
2826     *p = '\0';
2827     if (!cpu->cfg.short_isa_string) {
2828         riscv_isa_string_ext(cpu, &isa_str, maxlen);
2829     }
2830     return isa_str;
2831 }
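/*
 * Illustrative example, not part of the upstream file: for an rv64 CPU
 * with MISA IMAFDC plus only Zicsr and Zifencei enabled,
 * riscv_isa_string() produces something like
 *
 *     "rv64imafdc_zicsr_zifencei"
 *
 * i.e. single-letter extensions in riscv_single_letter_exts[] order,
 * then every enabled multi-letter extension from isa_edata_arr[]
 * joined with '_'.  With cfg.short_isa_string set, only "rv64imafdc"
 * is emitted.
 */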
2832 
2833 #ifndef CONFIG_USER_ONLY
2834 static char **riscv_isa_extensions_list(RISCVCPU *cpu, int *count)
2835 {
2836     int maxlen = ARRAY_SIZE(riscv_single_letter_exts) + ARRAY_SIZE(isa_edata_arr);
2837     char **extensions = g_new(char *, maxlen);
2838 
2839     for (int i = 0; i < sizeof(riscv_single_letter_exts) - 1; i++) {
2840         if (cpu->env.misa_ext & RV(riscv_single_letter_exts[i])) {
2841             extensions[*count] = g_new(char, 2);
2842             snprintf(extensions[*count], 2, "%c",
2843                      qemu_tolower(riscv_single_letter_exts[i]));
2844             (*count)++;
2845         }
2846     }
2847 
2848     for (const RISCVIsaExtData *edata = isa_edata_arr; edata->name; edata++) {
2849         if (isa_ext_is_enabled(cpu, edata->ext_enable_offset)) {
2850             extensions[*count] = g_strdup(edata->name);
2851             (*count)++;
2852         }
2853     }
2854 
2855     return extensions;
2856 }
2857 
2858 void riscv_isa_write_fdt(RISCVCPU *cpu, void *fdt, char *nodename)
2859 {
2860     RISCVCPUClass *mcc = RISCV_CPU_GET_CLASS(cpu);
2861     const size_t maxlen = sizeof("rv128i");
2862     g_autofree char *isa_base = g_new(char, maxlen);
2863     g_autofree char *riscv_isa;
2864     char **isa_extensions;
2865     int count = 0;
2866     int xlen = riscv_cpu_max_xlen(mcc);
2867 
2868     riscv_isa = riscv_isa_string(cpu);
2869     qemu_fdt_setprop_string(fdt, nodename, "riscv,isa", riscv_isa);
2870 
2871     snprintf(isa_base, maxlen, "rv%di", xlen);
2872     qemu_fdt_setprop_string(fdt, nodename, "riscv,isa-base", isa_base);
2873 
2874     isa_extensions = riscv_isa_extensions_list(cpu, &count);
2875     qemu_fdt_setprop_string_array(fdt, nodename, "riscv,isa-extensions",
2876                                   isa_extensions, count);
2877 
2878     for (int i = 0; i < count; i++) {
2879         g_free(isa_extensions[i]);
2880     }
2881 
2882     g_free(isa_extensions);
2883 }
2884 #endif
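/*
 * Illustrative example, not part of the upstream file: for the same
 * rv64imafdc_zicsr_zifencei CPU, riscv_isa_write_fdt() would emit
 * roughly the following properties under the CPU's device tree node:
 *
 *     riscv,isa            = "rv64imafdc_zicsr_zifencei";
 *     riscv,isa-base       = "rv64i";
 *     riscv,isa-extensions = "i", "m", "a", "f", "d", "c",
 *                            "zicsr", "zifencei";
 */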
2885 
2886 #define DEFINE_CPU(type_name, misa_mxl_max, initfn)         \
2887     {                                                       \
2888         .name = (type_name),                                \
2889         .parent = TYPE_RISCV_CPU,                           \
2890         .instance_init = (initfn),                          \
2891         .class_init = riscv_cpu_class_init,                 \
2892         .class_data = (void *)(misa_mxl_max)                \
2893     }
2894 
2895 #define DEFINE_DYNAMIC_CPU(type_name, misa_mxl_max, initfn) \
2896     {                                                       \
2897         .name = (type_name),                                \
2898         .parent = TYPE_RISCV_DYNAMIC_CPU,                   \
2899         .instance_init = (initfn),                          \
2900         .class_init = riscv_cpu_class_init,                 \
2901         .class_data = (void *)(misa_mxl_max)                \
2902     }
2903 
2904 #define DEFINE_VENDOR_CPU(type_name, misa_mxl_max, initfn)  \
2905     {                                                       \
2906         .name = (type_name),                                \
2907         .parent = TYPE_RISCV_VENDOR_CPU,                    \
2908         .instance_init = (initfn),                          \
2909         .class_init = riscv_cpu_class_init,                 \
2910         .class_data = (void *)(misa_mxl_max)                \
2911     }
2912 
2913 #define DEFINE_BARE_CPU(type_name, misa_mxl_max, initfn)    \
2914     {                                                       \
2915         .name = (type_name),                                \
2916         .parent = TYPE_RISCV_BARE_CPU,                      \
2917         .instance_init = (initfn),                          \
2918         .class_init = riscv_cpu_class_init,                 \
2919         .class_data = (void *)(misa_mxl_max)                \
2920     }
2921 
2922 #define DEFINE_PROFILE_CPU(type_name, misa_mxl_max, initfn) \
2923     {                                                       \
2924         .name = (type_name),                                \
2925         .parent = TYPE_RISCV_BARE_CPU,                      \
2926         .instance_init = (initfn),                          \
2927         .class_init = riscv_cpu_class_init,                 \
2928         .class_data = (void *)(misa_mxl_max)                \
2929     }
2930 
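/*
 * Illustrative note, not part of the upstream file: the DEFINE_*_CPU()
 * macros above differ only in the QOM parent type, and that parent is
 * what the property setters earlier in this file key off: children of
 * TYPE_RISCV_VENDOR_CPU make riscv_cpu_is_vendor() true and therefore
 * reject changes to mmu/pmp/vlen/etc., while children of
 * TYPE_RISCV_DYNAMIC_CPU make riscv_cpu_is_dynamic() true and accept
 * overrides such as mvendorid/marchid.  Bare and profile CPUs share
 * TYPE_RISCV_BARE_CPU as their parent.
 */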
2931 static const TypeInfo riscv_cpu_type_infos[] = {
2932     {
2933         .name = TYPE_RISCV_CPU,
2934         .parent = TYPE_CPU,
2935         .instance_size = sizeof(RISCVCPU),
2936         .instance_align = __alignof(RISCVCPU),
2937         .instance_init = riscv_cpu_init,
2938         .instance_post_init = riscv_cpu_post_init,
2939         .abstract = true,
2940         .class_size = sizeof(RISCVCPUClass),
2941         .class_init = riscv_cpu_common_class_init,
2942     },
2943     {
2944         .name = TYPE_RISCV_DYNAMIC_CPU,
2945         .parent = TYPE_RISCV_CPU,
2946         .abstract = true,
2947     },
2948     {
2949         .name = TYPE_RISCV_VENDOR_CPU,
2950         .parent = TYPE_RISCV_CPU,
2951         .abstract = true,
2952     },
2953     {
2954         .name = TYPE_RISCV_BARE_CPU,
2955         .parent = TYPE_RISCV_CPU,
2956         .instance_init = riscv_bare_cpu_init,
2957         .abstract = true,
2958     },
2959 #if defined(TARGET_RISCV32)
2960     DEFINE_DYNAMIC_CPU(TYPE_RISCV_CPU_MAX,       MXL_RV32,  riscv_max_cpu_init),
2961 #elif defined(TARGET_RISCV64)
2962     DEFINE_DYNAMIC_CPU(TYPE_RISCV_CPU_MAX,       MXL_RV64,  riscv_max_cpu_init),
2963 #endif
2964 
2965 #if defined(TARGET_RISCV32) || \
2966     (defined(TARGET_RISCV64) && !defined(CONFIG_USER_ONLY))
2967     DEFINE_DYNAMIC_CPU(TYPE_RISCV_CPU_BASE32,    MXL_RV32,  rv32_base_cpu_init),
2968     DEFINE_VENDOR_CPU(TYPE_RISCV_CPU_IBEX,       MXL_RV32,  rv32_ibex_cpu_init),
2969     DEFINE_VENDOR_CPU(TYPE_RISCV_CPU_SIFIVE_E31, MXL_RV32,  rv32_sifive_e_cpu_init),
2970     DEFINE_VENDOR_CPU(TYPE_RISCV_CPU_SIFIVE_E34, MXL_RV32,  rv32_imafcu_nommu_cpu_init),
2971     DEFINE_VENDOR_CPU(TYPE_RISCV_CPU_SIFIVE_U34, MXL_RV32,  rv32_sifive_u_cpu_init),
2972     DEFINE_BARE_CPU(TYPE_RISCV_CPU_RV32I,        MXL_RV32,  rv32i_bare_cpu_init),
2973     DEFINE_BARE_CPU(TYPE_RISCV_CPU_RV32E,        MXL_RV32,  rv32e_bare_cpu_init),
2974 #endif
2975 
2976 #if (defined(TARGET_RISCV64) && !defined(CONFIG_USER_ONLY))
2977     DEFINE_DYNAMIC_CPU(TYPE_RISCV_CPU_MAX32,     MXL_RV32,  riscv_max_cpu_init),
2978 #endif
2979 
2980 #if defined(TARGET_RISCV64)
2981     DEFINE_DYNAMIC_CPU(TYPE_RISCV_CPU_BASE64,    MXL_RV64,  rv64_base_cpu_init),
2982     DEFINE_VENDOR_CPU(TYPE_RISCV_CPU_SIFIVE_E51, MXL_RV64,  rv64_sifive_e_cpu_init),
2983     DEFINE_VENDOR_CPU(TYPE_RISCV_CPU_SIFIVE_U54, MXL_RV64,  rv64_sifive_u_cpu_init),
2984     DEFINE_VENDOR_CPU(TYPE_RISCV_CPU_SHAKTI_C,   MXL_RV64,  rv64_sifive_u_cpu_init),
2985     DEFINE_VENDOR_CPU(TYPE_RISCV_CPU_THEAD_C906, MXL_RV64,  rv64_thead_c906_cpu_init),
2986     DEFINE_VENDOR_CPU(TYPE_RISCV_CPU_VEYRON_V1,  MXL_RV64,  rv64_veyron_v1_cpu_init),
2987 #ifdef CONFIG_TCG
2988     DEFINE_DYNAMIC_CPU(TYPE_RISCV_CPU_BASE128,   MXL_RV128, rv128_base_cpu_init),
2989 #endif /* CONFIG_TCG */
2990     DEFINE_BARE_CPU(TYPE_RISCV_CPU_RV64I,        MXL_RV64,  rv64i_bare_cpu_init),
2991     DEFINE_BARE_CPU(TYPE_RISCV_CPU_RV64E,        MXL_RV64,  rv64e_bare_cpu_init),
2992     DEFINE_PROFILE_CPU(TYPE_RISCV_CPU_RVA22U64,  MXL_RV64,  rva22u64_profile_cpu_init),
2993     DEFINE_PROFILE_CPU(TYPE_RISCV_CPU_RVA22S64,  MXL_RV64,  rva22s64_profile_cpu_init),
2994 #endif /* TARGET_RISCV64 */
2995 };
2996 
2997 DEFINE_TYPES(riscv_cpu_type_infos)
2998