1 /*
2 * QEMU RISC-V CPU
3 *
4 * Copyright (c) 2016-2017 Sagar Karandikar, sagark@eecs.berkeley.edu
5 * Copyright (c) 2017-2018 SiFive, Inc.
6 *
7 * This program is free software; you can redistribute it and/or modify it
8 * under the terms and conditions of the GNU General Public License,
9 * version 2 or later, as published by the Free Software Foundation.
10 *
11 * This program is distributed in the hope it will be useful, but WITHOUT
12 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
13 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
14 * more details.
15 *
16 * You should have received a copy of the GNU General Public License along with
17 * this program. If not, see <http://www.gnu.org/licenses/>.
18 */
19
20 #include "qemu/osdep.h"
21 #include "qemu/qemu-print.h"
22 #include "qemu/ctype.h"
23 #include "qemu/log.h"
24 #include "cpu.h"
25 #include "cpu_vendorid.h"
26 #include "internals.h"
27 #include "exec/exec-all.h"
28 #include "qapi/error.h"
29 #include "qapi/visitor.h"
30 #include "qemu/error-report.h"
31 #include "hw/qdev-properties.h"
32 #include "hw/core/qdev-prop-internal.h"
33 #include "migration/vmstate.h"
34 #include "fpu/softfloat-helpers.h"
35 #include "sysemu/device_tree.h"
36 #include "sysemu/kvm.h"
37 #include "sysemu/tcg.h"
38 #include "kvm/kvm_riscv.h"
39 #include "tcg/tcg-cpu.h"
40 #include "tcg/tcg.h"
41
42 /* RISC-V CPU definitions */
43 static const char riscv_single_letter_exts[] = "IEMAFDQCBPVH";
44 const uint32_t misa_bits[] = {RVI, RVE, RVM, RVA, RVF, RVD, RVV,
45 RVC, RVS, RVU, RVH, RVJ, RVG, RVB, 0};
46
47 /*
48 * From vector_helper.c
49 * Note that vector data is stored in host-endian 64-bit chunks,
50 * so addressing bytes needs a host-endian fixup.
51 */
52 #if HOST_BIG_ENDIAN
53 #define BYTE(x) ((x) ^ 7)
54 #else
55 #define BYTE(x) (x)
56 #endif
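/*
 * Illustrative note (not part of the original source): with the macro
 * above, byte 0 of a vector element maps to host offset 7 of its 64-bit
 * chunk on a big-endian host (BYTE(0) == 0 ^ 7 == 7, BYTE(8) == 15),
 * while on a little-endian host BYTE() is the identity.
 */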
57
58 bool riscv_cpu_is_32bit(RISCVCPU *cpu)
59 {
60 return riscv_cpu_mxl(&cpu->env) == MXL_RV32;
61 }
62
63 /* Hash that stores general user set numeric options */
64 static GHashTable *general_user_opts;
65
66 static void cpu_option_add_user_setting(const char *optname, uint32_t value)
67 {
68 g_hash_table_insert(general_user_opts, (gpointer)optname,
69 GUINT_TO_POINTER(value));
70 }
71
72 bool riscv_cpu_option_set(const char *optname)
73 {
74 return g_hash_table_contains(general_user_opts, optname);
75 }
76
77 #define ISA_EXT_DATA_ENTRY(_name, _min_ver, _prop) \
78 {#_name, _min_ver, CPU_CFG_OFFSET(_prop)}
79
80 /*
81  * Here are the ordering rules of extension naming defined by the RISC-V
82  * specification:
83 * 1. All extensions should be separated from other multi-letter extensions
84 * by an underscore.
85 * 2. The first letter following the 'Z' conventionally indicates the most
86 * closely related alphabetical extension category, IMAFDQLCBKJTPVH.
87 * If multiple 'Z' extensions are named, they should be ordered first
88 * by category, then alphabetically within a category.
89  * 3. Standard supervisor-level extensions (starting with 'S') should be
90 * listed after standard unprivileged extensions. If multiple
91 * supervisor-level extensions are listed, they should be ordered
92 * alphabetically.
93  * 4. Non-standard extensions (starting with 'X') must be listed after all
94 * standard extensions. They must be separated from other multi-letter
95 * extensions by an underscore.
96 *
97 * Single letter extensions are checked in riscv_cpu_validate_misa_priv()
98 * instead.
99 */
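/*
 * For illustration only (a hypothetical string, not produced by this file):
 * a riscv,isa string following the rules above could look like
 *   rv64imafdc_zicsr_zifencei_zba_zbb_sscofpmf_svinval_xtheadba
 * i.e. single-letter extensions first, then Z* extensions grouped by
 * category and sorted alphabetically within a category, then S* extensions
 * alphabetically, then X* vendor extensions.
 */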
100 const RISCVIsaExtData isa_edata_arr[] = {
101 ISA_EXT_DATA_ENTRY(zic64b, PRIV_VERSION_1_12_0, ext_zic64b),
102 ISA_EXT_DATA_ENTRY(zicbom, PRIV_VERSION_1_12_0, ext_zicbom),
103 ISA_EXT_DATA_ENTRY(zicbop, PRIV_VERSION_1_12_0, ext_zicbop),
104 ISA_EXT_DATA_ENTRY(zicboz, PRIV_VERSION_1_12_0, ext_zicboz),
105 ISA_EXT_DATA_ENTRY(ziccamoa, PRIV_VERSION_1_11_0, has_priv_1_11),
106 ISA_EXT_DATA_ENTRY(ziccif, PRIV_VERSION_1_11_0, has_priv_1_11),
107 ISA_EXT_DATA_ENTRY(zicclsm, PRIV_VERSION_1_11_0, has_priv_1_11),
108 ISA_EXT_DATA_ENTRY(ziccrse, PRIV_VERSION_1_11_0, has_priv_1_11),
109 ISA_EXT_DATA_ENTRY(zicond, PRIV_VERSION_1_12_0, ext_zicond),
110 ISA_EXT_DATA_ENTRY(zicntr, PRIV_VERSION_1_12_0, ext_zicntr),
111 ISA_EXT_DATA_ENTRY(zicsr, PRIV_VERSION_1_10_0, ext_zicsr),
112 ISA_EXT_DATA_ENTRY(zifencei, PRIV_VERSION_1_10_0, ext_zifencei),
113 ISA_EXT_DATA_ENTRY(zihintntl, PRIV_VERSION_1_10_0, ext_zihintntl),
114 ISA_EXT_DATA_ENTRY(zihintpause, PRIV_VERSION_1_10_0, ext_zihintpause),
115 ISA_EXT_DATA_ENTRY(zihpm, PRIV_VERSION_1_12_0, ext_zihpm),
116 ISA_EXT_DATA_ENTRY(zimop, PRIV_VERSION_1_13_0, ext_zimop),
117 ISA_EXT_DATA_ENTRY(zmmul, PRIV_VERSION_1_12_0, ext_zmmul),
118 ISA_EXT_DATA_ENTRY(za64rs, PRIV_VERSION_1_12_0, has_priv_1_11),
119 ISA_EXT_DATA_ENTRY(zaamo, PRIV_VERSION_1_12_0, ext_zaamo),
120 ISA_EXT_DATA_ENTRY(zabha, PRIV_VERSION_1_13_0, ext_zabha),
121 ISA_EXT_DATA_ENTRY(zacas, PRIV_VERSION_1_12_0, ext_zacas),
122 ISA_EXT_DATA_ENTRY(zama16b, PRIV_VERSION_1_13_0, ext_zama16b),
123 ISA_EXT_DATA_ENTRY(zalrsc, PRIV_VERSION_1_12_0, ext_zalrsc),
124 ISA_EXT_DATA_ENTRY(zawrs, PRIV_VERSION_1_12_0, ext_zawrs),
125 ISA_EXT_DATA_ENTRY(zfa, PRIV_VERSION_1_12_0, ext_zfa),
126 ISA_EXT_DATA_ENTRY(zfbfmin, PRIV_VERSION_1_12_0, ext_zfbfmin),
127 ISA_EXT_DATA_ENTRY(zfh, PRIV_VERSION_1_11_0, ext_zfh),
128 ISA_EXT_DATA_ENTRY(zfhmin, PRIV_VERSION_1_11_0, ext_zfhmin),
129 ISA_EXT_DATA_ENTRY(zfinx, PRIV_VERSION_1_12_0, ext_zfinx),
130 ISA_EXT_DATA_ENTRY(zdinx, PRIV_VERSION_1_12_0, ext_zdinx),
131 ISA_EXT_DATA_ENTRY(zca, PRIV_VERSION_1_12_0, ext_zca),
132 ISA_EXT_DATA_ENTRY(zcb, PRIV_VERSION_1_12_0, ext_zcb),
133 ISA_EXT_DATA_ENTRY(zcf, PRIV_VERSION_1_12_0, ext_zcf),
134 ISA_EXT_DATA_ENTRY(zcd, PRIV_VERSION_1_12_0, ext_zcd),
135 ISA_EXT_DATA_ENTRY(zce, PRIV_VERSION_1_12_0, ext_zce),
136 ISA_EXT_DATA_ENTRY(zcmop, PRIV_VERSION_1_13_0, ext_zcmop),
137 ISA_EXT_DATA_ENTRY(zcmp, PRIV_VERSION_1_12_0, ext_zcmp),
138 ISA_EXT_DATA_ENTRY(zcmt, PRIV_VERSION_1_12_0, ext_zcmt),
139 ISA_EXT_DATA_ENTRY(zba, PRIV_VERSION_1_12_0, ext_zba),
140 ISA_EXT_DATA_ENTRY(zbb, PRIV_VERSION_1_12_0, ext_zbb),
141 ISA_EXT_DATA_ENTRY(zbc, PRIV_VERSION_1_12_0, ext_zbc),
142 ISA_EXT_DATA_ENTRY(zbkb, PRIV_VERSION_1_12_0, ext_zbkb),
143 ISA_EXT_DATA_ENTRY(zbkc, PRIV_VERSION_1_12_0, ext_zbkc),
144 ISA_EXT_DATA_ENTRY(zbkx, PRIV_VERSION_1_12_0, ext_zbkx),
145 ISA_EXT_DATA_ENTRY(zbs, PRIV_VERSION_1_12_0, ext_zbs),
146 ISA_EXT_DATA_ENTRY(zk, PRIV_VERSION_1_12_0, ext_zk),
147 ISA_EXT_DATA_ENTRY(zkn, PRIV_VERSION_1_12_0, ext_zkn),
148 ISA_EXT_DATA_ENTRY(zknd, PRIV_VERSION_1_12_0, ext_zknd),
149 ISA_EXT_DATA_ENTRY(zkne, PRIV_VERSION_1_12_0, ext_zkne),
150 ISA_EXT_DATA_ENTRY(zknh, PRIV_VERSION_1_12_0, ext_zknh),
151 ISA_EXT_DATA_ENTRY(zkr, PRIV_VERSION_1_12_0, ext_zkr),
152 ISA_EXT_DATA_ENTRY(zks, PRIV_VERSION_1_12_0, ext_zks),
153 ISA_EXT_DATA_ENTRY(zksed, PRIV_VERSION_1_12_0, ext_zksed),
154 ISA_EXT_DATA_ENTRY(zksh, PRIV_VERSION_1_12_0, ext_zksh),
155 ISA_EXT_DATA_ENTRY(zkt, PRIV_VERSION_1_12_0, ext_zkt),
156 ISA_EXT_DATA_ENTRY(ztso, PRIV_VERSION_1_12_0, ext_ztso),
157 ISA_EXT_DATA_ENTRY(zvbb, PRIV_VERSION_1_12_0, ext_zvbb),
158 ISA_EXT_DATA_ENTRY(zvbc, PRIV_VERSION_1_12_0, ext_zvbc),
159 ISA_EXT_DATA_ENTRY(zve32f, PRIV_VERSION_1_10_0, ext_zve32f),
160 ISA_EXT_DATA_ENTRY(zve32x, PRIV_VERSION_1_10_0, ext_zve32x),
161 ISA_EXT_DATA_ENTRY(zve64f, PRIV_VERSION_1_10_0, ext_zve64f),
162 ISA_EXT_DATA_ENTRY(zve64d, PRIV_VERSION_1_10_0, ext_zve64d),
163 ISA_EXT_DATA_ENTRY(zve64x, PRIV_VERSION_1_10_0, ext_zve64x),
164 ISA_EXT_DATA_ENTRY(zvfbfmin, PRIV_VERSION_1_12_0, ext_zvfbfmin),
165 ISA_EXT_DATA_ENTRY(zvfbfwma, PRIV_VERSION_1_12_0, ext_zvfbfwma),
166 ISA_EXT_DATA_ENTRY(zvfh, PRIV_VERSION_1_12_0, ext_zvfh),
167 ISA_EXT_DATA_ENTRY(zvfhmin, PRIV_VERSION_1_12_0, ext_zvfhmin),
168 ISA_EXT_DATA_ENTRY(zvkb, PRIV_VERSION_1_12_0, ext_zvkb),
169 ISA_EXT_DATA_ENTRY(zvkg, PRIV_VERSION_1_12_0, ext_zvkg),
170 ISA_EXT_DATA_ENTRY(zvkn, PRIV_VERSION_1_12_0, ext_zvkn),
171 ISA_EXT_DATA_ENTRY(zvknc, PRIV_VERSION_1_12_0, ext_zvknc),
172 ISA_EXT_DATA_ENTRY(zvkned, PRIV_VERSION_1_12_0, ext_zvkned),
173 ISA_EXT_DATA_ENTRY(zvkng, PRIV_VERSION_1_12_0, ext_zvkng),
174 ISA_EXT_DATA_ENTRY(zvknha, PRIV_VERSION_1_12_0, ext_zvknha),
175 ISA_EXT_DATA_ENTRY(zvknhb, PRIV_VERSION_1_12_0, ext_zvknhb),
176 ISA_EXT_DATA_ENTRY(zvks, PRIV_VERSION_1_12_0, ext_zvks),
177 ISA_EXT_DATA_ENTRY(zvksc, PRIV_VERSION_1_12_0, ext_zvksc),
178 ISA_EXT_DATA_ENTRY(zvksed, PRIV_VERSION_1_12_0, ext_zvksed),
179 ISA_EXT_DATA_ENTRY(zvksg, PRIV_VERSION_1_12_0, ext_zvksg),
180 ISA_EXT_DATA_ENTRY(zvksh, PRIV_VERSION_1_12_0, ext_zvksh),
181 ISA_EXT_DATA_ENTRY(zvkt, PRIV_VERSION_1_12_0, ext_zvkt),
182 ISA_EXT_DATA_ENTRY(zhinx, PRIV_VERSION_1_12_0, ext_zhinx),
183 ISA_EXT_DATA_ENTRY(zhinxmin, PRIV_VERSION_1_12_0, ext_zhinxmin),
184 ISA_EXT_DATA_ENTRY(smaia, PRIV_VERSION_1_12_0, ext_smaia),
185 ISA_EXT_DATA_ENTRY(smcntrpmf, PRIV_VERSION_1_12_0, ext_smcntrpmf),
186 ISA_EXT_DATA_ENTRY(smepmp, PRIV_VERSION_1_12_0, ext_smepmp),
187 ISA_EXT_DATA_ENTRY(smstateen, PRIV_VERSION_1_12_0, ext_smstateen),
188 ISA_EXT_DATA_ENTRY(ssaia, PRIV_VERSION_1_12_0, ext_ssaia),
189 ISA_EXT_DATA_ENTRY(ssccptr, PRIV_VERSION_1_11_0, has_priv_1_11),
190 ISA_EXT_DATA_ENTRY(sscofpmf, PRIV_VERSION_1_12_0, ext_sscofpmf),
191 ISA_EXT_DATA_ENTRY(sscounterenw, PRIV_VERSION_1_12_0, has_priv_1_12),
192 ISA_EXT_DATA_ENTRY(sstc, PRIV_VERSION_1_12_0, ext_sstc),
193 ISA_EXT_DATA_ENTRY(sstvala, PRIV_VERSION_1_12_0, has_priv_1_12),
194 ISA_EXT_DATA_ENTRY(sstvecd, PRIV_VERSION_1_12_0, has_priv_1_12),
195 ISA_EXT_DATA_ENTRY(svade, PRIV_VERSION_1_11_0, ext_svade),
196 ISA_EXT_DATA_ENTRY(svadu, PRIV_VERSION_1_12_0, ext_svadu),
197 ISA_EXT_DATA_ENTRY(svinval, PRIV_VERSION_1_12_0, ext_svinval),
198 ISA_EXT_DATA_ENTRY(svnapot, PRIV_VERSION_1_12_0, ext_svnapot),
199 ISA_EXT_DATA_ENTRY(svpbmt, PRIV_VERSION_1_12_0, ext_svpbmt),
200 ISA_EXT_DATA_ENTRY(xtheadba, PRIV_VERSION_1_11_0, ext_xtheadba),
201 ISA_EXT_DATA_ENTRY(xtheadbb, PRIV_VERSION_1_11_0, ext_xtheadbb),
202 ISA_EXT_DATA_ENTRY(xtheadbs, PRIV_VERSION_1_11_0, ext_xtheadbs),
203 ISA_EXT_DATA_ENTRY(xtheadcmo, PRIV_VERSION_1_11_0, ext_xtheadcmo),
204 ISA_EXT_DATA_ENTRY(xtheadcondmov, PRIV_VERSION_1_11_0, ext_xtheadcondmov),
205 ISA_EXT_DATA_ENTRY(xtheadfmemidx, PRIV_VERSION_1_11_0, ext_xtheadfmemidx),
206 ISA_EXT_DATA_ENTRY(xtheadfmv, PRIV_VERSION_1_11_0, ext_xtheadfmv),
207 ISA_EXT_DATA_ENTRY(xtheadmac, PRIV_VERSION_1_11_0, ext_xtheadmac),
208 ISA_EXT_DATA_ENTRY(xtheadmemidx, PRIV_VERSION_1_11_0, ext_xtheadmemidx),
209 ISA_EXT_DATA_ENTRY(xtheadmempair, PRIV_VERSION_1_11_0, ext_xtheadmempair),
210 ISA_EXT_DATA_ENTRY(xtheadsync, PRIV_VERSION_1_11_0, ext_xtheadsync),
211 ISA_EXT_DATA_ENTRY(xventanacondops, PRIV_VERSION_1_12_0, ext_XVentanaCondOps),
212
213 DEFINE_PROP_END_OF_LIST(),
214 };
215
216 bool isa_ext_is_enabled(RISCVCPU *cpu, uint32_t ext_offset)
217 {
218 bool *ext_enabled = (void *)&cpu->cfg + ext_offset;
219
220 return *ext_enabled;
221 }
222
223 void isa_ext_update_enabled(RISCVCPU *cpu, uint32_t ext_offset, bool en)
224 {
225 bool *ext_enabled = (void *)&cpu->cfg + ext_offset;
226
227 *ext_enabled = en;
228 }
229
230 bool riscv_cpu_is_vendor(Object *cpu_obj)
231 {
232 return object_dynamic_cast(cpu_obj, TYPE_RISCV_VENDOR_CPU) != NULL;
233 }
234
235 const char * const riscv_int_regnames[] = {
236 "x0/zero", "x1/ra", "x2/sp", "x3/gp", "x4/tp", "x5/t0", "x6/t1",
237 "x7/t2", "x8/s0", "x9/s1", "x10/a0", "x11/a1", "x12/a2", "x13/a3",
238 "x14/a4", "x15/a5", "x16/a6", "x17/a7", "x18/s2", "x19/s3", "x20/s4",
239 "x21/s5", "x22/s6", "x23/s7", "x24/s8", "x25/s9", "x26/s10", "x27/s11",
240 "x28/t3", "x29/t4", "x30/t5", "x31/t6"
241 };
242
243 const char * const riscv_int_regnamesh[] = {
244 "x0h/zeroh", "x1h/rah", "x2h/sph", "x3h/gph", "x4h/tph", "x5h/t0h",
245 "x6h/t1h", "x7h/t2h", "x8h/s0h", "x9h/s1h", "x10h/a0h", "x11h/a1h",
246 "x12h/a2h", "x13h/a3h", "x14h/a4h", "x15h/a5h", "x16h/a6h", "x17h/a7h",
247 "x18h/s2h", "x19h/s3h", "x20h/s4h", "x21h/s5h", "x22h/s6h", "x23h/s7h",
248 "x24h/s8h", "x25h/s9h", "x26h/s10h", "x27h/s11h", "x28h/t3h", "x29h/t4h",
249 "x30h/t5h", "x31h/t6h"
250 };
251
252 const char * const riscv_fpr_regnames[] = {
253 "f0/ft0", "f1/ft1", "f2/ft2", "f3/ft3", "f4/ft4", "f5/ft5",
254 "f6/ft6", "f7/ft7", "f8/fs0", "f9/fs1", "f10/fa0", "f11/fa1",
255 "f12/fa2", "f13/fa3", "f14/fa4", "f15/fa5", "f16/fa6", "f17/fa7",
256 "f18/fs2", "f19/fs3", "f20/fs4", "f21/fs5", "f22/fs6", "f23/fs7",
257 "f24/fs8", "f25/fs9", "f26/fs10", "f27/fs11", "f28/ft8", "f29/ft9",
258 "f30/ft10", "f31/ft11"
259 };
260
261 const char * const riscv_rvv_regnames[] = {
262 "v0", "v1", "v2", "v3", "v4", "v5", "v6",
263 "v7", "v8", "v9", "v10", "v11", "v12", "v13",
264 "v14", "v15", "v16", "v17", "v18", "v19", "v20",
265 "v21", "v22", "v23", "v24", "v25", "v26", "v27",
266 "v28", "v29", "v30", "v31"
267 };
268
269 static const char * const riscv_excp_names[] = {
270 "misaligned_fetch",
271 "fault_fetch",
272 "illegal_instruction",
273 "breakpoint",
274 "misaligned_load",
275 "fault_load",
276 "misaligned_store",
277 "fault_store",
278 "user_ecall",
279 "supervisor_ecall",
280 "hypervisor_ecall",
281 "machine_ecall",
282 "exec_page_fault",
283 "load_page_fault",
284 "reserved",
285 "store_page_fault",
286 "reserved",
287 "reserved",
288 "reserved",
289 "reserved",
290 "guest_exec_page_fault",
291 "guest_load_page_fault",
292 "reserved",
293 "guest_store_page_fault",
294 };
295
296 static const char * const riscv_intr_names[] = {
297 "u_software",
298 "s_software",
299 "vs_software",
300 "m_software",
301 "u_timer",
302 "s_timer",
303 "vs_timer",
304 "m_timer",
305 "u_external",
306 "s_external",
307 "vs_external",
308 "m_external",
309 "reserved",
310 "reserved",
311 "reserved",
312 "reserved"
313 };
314
315 const char *riscv_cpu_get_trap_name(target_ulong cause, bool async)
316 {
317 if (async) {
318 return (cause < ARRAY_SIZE(riscv_intr_names)) ?
319 riscv_intr_names[cause] : "(unknown)";
320 } else {
321 return (cause < ARRAY_SIZE(riscv_excp_names)) ?
322 riscv_excp_names[cause] : "(unknown)";
323 }
324 }
325
326 void riscv_cpu_set_misa_ext(CPURISCVState *env, uint32_t ext)
327 {
328 env->misa_ext_mask = env->misa_ext = ext;
329 }
330
331 int riscv_cpu_max_xlen(RISCVCPUClass *mcc)
332 {
333 return 16 << mcc->misa_mxl_max;
334 }
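/*
 * Example (assuming the standard misa.MXL encoding where RV32 = 1,
 * RV64 = 2 and RV128 = 3): 16 << misa_mxl_max yields 32, 64 and 128 bits
 * respectively.
 */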
335
336 #ifndef CONFIG_USER_ONLY
337 static uint8_t satp_mode_from_str(const char *satp_mode_str)
338 {
339 if (!strncmp(satp_mode_str, "mbare", 5)) {
340 return VM_1_10_MBARE;
341 }
342
343 if (!strncmp(satp_mode_str, "sv32", 4)) {
344 return VM_1_10_SV32;
345 }
346
347 if (!strncmp(satp_mode_str, "sv39", 4)) {
348 return VM_1_10_SV39;
349 }
350
351 if (!strncmp(satp_mode_str, "sv48", 4)) {
352 return VM_1_10_SV48;
353 }
354
355 if (!strncmp(satp_mode_str, "sv57", 4)) {
356 return VM_1_10_SV57;
357 }
358
359 if (!strncmp(satp_mode_str, "sv64", 4)) {
360 return VM_1_10_SV64;
361 }
362
363 g_assert_not_reached();
364 }
365
366 uint8_t satp_mode_max_from_map(uint32_t map)
367 {
368 /*
369 * 'map = 0' will make us return (31 - 32), which C will
370 * happily overflow to UINT_MAX. There's no good result to
371 * return if 'map = 0' (e.g. returning 0 will be ambiguous
372 * with the result for 'map = 1').
373 *
374 * Assert out if map = 0. Callers will have to deal with
375 * it outside of this function.
376 */
377 g_assert(map > 0);
378
379 /* map here has at least one bit set, so no problem with clz */
380 return 31 - __builtin_clz(map);
381 }
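/*
 * Worked example (assuming the privileged-spec satp encodings, e.g.
 * VM_1_10_SV39 == 8 and VM_1_10_SV48 == 9): a map with only those two
 * bits set is 0x300, so 31 - __builtin_clz(0x300) == 9, i.e. Sv48.
 */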
382
383 const char *satp_mode_str(uint8_t satp_mode, bool is_32_bit)
384 {
385 if (is_32_bit) {
386 switch (satp_mode) {
387 case VM_1_10_SV32:
388 return "sv32";
389 case VM_1_10_MBARE:
390 return "none";
391 }
392 } else {
393 switch (satp_mode) {
394 case VM_1_10_SV64:
395 return "sv64";
396 case VM_1_10_SV57:
397 return "sv57";
398 case VM_1_10_SV48:
399 return "sv48";
400 case VM_1_10_SV39:
401 return "sv39";
402 case VM_1_10_MBARE:
403 return "none";
404 }
405 }
406
407 g_assert_not_reached();
408 }
409
410 static void set_satp_mode_max_supported(RISCVCPU *cpu,
411 uint8_t satp_mode)
412 {
413 bool rv32 = riscv_cpu_mxl(&cpu->env) == MXL_RV32;
414 const bool *valid_vm = rv32 ? valid_vm_1_10_32 : valid_vm_1_10_64;
415
416 for (int i = 0; i <= satp_mode; ++i) {
417 if (valid_vm[i]) {
418 cpu->cfg.satp_mode.supported |= (1 << i);
419 }
420 }
421 }
422
423 /* Set the satp mode to the max supported */
424 static void set_satp_mode_default_map(RISCVCPU *cpu)
425 {
426 /*
427 * Bare CPUs do not default to the max available.
428 * Users must set a valid satp_mode in the command
429 * line.
430 */
431 if (object_dynamic_cast(OBJECT(cpu), TYPE_RISCV_BARE_CPU) != NULL) {
432 warn_report("No satp mode set. Defaulting to 'bare'");
433 cpu->cfg.satp_mode.map = (1 << VM_1_10_MBARE);
434 return;
435 }
436
437 cpu->cfg.satp_mode.map = cpu->cfg.satp_mode.supported;
438 }
439 #endif
440
441 static void riscv_any_cpu_init(Object *obj)
442 {
443 RISCVCPU *cpu = RISCV_CPU(obj);
444 CPURISCVState *env = &cpu->env;
445 riscv_cpu_set_misa_ext(env, RVI | RVM | RVA | RVF | RVD | RVC | RVU);
446
447 #ifndef CONFIG_USER_ONLY
448 set_satp_mode_max_supported(RISCV_CPU(obj),
449 riscv_cpu_mxl(&RISCV_CPU(obj)->env) == MXL_RV32 ?
450 VM_1_10_SV32 : VM_1_10_SV57);
451 #endif
452
453 env->priv_ver = PRIV_VERSION_LATEST;
454
455 /* inherited from parent obj via riscv_cpu_init() */
456 cpu->cfg.ext_zifencei = true;
457 cpu->cfg.ext_zicsr = true;
458 cpu->cfg.mmu = true;
459 cpu->cfg.pmp = true;
460 }
461
462 static void riscv_max_cpu_init(Object *obj)
463 {
464 RISCVCPU *cpu = RISCV_CPU(obj);
465 CPURISCVState *env = &cpu->env;
466
467 cpu->cfg.mmu = true;
468 cpu->cfg.pmp = true;
469
470 env->priv_ver = PRIV_VERSION_LATEST;
471 #ifndef CONFIG_USER_ONLY
472 #ifdef TARGET_RISCV32
473 set_satp_mode_max_supported(cpu, VM_1_10_SV32);
474 #else
475 set_satp_mode_max_supported(cpu, VM_1_10_SV57);
476 #endif
477 #endif
478 }
479
480 #if defined(TARGET_RISCV64)
481 static void rv64_base_cpu_init(Object *obj)
482 {
483 RISCVCPU *cpu = RISCV_CPU(obj);
484 CPURISCVState *env = &cpu->env;
485
486 cpu->cfg.mmu = true;
487 cpu->cfg.pmp = true;
488
489 /* Set latest version of privileged specification */
490 env->priv_ver = PRIV_VERSION_LATEST;
491 #ifndef CONFIG_USER_ONLY
492 set_satp_mode_max_supported(RISCV_CPU(obj), VM_1_10_SV57);
493 #endif
494 }
495
496 static void rv64_sifive_u_cpu_init(Object *obj)
497 {
498 RISCVCPU *cpu = RISCV_CPU(obj);
499 CPURISCVState *env = &cpu->env;
500 riscv_cpu_set_misa_ext(env, RVI | RVM | RVA | RVF | RVD | RVC | RVS | RVU);
501 env->priv_ver = PRIV_VERSION_1_10_0;
502 #ifndef CONFIG_USER_ONLY
503 set_satp_mode_max_supported(RISCV_CPU(obj), VM_1_10_SV39);
504 #endif
505
506 /* inherited from parent obj via riscv_cpu_init() */
507 cpu->cfg.ext_zifencei = true;
508 cpu->cfg.ext_zicsr = true;
509 cpu->cfg.mmu = true;
510 cpu->cfg.pmp = true;
511 }
512
513 static void rv64_sifive_e_cpu_init(Object *obj)
514 {
515 CPURISCVState *env = &RISCV_CPU(obj)->env;
516 RISCVCPU *cpu = RISCV_CPU(obj);
517
518 riscv_cpu_set_misa_ext(env, RVI | RVM | RVA | RVC | RVU);
519 env->priv_ver = PRIV_VERSION_1_10_0;
520 #ifndef CONFIG_USER_ONLY
521 set_satp_mode_max_supported(cpu, VM_1_10_MBARE);
522 #endif
523
524 /* inherited from parent obj via riscv_cpu_init() */
525 cpu->cfg.ext_zifencei = true;
526 cpu->cfg.ext_zicsr = true;
527 cpu->cfg.pmp = true;
528 }
529
530 static void rv64_thead_c906_cpu_init(Object *obj)
531 {
532 CPURISCVState *env = &RISCV_CPU(obj)->env;
533 RISCVCPU *cpu = RISCV_CPU(obj);
534
535 riscv_cpu_set_misa_ext(env, RVG | RVC | RVS | RVU);
536 env->priv_ver = PRIV_VERSION_1_11_0;
537
538 cpu->cfg.ext_zfa = true;
539 cpu->cfg.ext_zfh = true;
540 cpu->cfg.mmu = true;
541 cpu->cfg.ext_xtheadba = true;
542 cpu->cfg.ext_xtheadbb = true;
543 cpu->cfg.ext_xtheadbs = true;
544 cpu->cfg.ext_xtheadcmo = true;
545 cpu->cfg.ext_xtheadcondmov = true;
546 cpu->cfg.ext_xtheadfmemidx = true;
547 cpu->cfg.ext_xtheadmac = true;
548 cpu->cfg.ext_xtheadmemidx = true;
549 cpu->cfg.ext_xtheadmempair = true;
550 cpu->cfg.ext_xtheadsync = true;
551
552 cpu->cfg.mvendorid = THEAD_VENDOR_ID;
553 #ifndef CONFIG_USER_ONLY
554 set_satp_mode_max_supported(cpu, VM_1_10_SV39);
555 th_register_custom_csrs(cpu);
556 #endif
557
558 /* inherited from parent obj via riscv_cpu_init() */
559 cpu->cfg.pmp = true;
560 }
561
562 static void rv64_veyron_v1_cpu_init(Object *obj)
563 {
564 CPURISCVState *env = &RISCV_CPU(obj)->env;
565 RISCVCPU *cpu = RISCV_CPU(obj);
566
567 riscv_cpu_set_misa_ext(env, RVG | RVC | RVS | RVU | RVH);
568 env->priv_ver = PRIV_VERSION_1_12_0;
569
570 /* Enable ISA extensions */
571 cpu->cfg.mmu = true;
572 cpu->cfg.ext_zifencei = true;
573 cpu->cfg.ext_zicsr = true;
574 cpu->cfg.pmp = true;
575 cpu->cfg.ext_zicbom = true;
576 cpu->cfg.cbom_blocksize = 64;
577 cpu->cfg.cboz_blocksize = 64;
578 cpu->cfg.ext_zicboz = true;
579 cpu->cfg.ext_smaia = true;
580 cpu->cfg.ext_ssaia = true;
581 cpu->cfg.ext_sscofpmf = true;
582 cpu->cfg.ext_sstc = true;
583 cpu->cfg.ext_svinval = true;
584 cpu->cfg.ext_svnapot = true;
585 cpu->cfg.ext_svpbmt = true;
586 cpu->cfg.ext_smstateen = true;
587 cpu->cfg.ext_zba = true;
588 cpu->cfg.ext_zbb = true;
589 cpu->cfg.ext_zbc = true;
590 cpu->cfg.ext_zbs = true;
591 cpu->cfg.ext_XVentanaCondOps = true;
592
593 cpu->cfg.mvendorid = VEYRON_V1_MVENDORID;
594 cpu->cfg.marchid = VEYRON_V1_MARCHID;
595 cpu->cfg.mimpid = VEYRON_V1_MIMPID;
596
597 #ifndef CONFIG_USER_ONLY
598 set_satp_mode_max_supported(cpu, VM_1_10_SV48);
599 #endif
600 }
601
602 #ifdef CONFIG_TCG
603 static void rv128_base_cpu_init(Object *obj)
604 {
605 RISCVCPU *cpu = RISCV_CPU(obj);
606 CPURISCVState *env = &cpu->env;
607
608 if (qemu_tcg_mttcg_enabled()) {
609 /* Missing 128-bit aligned atomics */
610 error_report("128-bit RISC-V currently does not work with Multi "
611 "Threaded TCG. Please use: -accel tcg,thread=single");
612 exit(EXIT_FAILURE);
613 }
614
615 cpu->cfg.mmu = true;
616 cpu->cfg.pmp = true;
617
618 /* Set latest version of privileged specification */
619 env->priv_ver = PRIV_VERSION_LATEST;
620 #ifndef CONFIG_USER_ONLY
621 set_satp_mode_max_supported(RISCV_CPU(obj), VM_1_10_SV57);
622 #endif
623 }
624 #endif /* CONFIG_TCG */
625
626 static void rv64i_bare_cpu_init(Object *obj)
627 {
628 CPURISCVState *env = &RISCV_CPU(obj)->env;
629 riscv_cpu_set_misa_ext(env, RVI);
630 }
631
632 static void rv64e_bare_cpu_init(Object *obj)
633 {
634 CPURISCVState *env = &RISCV_CPU(obj)->env;
635 riscv_cpu_set_misa_ext(env, RVE);
636 }
637
638 #else /* !TARGET_RISCV64 */
639
640 static void rv32_base_cpu_init(Object *obj)
641 {
642 RISCVCPU *cpu = RISCV_CPU(obj);
643 CPURISCVState *env = &cpu->env;
644
645 cpu->cfg.mmu = true;
646 cpu->cfg.pmp = true;
647
648 /* Set latest version of privileged specification */
649 env->priv_ver = PRIV_VERSION_LATEST;
650 #ifndef CONFIG_USER_ONLY
651 set_satp_mode_max_supported(RISCV_CPU(obj), VM_1_10_SV32);
652 #endif
653 }
654
655 static void rv32_sifive_u_cpu_init(Object *obj)
656 {
657 RISCVCPU *cpu = RISCV_CPU(obj);
658 CPURISCVState *env = &cpu->env;
659 riscv_cpu_set_misa_ext(env, RVI | RVM | RVA | RVF | RVD | RVC | RVS | RVU);
660 env->priv_ver = PRIV_VERSION_1_10_0;
661 #ifndef CONFIG_USER_ONLY
662 set_satp_mode_max_supported(RISCV_CPU(obj), VM_1_10_SV32);
663 #endif
664
665 /* inherited from parent obj via riscv_cpu_init() */
666 cpu->cfg.ext_zifencei = true;
667 cpu->cfg.ext_zicsr = true;
668 cpu->cfg.mmu = true;
669 cpu->cfg.pmp = true;
670 }
671
672 static void rv32_sifive_e_cpu_init(Object *obj)
673 {
674 CPURISCVState *env = &RISCV_CPU(obj)->env;
675 RISCVCPU *cpu = RISCV_CPU(obj);
676
677 riscv_cpu_set_misa_ext(env, RVI | RVM | RVA | RVC | RVU);
678 env->priv_ver = PRIV_VERSION_1_10_0;
679 #ifndef CONFIG_USER_ONLY
680 set_satp_mode_max_supported(cpu, VM_1_10_MBARE);
681 #endif
682
683 /* inherited from parent obj via riscv_cpu_init() */
684 cpu->cfg.ext_zifencei = true;
685 cpu->cfg.ext_zicsr = true;
686 cpu->cfg.pmp = true;
687 }
688
689 static void rv32_ibex_cpu_init(Object *obj)
690 {
691 CPURISCVState *env = &RISCV_CPU(obj)->env;
692 RISCVCPU *cpu = RISCV_CPU(obj);
693
694 riscv_cpu_set_misa_ext(env, RVI | RVM | RVC | RVU);
695 env->priv_ver = PRIV_VERSION_1_12_0;
696 #ifndef CONFIG_USER_ONLY
697 set_satp_mode_max_supported(cpu, VM_1_10_MBARE);
698 #endif
699 /* inherited from parent obj via riscv_cpu_init() */
700 cpu->cfg.ext_zifencei = true;
701 cpu->cfg.ext_zicsr = true;
702 cpu->cfg.pmp = true;
703 cpu->cfg.ext_smepmp = true;
704 }
705
706 static void rv32_imafcu_nommu_cpu_init(Object *obj)
707 {
708 CPURISCVState *env = &RISCV_CPU(obj)->env;
709 RISCVCPU *cpu = RISCV_CPU(obj);
710
711 riscv_cpu_set_misa_ext(env, RVI | RVM | RVA | RVF | RVC | RVU);
712 env->priv_ver = PRIV_VERSION_1_10_0;
713 #ifndef CONFIG_USER_ONLY
714 set_satp_mode_max_supported(cpu, VM_1_10_MBARE);
715 #endif
716
717 /* inherited from parent obj via riscv_cpu_init() */
718 cpu->cfg.ext_zifencei = true;
719 cpu->cfg.ext_zicsr = true;
720 cpu->cfg.pmp = true;
721 }
722
723 static void rv32i_bare_cpu_init(Object *obj)
724 {
725 CPURISCVState *env = &RISCV_CPU(obj)->env;
726 riscv_cpu_set_misa_ext(env, RVI);
727 }
728
729 static void rv32e_bare_cpu_init(Object *obj)
730 {
731 CPURISCVState *env = &RISCV_CPU(obj)->env;
732 riscv_cpu_set_misa_ext(env, RVE);
733 }
734 #endif
735
736 static ObjectClass *riscv_cpu_class_by_name(const char *cpu_model)
737 {
738 ObjectClass *oc;
739 char *typename;
740 char **cpuname;
741
742 cpuname = g_strsplit(cpu_model, ",", 1);
743 typename = g_strdup_printf(RISCV_CPU_TYPE_NAME("%s"), cpuname[0]);
744 oc = object_class_by_name(typename);
745 g_strfreev(cpuname);
746 g_free(typename);
747
748 return oc;
749 }
750
751 char *riscv_cpu_get_name(RISCVCPU *cpu)
752 {
753 RISCVCPUClass *rcc = RISCV_CPU_GET_CLASS(cpu);
754 const char *typename = object_class_get_name(OBJECT_CLASS(rcc));
755
756 g_assert(g_str_has_suffix(typename, RISCV_CPU_TYPE_SUFFIX));
757
758 return cpu_model_from_type(typename);
759 }
760
761 static void riscv_cpu_dump_state(CPUState *cs, FILE *f, int flags)
762 {
763 RISCVCPU *cpu = RISCV_CPU(cs);
764 CPURISCVState *env = &cpu->env;
765 int i, j;
766 uint8_t *p;
767
768 #if !defined(CONFIG_USER_ONLY)
769 if (riscv_has_ext(env, RVH)) {
770 qemu_fprintf(f, " %s %d\n", "V = ", env->virt_enabled);
771 }
772 #endif
773 qemu_fprintf(f, " %s " TARGET_FMT_lx "\n", "pc ", env->pc);
774 #ifndef CONFIG_USER_ONLY
775 {
776 static const int dump_csrs[] = {
777 CSR_MHARTID,
778 CSR_MSTATUS,
779 CSR_MSTATUSH,
780 /*
781 * CSR_SSTATUS is intentionally omitted here as its value
782 * can be figured out by looking at CSR_MSTATUS
783 */
784 CSR_HSTATUS,
785 CSR_VSSTATUS,
786 CSR_MIP,
787 CSR_MIE,
788 CSR_MIDELEG,
789 CSR_HIDELEG,
790 CSR_MEDELEG,
791 CSR_HEDELEG,
792 CSR_MTVEC,
793 CSR_STVEC,
794 CSR_VSTVEC,
795 CSR_MEPC,
796 CSR_SEPC,
797 CSR_VSEPC,
798 CSR_MCAUSE,
799 CSR_SCAUSE,
800 CSR_VSCAUSE,
801 CSR_MTVAL,
802 CSR_STVAL,
803 CSR_HTVAL,
804 CSR_MTVAL2,
805 CSR_MSCRATCH,
806 CSR_SSCRATCH,
807 CSR_SATP,
808 CSR_MMTE,
809 CSR_UPMBASE,
810 CSR_UPMMASK,
811 CSR_SPMBASE,
812 CSR_SPMMASK,
813 CSR_MPMBASE,
814 CSR_MPMMASK,
815 };
816
817 for (i = 0; i < ARRAY_SIZE(dump_csrs); ++i) {
818 int csrno = dump_csrs[i];
819 target_ulong val = 0;
820 RISCVException res = riscv_csrrw_debug(env, csrno, &val, 0, 0);
821
822 /*
823 * Rely on the smode, hmode, etc, predicates within csr.c
824 * to do the filtering of the registers that are present.
825 */
826 if (res == RISCV_EXCP_NONE) {
827 qemu_fprintf(f, " %-8s " TARGET_FMT_lx "\n",
828 csr_ops[csrno].name, val);
829 }
830 }
831 }
832 #endif
833
834 for (i = 0; i < 32; i++) {
835 qemu_fprintf(f, " %-8s " TARGET_FMT_lx,
836 riscv_int_regnames[i], env->gpr[i]);
837 if ((i & 3) == 3) {
838 qemu_fprintf(f, "\n");
839 }
840 }
841 if (flags & CPU_DUMP_FPU) {
842 for (i = 0; i < 32; i++) {
843 qemu_fprintf(f, " %-8s %016" PRIx64,
844 riscv_fpr_regnames[i], env->fpr[i]);
845 if ((i & 3) == 3) {
846 qemu_fprintf(f, "\n");
847 }
848 }
849 }
850 if (riscv_has_ext(env, RVV) && (flags & CPU_DUMP_VPU)) {
851 static const int dump_rvv_csrs[] = {
852 CSR_VSTART,
853 CSR_VXSAT,
854 CSR_VXRM,
855 CSR_VCSR,
856 CSR_VL,
857 CSR_VTYPE,
858 CSR_VLENB,
859 };
860 for (i = 0; i < ARRAY_SIZE(dump_rvv_csrs); ++i) {
861 int csrno = dump_rvv_csrs[i];
862 target_ulong val = 0;
863 RISCVException res = riscv_csrrw_debug(env, csrno, &val, 0, 0);
864
865 /*
866 * Rely on the smode, hmode, etc, predicates within csr.c
867 * to do the filtering of the registers that are present.
868 */
869 if (res == RISCV_EXCP_NONE) {
870 qemu_fprintf(f, " %-8s " TARGET_FMT_lx "\n",
871 csr_ops[csrno].name, val);
872 }
873 }
874 uint16_t vlenb = cpu->cfg.vlenb;
875
876 for (i = 0; i < 32; i++) {
877 qemu_fprintf(f, " %-8s ", riscv_rvv_regnames[i]);
878 p = (uint8_t *)env->vreg;
879 for (j = vlenb - 1 ; j >= 0; j--) {
880 qemu_fprintf(f, "%02x", *(p + i * vlenb + BYTE(j)));
881 }
882 qemu_fprintf(f, "\n");
883 }
884 }
885 }
886
887 static void riscv_cpu_set_pc(CPUState *cs, vaddr value)
888 {
889 RISCVCPU *cpu = RISCV_CPU(cs);
890 CPURISCVState *env = &cpu->env;
891
892 if (env->xl == MXL_RV32) {
893 env->pc = (int32_t)value;
894 } else {
895 env->pc = value;
896 }
897 }
898
899 static vaddr riscv_cpu_get_pc(CPUState *cs)
900 {
901 RISCVCPU *cpu = RISCV_CPU(cs);
902 CPURISCVState *env = &cpu->env;
903
904 /* Match cpu_get_tb_cpu_state. */
905 if (env->xl == MXL_RV32) {
906 return env->pc & UINT32_MAX;
907 }
908 return env->pc;
909 }
910
911 bool riscv_cpu_has_work(CPUState *cs)
912 {
913 #ifndef CONFIG_USER_ONLY
914 RISCVCPU *cpu = RISCV_CPU(cs);
915 CPURISCVState *env = &cpu->env;
916 /*
917 * Definition of the WFI instruction requires it to ignore the privilege
918 * mode and delegation registers, but respect individual enables
919 */
920 return riscv_cpu_all_pending(env) != 0 ||
921 riscv_cpu_sirq_pending(env) != RISCV_EXCP_NONE ||
922 riscv_cpu_vsirq_pending(env) != RISCV_EXCP_NONE;
923 #else
924 return true;
925 #endif
926 }
927
928 static int riscv_cpu_mmu_index(CPUState *cs, bool ifetch)
929 {
930 return riscv_env_mmu_index(cpu_env(cs), ifetch);
931 }
932
933 static void riscv_cpu_reset_hold(Object *obj, ResetType type)
934 {
935 #ifndef CONFIG_USER_ONLY
936 uint8_t iprio;
937 int i, irq, rdzero;
938 #endif
939 CPUState *cs = CPU(obj);
940 RISCVCPU *cpu = RISCV_CPU(cs);
941 RISCVCPUClass *mcc = RISCV_CPU_GET_CLASS(obj);
942 CPURISCVState *env = &cpu->env;
943
944 if (mcc->parent_phases.hold) {
945 mcc->parent_phases.hold(obj, type);
946 }
947 #ifndef CONFIG_USER_ONLY
948 env->misa_mxl = mcc->misa_mxl_max;
949 env->priv = PRV_M;
950 env->mstatus &= ~(MSTATUS_MIE | MSTATUS_MPRV);
951 if (env->misa_mxl > MXL_RV32) {
952 /*
953 * The reset status of SXL/UXL is undefined, but mstatus is WARL
954 * and we must ensure that the value after init is valid for read.
955 */
956 env->mstatus = set_field(env->mstatus, MSTATUS64_SXL, env->misa_mxl);
957 env->mstatus = set_field(env->mstatus, MSTATUS64_UXL, env->misa_mxl);
958 if (riscv_has_ext(env, RVH)) {
959 env->vsstatus = set_field(env->vsstatus,
960 MSTATUS64_SXL, env->misa_mxl);
961 env->vsstatus = set_field(env->vsstatus,
962 MSTATUS64_UXL, env->misa_mxl);
963 env->mstatus_hs = set_field(env->mstatus_hs,
964 MSTATUS64_SXL, env->misa_mxl);
965 env->mstatus_hs = set_field(env->mstatus_hs,
966 MSTATUS64_UXL, env->misa_mxl);
967 }
968 }
969 env->mcause = 0;
970 env->miclaim = MIP_SGEIP;
971 env->pc = env->resetvec;
972 env->bins = 0;
973 env->two_stage_lookup = false;
974
975 env->menvcfg = (cpu->cfg.ext_svpbmt ? MENVCFG_PBMTE : 0) |
976 (!cpu->cfg.ext_svade && cpu->cfg.ext_svadu ?
977 MENVCFG_ADUE : 0);
978 env->henvcfg = 0;
979
980     /* Initialize default priorities of local interrupts. */
981 for (i = 0; i < ARRAY_SIZE(env->miprio); i++) {
982 iprio = riscv_cpu_default_priority(i);
983 env->miprio[i] = (i == IRQ_M_EXT) ? 0 : iprio;
984 env->siprio[i] = (i == IRQ_S_EXT) ? 0 : iprio;
985 env->hviprio[i] = 0;
986 }
987 i = 0;
988 while (!riscv_cpu_hviprio_index2irq(i, &irq, &rdzero)) {
989 if (!rdzero) {
990 env->hviprio[irq] = env->miprio[irq];
991 }
992 i++;
993 }
994 /* mmte is supposed to have pm.current hardwired to 1 */
995 env->mmte |= (EXT_STATUS_INITIAL | MMTE_M_PM_CURRENT);
996
997 /*
998 * Bits 10, 6, 2 and 12 of mideleg are read only 1 when the Hypervisor
999 * extension is enabled.
1000 */
1001 if (riscv_has_ext(env, RVH)) {
1002 env->mideleg |= HS_MODE_INTERRUPTS;
1003 }
1004
1005 /*
1006 * Clear mseccfg and unlock all the PMP entries upon reset.
1007 * This is allowed as per the priv and smepmp specifications
1008 * and is needed to clear stale entries across reboots.
1009 */
1010 if (riscv_cpu_cfg(env)->ext_smepmp) {
1011 env->mseccfg = 0;
1012 }
1013
1014 pmp_unlock_entries(env);
1015 #endif
1016 env->xl = riscv_cpu_mxl(env);
1017 riscv_cpu_update_mask(env);
1018 cs->exception_index = RISCV_EXCP_NONE;
1019 env->load_res = -1;
1020 set_default_nan_mode(1, &env->fp_status);
1021
1022 #ifndef CONFIG_USER_ONLY
1023 if (cpu->cfg.debug) {
1024 riscv_trigger_reset_hold(env);
1025 }
1026
1027 if (kvm_enabled()) {
1028 kvm_riscv_reset_vcpu(cpu);
1029 }
1030 #endif
1031 }
1032
1033 static void riscv_cpu_disas_set_info(CPUState *s, disassemble_info *info)
1034 {
1035 RISCVCPU *cpu = RISCV_CPU(s);
1036 CPURISCVState *env = &cpu->env;
1037 info->target_info = &cpu->cfg;
1038
1039 switch (env->xl) {
1040 case MXL_RV32:
1041 info->print_insn = print_insn_riscv32;
1042 break;
1043 case MXL_RV64:
1044 info->print_insn = print_insn_riscv64;
1045 break;
1046 case MXL_RV128:
1047 info->print_insn = print_insn_riscv128;
1048 break;
1049 default:
1050 g_assert_not_reached();
1051 }
1052 }
1053
1054 #ifndef CONFIG_USER_ONLY
1055 static void riscv_cpu_satp_mode_finalize(RISCVCPU *cpu, Error **errp)
1056 {
1057 bool rv32 = riscv_cpu_is_32bit(cpu);
1058 uint8_t satp_mode_map_max, satp_mode_supported_max;
1059
1060 /* The CPU wants the OS to decide which satp mode to use */
1061 if (cpu->cfg.satp_mode.supported == 0) {
1062 return;
1063 }
1064
1065 satp_mode_supported_max =
1066 satp_mode_max_from_map(cpu->cfg.satp_mode.supported);
1067
1068 if (cpu->cfg.satp_mode.map == 0) {
1069 if (cpu->cfg.satp_mode.init == 0) {
1070 /* If unset by the user, we fallback to the default satp mode. */
1071 set_satp_mode_default_map(cpu);
1072 } else {
1073 /*
1074 * Find the lowest level that was disabled and then enable the
1075 * first valid level below which can be found in
1076 * valid_vm_1_10_32/64.
1077 */
1078 for (int i = 1; i < 16; ++i) {
1079 if ((cpu->cfg.satp_mode.init & (1 << i)) &&
1080 (cpu->cfg.satp_mode.supported & (1 << i))) {
1081 for (int j = i - 1; j >= 0; --j) {
1082 if (cpu->cfg.satp_mode.supported & (1 << j)) {
1083 cpu->cfg.satp_mode.map |= (1 << j);
1084 break;
1085 }
1086 }
1087 break;
1088 }
1089 }
1090 }
1091 }
1092
1093 satp_mode_map_max = satp_mode_max_from_map(cpu->cfg.satp_mode.map);
1094
1095 /* Make sure the user asked for a supported configuration (HW and qemu) */
1096 if (satp_mode_map_max > satp_mode_supported_max) {
1097 error_setg(errp, "satp_mode %s is higher than hw max capability %s",
1098 satp_mode_str(satp_mode_map_max, rv32),
1099 satp_mode_str(satp_mode_supported_max, rv32));
1100 return;
1101 }
1102
1103 /*
1104 * Make sure the user did not ask for an invalid configuration as per
1105 * the specification.
1106 */
1107 if (!rv32) {
1108 for (int i = satp_mode_map_max - 1; i >= 0; --i) {
1109 if (!(cpu->cfg.satp_mode.map & (1 << i)) &&
1110 (cpu->cfg.satp_mode.init & (1 << i)) &&
1111 (cpu->cfg.satp_mode.supported & (1 << i))) {
1112 error_setg(errp, "cannot disable %s satp mode if %s "
1113 "is enabled", satp_mode_str(i, false),
1114 satp_mode_str(satp_mode_map_max, false));
1115 return;
1116 }
1117 }
1118 }
1119
1120 /* Finally expand the map so that all valid modes are set */
1121 for (int i = satp_mode_map_max - 1; i >= 0; --i) {
1122 if (cpu->cfg.satp_mode.supported & (1 << i)) {
1123 cpu->cfg.satp_mode.map |= (1 << i);
1124 }
1125 }
1126 }
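/*
 * Illustrative walk-through of the function above (assuming an rv64 CPU
 * whose supported map covers bare/Sv39/Sv48/Sv57): if the user only
 * passes "sv48=off", the init map has just the Sv48 bit, the first loop
 * enables Sv39 as the next valid level below it, and the final expansion
 * adds bare, leaving Sv39 as the maximum advertised mode.
 */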
1127 #endif
1128
1129 void riscv_cpu_finalize_features(RISCVCPU *cpu, Error **errp)
1130 {
1131 Error *local_err = NULL;
1132
1133 #ifndef CONFIG_USER_ONLY
1134 riscv_cpu_satp_mode_finalize(cpu, &local_err);
1135 if (local_err != NULL) {
1136 error_propagate(errp, local_err);
1137 return;
1138 }
1139 #endif
1140
1141 if (tcg_enabled()) {
1142 riscv_tcg_cpu_finalize_features(cpu, &local_err);
1143 if (local_err != NULL) {
1144 error_propagate(errp, local_err);
1145 return;
1146 }
1147 riscv_tcg_cpu_finalize_dynamic_decoder(cpu);
1148 } else if (kvm_enabled()) {
1149 riscv_kvm_cpu_finalize_features(cpu, &local_err);
1150 if (local_err != NULL) {
1151 error_propagate(errp, local_err);
1152 return;
1153 }
1154 }
1155 }
1156
1157 static void riscv_cpu_realize(DeviceState *dev, Error **errp)
1158 {
1159 CPUState *cs = CPU(dev);
1160 RISCVCPU *cpu = RISCV_CPU(dev);
1161 RISCVCPUClass *mcc = RISCV_CPU_GET_CLASS(dev);
1162 Error *local_err = NULL;
1163
1164 if (object_dynamic_cast(OBJECT(dev), TYPE_RISCV_CPU_ANY) != NULL) {
1165 warn_report("The 'any' CPU is deprecated and will be "
1166 "removed in the future.");
1167 }
1168
1169 cpu_exec_realizefn(cs, &local_err);
1170 if (local_err != NULL) {
1171 error_propagate(errp, local_err);
1172 return;
1173 }
1174
1175 riscv_cpu_finalize_features(cpu, &local_err);
1176 if (local_err != NULL) {
1177 error_propagate(errp, local_err);
1178 return;
1179 }
1180
1181 riscv_cpu_register_gdb_regs_for_features(cs);
1182
1183 #ifndef CONFIG_USER_ONLY
1184 if (cpu->cfg.debug) {
1185 riscv_trigger_realize(&cpu->env);
1186 }
1187 #endif
1188
1189 qemu_init_vcpu(cs);
1190 cpu_reset(cs);
1191
1192 mcc->parent_realize(dev, errp);
1193 }
1194
1195 bool riscv_cpu_accelerator_compatible(RISCVCPU *cpu)
1196 {
1197 if (tcg_enabled()) {
1198 return riscv_cpu_tcg_compatible(cpu);
1199 }
1200
1201 return true;
1202 }
1203
1204 #ifndef CONFIG_USER_ONLY
1205 static void cpu_riscv_get_satp(Object *obj, Visitor *v, const char *name,
1206 void *opaque, Error **errp)
1207 {
1208 RISCVSATPMap *satp_map = opaque;
1209 uint8_t satp = satp_mode_from_str(name);
1210 bool value;
1211
1212 value = satp_map->map & (1 << satp);
1213
1214 visit_type_bool(v, name, &value, errp);
1215 }
1216
1217 static void cpu_riscv_set_satp(Object *obj, Visitor *v, const char *name,
1218 void *opaque, Error **errp)
1219 {
1220 RISCVSATPMap *satp_map = opaque;
1221 uint8_t satp = satp_mode_from_str(name);
1222 bool value;
1223
1224 if (!visit_type_bool(v, name, &value, errp)) {
1225 return;
1226 }
1227
1228 satp_map->map = deposit32(satp_map->map, satp, 1, value);
1229 satp_map->init |= 1 << satp;
1230 }
1231
1232 void riscv_add_satp_mode_properties(Object *obj)
1233 {
1234 RISCVCPU *cpu = RISCV_CPU(obj);
1235
1236 if (cpu->env.misa_mxl == MXL_RV32) {
1237 object_property_add(obj, "sv32", "bool", cpu_riscv_get_satp,
1238 cpu_riscv_set_satp, NULL, &cpu->cfg.satp_mode);
1239 } else {
1240 object_property_add(obj, "sv39", "bool", cpu_riscv_get_satp,
1241 cpu_riscv_set_satp, NULL, &cpu->cfg.satp_mode);
1242 object_property_add(obj, "sv48", "bool", cpu_riscv_get_satp,
1243 cpu_riscv_set_satp, NULL, &cpu->cfg.satp_mode);
1244 object_property_add(obj, "sv57", "bool", cpu_riscv_get_satp,
1245 cpu_riscv_set_satp, NULL, &cpu->cfg.satp_mode);
1246 object_property_add(obj, "sv64", "bool", cpu_riscv_get_satp,
1247 cpu_riscv_set_satp, NULL, &cpu->cfg.satp_mode);
1248 }
1249 }
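/*
 * Example usage (hypothetical command line, for illustration only): the
 * properties registered above are regular CPU properties, so something
 * like "-cpu rv64,sv57=off,sv48=on" would cap the advertised satp mode
 * at Sv48.
 */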
1250
1251 static void riscv_cpu_set_irq(void *opaque, int irq, int level)
1252 {
1253 RISCVCPU *cpu = RISCV_CPU(opaque);
1254 CPURISCVState *env = &cpu->env;
1255
1256 if (irq < IRQ_LOCAL_MAX) {
1257 switch (irq) {
1258 case IRQ_U_SOFT:
1259 case IRQ_S_SOFT:
1260 case IRQ_VS_SOFT:
1261 case IRQ_M_SOFT:
1262 case IRQ_U_TIMER:
1263 case IRQ_S_TIMER:
1264 case IRQ_VS_TIMER:
1265 case IRQ_M_TIMER:
1266 case IRQ_U_EXT:
1267 case IRQ_VS_EXT:
1268 case IRQ_M_EXT:
1269 if (kvm_enabled()) {
1270 kvm_riscv_set_irq(cpu, irq, level);
1271 } else {
1272 riscv_cpu_update_mip(env, 1 << irq, BOOL_TO_MASK(level));
1273 }
1274 break;
1275 case IRQ_S_EXT:
1276 if (kvm_enabled()) {
1277 kvm_riscv_set_irq(cpu, irq, level);
1278 } else {
1279 env->external_seip = level;
1280 riscv_cpu_update_mip(env, 1 << irq,
1281 BOOL_TO_MASK(level | env->software_seip));
1282 }
1283 break;
1284 default:
1285 g_assert_not_reached();
1286 }
1287 } else if (irq < (IRQ_LOCAL_MAX + IRQ_LOCAL_GUEST_MAX)) {
1288 /* Require H-extension for handling guest local interrupts */
1289 if (!riscv_has_ext(env, RVH)) {
1290 g_assert_not_reached();
1291 }
1292
1293 /* Compute bit position in HGEIP CSR */
1294 irq = irq - IRQ_LOCAL_MAX + 1;
1295 if (env->geilen < irq) {
1296 g_assert_not_reached();
1297 }
1298
1299 /* Update HGEIP CSR */
1300 env->hgeip &= ~((target_ulong)1 << irq);
1301 if (level) {
1302 env->hgeip |= (target_ulong)1 << irq;
1303 }
1304
1305 /* Update mip.SGEIP bit */
1306 riscv_cpu_update_mip(env, MIP_SGEIP,
1307 BOOL_TO_MASK(!!(env->hgeie & env->hgeip)));
1308 } else {
1309 g_assert_not_reached();
1310 }
1311 }
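/*
 * Example (illustrative): the first guest external interrupt line,
 * irq == IRQ_LOCAL_MAX, lands in bit 1 of hgeip above, matching the
 * spec behaviour of hgeip/hgeie bit 0 being hardwired to zero.
 */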
1312 #endif /* CONFIG_USER_ONLY */
1313
1314 static bool riscv_cpu_is_dynamic(Object *cpu_obj)
1315 {
1316 return object_dynamic_cast(cpu_obj, TYPE_RISCV_DYNAMIC_CPU) != NULL;
1317 }
1318
1319 static void riscv_cpu_post_init(Object *obj)
1320 {
1321 accel_cpu_instance_init(CPU(obj));
1322 }
1323
1324 static void riscv_cpu_init(Object *obj)
1325 {
1326 RISCVCPUClass *mcc = RISCV_CPU_GET_CLASS(obj);
1327 RISCVCPU *cpu = RISCV_CPU(obj);
1328 CPURISCVState *env = &cpu->env;
1329
1330 env->misa_mxl = mcc->misa_mxl_max;
1331
1332 #ifndef CONFIG_USER_ONLY
1333 qdev_init_gpio_in(DEVICE(obj), riscv_cpu_set_irq,
1334 IRQ_LOCAL_MAX + IRQ_LOCAL_GUEST_MAX);
1335 #endif /* CONFIG_USER_ONLY */
1336
1337 general_user_opts = g_hash_table_new(g_str_hash, g_str_equal);
1338
1339 /*
1340 * The timer and performance counters extensions were supported
1341 * in QEMU before they were added as discrete extensions in the
1342 * ISA. To keep compatibility we'll always default them to 'true'
1343 * for all CPUs. Each accelerator will decide what to do when
1344 * users disable them.
1345 */
1346 RISCV_CPU(obj)->cfg.ext_zicntr = true;
1347 RISCV_CPU(obj)->cfg.ext_zihpm = true;
1348
1349 /* Default values for non-bool cpu properties */
1350 cpu->cfg.pmu_mask = MAKE_64BIT_MASK(3, 16);
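    /* Default VLEN of 128 bits, stored in bytes (128 >> 3 == 16) */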
1351 cpu->cfg.vlenb = 128 >> 3;
1352 cpu->cfg.elen = 64;
1353 cpu->cfg.cbom_blocksize = 64;
1354 cpu->cfg.cbop_blocksize = 64;
1355 cpu->cfg.cboz_blocksize = 64;
1356 cpu->env.vext_ver = VEXT_VERSION_1_00_0;
1357 }
1358
1359 static void riscv_bare_cpu_init(Object *obj)
1360 {
1361 RISCVCPU *cpu = RISCV_CPU(obj);
1362
1363 /*
1364 * Bare CPUs do not inherit the timer and performance
1365 * counters from the parent class (see riscv_cpu_init()
1366 * for info on why the parent enables them).
1367 *
1368 * Users have to explicitly enable these counters for
1369 * bare CPUs.
1370 */
1371 cpu->cfg.ext_zicntr = false;
1372 cpu->cfg.ext_zihpm = false;
1373
1374 /* Set to QEMU's first supported priv version */
1375 cpu->env.priv_ver = PRIV_VERSION_1_10_0;
1376
1377 /*
1378 * Support all available satp_mode settings. The default
1379 * value will be set to MBARE if the user doesn't set
1380      * satp_mode manually (see set_satp_mode_default_map()).
1381 */
1382 #ifndef CONFIG_USER_ONLY
1383 set_satp_mode_max_supported(cpu, VM_1_10_SV64);
1384 #endif
1385 }
1386
1387 typedef struct misa_ext_info {
1388 const char *name;
1389 const char *description;
1390 } MISAExtInfo;
1391
1392 #define MISA_INFO_IDX(_bit) \
1393 __builtin_ctz(_bit)
1394
1395 #define MISA_EXT_INFO(_bit, _propname, _descr) \
1396 [MISA_INFO_IDX(_bit)] = {.name = _propname, .description = _descr}
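/*
 * For illustration (assuming the usual single-letter encoding where RVA
 * is bit 0 and RVC is bit 2): MISA_INFO_IDX(RVA) == 0 and
 * MISA_INFO_IDX(RVC) == 2, so the designated initializers below index
 * the array by misa bit position.
 */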
1397
1398 static const MISAExtInfo misa_ext_info_arr[] = {
1399 MISA_EXT_INFO(RVA, "a", "Atomic instructions"),
1400 MISA_EXT_INFO(RVC, "c", "Compressed instructions"),
1401 MISA_EXT_INFO(RVD, "d", "Double-precision float point"),
1402 MISA_EXT_INFO(RVF, "f", "Single-precision float point"),
1403 MISA_EXT_INFO(RVI, "i", "Base integer instruction set"),
1404 MISA_EXT_INFO(RVE, "e", "Base integer instruction set (embedded)"),
1405 MISA_EXT_INFO(RVM, "m", "Integer multiplication and division"),
1406 MISA_EXT_INFO(RVS, "s", "Supervisor-level instructions"),
1407 MISA_EXT_INFO(RVU, "u", "User-level instructions"),
1408 MISA_EXT_INFO(RVH, "h", "Hypervisor"),
1409 MISA_EXT_INFO(RVJ, "x-j", "Dynamic translated languages"),
1410 MISA_EXT_INFO(RVV, "v", "Vector operations"),
1411 MISA_EXT_INFO(RVG, "g", "General purpose (IMAFD_Zicsr_Zifencei)"),
1412 MISA_EXT_INFO(RVB, "b", "Bit manipulation (Zba_Zbb_Zbs)")
1413 };
1414
1415 static void riscv_cpu_validate_misa_mxl(RISCVCPUClass *mcc)
1416 {
1417 CPUClass *cc = CPU_CLASS(mcc);
1418
1419 /* Validate that MISA_MXL is set properly. */
1420 switch (mcc->misa_mxl_max) {
1421 #ifdef TARGET_RISCV64
1422 case MXL_RV64:
1423 case MXL_RV128:
1424 cc->gdb_core_xml_file = "riscv-64bit-cpu.xml";
1425 break;
1426 #endif
1427 case MXL_RV32:
1428 cc->gdb_core_xml_file = "riscv-32bit-cpu.xml";
1429 break;
1430 default:
1431 g_assert_not_reached();
1432 }
1433 }
1434
1435 static int riscv_validate_misa_info_idx(uint32_t bit)
1436 {
1437 int idx;
1438
1439 /*
1440 * Our lowest valid input (RVA) is 1 and
1441 * __builtin_ctz() is UB with zero.
1442 */
1443 g_assert(bit != 0);
1444 idx = MISA_INFO_IDX(bit);
1445
1446 g_assert(idx < ARRAY_SIZE(misa_ext_info_arr));
1447 return idx;
1448 }
1449
1450 const char *riscv_get_misa_ext_name(uint32_t bit)
1451 {
1452 int idx = riscv_validate_misa_info_idx(bit);
1453 const char *val = misa_ext_info_arr[idx].name;
1454
1455 g_assert(val != NULL);
1456 return val;
1457 }
1458
1459 const char *riscv_get_misa_ext_description(uint32_t bit)
1460 {
1461 int idx = riscv_validate_misa_info_idx(bit);
1462 const char *val = misa_ext_info_arr[idx].description;
1463
1464 g_assert(val != NULL);
1465 return val;
1466 }
1467
1468 #define MULTI_EXT_CFG_BOOL(_name, _prop, _defval) \
1469 {.name = _name, .offset = CPU_CFG_OFFSET(_prop), \
1470 .enabled = _defval}
1471
1472 const RISCVCPUMultiExtConfig riscv_cpu_extensions[] = {
1473 /* Defaults for standard extensions */
1474 MULTI_EXT_CFG_BOOL("sscofpmf", ext_sscofpmf, false),
1475 MULTI_EXT_CFG_BOOL("smcntrpmf", ext_smcntrpmf, false),
1476 MULTI_EXT_CFG_BOOL("zifencei", ext_zifencei, true),
1477 MULTI_EXT_CFG_BOOL("zicsr", ext_zicsr, true),
1478 MULTI_EXT_CFG_BOOL("zihintntl", ext_zihintntl, true),
1479 MULTI_EXT_CFG_BOOL("zihintpause", ext_zihintpause, true),
1480 MULTI_EXT_CFG_BOOL("zimop", ext_zimop, false),
1481 MULTI_EXT_CFG_BOOL("zcmop", ext_zcmop, false),
1482 MULTI_EXT_CFG_BOOL("zacas", ext_zacas, false),
1483 MULTI_EXT_CFG_BOOL("zama16b", ext_zama16b, false),
1484 MULTI_EXT_CFG_BOOL("zabha", ext_zabha, false),
1485 MULTI_EXT_CFG_BOOL("zaamo", ext_zaamo, false),
1486 MULTI_EXT_CFG_BOOL("zalrsc", ext_zalrsc, false),
1487 MULTI_EXT_CFG_BOOL("zawrs", ext_zawrs, true),
1488 MULTI_EXT_CFG_BOOL("zfa", ext_zfa, true),
1489 MULTI_EXT_CFG_BOOL("zfbfmin", ext_zfbfmin, false),
1490 MULTI_EXT_CFG_BOOL("zfh", ext_zfh, false),
1491 MULTI_EXT_CFG_BOOL("zfhmin", ext_zfhmin, false),
1492 MULTI_EXT_CFG_BOOL("zve32f", ext_zve32f, false),
1493 MULTI_EXT_CFG_BOOL("zve32x", ext_zve32x, false),
1494 MULTI_EXT_CFG_BOOL("zve64f", ext_zve64f, false),
1495 MULTI_EXT_CFG_BOOL("zve64d", ext_zve64d, false),
1496 MULTI_EXT_CFG_BOOL("zve64x", ext_zve64x, false),
1497 MULTI_EXT_CFG_BOOL("zvfbfmin", ext_zvfbfmin, false),
1498 MULTI_EXT_CFG_BOOL("zvfbfwma", ext_zvfbfwma, false),
1499 MULTI_EXT_CFG_BOOL("zvfh", ext_zvfh, false),
1500 MULTI_EXT_CFG_BOOL("zvfhmin", ext_zvfhmin, false),
1501 MULTI_EXT_CFG_BOOL("sstc", ext_sstc, true),
1502
1503 MULTI_EXT_CFG_BOOL("smaia", ext_smaia, false),
1504 MULTI_EXT_CFG_BOOL("smepmp", ext_smepmp, false),
1505 MULTI_EXT_CFG_BOOL("smstateen", ext_smstateen, false),
1506 MULTI_EXT_CFG_BOOL("ssaia", ext_ssaia, false),
1507 MULTI_EXT_CFG_BOOL("svade", ext_svade, false),
1508 MULTI_EXT_CFG_BOOL("svadu", ext_svadu, true),
1509 MULTI_EXT_CFG_BOOL("svinval", ext_svinval, false),
1510 MULTI_EXT_CFG_BOOL("svnapot", ext_svnapot, false),
1511 MULTI_EXT_CFG_BOOL("svpbmt", ext_svpbmt, false),
1512
1513 MULTI_EXT_CFG_BOOL("zicntr", ext_zicntr, true),
1514 MULTI_EXT_CFG_BOOL("zihpm", ext_zihpm, true),
1515
1516 MULTI_EXT_CFG_BOOL("zba", ext_zba, true),
1517 MULTI_EXT_CFG_BOOL("zbb", ext_zbb, true),
1518 MULTI_EXT_CFG_BOOL("zbc", ext_zbc, true),
1519 MULTI_EXT_CFG_BOOL("zbkb", ext_zbkb, false),
1520 MULTI_EXT_CFG_BOOL("zbkc", ext_zbkc, false),
1521 MULTI_EXT_CFG_BOOL("zbkx", ext_zbkx, false),
1522 MULTI_EXT_CFG_BOOL("zbs", ext_zbs, true),
1523 MULTI_EXT_CFG_BOOL("zk", ext_zk, false),
1524 MULTI_EXT_CFG_BOOL("zkn", ext_zkn, false),
1525 MULTI_EXT_CFG_BOOL("zknd", ext_zknd, false),
1526 MULTI_EXT_CFG_BOOL("zkne", ext_zkne, false),
1527 MULTI_EXT_CFG_BOOL("zknh", ext_zknh, false),
1528 MULTI_EXT_CFG_BOOL("zkr", ext_zkr, false),
1529 MULTI_EXT_CFG_BOOL("zks", ext_zks, false),
1530 MULTI_EXT_CFG_BOOL("zksed", ext_zksed, false),
1531 MULTI_EXT_CFG_BOOL("zksh", ext_zksh, false),
1532 MULTI_EXT_CFG_BOOL("zkt", ext_zkt, false),
1533 MULTI_EXT_CFG_BOOL("ztso", ext_ztso, false),
1534
1535 MULTI_EXT_CFG_BOOL("zdinx", ext_zdinx, false),
1536 MULTI_EXT_CFG_BOOL("zfinx", ext_zfinx, false),
1537 MULTI_EXT_CFG_BOOL("zhinx", ext_zhinx, false),
1538 MULTI_EXT_CFG_BOOL("zhinxmin", ext_zhinxmin, false),
1539
1540 MULTI_EXT_CFG_BOOL("zicbom", ext_zicbom, true),
1541 MULTI_EXT_CFG_BOOL("zicbop", ext_zicbop, true),
1542 MULTI_EXT_CFG_BOOL("zicboz", ext_zicboz, true),
1543
1544 MULTI_EXT_CFG_BOOL("zmmul", ext_zmmul, false),
1545
1546 MULTI_EXT_CFG_BOOL("zca", ext_zca, false),
1547 MULTI_EXT_CFG_BOOL("zcb", ext_zcb, false),
1548 MULTI_EXT_CFG_BOOL("zcd", ext_zcd, false),
1549 MULTI_EXT_CFG_BOOL("zce", ext_zce, false),
1550 MULTI_EXT_CFG_BOOL("zcf", ext_zcf, false),
1551 MULTI_EXT_CFG_BOOL("zcmp", ext_zcmp, false),
1552 MULTI_EXT_CFG_BOOL("zcmt", ext_zcmt, false),
1553 MULTI_EXT_CFG_BOOL("zicond", ext_zicond, false),
1554
1555 /* Vector cryptography extensions */
1556 MULTI_EXT_CFG_BOOL("zvbb", ext_zvbb, false),
1557 MULTI_EXT_CFG_BOOL("zvbc", ext_zvbc, false),
1558 MULTI_EXT_CFG_BOOL("zvkb", ext_zvkb, false),
1559 MULTI_EXT_CFG_BOOL("zvkg", ext_zvkg, false),
1560 MULTI_EXT_CFG_BOOL("zvkned", ext_zvkned, false),
1561 MULTI_EXT_CFG_BOOL("zvknha", ext_zvknha, false),
1562 MULTI_EXT_CFG_BOOL("zvknhb", ext_zvknhb, false),
1563 MULTI_EXT_CFG_BOOL("zvksed", ext_zvksed, false),
1564 MULTI_EXT_CFG_BOOL("zvksh", ext_zvksh, false),
1565 MULTI_EXT_CFG_BOOL("zvkt", ext_zvkt, false),
1566 MULTI_EXT_CFG_BOOL("zvkn", ext_zvkn, false),
1567 MULTI_EXT_CFG_BOOL("zvknc", ext_zvknc, false),
1568 MULTI_EXT_CFG_BOOL("zvkng", ext_zvkng, false),
1569 MULTI_EXT_CFG_BOOL("zvks", ext_zvks, false),
1570 MULTI_EXT_CFG_BOOL("zvksc", ext_zvksc, false),
1571 MULTI_EXT_CFG_BOOL("zvksg", ext_zvksg, false),
1572
1573 DEFINE_PROP_END_OF_LIST(),
1574 };
1575
1576 const RISCVCPUMultiExtConfig riscv_cpu_vendor_exts[] = {
1577 MULTI_EXT_CFG_BOOL("xtheadba", ext_xtheadba, false),
1578 MULTI_EXT_CFG_BOOL("xtheadbb", ext_xtheadbb, false),
1579 MULTI_EXT_CFG_BOOL("xtheadbs", ext_xtheadbs, false),
1580 MULTI_EXT_CFG_BOOL("xtheadcmo", ext_xtheadcmo, false),
1581 MULTI_EXT_CFG_BOOL("xtheadcondmov", ext_xtheadcondmov, false),
1582 MULTI_EXT_CFG_BOOL("xtheadfmemidx", ext_xtheadfmemidx, false),
1583 MULTI_EXT_CFG_BOOL("xtheadfmv", ext_xtheadfmv, false),
1584 MULTI_EXT_CFG_BOOL("xtheadmac", ext_xtheadmac, false),
1585 MULTI_EXT_CFG_BOOL("xtheadmemidx", ext_xtheadmemidx, false),
1586 MULTI_EXT_CFG_BOOL("xtheadmempair", ext_xtheadmempair, false),
1587 MULTI_EXT_CFG_BOOL("xtheadsync", ext_xtheadsync, false),
1588 MULTI_EXT_CFG_BOOL("xventanacondops", ext_XVentanaCondOps, false),
1589
1590 DEFINE_PROP_END_OF_LIST(),
1591 };
1592
1593 /* These are experimental so mark with 'x-' */
1594 const RISCVCPUMultiExtConfig riscv_cpu_experimental_exts[] = {
1595 DEFINE_PROP_END_OF_LIST(),
1596 };
1597
1598 /*
1599 * 'Named features' is the name we give to extensions that we
1600 * don't want to expose to users. They are either immutable
1601  * (always enabled/disabled) or they'll vary depending on
1602 * the resulting CPU state. They have riscv,isa strings
1603 * and priv_ver like regular extensions.
1604 */
1605 const RISCVCPUMultiExtConfig riscv_cpu_named_features[] = {
1606 MULTI_EXT_CFG_BOOL("zic64b", ext_zic64b, true),
1607
1608 DEFINE_PROP_END_OF_LIST(),
1609 };
1610
1611 /* Deprecated entries marked for future removal */
1612 const RISCVCPUMultiExtConfig riscv_cpu_deprecated_exts[] = {
1613 MULTI_EXT_CFG_BOOL("Zifencei", ext_zifencei, true),
1614 MULTI_EXT_CFG_BOOL("Zicsr", ext_zicsr, true),
1615 MULTI_EXT_CFG_BOOL("Zihintntl", ext_zihintntl, true),
1616 MULTI_EXT_CFG_BOOL("Zihintpause", ext_zihintpause, true),
1617 MULTI_EXT_CFG_BOOL("Zawrs", ext_zawrs, true),
1618 MULTI_EXT_CFG_BOOL("Zfa", ext_zfa, true),
1619 MULTI_EXT_CFG_BOOL("Zfh", ext_zfh, false),
1620 MULTI_EXT_CFG_BOOL("Zfhmin", ext_zfhmin, false),
1621 MULTI_EXT_CFG_BOOL("Zve32f", ext_zve32f, false),
1622 MULTI_EXT_CFG_BOOL("Zve64f", ext_zve64f, false),
1623 MULTI_EXT_CFG_BOOL("Zve64d", ext_zve64d, false),
1624
1625 DEFINE_PROP_END_OF_LIST(),
1626 };
1627
1628 static void cpu_set_prop_err(RISCVCPU *cpu, const char *propname,
1629 Error **errp)
1630 {
1631 g_autofree char *cpuname = riscv_cpu_get_name(cpu);
1632 error_setg(errp, "CPU '%s' does not allow changing the value of '%s'",
1633 cpuname, propname);
1634 }
1635
1636 static void prop_pmu_num_set(Object *obj, Visitor *v, const char *name,
1637 void *opaque, Error **errp)
1638 {
1639 RISCVCPU *cpu = RISCV_CPU(obj);
1640 uint8_t pmu_num, curr_pmu_num;
1641 uint32_t pmu_mask;
1642
1643 visit_type_uint8(v, name, &pmu_num, errp);
1644
1645 curr_pmu_num = ctpop32(cpu->cfg.pmu_mask);
1646
1647 if (pmu_num != curr_pmu_num && riscv_cpu_is_vendor(obj)) {
1648 cpu_set_prop_err(cpu, name, errp);
1649 error_append_hint(errp, "Current '%s' val: %u\n",
1650 name, curr_pmu_num);
1651 return;
1652 }
1653
1654 if (pmu_num > (RV_MAX_MHPMCOUNTERS - 3)) {
1655 error_setg(errp, "Number of counters exceeds maximum available");
1656 return;
1657 }
1658
1659 if (pmu_num == 0) {
1660 pmu_mask = 0;
1661 } else {
1662 pmu_mask = MAKE_64BIT_MASK(3, pmu_num);
1663 }
1664
1665 warn_report("\"pmu-num\" property is deprecated; use \"pmu-mask\"");
1666 cpu->cfg.pmu_mask = pmu_mask;
1667 cpu_option_add_user_setting("pmu-mask", pmu_mask);
1668 }
1669
1670 static void prop_pmu_num_get(Object *obj, Visitor *v, const char *name,
1671 void *opaque, Error **errp)
1672 {
1673 RISCVCPU *cpu = RISCV_CPU(obj);
1674 uint8_t pmu_num = ctpop32(cpu->cfg.pmu_mask);
1675
1676 visit_type_uint8(v, name, &pmu_num, errp);
1677 }
1678
1679 static const PropertyInfo prop_pmu_num = {
1680 .name = "pmu-num",
1681 .get = prop_pmu_num_get,
1682 .set = prop_pmu_num_set,
1683 };
1684
1685 static void prop_pmu_mask_set(Object *obj, Visitor *v, const char *name,
1686 void *opaque, Error **errp)
1687 {
1688 RISCVCPU *cpu = RISCV_CPU(obj);
1689 uint32_t value;
1690 uint8_t pmu_num;
1691
1692 if (!visit_type_uint32(v, name, &value, errp)) {
    return;
}
1693
1694 if (value != cpu->cfg.pmu_mask && riscv_cpu_is_vendor(obj)) {
1695 cpu_set_prop_err(cpu, name, errp);
1696 error_append_hint(errp, "Current '%s' val: %x\n",
1697 name, cpu->cfg.pmu_mask);
1698 return;
1699 }
1700
1701 pmu_num = ctpop32(value);
1702
1703 if (pmu_num > (RV_MAX_MHPMCOUNTERS - 3)) {
1704 error_setg(errp, "Number of counters exceeds maximum available");
1705 return;
1706 }
1707
1708 cpu_option_add_user_setting(name, value);
1709 cpu->cfg.pmu_mask = value;
1710 }
1711
1712 static void prop_pmu_mask_get(Object *obj, Visitor *v, const char *name,
1713 void *opaque, Error **errp)
1714 {
1715 uint32_t pmu_mask = RISCV_CPU(obj)->cfg.pmu_mask;
1716
1717 visit_type_uint32(v, name, &pmu_mask, errp);
1718 }
1719
1720 static const PropertyInfo prop_pmu_mask = {
1721 .name = "pmu-mask",
1722 .get = prop_pmu_mask_get,
1723 .set = prop_pmu_mask_set,
1724 };
1725
1726 static void prop_mmu_set(Object *obj, Visitor *v, const char *name,
1727 void *opaque, Error **errp)
1728 {
1729 RISCVCPU *cpu = RISCV_CPU(obj);
1730 bool value;
1731
1732 if (!visit_type_bool(v, name, &value, errp)) {
    return;
}
1733
1734 if (cpu->cfg.mmu != value && riscv_cpu_is_vendor(obj)) {
1735 cpu_set_prop_err(cpu, "mmu", errp);
1736 return;
1737 }
1738
1739 cpu_option_add_user_setting(name, value);
1740 cpu->cfg.mmu = value;
1741 }
1742
1743 static void prop_mmu_get(Object *obj, Visitor *v, const char *name,
1744 void *opaque, Error **errp)
1745 {
1746 bool value = RISCV_CPU(obj)->cfg.mmu;
1747
1748 visit_type_bool(v, name, &value, errp);
1749 }
1750
1751 static const PropertyInfo prop_mmu = {
1752 .name = "mmu",
1753 .get = prop_mmu_get,
1754 .set = prop_mmu_set,
1755 };
1756
1757 static void prop_pmp_set(Object *obj, Visitor *v, const char *name,
1758 void *opaque, Error **errp)
1759 {
1760 RISCVCPU *cpu = RISCV_CPU(obj);
1761 bool value;
1762
1763 if (!visit_type_bool(v, name, &value, errp)) {
    return;
}
1764
1765 if (cpu->cfg.pmp != value && riscv_cpu_is_vendor(obj)) {
1766 cpu_set_prop_err(cpu, name, errp);
1767 return;
1768 }
1769
1770 cpu_option_add_user_setting(name, value);
1771 cpu->cfg.pmp = value;
1772 }
1773
1774 static void prop_pmp_get(Object *obj, Visitor *v, const char *name,
1775 void *opaque, Error **errp)
1776 {
1777 bool value = RISCV_CPU(obj)->cfg.pmp;
1778
1779 visit_type_bool(v, name, &value, errp);
1780 }
1781
1782 static const PropertyInfo prop_pmp = {
1783 .name = "pmp",
1784 .get = prop_pmp_get,
1785 .set = prop_pmp_set,
1786 };
1787
1788 static int priv_spec_from_str(const char *priv_spec_str)
1789 {
1790 int priv_version = -1;
1791
1792 if (!g_strcmp0(priv_spec_str, PRIV_VER_1_13_0_STR)) {
1793 priv_version = PRIV_VERSION_1_13_0;
1794 } else if (!g_strcmp0(priv_spec_str, PRIV_VER_1_12_0_STR)) {
1795 priv_version = PRIV_VERSION_1_12_0;
1796 } else if (!g_strcmp0(priv_spec_str, PRIV_VER_1_11_0_STR)) {
1797 priv_version = PRIV_VERSION_1_11_0;
1798 } else if (!g_strcmp0(priv_spec_str, PRIV_VER_1_10_0_STR)) {
1799 priv_version = PRIV_VERSION_1_10_0;
1800 }
1801
1802 return priv_version;
1803 }
1804
1805 const char *priv_spec_to_str(int priv_version)
1806 {
1807 switch (priv_version) {
1808 case PRIV_VERSION_1_10_0:
1809 return PRIV_VER_1_10_0_STR;
1810 case PRIV_VERSION_1_11_0:
1811 return PRIV_VER_1_11_0_STR;
1812 case PRIV_VERSION_1_12_0:
1813 return PRIV_VER_1_12_0_STR;
1814 case PRIV_VERSION_1_13_0:
1815 return PRIV_VER_1_13_0_STR;
1816 default:
1817 return NULL;
1818 }
1819 }
1820
1821 static void prop_priv_spec_set(Object *obj, Visitor *v, const char *name,
1822 void *opaque, Error **errp)
1823 {
1824 RISCVCPU *cpu = RISCV_CPU(obj);
1825 g_autofree char *value = NULL;
1826 int priv_version = -1;
1827
1828 if (!visit_type_str(v, name, &value, errp)) {
    return;
}
1829
1830 priv_version = priv_spec_from_str(value);
1831 if (priv_version < 0) {
1832 error_setg(errp, "Unsupported privilege spec version '%s'", value);
1833 return;
1834 }
1835
1836 if (priv_version != cpu->env.priv_ver && riscv_cpu_is_vendor(obj)) {
1837 cpu_set_prop_err(cpu, name, errp);
1838 error_append_hint(errp, "Current '%s' val: %s\n", name,
1839 object_property_get_str(obj, name, NULL));
1840 return;
1841 }
1842
1843 cpu_option_add_user_setting(name, priv_version);
1844 cpu->env.priv_ver = priv_version;
1845 }
1846
1847 static void prop_priv_spec_get(Object *obj, Visitor *v, const char *name,
1848 void *opaque, Error **errp)
1849 {
1850 RISCVCPU *cpu = RISCV_CPU(obj);
1851 const char *value = priv_spec_to_str(cpu->env.priv_ver);
1852
1853 visit_type_str(v, name, (char **)&value, errp);
1854 }
1855
1856 static const PropertyInfo prop_priv_spec = {
1857 .name = "priv_spec",
1858 .get = prop_priv_spec_get,
1859 .set = prop_priv_spec_set,
1860 };
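/*
 * Usage sketch, assuming the PRIV_VER_*_STR macros expand to strings of
 * the "v1.12.0" form:
 *
 *   -cpu rv64,priv_spec=v1.12.0
 *
 * Any string not recognised by priv_spec_from_str() above is rejected.
 */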
1861
1862 static void prop_vext_spec_set(Object *obj, Visitor *v, const char *name,
1863 void *opaque, Error **errp)
1864 {
1865 RISCVCPU *cpu = RISCV_CPU(obj);
1866 g_autofree char *value = NULL;
1867
1868 if (!visit_type_str(v, name, &value, errp)) {
    return;
}
1869
1870 if (g_strcmp0(value, VEXT_VER_1_00_0_STR) != 0) {
1871 error_setg(errp, "Unsupported vector spec version '%s'", value);
1872 return;
1873 }
1874
1875 cpu_option_add_user_setting(name, VEXT_VERSION_1_00_0);
1876 cpu->env.vext_ver = VEXT_VERSION_1_00_0;
1877 }
1878
1879 static void prop_vext_spec_get(Object *obj, Visitor *v, const char *name,
1880 void *opaque, Error **errp)
1881 {
1882 const char *value = VEXT_VER_1_00_0_STR;
1883
1884 visit_type_str(v, name, (char **)&value, errp);
1885 }
1886
1887 static const PropertyInfo prop_vext_spec = {
1888 .name = "vext_spec",
1889 .get = prop_vext_spec_get,
1890 .set = prop_vext_spec_set,
1891 };
1892
1893 static void prop_vlen_set(Object *obj, Visitor *v, const char *name,
1894 void *opaque, Error **errp)
1895 {
1896 RISCVCPU *cpu = RISCV_CPU(obj);
1897 uint16_t value;
1898
1899 if (!visit_type_uint16(v, name, &value, errp)) {
1900 return;
1901 }
1902
1903 if (!is_power_of_2(value)) {
1904 error_setg(errp, "Vector extension VLEN must be power of 2");
1905 return;
1906 }
1907
1908 if (value != (cpu->cfg.vlenb << 3) && riscv_cpu_is_vendor(obj)) {
1909 cpu_set_prop_err(cpu, name, errp);
1910 error_append_hint(errp, "Current '%s' val: %u\n",
1911 name, cpu->cfg.vlenb << 3);
1912 return;
1913 }
1914
1915 cpu_option_add_user_setting(name, value);
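/* The property takes VLEN in bits; cfg.vlenb stores VLEN in bytes. */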
1916 cpu->cfg.vlenb = value >> 3;
1917 }
1918
1919 static void prop_vlen_get(Object *obj, Visitor *v, const char *name,
1920 void *opaque, Error **errp)
1921 {
1922 uint16_t value = RISCV_CPU(obj)->cfg.vlenb << 3;
1923
1924 visit_type_uint16(v, name, &value, errp);
1925 }
1926
1927 static const PropertyInfo prop_vlen = {
1928 .name = "vlen",
1929 .get = prop_vlen_get,
1930 .set = prop_vlen_set,
1931 };
1932
1933 static void prop_elen_set(Object *obj, Visitor *v, const char *name,
1934 void *opaque, Error **errp)
1935 {
1936 RISCVCPU *cpu = RISCV_CPU(obj);
1937 uint16_t value;
1938
1939 if (!visit_type_uint16(v, name, &value, errp)) {
1940 return;
1941 }
1942
1943 if (!is_power_of_2(value)) {
1944 error_setg(errp, "Vector extension ELEN must be power of 2");
1945 return;
1946 }
1947
1948 if (value != cpu->cfg.elen && riscv_cpu_is_vendor(obj)) {
1949 cpu_set_prop_err(cpu, name, errp);
1950 error_append_hint(errp, "Current '%s' val: %u\n",
1951 name, cpu->cfg.elen);
1952 return;
1953 }
1954
1955 cpu_option_add_user_setting(name, value);
1956 cpu->cfg.elen = value;
1957 }
1958
1959 static void prop_elen_get(Object *obj, Visitor *v, const char *name,
1960 void *opaque, Error **errp)
1961 {
1962 uint16_t value = RISCV_CPU(obj)->cfg.elen;
1963
1964 visit_type_uint16(v, name, &value, errp);
1965 }
1966
1967 static const PropertyInfo prop_elen = {
1968 .name = "elen",
1969 .get = prop_elen_get,
1970 .set = prop_elen_set,
1971 };
1972
1973 static void prop_cbom_blksize_set(Object *obj, Visitor *v, const char *name,
1974 void *opaque, Error **errp)
1975 {
1976 RISCVCPU *cpu = RISCV_CPU(obj);
1977 uint16_t value;
1978
1979 if (!visit_type_uint16(v, name, &value, errp)) {
1980 return;
1981 }
1982
1983 if (value != cpu->cfg.cbom_blocksize && riscv_cpu_is_vendor(obj)) {
1984 cpu_set_prop_err(cpu, name, errp);
1985 error_append_hint(errp, "Current '%s' val: %u\n",
1986 name, cpu->cfg.cbom_blocksize);
1987 return;
1988 }
1989
1990 cpu_option_add_user_setting(name, value);
1991 cpu->cfg.cbom_blocksize = value;
1992 }
1993
1994 static void prop_cbom_blksize_get(Object *obj, Visitor *v, const char *name,
1995 void *opaque, Error **errp)
1996 {
1997 uint16_t value = RISCV_CPU(obj)->cfg.cbom_blocksize;
1998
1999 visit_type_uint16(v, name, &value, errp);
2000 }
2001
2002 static const PropertyInfo prop_cbom_blksize = {
2003 .name = "cbom_blocksize",
2004 .get = prop_cbom_blksize_get,
2005 .set = prop_cbom_blksize_set,
2006 };
2007
2008 static void prop_cbop_blksize_set(Object *obj, Visitor *v, const char *name,
2009 void *opaque, Error **errp)
2010 {
2011 RISCVCPU *cpu = RISCV_CPU(obj);
2012 uint16_t value;
2013
2014 if (!visit_type_uint16(v, name, &value, errp)) {
2015 return;
2016 }
2017
2018 if (value != cpu->cfg.cbop_blocksize && riscv_cpu_is_vendor(obj)) {
2019 cpu_set_prop_err(cpu, name, errp);
2020 error_append_hint(errp, "Current '%s' val: %u\n",
2021 name, cpu->cfg.cbop_blocksize);
2022 return;
2023 }
2024
2025 cpu_option_add_user_setting(name, value);
2026 cpu->cfg.cbop_blocksize = value;
2027 }
2028
2029 static void prop_cbop_blksize_get(Object *obj, Visitor *v, const char *name,
2030 void *opaque, Error **errp)
2031 {
2032 uint16_t value = RISCV_CPU(obj)->cfg.cbop_blocksize;
2033
2034 visit_type_uint16(v, name, &value, errp);
2035 }
2036
2037 static const PropertyInfo prop_cbop_blksize = {
2038 .name = "cbop_blocksize",
2039 .get = prop_cbop_blksize_get,
2040 .set = prop_cbop_blksize_set,
2041 };
2042
2043 static void prop_cboz_blksize_set(Object *obj, Visitor *v, const char *name,
2044 void *opaque, Error **errp)
2045 {
2046 RISCVCPU *cpu = RISCV_CPU(obj);
2047 uint16_t value;
2048
2049 if (!visit_type_uint16(v, name, &value, errp)) {
2050 return;
2051 }
2052
2053 if (value != cpu->cfg.cboz_blocksize && riscv_cpu_is_vendor(obj)) {
2054 cpu_set_prop_err(cpu, name, errp);
2055 error_append_hint(errp, "Current '%s' val: %u\n",
2056 name, cpu->cfg.cboz_blocksize);
2057 return;
2058 }
2059
2060 cpu_option_add_user_setting(name, value);
2061 cpu->cfg.cboz_blocksize = value;
2062 }
2063
2064 static void prop_cboz_blksize_get(Object *obj, Visitor *v, const char *name,
2065 void *opaque, Error **errp)
2066 {
2067 uint16_t value = RISCV_CPU(obj)->cfg.cboz_blocksize;
2068
2069 visit_type_uint16(v, name, &value, errp);
2070 }
2071
2072 static const PropertyInfo prop_cboz_blksize = {
2073 .name = "cboz_blocksize",
2074 .get = prop_cboz_blksize_get,
2075 .set = prop_cboz_blksize_set,
2076 };
2077
2078 static void prop_mvendorid_set(Object *obj, Visitor *v, const char *name,
2079 void *opaque, Error **errp)
2080 {
2081 bool dynamic_cpu = riscv_cpu_is_dynamic(obj);
2082 RISCVCPU *cpu = RISCV_CPU(obj);
2083 uint32_t prev_val = cpu->cfg.mvendorid;
2084 uint32_t value;
2085
2086 if (!visit_type_uint32(v, name, &value, errp)) {
2087 return;
2088 }
2089
2090 if (!dynamic_cpu && prev_val != value) {
2091 error_setg(errp, "Unable to change %s mvendorid (0x%x)",
2092 object_get_typename(obj), prev_val);
2093 return;
2094 }
2095
2096 cpu->cfg.mvendorid = value;
2097 }
2098
2099 static void prop_mvendorid_get(Object *obj, Visitor *v, const char *name,
2100 void *opaque, Error **errp)
2101 {
2102 uint32_t value = RISCV_CPU(obj)->cfg.mvendorid;
2103
2104 visit_type_uint32(v, name, &value, errp);
2105 }
2106
2107 static const PropertyInfo prop_mvendorid = {
2108 .name = "mvendorid",
2109 .get = prop_mvendorid_get,
2110 .set = prop_mvendorid_set,
2111 };
2112
2113 static void prop_mimpid_set(Object *obj, Visitor *v, const char *name,
2114 void *opaque, Error **errp)
2115 {
2116 bool dynamic_cpu = riscv_cpu_is_dynamic(obj);
2117 RISCVCPU *cpu = RISCV_CPU(obj);
2118 uint64_t prev_val = cpu->cfg.mimpid;
2119 uint64_t value;
2120
2121 if (!visit_type_uint64(v, name, &value, errp)) {
2122 return;
2123 }
2124
2125 if (!dynamic_cpu && prev_val != value) {
2126 error_setg(errp, "Unable to change %s mimpid (0x%" PRIx64 ")",
2127 object_get_typename(obj), prev_val);
2128 return;
2129 }
2130
2131 cpu->cfg.mimpid = value;
2132 }
2133
2134 static void prop_mimpid_get(Object *obj, Visitor *v, const char *name,
2135 void *opaque, Error **errp)
2136 {
2137 uint64_t value = RISCV_CPU(obj)->cfg.mimpid;
2138
2139 visit_type_uint64(v, name, &value, errp);
2140 }
2141
2142 static const PropertyInfo prop_mimpid = {
2143 .name = "mimpid",
2144 .get = prop_mimpid_get,
2145 .set = prop_mimpid_set,
2146 };
2147
2148 static void prop_marchid_set(Object *obj, Visitor *v, const char *name,
2149 void *opaque, Error **errp)
2150 {
2151 bool dynamic_cpu = riscv_cpu_is_dynamic(obj);
2152 RISCVCPU *cpu = RISCV_CPU(obj);
2153 uint64_t prev_val = cpu->cfg.marchid;
2154 uint64_t value, invalid_val;
2155 uint32_t mxlen = 0;
2156
2157 if (!visit_type_uint64(v, name, &value, errp)) {
2158 return;
2159 }
2160
2161 if (!dynamic_cpu && prev_val != value) {
2162 error_setg(errp, "Unable to change %s marchid (0x%" PRIx64 ")",
2163 object_get_typename(obj), prev_val);
2164 return;
2165 }
2166
2167 switch (riscv_cpu_mxl(&cpu->env)) {
2168 case MXL_RV32:
2169 mxlen = 32;
2170 break;
2171 case MXL_RV64:
2172 case MXL_RV128:
2173 mxlen = 64;
2174 break;
2175 default:
2176 g_assert_not_reached();
2177 }
2178
2179 invalid_val = 1ULL << (mxlen - 1);
2180
2181 if (value == invalid_val) {
2182 error_setg(errp, "Unable to set marchid with MSB (%u) bit set "
2183 "and the remaining bits zero", mxlen);
2184 return;
2185 }
2186
2187 cpu->cfg.marchid = value;
2188 }
2189
2190 static void prop_marchid_get(Object *obj, Visitor *v, const char *name,
2191 void *opaque, Error **errp)
2192 {
2193 uint64_t value = RISCV_CPU(obj)->cfg.marchid;
2194
2195 visit_type_uint64(v, name, &value, errp);
2196 }
2197
2198 static const PropertyInfo prop_marchid = {
2199 .name = "marchid",
2200 .get = prop_marchid_get,
2201 .set = prop_marchid_set,
2202 };
2203
2204 /*
2205 * RVA22U64 defines some cache-related 'named features': Za64rs,
2206 * Zic64b, Ziccif, Ziccrse, Ziccamoa and Zicclsm. They are always
2207 * implemented in TCG and don't need to be manually enabled by
2208 * the profile.
2209 */
2210 static RISCVCPUProfile RVA22U64 = {
2211 .parent = NULL,
2212 .name = "rva22u64",
2213 .misa_ext = RVI | RVM | RVA | RVF | RVD | RVC | RVU,
2214 .priv_spec = RISCV_PROFILE_ATTR_UNUSED,
2215 .satp_mode = RISCV_PROFILE_ATTR_UNUSED,
2216 .ext_offsets = {
2217 CPU_CFG_OFFSET(ext_zicsr), CPU_CFG_OFFSET(ext_zihintpause),
2218 CPU_CFG_OFFSET(ext_zba), CPU_CFG_OFFSET(ext_zbb),
2219 CPU_CFG_OFFSET(ext_zbs), CPU_CFG_OFFSET(ext_zfhmin),
2220 CPU_CFG_OFFSET(ext_zkt), CPU_CFG_OFFSET(ext_zicntr),
2221 CPU_CFG_OFFSET(ext_zihpm), CPU_CFG_OFFSET(ext_zicbom),
2222 CPU_CFG_OFFSET(ext_zicbop), CPU_CFG_OFFSET(ext_zicboz),
2223
2224 /* mandatory named features for this profile */
2225 CPU_CFG_OFFSET(ext_zic64b),
2226
2227 RISCV_PROFILE_EXT_LIST_END
2228 }
2229 };
2230
2231 /*
2232 * As with RVA22U64, RVA22S64 also defines 'named features'.
2233 *
2234 * Cache-related features that we consider enabled since we don't
2235 * implement a cache: Ssccptr
2236 *
2237 * Other named features that we already implement: Sstvecd, Sstvala,
2238 * Sscounterenw
2239 *
2240 * The remaining features/extensions come from RVA22U64.
2241 */
2242 static RISCVCPUProfile RVA22S64 = {
2243 .parent = &RVA22U64,
2244 .name = "rva22s64",
2245 .misa_ext = RVS,
2246 .priv_spec = PRIV_VERSION_1_12_0,
2247 .satp_mode = VM_1_10_SV39,
2248 .ext_offsets = {
2249 /* rva22s64 exts */
2250 CPU_CFG_OFFSET(ext_zifencei), CPU_CFG_OFFSET(ext_svpbmt),
2251 CPU_CFG_OFFSET(ext_svinval), CPU_CFG_OFFSET(ext_svade),
2252
2253 RISCV_PROFILE_EXT_LIST_END
2254 }
2255 };
2256
2257 RISCVCPUProfile *riscv_profiles[] = {
2258 &RVA22U64,
2259 &RVA22S64,
2260 NULL,
2261 };
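/*
 * Usage example: these profiles back the rva22u64/rva22s64 CPU models
 * registered via DEFINE_PROFILE_CPU() at the end of this file, e.g.
 *
 *   -cpu rva22s64
 *
 * which enables everything the profile mandates on top of rv64i.
 */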
2262
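/*
 * Implied extension rules: when the extension identified by '.ext'
 * (a MISA bit if '.is_misa' is set, otherwise a multi-letter extension
 * offset) is enabled, the MISA bits in '.implied_misa_exts' and the
 * extensions in '.implied_multi_exts' are enabled as well. The tables
 * below only encode the dependencies; they are consumed elsewhere via
 * the riscv_*_implied_rules[] arrays.
 */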
2263 static RISCVCPUImpliedExtsRule RVA_IMPLIED = {
2264 .is_misa = true,
2265 .ext = RVA,
2266 .implied_multi_exts = {
2267 CPU_CFG_OFFSET(ext_zalrsc), CPU_CFG_OFFSET(ext_zaamo),
2268
2269 RISCV_IMPLIED_EXTS_RULE_END
2270 },
2271 };
2272
2273 static RISCVCPUImpliedExtsRule RVD_IMPLIED = {
2274 .is_misa = true,
2275 .ext = RVD,
2276 .implied_misa_exts = RVF,
2277 .implied_multi_exts = { RISCV_IMPLIED_EXTS_RULE_END },
2278 };
2279
2280 static RISCVCPUImpliedExtsRule RVF_IMPLIED = {
2281 .is_misa = true,
2282 .ext = RVF,
2283 .implied_multi_exts = {
2284 CPU_CFG_OFFSET(ext_zicsr),
2285
2286 RISCV_IMPLIED_EXTS_RULE_END
2287 },
2288 };
2289
2290 static RISCVCPUImpliedExtsRule RVM_IMPLIED = {
2291 .is_misa = true,
2292 .ext = RVM,
2293 .implied_multi_exts = {
2294 CPU_CFG_OFFSET(ext_zmmul),
2295
2296 RISCV_IMPLIED_EXTS_RULE_END
2297 },
2298 };
2299
2300 static RISCVCPUImpliedExtsRule RVV_IMPLIED = {
2301 .is_misa = true,
2302 .ext = RVV,
2303 .implied_multi_exts = {
2304 CPU_CFG_OFFSET(ext_zve64d),
2305
2306 RISCV_IMPLIED_EXTS_RULE_END
2307 },
2308 };
2309
2310 static RISCVCPUImpliedExtsRule ZCB_IMPLIED = {
2311 .ext = CPU_CFG_OFFSET(ext_zcb),
2312 .implied_multi_exts = {
2313 CPU_CFG_OFFSET(ext_zca),
2314
2315 RISCV_IMPLIED_EXTS_RULE_END
2316 },
2317 };
2318
2319 static RISCVCPUImpliedExtsRule ZCD_IMPLIED = {
2320 .ext = CPU_CFG_OFFSET(ext_zcd),
2321 .implied_misa_exts = RVD,
2322 .implied_multi_exts = {
2323 CPU_CFG_OFFSET(ext_zca),
2324
2325 RISCV_IMPLIED_EXTS_RULE_END
2326 },
2327 };
2328
2329 static RISCVCPUImpliedExtsRule ZCE_IMPLIED = {
2330 .ext = CPU_CFG_OFFSET(ext_zce),
2331 .implied_multi_exts = {
2332 CPU_CFG_OFFSET(ext_zcb), CPU_CFG_OFFSET(ext_zcmp),
2333 CPU_CFG_OFFSET(ext_zcmt),
2334
2335 RISCV_IMPLIED_EXTS_RULE_END
2336 },
2337 };
2338
2339 static RISCVCPUImpliedExtsRule ZCF_IMPLIED = {
2340 .ext = CPU_CFG_OFFSET(ext_zcf),
2341 .implied_misa_exts = RVF,
2342 .implied_multi_exts = {
2343 CPU_CFG_OFFSET(ext_zca),
2344
2345 RISCV_IMPLIED_EXTS_RULE_END
2346 },
2347 };
2348
2349 static RISCVCPUImpliedExtsRule ZCMP_IMPLIED = {
2350 .ext = CPU_CFG_OFFSET(ext_zcmp),
2351 .implied_multi_exts = {
2352 CPU_CFG_OFFSET(ext_zca),
2353
2354 RISCV_IMPLIED_EXTS_RULE_END
2355 },
2356 };
2357
2358 static RISCVCPUImpliedExtsRule ZCMT_IMPLIED = {
2359 .ext = CPU_CFG_OFFSET(ext_zcmt),
2360 .implied_multi_exts = {
2361 CPU_CFG_OFFSET(ext_zca), CPU_CFG_OFFSET(ext_zicsr),
2362
2363 RISCV_IMPLIED_EXTS_RULE_END
2364 },
2365 };
2366
2367 static RISCVCPUImpliedExtsRule ZDINX_IMPLIED = {
2368 .ext = CPU_CFG_OFFSET(ext_zdinx),
2369 .implied_multi_exts = {
2370 CPU_CFG_OFFSET(ext_zfinx),
2371
2372 RISCV_IMPLIED_EXTS_RULE_END
2373 },
2374 };
2375
2376 static RISCVCPUImpliedExtsRule ZFA_IMPLIED = {
2377 .ext = CPU_CFG_OFFSET(ext_zfa),
2378 .implied_misa_exts = RVF,
2379 .implied_multi_exts = { RISCV_IMPLIED_EXTS_RULE_END },
2380 };
2381
2382 static RISCVCPUImpliedExtsRule ZFBFMIN_IMPLIED = {
2383 .ext = CPU_CFG_OFFSET(ext_zfbfmin),
2384 .implied_misa_exts = RVF,
2385 .implied_multi_exts = { RISCV_IMPLIED_EXTS_RULE_END },
2386 };
2387
2388 static RISCVCPUImpliedExtsRule ZFH_IMPLIED = {
2389 .ext = CPU_CFG_OFFSET(ext_zfh),
2390 .implied_multi_exts = {
2391 CPU_CFG_OFFSET(ext_zfhmin),
2392
2393 RISCV_IMPLIED_EXTS_RULE_END
2394 },
2395 };
2396
2397 static RISCVCPUImpliedExtsRule ZFHMIN_IMPLIED = {
2398 .ext = CPU_CFG_OFFSET(ext_zfhmin),
2399 .implied_misa_exts = RVF,
2400 .implied_multi_exts = { RISCV_IMPLIED_EXTS_RULE_END },
2401 };
2402
2403 static RISCVCPUImpliedExtsRule ZFINX_IMPLIED = {
2404 .ext = CPU_CFG_OFFSET(ext_zfinx),
2405 .implied_multi_exts = {
2406 CPU_CFG_OFFSET(ext_zicsr),
2407
2408 RISCV_IMPLIED_EXTS_RULE_END
2409 },
2410 };
2411
2412 static RISCVCPUImpliedExtsRule ZHINX_IMPLIED = {
2413 .ext = CPU_CFG_OFFSET(ext_zhinx),
2414 .implied_multi_exts = {
2415 CPU_CFG_OFFSET(ext_zhinxmin),
2416
2417 RISCV_IMPLIED_EXTS_RULE_END
2418 },
2419 };
2420
2421 static RISCVCPUImpliedExtsRule ZHINXMIN_IMPLIED = {
2422 .ext = CPU_CFG_OFFSET(ext_zhinxmin),
2423 .implied_multi_exts = {
2424 CPU_CFG_OFFSET(ext_zfinx),
2425
2426 RISCV_IMPLIED_EXTS_RULE_END
2427 },
2428 };
2429
2430 static RISCVCPUImpliedExtsRule ZICNTR_IMPLIED = {
2431 .ext = CPU_CFG_OFFSET(ext_zicntr),
2432 .implied_multi_exts = {
2433 CPU_CFG_OFFSET(ext_zicsr),
2434
2435 RISCV_IMPLIED_EXTS_RULE_END
2436 },
2437 };
2438
2439 static RISCVCPUImpliedExtsRule ZIHPM_IMPLIED = {
2440 .ext = CPU_CFG_OFFSET(ext_zihpm),
2441 .implied_multi_exts = {
2442 CPU_CFG_OFFSET(ext_zicsr),
2443
2444 RISCV_IMPLIED_EXTS_RULE_END
2445 },
2446 };
2447
2448 static RISCVCPUImpliedExtsRule ZK_IMPLIED = {
2449 .ext = CPU_CFG_OFFSET(ext_zk),
2450 .implied_multi_exts = {
2451 CPU_CFG_OFFSET(ext_zkn), CPU_CFG_OFFSET(ext_zkr),
2452 CPU_CFG_OFFSET(ext_zkt),
2453
2454 RISCV_IMPLIED_EXTS_RULE_END
2455 },
2456 };
2457
2458 static RISCVCPUImpliedExtsRule ZKN_IMPLIED = {
2459 .ext = CPU_CFG_OFFSET(ext_zkn),
2460 .implied_multi_exts = {
2461 CPU_CFG_OFFSET(ext_zbkb), CPU_CFG_OFFSET(ext_zbkc),
2462 CPU_CFG_OFFSET(ext_zbkx), CPU_CFG_OFFSET(ext_zkne),
2463 CPU_CFG_OFFSET(ext_zknd), CPU_CFG_OFFSET(ext_zknh),
2464
2465 RISCV_IMPLIED_EXTS_RULE_END
2466 },
2467 };
2468
2469 static RISCVCPUImpliedExtsRule ZKS_IMPLIED = {
2470 .ext = CPU_CFG_OFFSET(ext_zks),
2471 .implied_multi_exts = {
2472 CPU_CFG_OFFSET(ext_zbkb), CPU_CFG_OFFSET(ext_zbkc),
2473 CPU_CFG_OFFSET(ext_zbkx), CPU_CFG_OFFSET(ext_zksed),
2474 CPU_CFG_OFFSET(ext_zksh),
2475
2476 RISCV_IMPLIED_EXTS_RULE_END
2477 },
2478 };
2479
2480 static RISCVCPUImpliedExtsRule ZVBB_IMPLIED = {
2481 .ext = CPU_CFG_OFFSET(ext_zvbb),
2482 .implied_multi_exts = {
2483 CPU_CFG_OFFSET(ext_zvkb),
2484
2485 RISCV_IMPLIED_EXTS_RULE_END
2486 },
2487 };
2488
2489 static RISCVCPUImpliedExtsRule ZVE32F_IMPLIED = {
2490 .ext = CPU_CFG_OFFSET(ext_zve32f),
2491 .implied_misa_exts = RVF,
2492 .implied_multi_exts = {
2493 CPU_CFG_OFFSET(ext_zve32x),
2494
2495 RISCV_IMPLIED_EXTS_RULE_END
2496 },
2497 };
2498
2499 static RISCVCPUImpliedExtsRule ZVE32X_IMPLIED = {
2500 .ext = CPU_CFG_OFFSET(ext_zve32x),
2501 .implied_multi_exts = {
2502 CPU_CFG_OFFSET(ext_zicsr),
2503
2504 RISCV_IMPLIED_EXTS_RULE_END
2505 },
2506 };
2507
2508 static RISCVCPUImpliedExtsRule ZVE64D_IMPLIED = {
2509 .ext = CPU_CFG_OFFSET(ext_zve64d),
2510 .implied_misa_exts = RVD,
2511 .implied_multi_exts = {
2512 CPU_CFG_OFFSET(ext_zve64f),
2513
2514 RISCV_IMPLIED_EXTS_RULE_END
2515 },
2516 };
2517
2518 static RISCVCPUImpliedExtsRule ZVE64F_IMPLIED = {
2519 .ext = CPU_CFG_OFFSET(ext_zve64f),
2520 .implied_misa_exts = RVF,
2521 .implied_multi_exts = {
2522 CPU_CFG_OFFSET(ext_zve32f), CPU_CFG_OFFSET(ext_zve64x),
2523
2524 RISCV_IMPLIED_EXTS_RULE_END
2525 },
2526 };
2527
2528 static RISCVCPUImpliedExtsRule ZVE64X_IMPLIED = {
2529 .ext = CPU_CFG_OFFSET(ext_zve64x),
2530 .implied_multi_exts = {
2531 CPU_CFG_OFFSET(ext_zve32x),
2532
2533 RISCV_IMPLIED_EXTS_RULE_END
2534 },
2535 };
2536
2537 static RISCVCPUImpliedExtsRule ZVFBFMIN_IMPLIED = {
2538 .ext = CPU_CFG_OFFSET(ext_zvfbfmin),
2539 .implied_multi_exts = {
2540 CPU_CFG_OFFSET(ext_zve32f),
2541
2542 RISCV_IMPLIED_EXTS_RULE_END
2543 },
2544 };
2545
2546 static RISCVCPUImpliedExtsRule ZVFBFWMA_IMPLIED = {
2547 .ext = CPU_CFG_OFFSET(ext_zvfbfwma),
2548 .implied_multi_exts = {
2549 CPU_CFG_OFFSET(ext_zvfbfmin), CPU_CFG_OFFSET(ext_zfbfmin),
2550
2551 RISCV_IMPLIED_EXTS_RULE_END
2552 },
2553 };
2554
2555 static RISCVCPUImpliedExtsRule ZVFH_IMPLIED = {
2556 .ext = CPU_CFG_OFFSET(ext_zvfh),
2557 .implied_multi_exts = {
2558 CPU_CFG_OFFSET(ext_zvfhmin), CPU_CFG_OFFSET(ext_zfhmin),
2559
2560 RISCV_IMPLIED_EXTS_RULE_END
2561 },
2562 };
2563
2564 static RISCVCPUImpliedExtsRule ZVFHMIN_IMPLIED = {
2565 .ext = CPU_CFG_OFFSET(ext_zvfhmin),
2566 .implied_multi_exts = {
2567 CPU_CFG_OFFSET(ext_zve32f),
2568
2569 RISCV_IMPLIED_EXTS_RULE_END
2570 },
2571 };
2572
2573 static RISCVCPUImpliedExtsRule ZVKN_IMPLIED = {
2574 .ext = CPU_CFG_OFFSET(ext_zvkn),
2575 .implied_multi_exts = {
2576 CPU_CFG_OFFSET(ext_zvkned), CPU_CFG_OFFSET(ext_zvknhb),
2577 CPU_CFG_OFFSET(ext_zvkb), CPU_CFG_OFFSET(ext_zvkt),
2578
2579 RISCV_IMPLIED_EXTS_RULE_END
2580 },
2581 };
2582
2583 static RISCVCPUImpliedExtsRule ZVKNC_IMPLIED = {
2584 .ext = CPU_CFG_OFFSET(ext_zvknc),
2585 .implied_multi_exts = {
2586 CPU_CFG_OFFSET(ext_zvkn), CPU_CFG_OFFSET(ext_zvbc),
2587
2588 RISCV_IMPLIED_EXTS_RULE_END
2589 },
2590 };
2591
2592 static RISCVCPUImpliedExtsRule ZVKNG_IMPLIED = {
2593 .ext = CPU_CFG_OFFSET(ext_zvkng),
2594 .implied_multi_exts = {
2595 CPU_CFG_OFFSET(ext_zvkn), CPU_CFG_OFFSET(ext_zvkg),
2596
2597 RISCV_IMPLIED_EXTS_RULE_END
2598 },
2599 };
2600
2601 static RISCVCPUImpliedExtsRule ZVKNHB_IMPLIED = {
2602 .ext = CPU_CFG_OFFSET(ext_zvknhb),
2603 .implied_multi_exts = {
2604 CPU_CFG_OFFSET(ext_zve64x),
2605
2606 RISCV_IMPLIED_EXTS_RULE_END
2607 },
2608 };
2609
2610 static RISCVCPUImpliedExtsRule ZVKS_IMPLIED = {
2611 .ext = CPU_CFG_OFFSET(ext_zvks),
2612 .implied_multi_exts = {
2613 CPU_CFG_OFFSET(ext_zvksed), CPU_CFG_OFFSET(ext_zvksh),
2614 CPU_CFG_OFFSET(ext_zvkb), CPU_CFG_OFFSET(ext_zvkt),
2615
2616 RISCV_IMPLIED_EXTS_RULE_END
2617 },
2618 };
2619
2620 static RISCVCPUImpliedExtsRule ZVKSC_IMPLIED = {
2621 .ext = CPU_CFG_OFFSET(ext_zvksc),
2622 .implied_multi_exts = {
2623 CPU_CFG_OFFSET(ext_zvks), CPU_CFG_OFFSET(ext_zvbc),
2624
2625 RISCV_IMPLIED_EXTS_RULE_END
2626 },
2627 };
2628
2629 static RISCVCPUImpliedExtsRule ZVKSG_IMPLIED = {
2630 .ext = CPU_CFG_OFFSET(ext_zvksg),
2631 .implied_multi_exts = {
2632 CPU_CFG_OFFSET(ext_zvks), CPU_CFG_OFFSET(ext_zvkg),
2633
2634 RISCV_IMPLIED_EXTS_RULE_END
2635 },
2636 };
2637
2638 RISCVCPUImpliedExtsRule *riscv_misa_ext_implied_rules[] = {
2639 &RVA_IMPLIED, &RVD_IMPLIED, &RVF_IMPLIED,
2640 &RVM_IMPLIED, &RVV_IMPLIED, NULL
2641 };
2642
2643 RISCVCPUImpliedExtsRule *riscv_multi_ext_implied_rules[] = {
2644 &ZCB_IMPLIED, &ZCD_IMPLIED, &ZCE_IMPLIED,
2645 &ZCF_IMPLIED, &ZCMP_IMPLIED, &ZCMT_IMPLIED,
2646 &ZDINX_IMPLIED, &ZFA_IMPLIED, &ZFBFMIN_IMPLIED,
2647 &ZFH_IMPLIED, &ZFHMIN_IMPLIED, &ZFINX_IMPLIED,
2648 &ZHINX_IMPLIED, &ZHINXMIN_IMPLIED, &ZICNTR_IMPLIED,
2649 &ZIHPM_IMPLIED, &ZK_IMPLIED, &ZKN_IMPLIED,
2650 &ZKS_IMPLIED, &ZVBB_IMPLIED, &ZVE32F_IMPLIED,
2651 &ZVE32X_IMPLIED, &ZVE64D_IMPLIED, &ZVE64F_IMPLIED,
2652 &ZVE64X_IMPLIED, &ZVFBFMIN_IMPLIED, &ZVFBFWMA_IMPLIED,
2653 &ZVFH_IMPLIED, &ZVFHMIN_IMPLIED, &ZVKN_IMPLIED,
2654 &ZVKNC_IMPLIED, &ZVKNG_IMPLIED, &ZVKNHB_IMPLIED,
2655 &ZVKS_IMPLIED, &ZVKSC_IMPLIED, &ZVKSG_IMPLIED,
2656 NULL
2657 };
2658
2659 static Property riscv_cpu_properties[] = {
2660 DEFINE_PROP_BOOL("debug", RISCVCPU, cfg.debug, true),
2661
2662 {.name = "pmu-mask", .info = &prop_pmu_mask},
2663 {.name = "pmu-num", .info = &prop_pmu_num}, /* Deprecated */
2664
2665 {.name = "mmu", .info = &prop_mmu},
2666 {.name = "pmp", .info = &prop_pmp},
2667
2668 {.name = "priv_spec", .info = &prop_priv_spec},
2669 {.name = "vext_spec", .info = &prop_vext_spec},
2670
2671 {.name = "vlen", .info = &prop_vlen},
2672 {.name = "elen", .info = &prop_elen},
2673
2674 {.name = "cbom_blocksize", .info = &prop_cbom_blksize},
2675 {.name = "cbop_blocksize", .info = &prop_cbop_blksize},
2676 {.name = "cboz_blocksize", .info = &prop_cboz_blksize},
2677
2678 {.name = "mvendorid", .info = &prop_mvendorid},
2679 {.name = "mimpid", .info = &prop_mimpid},
2680 {.name = "marchid", .info = &prop_marchid},
2681
2682 #ifndef CONFIG_USER_ONLY
2683 DEFINE_PROP_UINT64("resetvec", RISCVCPU, env.resetvec, DEFAULT_RSTVEC),
2684 #endif
2685
2686 DEFINE_PROP_BOOL("short-isa-string", RISCVCPU, cfg.short_isa_string, false),
2687
2688 DEFINE_PROP_BOOL("rvv_ta_all_1s", RISCVCPU, cfg.rvv_ta_all_1s, false),
2689 DEFINE_PROP_BOOL("rvv_ma_all_1s", RISCVCPU, cfg.rvv_ma_all_1s, false),
2690
2691 /*
2692 * write_misa() is marked as experimental for now, so mark
2693 * it with the 'x-' prefix and default it to 'false'.
2694 */
2695 DEFINE_PROP_BOOL("x-misa-w", RISCVCPU, cfg.misa_w, false),
2696 DEFINE_PROP_END_OF_LIST(),
2697 };
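/*
 * A minimal usage sketch (the CPU model "rv64" assumes a TARGET_RISCV64
 * build; the values are arbitrary but valid):
 *
 *   -cpu rv64,pmu-mask=0xfff8,vlen=256,elen=64,mmu=on,pmp=on
 *
 * Numeric options go through the custom setters above, so invalid
 * values (e.g. a non-power-of-2 vlen) are rejected when the property
 * is set.
 */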
2698
2699 #if defined(TARGET_RISCV64)
2700 static void rva22u64_profile_cpu_init(Object *obj)
2701 {
2702 rv64i_bare_cpu_init(obj);
2703
2704 RVA22U64.enabled = true;
2705 }
2706
2707 static void rva22s64_profile_cpu_init(Object *obj)
2708 {
2709 rv64i_bare_cpu_init(obj);
2710
2711 RVA22S64.enabled = true;
2712 }
2713 #endif
2714
2715 static const gchar *riscv_gdb_arch_name(CPUState *cs)
2716 {
2717 RISCVCPU *cpu = RISCV_CPU(cs);
2718 CPURISCVState *env = &cpu->env;
2719
2720 switch (riscv_cpu_mxl(env)) {
2721 case MXL_RV32:
2722 return "riscv:rv32";
2723 case MXL_RV64:
2724 case MXL_RV128:
2725 return "riscv:rv64";
2726 default:
2727 g_assert_not_reached();
2728 }
2729 }
2730
2731 #ifndef CONFIG_USER_ONLY
2732 static int64_t riscv_get_arch_id(CPUState *cs)
2733 {
2734 RISCVCPU *cpu = RISCV_CPU(cs);
2735
2736 return cpu->env.mhartid;
2737 }
2738
2739 #include "hw/core/sysemu-cpu-ops.h"
2740
2741 static const struct SysemuCPUOps riscv_sysemu_ops = {
2742 .get_phys_page_debug = riscv_cpu_get_phys_page_debug,
2743 .write_elf64_note = riscv_cpu_write_elf64_note,
2744 .write_elf32_note = riscv_cpu_write_elf32_note,
2745 .legacy_vmsd = &vmstate_riscv_cpu,
2746 };
2747 #endif
2748
2749 static void riscv_cpu_common_class_init(ObjectClass *c, void *data)
2750 {
2751 RISCVCPUClass *mcc = RISCV_CPU_CLASS(c);
2752 CPUClass *cc = CPU_CLASS(c);
2753 DeviceClass *dc = DEVICE_CLASS(c);
2754 ResettableClass *rc = RESETTABLE_CLASS(c);
2755
2756 device_class_set_parent_realize(dc, riscv_cpu_realize,
2757 &mcc->parent_realize);
2758
2759 resettable_class_set_parent_phases(rc, NULL, riscv_cpu_reset_hold, NULL,
2760 &mcc->parent_phases);
2761
2762 cc->class_by_name = riscv_cpu_class_by_name;
2763 cc->has_work = riscv_cpu_has_work;
2764 cc->mmu_index = riscv_cpu_mmu_index;
2765 cc->dump_state = riscv_cpu_dump_state;
2766 cc->set_pc = riscv_cpu_set_pc;
2767 cc->get_pc = riscv_cpu_get_pc;
2768 cc->gdb_read_register = riscv_cpu_gdb_read_register;
2769 cc->gdb_write_register = riscv_cpu_gdb_write_register;
2770 cc->gdb_stop_before_watchpoint = true;
2771 cc->disas_set_info = riscv_cpu_disas_set_info;
2772 #ifndef CONFIG_USER_ONLY
2773 cc->sysemu_ops = &riscv_sysemu_ops;
2774 cc->get_arch_id = riscv_get_arch_id;
2775 #endif
2776 cc->gdb_arch_name = riscv_gdb_arch_name;
2777
2778 device_class_set_props(dc, riscv_cpu_properties);
2779 }
2780
2781 static void riscv_cpu_class_init(ObjectClass *c, void *data)
2782 {
2783 RISCVCPUClass *mcc = RISCV_CPU_CLASS(c);
2784
2785 mcc->misa_mxl_max = (uint32_t)(uintptr_t)data;
2786 riscv_cpu_validate_misa_mxl(mcc);
2787 }
2788
2789 static void riscv_isa_string_ext(RISCVCPU *cpu, char **isa_str,
2790 int max_str_len)
2791 {
2792 const RISCVIsaExtData *edata;
2793 char *old = *isa_str;
2794 char *new = *isa_str;
2795
2796 for (edata = isa_edata_arr; edata && edata->name; edata++) {
2797 if (isa_ext_is_enabled(cpu, edata->ext_enable_offset)) {
2798 new = g_strconcat(old, "_", edata->name, NULL);
2799 g_free(old);
2800 old = new;
2801 }
2802 }
2803
2804 *isa_str = new;
2805 }
2806
2807 char *riscv_isa_string(RISCVCPU *cpu)
2808 {
2809 RISCVCPUClass *mcc = RISCV_CPU_GET_CLASS(cpu);
2810 int i;
2811 const size_t maxlen = sizeof("rv128") + sizeof(riscv_single_letter_exts);
2812 char *isa_str = g_new(char, maxlen);
2813 int xlen = riscv_cpu_max_xlen(mcc);
2814 char *p = isa_str + snprintf(isa_str, maxlen, "rv%d", xlen);
2815
2816 for (i = 0; i < sizeof(riscv_single_letter_exts) - 1; i++) {
2817 if (cpu->env.misa_ext & RV(riscv_single_letter_exts[i])) {
2818 *p++ = qemu_tolower(riscv_single_letter_exts[i]);
2819 }
2820 }
2821 *p = '\0';
2822 if (!cpu->cfg.short_isa_string) {
2823 riscv_isa_string_ext(cpu, &isa_str, maxlen);
2824 }
2825 return isa_str;
2826 }
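/*
 * For illustration, an rv64 CPU with the I, M, A, F, D and C MISA bits
 * plus a few multi-letter extensions enabled yields something like
 * "rv64imafdc_zicsr_zifencei": single-letter extensions first, in
 * riscv_single_letter_exts order, then "_"-separated multi-letter ones.
 */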
2827
2828 #ifndef CONFIG_USER_ONLY
2829 static char **riscv_isa_extensions_list(RISCVCPU *cpu, int *count)
2830 {
2831 int maxlen = ARRAY_SIZE(riscv_single_letter_exts) + ARRAY_SIZE(isa_edata_arr);
2832 char **extensions = g_new(char *, maxlen);
2833
2834 for (int i = 0; i < sizeof(riscv_single_letter_exts) - 1; i++) {
2835 if (cpu->env.misa_ext & RV(riscv_single_letter_exts[i])) {
2836 extensions[*count] = g_new(char, 2);
2837 snprintf(extensions[*count], 2, "%c",
2838 qemu_tolower(riscv_single_letter_exts[i]));
2839 (*count)++;
2840 }
2841 }
2842
2843 for (const RISCVIsaExtData *edata = isa_edata_arr; edata->name; edata++) {
2844 if (isa_ext_is_enabled(cpu, edata->ext_enable_offset)) {
2845 extensions[*count] = g_strdup(edata->name);
2846 (*count)++;
2847 }
2848 }
2849
2850 return extensions;
2851 }
2852
2853 void riscv_isa_write_fdt(RISCVCPU *cpu, void *fdt, char *nodename)
2854 {
2855 RISCVCPUClass *mcc = RISCV_CPU_GET_CLASS(cpu);
2856 const size_t maxlen = sizeof("rv128i");
2857 g_autofree char *isa_base = g_new(char, maxlen);
2858 g_autofree char *riscv_isa = NULL;
2859 char **isa_extensions;
2860 int count = 0;
2861 int xlen = riscv_cpu_max_xlen(mcc);
2862
2863 riscv_isa = riscv_isa_string(cpu);
2864 qemu_fdt_setprop_string(fdt, nodename, "riscv,isa", riscv_isa);
2865
2866 snprintf(isa_base, maxlen, "rv%di", xlen);
2867 qemu_fdt_setprop_string(fdt, nodename, "riscv,isa-base", isa_base);
2868
2869 isa_extensions = riscv_isa_extensions_list(cpu, &count);
2870 qemu_fdt_setprop_string_array(fdt, nodename, "riscv,isa-extensions",
2871 isa_extensions, count);
2872
2873 for (int i = 0; i < count; i++) {
2874 g_free(isa_extensions[i]);
2875 }
2876
2877 g_free(isa_extensions);
2878 }
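/*
 * The resulting device tree fragment looks roughly like this (values
 * are illustrative):
 *
 *   riscv,isa = "rv64imafdc_zicsr_zifencei";
 *   riscv,isa-base = "rv64i";
 *   riscv,isa-extensions = "i", "m", "a", "f", "d", "c", "zicsr", "zifencei";
 */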
2879 #endif
2880
2881 #define DEFINE_CPU(type_name, misa_mxl_max, initfn) \
2882 { \
2883 .name = (type_name), \
2884 .parent = TYPE_RISCV_CPU, \
2885 .instance_init = (initfn), \
2886 .class_init = riscv_cpu_class_init, \
2887 .class_data = (void *)(misa_mxl_max) \
2888 }
2889
2890 #define DEFINE_DYNAMIC_CPU(type_name, misa_mxl_max, initfn) \
2891 { \
2892 .name = (type_name), \
2893 .parent = TYPE_RISCV_DYNAMIC_CPU, \
2894 .instance_init = (initfn), \
2895 .class_init = riscv_cpu_class_init, \
2896 .class_data = (void *)(misa_mxl_max) \
2897 }
2898
2899 #define DEFINE_VENDOR_CPU(type_name, misa_mxl_max, initfn) \
2900 { \
2901 .name = (type_name), \
2902 .parent = TYPE_RISCV_VENDOR_CPU, \
2903 .instance_init = (initfn), \
2904 .class_init = riscv_cpu_class_init, \
2905 .class_data = (void *)(misa_mxl_max) \
2906 }
2907
2908 #define DEFINE_BARE_CPU(type_name, misa_mxl_max, initfn) \
2909 { \
2910 .name = (type_name), \
2911 .parent = TYPE_RISCV_BARE_CPU, \
2912 .instance_init = (initfn), \
2913 .class_init = riscv_cpu_class_init, \
2914 .class_data = (void *)(misa_mxl_max) \
2915 }
2916
2917 #define DEFINE_PROFILE_CPU(type_name, misa_mxl_max, initfn) \
2918 { \
2919 .name = (type_name), \
2920 .parent = TYPE_RISCV_BARE_CPU, \
2921 .instance_init = (initfn), \
2922 .class_init = riscv_cpu_class_init, \
2923 .class_data = (void *)(misa_mxl_max) \
2924 }
2925
2926 static const TypeInfo riscv_cpu_type_infos[] = {
2927 {
2928 .name = TYPE_RISCV_CPU,
2929 .parent = TYPE_CPU,
2930 .instance_size = sizeof(RISCVCPU),
2931 .instance_align = __alignof(RISCVCPU),
2932 .instance_init = riscv_cpu_init,
2933 .instance_post_init = riscv_cpu_post_init,
2934 .abstract = true,
2935 .class_size = sizeof(RISCVCPUClass),
2936 .class_init = riscv_cpu_common_class_init,
2937 },
2938 {
2939 .name = TYPE_RISCV_DYNAMIC_CPU,
2940 .parent = TYPE_RISCV_CPU,
2941 .abstract = true,
2942 },
2943 {
2944 .name = TYPE_RISCV_VENDOR_CPU,
2945 .parent = TYPE_RISCV_CPU,
2946 .abstract = true,
2947 },
2948 {
2949 .name = TYPE_RISCV_BARE_CPU,
2950 .parent = TYPE_RISCV_CPU,
2951 .instance_init = riscv_bare_cpu_init,
2952 .abstract = true,
2953 },
2954 #if defined(TARGET_RISCV32)
2955 DEFINE_DYNAMIC_CPU(TYPE_RISCV_CPU_ANY, MXL_RV32, riscv_any_cpu_init),
2956 DEFINE_DYNAMIC_CPU(TYPE_RISCV_CPU_MAX, MXL_RV32, riscv_max_cpu_init),
2957 DEFINE_DYNAMIC_CPU(TYPE_RISCV_CPU_BASE32, MXL_RV32, rv32_base_cpu_init),
2958 DEFINE_VENDOR_CPU(TYPE_RISCV_CPU_IBEX, MXL_RV32, rv32_ibex_cpu_init),
2959 DEFINE_VENDOR_CPU(TYPE_RISCV_CPU_SIFIVE_E31, MXL_RV32, rv32_sifive_e_cpu_init),
2960 DEFINE_VENDOR_CPU(TYPE_RISCV_CPU_SIFIVE_E34, MXL_RV32, rv32_imafcu_nommu_cpu_init),
2961 DEFINE_VENDOR_CPU(TYPE_RISCV_CPU_SIFIVE_U34, MXL_RV32, rv32_sifive_u_cpu_init),
2962 DEFINE_BARE_CPU(TYPE_RISCV_CPU_RV32I, MXL_RV32, rv32i_bare_cpu_init),
2963 DEFINE_BARE_CPU(TYPE_RISCV_CPU_RV32E, MXL_RV32, rv32e_bare_cpu_init),
2964 #elif defined(TARGET_RISCV64)
2965 DEFINE_DYNAMIC_CPU(TYPE_RISCV_CPU_ANY, MXL_RV64, riscv_any_cpu_init),
2966 DEFINE_DYNAMIC_CPU(TYPE_RISCV_CPU_MAX, MXL_RV64, riscv_max_cpu_init),
2967 DEFINE_DYNAMIC_CPU(TYPE_RISCV_CPU_BASE64, MXL_RV64, rv64_base_cpu_init),
2968 DEFINE_VENDOR_CPU(TYPE_RISCV_CPU_SIFIVE_E51, MXL_RV64, rv64_sifive_e_cpu_init),
2969 DEFINE_VENDOR_CPU(TYPE_RISCV_CPU_SIFIVE_U54, MXL_RV64, rv64_sifive_u_cpu_init),
2970 DEFINE_VENDOR_CPU(TYPE_RISCV_CPU_SHAKTI_C, MXL_RV64, rv64_sifive_u_cpu_init),
2971 DEFINE_VENDOR_CPU(TYPE_RISCV_CPU_THEAD_C906, MXL_RV64, rv64_thead_c906_cpu_init),
2972 DEFINE_VENDOR_CPU(TYPE_RISCV_CPU_VEYRON_V1, MXL_RV64, rv64_veyron_v1_cpu_init),
2973 #ifdef CONFIG_TCG
2974 DEFINE_DYNAMIC_CPU(TYPE_RISCV_CPU_BASE128, MXL_RV128, rv128_base_cpu_init),
2975 #endif /* CONFIG_TCG */
2976 DEFINE_BARE_CPU(TYPE_RISCV_CPU_RV64I, MXL_RV64, rv64i_bare_cpu_init),
2977 DEFINE_BARE_CPU(TYPE_RISCV_CPU_RV64E, MXL_RV64, rv64e_bare_cpu_init),
2978 DEFINE_PROFILE_CPU(TYPE_RISCV_CPU_RVA22U64, MXL_RV64, rva22u64_profile_cpu_init),
2979 DEFINE_PROFILE_CPU(TYPE_RISCV_CPU_RVA22S64, MXL_RV64, rva22s64_profile_cpu_init),
2980 #endif /* TARGET_RISCV64 */
2981 };
2982
2983 DEFINE_TYPES(riscv_cpu_type_infos)
2984