1 /*
2 * QEMU RISC-V CPU
3 *
4 * Copyright (c) 2016-2017 Sagar Karandikar, sagark@eecs.berkeley.edu
5 * Copyright (c) 2017-2018 SiFive, Inc.
6 *
7 * This program is free software; you can redistribute it and/or modify it
8 * under the terms and conditions of the GNU General Public License,
9 * version 2 or later, as published by the Free Software Foundation.
10 *
11 * This program is distributed in the hope it will be useful, but WITHOUT
12 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
13 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
14 * more details.
15 *
16 * You should have received a copy of the GNU General Public License along with
17 * this program. If not, see <http://www.gnu.org/licenses/>.
18 */
19
20 #include "qemu/osdep.h"
21 #include "qemu/qemu-print.h"
22 #include "qemu/ctype.h"
23 #include "qemu/log.h"
24 #include "cpu.h"
25 #include "cpu_vendorid.h"
26 #include "internals.h"
27 #include "exec/exec-all.h"
28 #include "qapi/error.h"
29 #include "qapi/visitor.h"
30 #include "qemu/error-report.h"
31 #include "hw/qdev-properties.h"
32 #include "hw/core/qdev-prop-internal.h"
33 #include "migration/vmstate.h"
34 #include "fpu/softfloat-helpers.h"
35 #include "sysemu/device_tree.h"
36 #include "sysemu/kvm.h"
37 #include "sysemu/tcg.h"
38 #include "kvm/kvm_riscv.h"
39 #include "tcg/tcg-cpu.h"
40 #include "tcg/tcg.h"
41
42 /* RISC-V CPU definitions */
43 static const char riscv_single_letter_exts[] = "IEMAFDQCBPVH";
44 const uint32_t misa_bits[] = {RVI, RVE, RVM, RVA, RVF, RVD, RVV,
45 RVC, RVS, RVU, RVH, RVJ, RVG, RVB, 0};
46
47 /*
48 * From vector_helper.c
49 * Note that vector data is stored in host-endian 64-bit chunks,
50 * so addressing bytes needs a host-endian fixup.
51 */
52 #if HOST_BIG_ENDIAN
53 #define BYTE(x) ((x) ^ 7)
54 #else
55 #define BYTE(x) (x)
56 #endif
57
riscv_cpu_is_32bit(RISCVCPU * cpu)58 bool riscv_cpu_is_32bit(RISCVCPU *cpu)
59 {
60 return riscv_cpu_mxl(&cpu->env) == MXL_RV32;
61 }
62
63 /* Hash that stores general user set numeric options */
64 static GHashTable *general_user_opts;
65
cpu_option_add_user_setting(const char * optname,uint32_t value)66 static void cpu_option_add_user_setting(const char *optname, uint32_t value)
67 {
68 g_hash_table_insert(general_user_opts, (gpointer)optname,
69 GUINT_TO_POINTER(value));
70 }
71
riscv_cpu_option_set(const char * optname)72 bool riscv_cpu_option_set(const char *optname)
73 {
74 return g_hash_table_contains(general_user_opts, optname);
75 }
76
77 #define ISA_EXT_DATA_ENTRY(_name, _min_ver, _prop) \
78 {#_name, _min_ver, CPU_CFG_OFFSET(_prop)}
79
80 /*
81 * Here are the ordering rules of extension naming defined by RISC-V
82 * specification :
83 * 1. All extensions should be separated from other multi-letter extensions
84 * by an underscore.
85 * 2. The first letter following the 'Z' conventionally indicates the most
86 * closely related alphabetical extension category, IMAFDQLCBKJTPVH.
87 * If multiple 'Z' extensions are named, they should be ordered first
88 * by category, then alphabetically within a category.
89 * 3. Standard supervisor-level extensions (starts with 'S') should be
90 * listed after standard unprivileged extensions. If multiple
91 * supervisor-level extensions are listed, they should be ordered
92 * alphabetically.
93 * 4. Non-standard extensions (starts with 'X') must be listed after all
94 * standard extensions. They must be separated from other multi-letter
95 * extensions by an underscore.
96 *
97 * Single letter extensions are checked in riscv_cpu_validate_misa_priv()
98 * instead.
99 */
100 const RISCVIsaExtData isa_edata_arr[] = {
101 ISA_EXT_DATA_ENTRY(zic64b, PRIV_VERSION_1_12_0, ext_zic64b),
102 ISA_EXT_DATA_ENTRY(zicbom, PRIV_VERSION_1_12_0, ext_zicbom),
103 ISA_EXT_DATA_ENTRY(zicbop, PRIV_VERSION_1_12_0, ext_zicbop),
104 ISA_EXT_DATA_ENTRY(zicboz, PRIV_VERSION_1_12_0, ext_zicboz),
105 ISA_EXT_DATA_ENTRY(ziccamoa, PRIV_VERSION_1_11_0, has_priv_1_11),
106 ISA_EXT_DATA_ENTRY(ziccif, PRIV_VERSION_1_11_0, has_priv_1_11),
107 ISA_EXT_DATA_ENTRY(zicclsm, PRIV_VERSION_1_11_0, has_priv_1_11),
108 ISA_EXT_DATA_ENTRY(ziccrse, PRIV_VERSION_1_11_0, has_priv_1_11),
109 ISA_EXT_DATA_ENTRY(zicond, PRIV_VERSION_1_12_0, ext_zicond),
110 ISA_EXT_DATA_ENTRY(zicntr, PRIV_VERSION_1_12_0, ext_zicntr),
111 ISA_EXT_DATA_ENTRY(zicsr, PRIV_VERSION_1_10_0, ext_zicsr),
112 ISA_EXT_DATA_ENTRY(zifencei, PRIV_VERSION_1_10_0, ext_zifencei),
113 ISA_EXT_DATA_ENTRY(zihintntl, PRIV_VERSION_1_10_0, ext_zihintntl),
114 ISA_EXT_DATA_ENTRY(zihintpause, PRIV_VERSION_1_10_0, ext_zihintpause),
115 ISA_EXT_DATA_ENTRY(zihpm, PRIV_VERSION_1_12_0, ext_zihpm),
116 ISA_EXT_DATA_ENTRY(zimop, PRIV_VERSION_1_13_0, ext_zimop),
117 ISA_EXT_DATA_ENTRY(zmmul, PRIV_VERSION_1_12_0, ext_zmmul),
118 ISA_EXT_DATA_ENTRY(za64rs, PRIV_VERSION_1_12_0, has_priv_1_11),
119 ISA_EXT_DATA_ENTRY(zaamo, PRIV_VERSION_1_12_0, ext_zaamo),
120 ISA_EXT_DATA_ENTRY(zabha, PRIV_VERSION_1_13_0, ext_zabha),
121 ISA_EXT_DATA_ENTRY(zacas, PRIV_VERSION_1_12_0, ext_zacas),
122 ISA_EXT_DATA_ENTRY(zama16b, PRIV_VERSION_1_13_0, ext_zama16b),
123 ISA_EXT_DATA_ENTRY(zalrsc, PRIV_VERSION_1_12_0, ext_zalrsc),
124 ISA_EXT_DATA_ENTRY(zawrs, PRIV_VERSION_1_12_0, ext_zawrs),
125 ISA_EXT_DATA_ENTRY(zfa, PRIV_VERSION_1_12_0, ext_zfa),
126 ISA_EXT_DATA_ENTRY(zfbfmin, PRIV_VERSION_1_12_0, ext_zfbfmin),
127 ISA_EXT_DATA_ENTRY(zfh, PRIV_VERSION_1_11_0, ext_zfh),
128 ISA_EXT_DATA_ENTRY(zfhmin, PRIV_VERSION_1_11_0, ext_zfhmin),
129 ISA_EXT_DATA_ENTRY(zfinx, PRIV_VERSION_1_12_0, ext_zfinx),
130 ISA_EXT_DATA_ENTRY(zdinx, PRIV_VERSION_1_12_0, ext_zdinx),
131 ISA_EXT_DATA_ENTRY(zca, PRIV_VERSION_1_12_0, ext_zca),
132 ISA_EXT_DATA_ENTRY(zcb, PRIV_VERSION_1_12_0, ext_zcb),
133 ISA_EXT_DATA_ENTRY(zcf, PRIV_VERSION_1_12_0, ext_zcf),
134 ISA_EXT_DATA_ENTRY(zcd, PRIV_VERSION_1_12_0, ext_zcd),
135 ISA_EXT_DATA_ENTRY(zce, PRIV_VERSION_1_12_0, ext_zce),
136 ISA_EXT_DATA_ENTRY(zcmop, PRIV_VERSION_1_13_0, ext_zcmop),
137 ISA_EXT_DATA_ENTRY(zcmp, PRIV_VERSION_1_12_0, ext_zcmp),
138 ISA_EXT_DATA_ENTRY(zcmt, PRIV_VERSION_1_12_0, ext_zcmt),
139 ISA_EXT_DATA_ENTRY(zba, PRIV_VERSION_1_12_0, ext_zba),
140 ISA_EXT_DATA_ENTRY(zbb, PRIV_VERSION_1_12_0, ext_zbb),
141 ISA_EXT_DATA_ENTRY(zbc, PRIV_VERSION_1_12_0, ext_zbc),
142 ISA_EXT_DATA_ENTRY(zbkb, PRIV_VERSION_1_12_0, ext_zbkb),
143 ISA_EXT_DATA_ENTRY(zbkc, PRIV_VERSION_1_12_0, ext_zbkc),
144 ISA_EXT_DATA_ENTRY(zbkx, PRIV_VERSION_1_12_0, ext_zbkx),
145 ISA_EXT_DATA_ENTRY(zbs, PRIV_VERSION_1_12_0, ext_zbs),
146 ISA_EXT_DATA_ENTRY(zk, PRIV_VERSION_1_12_0, ext_zk),
147 ISA_EXT_DATA_ENTRY(zkn, PRIV_VERSION_1_12_0, ext_zkn),
148 ISA_EXT_DATA_ENTRY(zknd, PRIV_VERSION_1_12_0, ext_zknd),
149 ISA_EXT_DATA_ENTRY(zkne, PRIV_VERSION_1_12_0, ext_zkne),
150 ISA_EXT_DATA_ENTRY(zknh, PRIV_VERSION_1_12_0, ext_zknh),
151 ISA_EXT_DATA_ENTRY(zkr, PRIV_VERSION_1_12_0, ext_zkr),
152 ISA_EXT_DATA_ENTRY(zks, PRIV_VERSION_1_12_0, ext_zks),
153 ISA_EXT_DATA_ENTRY(zksed, PRIV_VERSION_1_12_0, ext_zksed),
154 ISA_EXT_DATA_ENTRY(zksh, PRIV_VERSION_1_12_0, ext_zksh),
155 ISA_EXT_DATA_ENTRY(zkt, PRIV_VERSION_1_12_0, ext_zkt),
156 ISA_EXT_DATA_ENTRY(ztso, PRIV_VERSION_1_12_0, ext_ztso),
157 ISA_EXT_DATA_ENTRY(zvbb, PRIV_VERSION_1_12_0, ext_zvbb),
158 ISA_EXT_DATA_ENTRY(zvbc, PRIV_VERSION_1_12_0, ext_zvbc),
159 ISA_EXT_DATA_ENTRY(zve32f, PRIV_VERSION_1_10_0, ext_zve32f),
160 ISA_EXT_DATA_ENTRY(zve32x, PRIV_VERSION_1_10_0, ext_zve32x),
161 ISA_EXT_DATA_ENTRY(zve64f, PRIV_VERSION_1_10_0, ext_zve64f),
162 ISA_EXT_DATA_ENTRY(zve64d, PRIV_VERSION_1_10_0, ext_zve64d),
163 ISA_EXT_DATA_ENTRY(zve64x, PRIV_VERSION_1_10_0, ext_zve64x),
164 ISA_EXT_DATA_ENTRY(zvfbfmin, PRIV_VERSION_1_12_0, ext_zvfbfmin),
165 ISA_EXT_DATA_ENTRY(zvfbfwma, PRIV_VERSION_1_12_0, ext_zvfbfwma),
166 ISA_EXT_DATA_ENTRY(zvfh, PRIV_VERSION_1_12_0, ext_zvfh),
167 ISA_EXT_DATA_ENTRY(zvfhmin, PRIV_VERSION_1_12_0, ext_zvfhmin),
168 ISA_EXT_DATA_ENTRY(zvkb, PRIV_VERSION_1_12_0, ext_zvkb),
169 ISA_EXT_DATA_ENTRY(zvkg, PRIV_VERSION_1_12_0, ext_zvkg),
170 ISA_EXT_DATA_ENTRY(zvkn, PRIV_VERSION_1_12_0, ext_zvkn),
171 ISA_EXT_DATA_ENTRY(zvknc, PRIV_VERSION_1_12_0, ext_zvknc),
172 ISA_EXT_DATA_ENTRY(zvkned, PRIV_VERSION_1_12_0, ext_zvkned),
173 ISA_EXT_DATA_ENTRY(zvkng, PRIV_VERSION_1_12_0, ext_zvkng),
174 ISA_EXT_DATA_ENTRY(zvknha, PRIV_VERSION_1_12_0, ext_zvknha),
175 ISA_EXT_DATA_ENTRY(zvknhb, PRIV_VERSION_1_12_0, ext_zvknhb),
176 ISA_EXT_DATA_ENTRY(zvks, PRIV_VERSION_1_12_0, ext_zvks),
177 ISA_EXT_DATA_ENTRY(zvksc, PRIV_VERSION_1_12_0, ext_zvksc),
178 ISA_EXT_DATA_ENTRY(zvksed, PRIV_VERSION_1_12_0, ext_zvksed),
179 ISA_EXT_DATA_ENTRY(zvksg, PRIV_VERSION_1_12_0, ext_zvksg),
180 ISA_EXT_DATA_ENTRY(zvksh, PRIV_VERSION_1_12_0, ext_zvksh),
181 ISA_EXT_DATA_ENTRY(zvkt, PRIV_VERSION_1_12_0, ext_zvkt),
182 ISA_EXT_DATA_ENTRY(zhinx, PRIV_VERSION_1_12_0, ext_zhinx),
183 ISA_EXT_DATA_ENTRY(zhinxmin, PRIV_VERSION_1_12_0, ext_zhinxmin),
184 ISA_EXT_DATA_ENTRY(smaia, PRIV_VERSION_1_12_0, ext_smaia),
185 ISA_EXT_DATA_ENTRY(smcntrpmf, PRIV_VERSION_1_12_0, ext_smcntrpmf),
186 ISA_EXT_DATA_ENTRY(smepmp, PRIV_VERSION_1_12_0, ext_smepmp),
187 ISA_EXT_DATA_ENTRY(smstateen, PRIV_VERSION_1_12_0, ext_smstateen),
188 ISA_EXT_DATA_ENTRY(ssaia, PRIV_VERSION_1_12_0, ext_ssaia),
189 ISA_EXT_DATA_ENTRY(ssccptr, PRIV_VERSION_1_11_0, has_priv_1_11),
190 ISA_EXT_DATA_ENTRY(sscofpmf, PRIV_VERSION_1_12_0, ext_sscofpmf),
191 ISA_EXT_DATA_ENTRY(sscounterenw, PRIV_VERSION_1_12_0, has_priv_1_12),
192 ISA_EXT_DATA_ENTRY(sstc, PRIV_VERSION_1_12_0, ext_sstc),
193 ISA_EXT_DATA_ENTRY(sstvala, PRIV_VERSION_1_12_0, has_priv_1_12),
194 ISA_EXT_DATA_ENTRY(sstvecd, PRIV_VERSION_1_12_0, has_priv_1_12),
195 ISA_EXT_DATA_ENTRY(svade, PRIV_VERSION_1_11_0, ext_svade),
196 ISA_EXT_DATA_ENTRY(svadu, PRIV_VERSION_1_12_0, ext_svadu),
197 ISA_EXT_DATA_ENTRY(svinval, PRIV_VERSION_1_12_0, ext_svinval),
198 ISA_EXT_DATA_ENTRY(svnapot, PRIV_VERSION_1_12_0, ext_svnapot),
199 ISA_EXT_DATA_ENTRY(svpbmt, PRIV_VERSION_1_12_0, ext_svpbmt),
200 ISA_EXT_DATA_ENTRY(xtheadba, PRIV_VERSION_1_11_0, ext_xtheadba),
201 ISA_EXT_DATA_ENTRY(xtheadbb, PRIV_VERSION_1_11_0, ext_xtheadbb),
202 ISA_EXT_DATA_ENTRY(xtheadbs, PRIV_VERSION_1_11_0, ext_xtheadbs),
203 ISA_EXT_DATA_ENTRY(xtheadcmo, PRIV_VERSION_1_11_0, ext_xtheadcmo),
204 ISA_EXT_DATA_ENTRY(xtheadcondmov, PRIV_VERSION_1_11_0, ext_xtheadcondmov),
205 ISA_EXT_DATA_ENTRY(xtheadfmemidx, PRIV_VERSION_1_11_0, ext_xtheadfmemidx),
206 ISA_EXT_DATA_ENTRY(xtheadfmv, PRIV_VERSION_1_11_0, ext_xtheadfmv),
207 ISA_EXT_DATA_ENTRY(xtheadmac, PRIV_VERSION_1_11_0, ext_xtheadmac),
208 ISA_EXT_DATA_ENTRY(xtheadmemidx, PRIV_VERSION_1_11_0, ext_xtheadmemidx),
209 ISA_EXT_DATA_ENTRY(xtheadmempair, PRIV_VERSION_1_11_0, ext_xtheadmempair),
210 ISA_EXT_DATA_ENTRY(xtheadsync, PRIV_VERSION_1_11_0, ext_xtheadsync),
211 ISA_EXT_DATA_ENTRY(xventanacondops, PRIV_VERSION_1_12_0, ext_XVentanaCondOps),
212
213 DEFINE_PROP_END_OF_LIST(),
214 };
215
isa_ext_is_enabled(RISCVCPU * cpu,uint32_t ext_offset)216 bool isa_ext_is_enabled(RISCVCPU *cpu, uint32_t ext_offset)
217 {
218 bool *ext_enabled = (void *)&cpu->cfg + ext_offset;
219
220 return *ext_enabled;
221 }
222
isa_ext_update_enabled(RISCVCPU * cpu,uint32_t ext_offset,bool en)223 void isa_ext_update_enabled(RISCVCPU *cpu, uint32_t ext_offset, bool en)
224 {
225 bool *ext_enabled = (void *)&cpu->cfg + ext_offset;
226
227 *ext_enabled = en;
228 }
229
riscv_cpu_is_vendor(Object * cpu_obj)230 bool riscv_cpu_is_vendor(Object *cpu_obj)
231 {
232 return object_dynamic_cast(cpu_obj, TYPE_RISCV_VENDOR_CPU) != NULL;
233 }
234
235 const char * const riscv_int_regnames[] = {
236 "x0/zero", "x1/ra", "x2/sp", "x3/gp", "x4/tp", "x5/t0", "x6/t1",
237 "x7/t2", "x8/s0", "x9/s1", "x10/a0", "x11/a1", "x12/a2", "x13/a3",
238 "x14/a4", "x15/a5", "x16/a6", "x17/a7", "x18/s2", "x19/s3", "x20/s4",
239 "x21/s5", "x22/s6", "x23/s7", "x24/s8", "x25/s9", "x26/s10", "x27/s11",
240 "x28/t3", "x29/t4", "x30/t5", "x31/t6"
241 };
242
243 const char * const riscv_int_regnamesh[] = {
244 "x0h/zeroh", "x1h/rah", "x2h/sph", "x3h/gph", "x4h/tph", "x5h/t0h",
245 "x6h/t1h", "x7h/t2h", "x8h/s0h", "x9h/s1h", "x10h/a0h", "x11h/a1h",
246 "x12h/a2h", "x13h/a3h", "x14h/a4h", "x15h/a5h", "x16h/a6h", "x17h/a7h",
247 "x18h/s2h", "x19h/s3h", "x20h/s4h", "x21h/s5h", "x22h/s6h", "x23h/s7h",
248 "x24h/s8h", "x25h/s9h", "x26h/s10h", "x27h/s11h", "x28h/t3h", "x29h/t4h",
249 "x30h/t5h", "x31h/t6h"
250 };
251
252 const char * const riscv_fpr_regnames[] = {
253 "f0/ft0", "f1/ft1", "f2/ft2", "f3/ft3", "f4/ft4", "f5/ft5",
254 "f6/ft6", "f7/ft7", "f8/fs0", "f9/fs1", "f10/fa0", "f11/fa1",
255 "f12/fa2", "f13/fa3", "f14/fa4", "f15/fa5", "f16/fa6", "f17/fa7",
256 "f18/fs2", "f19/fs3", "f20/fs4", "f21/fs5", "f22/fs6", "f23/fs7",
257 "f24/fs8", "f25/fs9", "f26/fs10", "f27/fs11", "f28/ft8", "f29/ft9",
258 "f30/ft10", "f31/ft11"
259 };
260
261 const char * const riscv_rvv_regnames[] = {
262 "v0", "v1", "v2", "v3", "v4", "v5", "v6",
263 "v7", "v8", "v9", "v10", "v11", "v12", "v13",
264 "v14", "v15", "v16", "v17", "v18", "v19", "v20",
265 "v21", "v22", "v23", "v24", "v25", "v26", "v27",
266 "v28", "v29", "v30", "v31"
267 };
268
269 static const char * const riscv_excp_names[] = {
270 "misaligned_fetch",
271 "fault_fetch",
272 "illegal_instruction",
273 "breakpoint",
274 "misaligned_load",
275 "fault_load",
276 "misaligned_store",
277 "fault_store",
278 "user_ecall",
279 "supervisor_ecall",
280 "hypervisor_ecall",
281 "machine_ecall",
282 "exec_page_fault",
283 "load_page_fault",
284 "reserved",
285 "store_page_fault",
286 "reserved",
287 "reserved",
288 "reserved",
289 "reserved",
290 "guest_exec_page_fault",
291 "guest_load_page_fault",
292 "reserved",
293 "guest_store_page_fault",
294 };
295
296 static const char * const riscv_intr_names[] = {
297 "u_software",
298 "s_software",
299 "vs_software",
300 "m_software",
301 "u_timer",
302 "s_timer",
303 "vs_timer",
304 "m_timer",
305 "u_external",
306 "s_external",
307 "vs_external",
308 "m_external",
309 "reserved",
310 "reserved",
311 "reserved",
312 "reserved"
313 };
314
riscv_cpu_get_trap_name(target_ulong cause,bool async)315 const char *riscv_cpu_get_trap_name(target_ulong cause, bool async)
316 {
317 if (async) {
318 return (cause < ARRAY_SIZE(riscv_intr_names)) ?
319 riscv_intr_names[cause] : "(unknown)";
320 } else {
321 return (cause < ARRAY_SIZE(riscv_excp_names)) ?
322 riscv_excp_names[cause] : "(unknown)";
323 }
324 }
325
riscv_cpu_set_misa_ext(CPURISCVState * env,uint32_t ext)326 void riscv_cpu_set_misa_ext(CPURISCVState *env, uint32_t ext)
327 {
328 env->misa_ext_mask = env->misa_ext = ext;
329 }
330
riscv_cpu_max_xlen(RISCVCPUClass * mcc)331 int riscv_cpu_max_xlen(RISCVCPUClass *mcc)
332 {
333 return 16 << mcc->misa_mxl_max;
334 }
335
336 #ifndef CONFIG_USER_ONLY
satp_mode_from_str(const char * satp_mode_str)337 static uint8_t satp_mode_from_str(const char *satp_mode_str)
338 {
339 if (!strncmp(satp_mode_str, "mbare", 5)) {
340 return VM_1_10_MBARE;
341 }
342
343 if (!strncmp(satp_mode_str, "sv32", 4)) {
344 return VM_1_10_SV32;
345 }
346
347 if (!strncmp(satp_mode_str, "sv39", 4)) {
348 return VM_1_10_SV39;
349 }
350
351 if (!strncmp(satp_mode_str, "sv48", 4)) {
352 return VM_1_10_SV48;
353 }
354
355 if (!strncmp(satp_mode_str, "sv57", 4)) {
356 return VM_1_10_SV57;
357 }
358
359 if (!strncmp(satp_mode_str, "sv64", 4)) {
360 return VM_1_10_SV64;
361 }
362
363 g_assert_not_reached();
364 }
365
satp_mode_max_from_map(uint32_t map)366 uint8_t satp_mode_max_from_map(uint32_t map)
367 {
368 /*
369 * 'map = 0' will make us return (31 - 32), which C will
370 * happily overflow to UINT_MAX. There's no good result to
371 * return if 'map = 0' (e.g. returning 0 will be ambiguous
372 * with the result for 'map = 1').
373 *
374 * Assert out if map = 0. Callers will have to deal with
375 * it outside of this function.
376 */
377 g_assert(map > 0);
378
379 /* map here has at least one bit set, so no problem with clz */
380 return 31 - __builtin_clz(map);
381 }
382
satp_mode_str(uint8_t satp_mode,bool is_32_bit)383 const char *satp_mode_str(uint8_t satp_mode, bool is_32_bit)
384 {
385 if (is_32_bit) {
386 switch (satp_mode) {
387 case VM_1_10_SV32:
388 return "sv32";
389 case VM_1_10_MBARE:
390 return "none";
391 }
392 } else {
393 switch (satp_mode) {
394 case VM_1_10_SV64:
395 return "sv64";
396 case VM_1_10_SV57:
397 return "sv57";
398 case VM_1_10_SV48:
399 return "sv48";
400 case VM_1_10_SV39:
401 return "sv39";
402 case VM_1_10_MBARE:
403 return "none";
404 }
405 }
406
407 g_assert_not_reached();
408 }
409
set_satp_mode_max_supported(RISCVCPU * cpu,uint8_t satp_mode)410 static void set_satp_mode_max_supported(RISCVCPU *cpu,
411 uint8_t satp_mode)
412 {
413 bool rv32 = riscv_cpu_mxl(&cpu->env) == MXL_RV32;
414 const bool *valid_vm = rv32 ? valid_vm_1_10_32 : valid_vm_1_10_64;
415
416 for (int i = 0; i <= satp_mode; ++i) {
417 if (valid_vm[i]) {
418 cpu->cfg.satp_mode.supported |= (1 << i);
419 }
420 }
421 }
422
423 /* Set the satp mode to the max supported */
set_satp_mode_default_map(RISCVCPU * cpu)424 static void set_satp_mode_default_map(RISCVCPU *cpu)
425 {
426 /*
427 * Bare CPUs do not default to the max available.
428 * Users must set a valid satp_mode in the command
429 * line.
430 */
431 if (object_dynamic_cast(OBJECT(cpu), TYPE_RISCV_BARE_CPU) != NULL) {
432 warn_report("No satp mode set. Defaulting to 'bare'");
433 cpu->cfg.satp_mode.map = (1 << VM_1_10_MBARE);
434 return;
435 }
436
437 cpu->cfg.satp_mode.map = cpu->cfg.satp_mode.supported;
438 }
439 #endif
440
riscv_any_cpu_init(Object * obj)441 static void riscv_any_cpu_init(Object *obj)
442 {
443 RISCVCPU *cpu = RISCV_CPU(obj);
444 CPURISCVState *env = &cpu->env;
445 riscv_cpu_set_misa_ext(env, RVI | RVM | RVA | RVF | RVD | RVC | RVU);
446
447 #ifndef CONFIG_USER_ONLY
448 set_satp_mode_max_supported(RISCV_CPU(obj),
449 riscv_cpu_mxl(&RISCV_CPU(obj)->env) == MXL_RV32 ?
450 VM_1_10_SV32 : VM_1_10_SV57);
451 #endif
452
453 env->priv_ver = PRIV_VERSION_LATEST;
454
455 /* inherited from parent obj via riscv_cpu_init() */
456 cpu->cfg.ext_zifencei = true;
457 cpu->cfg.ext_zicsr = true;
458 cpu->cfg.mmu = true;
459 cpu->cfg.pmp = true;
460 }
461
riscv_max_cpu_init(Object * obj)462 static void riscv_max_cpu_init(Object *obj)
463 {
464 RISCVCPU *cpu = RISCV_CPU(obj);
465 CPURISCVState *env = &cpu->env;
466
467 cpu->cfg.mmu = true;
468 cpu->cfg.pmp = true;
469
470 env->priv_ver = PRIV_VERSION_LATEST;
471 #ifndef CONFIG_USER_ONLY
472 #ifdef TARGET_RISCV32
473 set_satp_mode_max_supported(cpu, VM_1_10_SV32);
474 #else
475 set_satp_mode_max_supported(cpu, VM_1_10_SV57);
476 #endif
477 #endif
478 }
479
480 #if defined(TARGET_RISCV64)
rv64_base_cpu_init(Object * obj)481 static void rv64_base_cpu_init(Object *obj)
482 {
483 RISCVCPU *cpu = RISCV_CPU(obj);
484 CPURISCVState *env = &cpu->env;
485
486 cpu->cfg.mmu = true;
487 cpu->cfg.pmp = true;
488
489 /* Set latest version of privileged specification */
490 env->priv_ver = PRIV_VERSION_LATEST;
491 #ifndef CONFIG_USER_ONLY
492 set_satp_mode_max_supported(RISCV_CPU(obj), VM_1_10_SV57);
493 #endif
494 }
495
rv64_sifive_u_cpu_init(Object * obj)496 static void rv64_sifive_u_cpu_init(Object *obj)
497 {
498 RISCVCPU *cpu = RISCV_CPU(obj);
499 CPURISCVState *env = &cpu->env;
500 riscv_cpu_set_misa_ext(env, RVI | RVM | RVA | RVF | RVD | RVC | RVS | RVU);
501 env->priv_ver = PRIV_VERSION_1_10_0;
502 #ifndef CONFIG_USER_ONLY
503 set_satp_mode_max_supported(RISCV_CPU(obj), VM_1_10_SV39);
504 #endif
505
506 /* inherited from parent obj via riscv_cpu_init() */
507 cpu->cfg.ext_zifencei = true;
508 cpu->cfg.ext_zicsr = true;
509 cpu->cfg.mmu = true;
510 cpu->cfg.pmp = true;
511 }
512
rv64_sifive_e_cpu_init(Object * obj)513 static void rv64_sifive_e_cpu_init(Object *obj)
514 {
515 CPURISCVState *env = &RISCV_CPU(obj)->env;
516 RISCVCPU *cpu = RISCV_CPU(obj);
517
518 riscv_cpu_set_misa_ext(env, RVI | RVM | RVA | RVC | RVU);
519 env->priv_ver = PRIV_VERSION_1_10_0;
520 #ifndef CONFIG_USER_ONLY
521 set_satp_mode_max_supported(cpu, VM_1_10_MBARE);
522 #endif
523
524 /* inherited from parent obj via riscv_cpu_init() */
525 cpu->cfg.ext_zifencei = true;
526 cpu->cfg.ext_zicsr = true;
527 cpu->cfg.pmp = true;
528 }
529
rv64_thead_c906_cpu_init(Object * obj)530 static void rv64_thead_c906_cpu_init(Object *obj)
531 {
532 CPURISCVState *env = &RISCV_CPU(obj)->env;
533 RISCVCPU *cpu = RISCV_CPU(obj);
534
535 riscv_cpu_set_misa_ext(env, RVG | RVC | RVS | RVU);
536 env->priv_ver = PRIV_VERSION_1_11_0;
537
538 cpu->cfg.ext_zfa = true;
539 cpu->cfg.ext_zfh = true;
540 cpu->cfg.mmu = true;
541 cpu->cfg.ext_xtheadba = true;
542 cpu->cfg.ext_xtheadbb = true;
543 cpu->cfg.ext_xtheadbs = true;
544 cpu->cfg.ext_xtheadcmo = true;
545 cpu->cfg.ext_xtheadcondmov = true;
546 cpu->cfg.ext_xtheadfmemidx = true;
547 cpu->cfg.ext_xtheadmac = true;
548 cpu->cfg.ext_xtheadmemidx = true;
549 cpu->cfg.ext_xtheadmempair = true;
550 cpu->cfg.ext_xtheadsync = true;
551
552 cpu->cfg.mvendorid = THEAD_VENDOR_ID;
553 #ifndef CONFIG_USER_ONLY
554 set_satp_mode_max_supported(cpu, VM_1_10_SV39);
555 th_register_custom_csrs(cpu);
556 #endif
557
558 /* inherited from parent obj via riscv_cpu_init() */
559 cpu->cfg.pmp = true;
560 }
561
rv64_veyron_v1_cpu_init(Object * obj)562 static void rv64_veyron_v1_cpu_init(Object *obj)
563 {
564 CPURISCVState *env = &RISCV_CPU(obj)->env;
565 RISCVCPU *cpu = RISCV_CPU(obj);
566
567 riscv_cpu_set_misa_ext(env, RVG | RVC | RVS | RVU | RVH);
568 env->priv_ver = PRIV_VERSION_1_12_0;
569
570 /* Enable ISA extensions */
571 cpu->cfg.mmu = true;
572 cpu->cfg.ext_zifencei = true;
573 cpu->cfg.ext_zicsr = true;
574 cpu->cfg.pmp = true;
575 cpu->cfg.ext_zicbom = true;
576 cpu->cfg.cbom_blocksize = 64;
577 cpu->cfg.cboz_blocksize = 64;
578 cpu->cfg.ext_zicboz = true;
579 cpu->cfg.ext_smaia = true;
580 cpu->cfg.ext_ssaia = true;
581 cpu->cfg.ext_sscofpmf = true;
582 cpu->cfg.ext_sstc = true;
583 cpu->cfg.ext_svinval = true;
584 cpu->cfg.ext_svnapot = true;
585 cpu->cfg.ext_svpbmt = true;
586 cpu->cfg.ext_smstateen = true;
587 cpu->cfg.ext_zba = true;
588 cpu->cfg.ext_zbb = true;
589 cpu->cfg.ext_zbc = true;
590 cpu->cfg.ext_zbs = true;
591 cpu->cfg.ext_XVentanaCondOps = true;
592
593 cpu->cfg.mvendorid = VEYRON_V1_MVENDORID;
594 cpu->cfg.marchid = VEYRON_V1_MARCHID;
595 cpu->cfg.mimpid = VEYRON_V1_MIMPID;
596
597 #ifndef CONFIG_USER_ONLY
598 set_satp_mode_max_supported(cpu, VM_1_10_SV48);
599 #endif
600 }
601
602 #ifdef CONFIG_TCG
rv128_base_cpu_init(Object * obj)603 static void rv128_base_cpu_init(Object *obj)
604 {
605 RISCVCPU *cpu = RISCV_CPU(obj);
606 CPURISCVState *env = &cpu->env;
607
608 if (qemu_tcg_mttcg_enabled()) {
609 /* Missing 128-bit aligned atomics */
610 error_report("128-bit RISC-V currently does not work with Multi "
611 "Threaded TCG. Please use: -accel tcg,thread=single");
612 exit(EXIT_FAILURE);
613 }
614
615 cpu->cfg.mmu = true;
616 cpu->cfg.pmp = true;
617
618 /* Set latest version of privileged specification */
619 env->priv_ver = PRIV_VERSION_LATEST;
620 #ifndef CONFIG_USER_ONLY
621 set_satp_mode_max_supported(RISCV_CPU(obj), VM_1_10_SV57);
622 #endif
623 }
624 #endif /* CONFIG_TCG */
625
rv64i_bare_cpu_init(Object * obj)626 static void rv64i_bare_cpu_init(Object *obj)
627 {
628 CPURISCVState *env = &RISCV_CPU(obj)->env;
629 riscv_cpu_set_misa_ext(env, RVI);
630 }
631
rv64e_bare_cpu_init(Object * obj)632 static void rv64e_bare_cpu_init(Object *obj)
633 {
634 CPURISCVState *env = &RISCV_CPU(obj)->env;
635 riscv_cpu_set_misa_ext(env, RVE);
636 }
637
638 #else /* !TARGET_RISCV64 */
639
rv32_base_cpu_init(Object * obj)640 static void rv32_base_cpu_init(Object *obj)
641 {
642 RISCVCPU *cpu = RISCV_CPU(obj);
643 CPURISCVState *env = &cpu->env;
644
645 cpu->cfg.mmu = true;
646 cpu->cfg.pmp = true;
647
648 /* Set latest version of privileged specification */
649 env->priv_ver = PRIV_VERSION_LATEST;
650 #ifndef CONFIG_USER_ONLY
651 set_satp_mode_max_supported(RISCV_CPU(obj), VM_1_10_SV32);
652 #endif
653 }
654
rv32_sifive_u_cpu_init(Object * obj)655 static void rv32_sifive_u_cpu_init(Object *obj)
656 {
657 RISCVCPU *cpu = RISCV_CPU(obj);
658 CPURISCVState *env = &cpu->env;
659 riscv_cpu_set_misa_ext(env, RVI | RVM | RVA | RVF | RVD | RVC | RVS | RVU);
660 env->priv_ver = PRIV_VERSION_1_10_0;
661 #ifndef CONFIG_USER_ONLY
662 set_satp_mode_max_supported(RISCV_CPU(obj), VM_1_10_SV32);
663 #endif
664
665 /* inherited from parent obj via riscv_cpu_init() */
666 cpu->cfg.ext_zifencei = true;
667 cpu->cfg.ext_zicsr = true;
668 cpu->cfg.mmu = true;
669 cpu->cfg.pmp = true;
670 }
671
rv32_sifive_e_cpu_init(Object * obj)672 static void rv32_sifive_e_cpu_init(Object *obj)
673 {
674 CPURISCVState *env = &RISCV_CPU(obj)->env;
675 RISCVCPU *cpu = RISCV_CPU(obj);
676
677 riscv_cpu_set_misa_ext(env, RVI | RVM | RVA | RVC | RVU);
678 env->priv_ver = PRIV_VERSION_1_10_0;
679 #ifndef CONFIG_USER_ONLY
680 set_satp_mode_max_supported(cpu, VM_1_10_MBARE);
681 #endif
682
683 /* inherited from parent obj via riscv_cpu_init() */
684 cpu->cfg.ext_zifencei = true;
685 cpu->cfg.ext_zicsr = true;
686 cpu->cfg.pmp = true;
687 }
688
rv32_ibex_cpu_init(Object * obj)689 static void rv32_ibex_cpu_init(Object *obj)
690 {
691 CPURISCVState *env = &RISCV_CPU(obj)->env;
692 RISCVCPU *cpu = RISCV_CPU(obj);
693
694 riscv_cpu_set_misa_ext(env, RVI | RVM | RVC | RVU);
695 env->priv_ver = PRIV_VERSION_1_12_0;
696 #ifndef CONFIG_USER_ONLY
697 set_satp_mode_max_supported(cpu, VM_1_10_MBARE);
698 #endif
699 /* inherited from parent obj via riscv_cpu_init() */
700 cpu->cfg.ext_zifencei = true;
701 cpu->cfg.ext_zicsr = true;
702 cpu->cfg.pmp = true;
703 cpu->cfg.ext_smepmp = true;
704 }
705
rv32_imafcu_nommu_cpu_init(Object * obj)706 static void rv32_imafcu_nommu_cpu_init(Object *obj)
707 {
708 CPURISCVState *env = &RISCV_CPU(obj)->env;
709 RISCVCPU *cpu = RISCV_CPU(obj);
710
711 riscv_cpu_set_misa_ext(env, RVI | RVM | RVA | RVF | RVC | RVU);
712 env->priv_ver = PRIV_VERSION_1_10_0;
713 #ifndef CONFIG_USER_ONLY
714 set_satp_mode_max_supported(cpu, VM_1_10_MBARE);
715 #endif
716
717 /* inherited from parent obj via riscv_cpu_init() */
718 cpu->cfg.ext_zifencei = true;
719 cpu->cfg.ext_zicsr = true;
720 cpu->cfg.pmp = true;
721 }
722
rv32i_bare_cpu_init(Object * obj)723 static void rv32i_bare_cpu_init(Object *obj)
724 {
725 CPURISCVState *env = &RISCV_CPU(obj)->env;
726 riscv_cpu_set_misa_ext(env, RVI);
727 }
728
rv32e_bare_cpu_init(Object * obj)729 static void rv32e_bare_cpu_init(Object *obj)
730 {
731 CPURISCVState *env = &RISCV_CPU(obj)->env;
732 riscv_cpu_set_misa_ext(env, RVE);
733 }
734 #endif
735
riscv_cpu_class_by_name(const char * cpu_model)736 static ObjectClass *riscv_cpu_class_by_name(const char *cpu_model)
737 {
738 ObjectClass *oc;
739 char *typename;
740 char **cpuname;
741
742 cpuname = g_strsplit(cpu_model, ",", 1);
743 typename = g_strdup_printf(RISCV_CPU_TYPE_NAME("%s"), cpuname[0]);
744 oc = object_class_by_name(typename);
745 g_strfreev(cpuname);
746 g_free(typename);
747
748 return oc;
749 }
750
riscv_cpu_get_name(RISCVCPU * cpu)751 char *riscv_cpu_get_name(RISCVCPU *cpu)
752 {
753 RISCVCPUClass *rcc = RISCV_CPU_GET_CLASS(cpu);
754 const char *typename = object_class_get_name(OBJECT_CLASS(rcc));
755
756 g_assert(g_str_has_suffix(typename, RISCV_CPU_TYPE_SUFFIX));
757
758 return cpu_model_from_type(typename);
759 }
760
riscv_cpu_dump_state(CPUState * cs,FILE * f,int flags)761 static void riscv_cpu_dump_state(CPUState *cs, FILE *f, int flags)
762 {
763 RISCVCPU *cpu = RISCV_CPU(cs);
764 CPURISCVState *env = &cpu->env;
765 int i, j;
766 uint8_t *p;
767
768 #if !defined(CONFIG_USER_ONLY)
769 if (riscv_has_ext(env, RVH)) {
770 qemu_fprintf(f, " %s %d\n", "V = ", env->virt_enabled);
771 }
772 #endif
773 qemu_fprintf(f, " %s " TARGET_FMT_lx "\n", "pc ", env->pc);
774 #ifndef CONFIG_USER_ONLY
775 {
776 static const int dump_csrs[] = {
777 CSR_MHARTID,
778 CSR_MSTATUS,
779 CSR_MSTATUSH,
780 /*
781 * CSR_SSTATUS is intentionally omitted here as its value
782 * can be figured out by looking at CSR_MSTATUS
783 */
784 CSR_HSTATUS,
785 CSR_VSSTATUS,
786 CSR_MIP,
787 CSR_MIE,
788 CSR_MIDELEG,
789 CSR_HIDELEG,
790 CSR_MEDELEG,
791 CSR_HEDELEG,
792 CSR_MTVEC,
793 CSR_STVEC,
794 CSR_VSTVEC,
795 CSR_MEPC,
796 CSR_SEPC,
797 CSR_VSEPC,
798 CSR_MCAUSE,
799 CSR_SCAUSE,
800 CSR_VSCAUSE,
801 CSR_MTVAL,
802 CSR_STVAL,
803 CSR_HTVAL,
804 CSR_MTVAL2,
805 CSR_MSCRATCH,
806 CSR_SSCRATCH,
807 CSR_SATP,
808 CSR_MMTE,
809 CSR_UPMBASE,
810 CSR_UPMMASK,
811 CSR_SPMBASE,
812 CSR_SPMMASK,
813 CSR_MPMBASE,
814 CSR_MPMMASK,
815 };
816
817 for (i = 0; i < ARRAY_SIZE(dump_csrs); ++i) {
818 int csrno = dump_csrs[i];
819 target_ulong val = 0;
820 RISCVException res = riscv_csrrw_debug(env, csrno, &val, 0, 0);
821
822 /*
823 * Rely on the smode, hmode, etc, predicates within csr.c
824 * to do the filtering of the registers that are present.
825 */
826 if (res == RISCV_EXCP_NONE) {
827 qemu_fprintf(f, " %-8s " TARGET_FMT_lx "\n",
828 csr_ops[csrno].name, val);
829 }
830 }
831 }
832 #endif
833
834 for (i = 0; i < 32; i++) {
835 qemu_fprintf(f, " %-8s " TARGET_FMT_lx,
836 riscv_int_regnames[i], env->gpr[i]);
837 if ((i & 3) == 3) {
838 qemu_fprintf(f, "\n");
839 }
840 }
841 if (flags & CPU_DUMP_FPU) {
842 for (i = 0; i < 32; i++) {
843 qemu_fprintf(f, " %-8s %016" PRIx64,
844 riscv_fpr_regnames[i], env->fpr[i]);
845 if ((i & 3) == 3) {
846 qemu_fprintf(f, "\n");
847 }
848 }
849 }
850 if (riscv_has_ext(env, RVV) && (flags & CPU_DUMP_VPU)) {
851 static const int dump_rvv_csrs[] = {
852 CSR_VSTART,
853 CSR_VXSAT,
854 CSR_VXRM,
855 CSR_VCSR,
856 CSR_VL,
857 CSR_VTYPE,
858 CSR_VLENB,
859 };
860 for (i = 0; i < ARRAY_SIZE(dump_rvv_csrs); ++i) {
861 int csrno = dump_rvv_csrs[i];
862 target_ulong val = 0;
863 RISCVException res = riscv_csrrw_debug(env, csrno, &val, 0, 0);
864
865 /*
866 * Rely on the smode, hmode, etc, predicates within csr.c
867 * to do the filtering of the registers that are present.
868 */
869 if (res == RISCV_EXCP_NONE) {
870 qemu_fprintf(f, " %-8s " TARGET_FMT_lx "\n",
871 csr_ops[csrno].name, val);
872 }
873 }
874 uint16_t vlenb = cpu->cfg.vlenb;
875
876 for (i = 0; i < 32; i++) {
877 qemu_fprintf(f, " %-8s ", riscv_rvv_regnames[i]);
878 p = (uint8_t *)env->vreg;
879 for (j = vlenb - 1 ; j >= 0; j--) {
880 qemu_fprintf(f, "%02x", *(p + i * vlenb + BYTE(j)));
881 }
882 qemu_fprintf(f, "\n");
883 }
884 }
885 }
886
riscv_cpu_set_pc(CPUState * cs,vaddr value)887 static void riscv_cpu_set_pc(CPUState *cs, vaddr value)
888 {
889 RISCVCPU *cpu = RISCV_CPU(cs);
890 CPURISCVState *env = &cpu->env;
891
892 if (env->xl == MXL_RV32) {
893 env->pc = (int32_t)value;
894 } else {
895 env->pc = value;
896 }
897 }
898
riscv_cpu_get_pc(CPUState * cs)899 static vaddr riscv_cpu_get_pc(CPUState *cs)
900 {
901 RISCVCPU *cpu = RISCV_CPU(cs);
902 CPURISCVState *env = &cpu->env;
903
904 /* Match cpu_get_tb_cpu_state. */
905 if (env->xl == MXL_RV32) {
906 return env->pc & UINT32_MAX;
907 }
908 return env->pc;
909 }
910
riscv_cpu_has_work(CPUState * cs)911 bool riscv_cpu_has_work(CPUState *cs)
912 {
913 #ifndef CONFIG_USER_ONLY
914 RISCVCPU *cpu = RISCV_CPU(cs);
915 CPURISCVState *env = &cpu->env;
916 /*
917 * Definition of the WFI instruction requires it to ignore the privilege
918 * mode and delegation registers, but respect individual enables
919 */
920 return riscv_cpu_all_pending(env) != 0 ||
921 riscv_cpu_sirq_pending(env) != RISCV_EXCP_NONE ||
922 riscv_cpu_vsirq_pending(env) != RISCV_EXCP_NONE;
923 #else
924 return true;
925 #endif
926 }
927
riscv_cpu_mmu_index(CPUState * cs,bool ifetch)928 static int riscv_cpu_mmu_index(CPUState *cs, bool ifetch)
929 {
930 return riscv_env_mmu_index(cpu_env(cs), ifetch);
931 }
932
riscv_cpu_reset_hold(Object * obj,ResetType type)933 static void riscv_cpu_reset_hold(Object *obj, ResetType type)
934 {
935 #ifndef CONFIG_USER_ONLY
936 uint8_t iprio;
937 int i, irq, rdzero;
938 #endif
939 CPUState *cs = CPU(obj);
940 RISCVCPU *cpu = RISCV_CPU(cs);
941 RISCVCPUClass *mcc = RISCV_CPU_GET_CLASS(obj);
942 CPURISCVState *env = &cpu->env;
943
944 if (mcc->parent_phases.hold) {
945 mcc->parent_phases.hold(obj, type);
946 }
947 #ifndef CONFIG_USER_ONLY
948 env->misa_mxl = mcc->misa_mxl_max;
949 env->priv = PRV_M;
950 env->mstatus &= ~(MSTATUS_MIE | MSTATUS_MPRV);
951 if (env->misa_mxl > MXL_RV32) {
952 /*
953 * The reset status of SXL/UXL is undefined, but mstatus is WARL
954 * and we must ensure that the value after init is valid for read.
955 */
956 env->mstatus = set_field(env->mstatus, MSTATUS64_SXL, env->misa_mxl);
957 env->mstatus = set_field(env->mstatus, MSTATUS64_UXL, env->misa_mxl);
958 if (riscv_has_ext(env, RVH)) {
959 env->vsstatus = set_field(env->vsstatus,
960 MSTATUS64_SXL, env->misa_mxl);
961 env->vsstatus = set_field(env->vsstatus,
962 MSTATUS64_UXL, env->misa_mxl);
963 env->mstatus_hs = set_field(env->mstatus_hs,
964 MSTATUS64_SXL, env->misa_mxl);
965 env->mstatus_hs = set_field(env->mstatus_hs,
966 MSTATUS64_UXL, env->misa_mxl);
967 }
968 }
969 env->mcause = 0;
970 env->miclaim = MIP_SGEIP;
971 env->pc = env->resetvec;
972 env->bins = 0;
973 env->two_stage_lookup = false;
974
975 env->menvcfg = (cpu->cfg.ext_svpbmt ? MENVCFG_PBMTE : 0) |
976 (!cpu->cfg.ext_svade && cpu->cfg.ext_svadu ?
977 MENVCFG_ADUE : 0);
978 env->henvcfg = 0;
979
980 /* Initialized default priorities of local interrupts. */
981 for (i = 0; i < ARRAY_SIZE(env->miprio); i++) {
982 iprio = riscv_cpu_default_priority(i);
983 env->miprio[i] = (i == IRQ_M_EXT) ? 0 : iprio;
984 env->siprio[i] = (i == IRQ_S_EXT) ? 0 : iprio;
985 env->hviprio[i] = 0;
986 }
987 i = 0;
988 while (!riscv_cpu_hviprio_index2irq(i, &irq, &rdzero)) {
989 if (!rdzero) {
990 env->hviprio[irq] = env->miprio[irq];
991 }
992 i++;
993 }
994 /* mmte is supposed to have pm.current hardwired to 1 */
995 env->mmte |= (EXT_STATUS_INITIAL | MMTE_M_PM_CURRENT);
996
997 /*
998 * Bits 10, 6, 2 and 12 of mideleg are read only 1 when the Hypervisor
999 * extension is enabled.
1000 */
1001 if (riscv_has_ext(env, RVH)) {
1002 env->mideleg |= HS_MODE_INTERRUPTS;
1003 }
1004
1005 /*
1006 * Clear mseccfg and unlock all the PMP entries upon reset.
1007 * This is allowed as per the priv and smepmp specifications
1008 * and is needed to clear stale entries across reboots.
1009 */
1010 if (riscv_cpu_cfg(env)->ext_smepmp) {
1011 env->mseccfg = 0;
1012 }
1013
1014 pmp_unlock_entries(env);
1015 #endif
1016 env->xl = riscv_cpu_mxl(env);
1017 riscv_cpu_update_mask(env);
1018 cs->exception_index = RISCV_EXCP_NONE;
1019 env->load_res = -1;
1020 set_default_nan_mode(1, &env->fp_status);
1021 env->vill = true;
1022
1023 #ifndef CONFIG_USER_ONLY
1024 if (cpu->cfg.debug) {
1025 riscv_trigger_reset_hold(env);
1026 }
1027
1028 if (kvm_enabled()) {
1029 kvm_riscv_reset_vcpu(cpu);
1030 }
1031 #endif
1032 }
1033
riscv_cpu_disas_set_info(CPUState * s,disassemble_info * info)1034 static void riscv_cpu_disas_set_info(CPUState *s, disassemble_info *info)
1035 {
1036 RISCVCPU *cpu = RISCV_CPU(s);
1037 CPURISCVState *env = &cpu->env;
1038 info->target_info = &cpu->cfg;
1039
1040 switch (env->xl) {
1041 case MXL_RV32:
1042 info->print_insn = print_insn_riscv32;
1043 break;
1044 case MXL_RV64:
1045 info->print_insn = print_insn_riscv64;
1046 break;
1047 case MXL_RV128:
1048 info->print_insn = print_insn_riscv128;
1049 break;
1050 default:
1051 g_assert_not_reached();
1052 }
1053 }
1054
1055 #ifndef CONFIG_USER_ONLY
riscv_cpu_satp_mode_finalize(RISCVCPU * cpu,Error ** errp)1056 static void riscv_cpu_satp_mode_finalize(RISCVCPU *cpu, Error **errp)
1057 {
1058 bool rv32 = riscv_cpu_is_32bit(cpu);
1059 uint8_t satp_mode_map_max, satp_mode_supported_max;
1060
1061 /* The CPU wants the OS to decide which satp mode to use */
1062 if (cpu->cfg.satp_mode.supported == 0) {
1063 return;
1064 }
1065
1066 satp_mode_supported_max =
1067 satp_mode_max_from_map(cpu->cfg.satp_mode.supported);
1068
1069 if (cpu->cfg.satp_mode.map == 0) {
1070 if (cpu->cfg.satp_mode.init == 0) {
1071 /* If unset by the user, we fallback to the default satp mode. */
1072 set_satp_mode_default_map(cpu);
1073 } else {
1074 /*
1075 * Find the lowest level that was disabled and then enable the
1076 * first valid level below which can be found in
1077 * valid_vm_1_10_32/64.
1078 */
1079 for (int i = 1; i < 16; ++i) {
1080 if ((cpu->cfg.satp_mode.init & (1 << i)) &&
1081 (cpu->cfg.satp_mode.supported & (1 << i))) {
1082 for (int j = i - 1; j >= 0; --j) {
1083 if (cpu->cfg.satp_mode.supported & (1 << j)) {
1084 cpu->cfg.satp_mode.map |= (1 << j);
1085 break;
1086 }
1087 }
1088 break;
1089 }
1090 }
1091 }
1092 }
1093
1094 satp_mode_map_max = satp_mode_max_from_map(cpu->cfg.satp_mode.map);
1095
1096 /* Make sure the user asked for a supported configuration (HW and qemu) */
1097 if (satp_mode_map_max > satp_mode_supported_max) {
1098 error_setg(errp, "satp_mode %s is higher than hw max capability %s",
1099 satp_mode_str(satp_mode_map_max, rv32),
1100 satp_mode_str(satp_mode_supported_max, rv32));
1101 return;
1102 }
1103
1104 /*
1105 * Make sure the user did not ask for an invalid configuration as per
1106 * the specification.
1107 */
1108 if (!rv32) {
1109 for (int i = satp_mode_map_max - 1; i >= 0; --i) {
1110 if (!(cpu->cfg.satp_mode.map & (1 << i)) &&
1111 (cpu->cfg.satp_mode.init & (1 << i)) &&
1112 (cpu->cfg.satp_mode.supported & (1 << i))) {
1113 error_setg(errp, "cannot disable %s satp mode if %s "
1114 "is enabled", satp_mode_str(i, false),
1115 satp_mode_str(satp_mode_map_max, false));
1116 return;
1117 }
1118 }
1119 }
1120
1121 /* Finally expand the map so that all valid modes are set */
1122 for (int i = satp_mode_map_max - 1; i >= 0; --i) {
1123 if (cpu->cfg.satp_mode.supported & (1 << i)) {
1124 cpu->cfg.satp_mode.map |= (1 << i);
1125 }
1126 }
1127 }
1128 #endif
1129
riscv_cpu_finalize_features(RISCVCPU * cpu,Error ** errp)1130 void riscv_cpu_finalize_features(RISCVCPU *cpu, Error **errp)
1131 {
1132 Error *local_err = NULL;
1133
1134 #ifndef CONFIG_USER_ONLY
1135 riscv_cpu_satp_mode_finalize(cpu, &local_err);
1136 if (local_err != NULL) {
1137 error_propagate(errp, local_err);
1138 return;
1139 }
1140 #endif
1141
1142 if (tcg_enabled()) {
1143 riscv_tcg_cpu_finalize_features(cpu, &local_err);
1144 if (local_err != NULL) {
1145 error_propagate(errp, local_err);
1146 return;
1147 }
1148 riscv_tcg_cpu_finalize_dynamic_decoder(cpu);
1149 } else if (kvm_enabled()) {
1150 riscv_kvm_cpu_finalize_features(cpu, &local_err);
1151 if (local_err != NULL) {
1152 error_propagate(errp, local_err);
1153 return;
1154 }
1155 }
1156 }
1157
riscv_cpu_realize(DeviceState * dev,Error ** errp)1158 static void riscv_cpu_realize(DeviceState *dev, Error **errp)
1159 {
1160 CPUState *cs = CPU(dev);
1161 RISCVCPU *cpu = RISCV_CPU(dev);
1162 RISCVCPUClass *mcc = RISCV_CPU_GET_CLASS(dev);
1163 Error *local_err = NULL;
1164
1165 if (object_dynamic_cast(OBJECT(dev), TYPE_RISCV_CPU_ANY) != NULL) {
1166 warn_report("The 'any' CPU is deprecated and will be "
1167 "removed in the future.");
1168 }
1169
1170 cpu_exec_realizefn(cs, &local_err);
1171 if (local_err != NULL) {
1172 error_propagate(errp, local_err);
1173 return;
1174 }
1175
1176 riscv_cpu_finalize_features(cpu, &local_err);
1177 if (local_err != NULL) {
1178 error_propagate(errp, local_err);
1179 return;
1180 }
1181
1182 riscv_cpu_register_gdb_regs_for_features(cs);
1183
1184 #ifndef CONFIG_USER_ONLY
1185 if (cpu->cfg.debug) {
1186 riscv_trigger_realize(&cpu->env);
1187 }
1188 #endif
1189
1190 qemu_init_vcpu(cs);
1191 cpu_reset(cs);
1192
1193 mcc->parent_realize(dev, errp);
1194 }
1195
riscv_cpu_accelerator_compatible(RISCVCPU * cpu)1196 bool riscv_cpu_accelerator_compatible(RISCVCPU *cpu)
1197 {
1198 if (tcg_enabled()) {
1199 return riscv_cpu_tcg_compatible(cpu);
1200 }
1201
1202 return true;
1203 }
1204
1205 #ifndef CONFIG_USER_ONLY
cpu_riscv_get_satp(Object * obj,Visitor * v,const char * name,void * opaque,Error ** errp)1206 static void cpu_riscv_get_satp(Object *obj, Visitor *v, const char *name,
1207 void *opaque, Error **errp)
1208 {
1209 RISCVSATPMap *satp_map = opaque;
1210 uint8_t satp = satp_mode_from_str(name);
1211 bool value;
1212
1213 value = satp_map->map & (1 << satp);
1214
1215 visit_type_bool(v, name, &value, errp);
1216 }
1217
cpu_riscv_set_satp(Object * obj,Visitor * v,const char * name,void * opaque,Error ** errp)1218 static void cpu_riscv_set_satp(Object *obj, Visitor *v, const char *name,
1219 void *opaque, Error **errp)
1220 {
1221 RISCVSATPMap *satp_map = opaque;
1222 uint8_t satp = satp_mode_from_str(name);
1223 bool value;
1224
1225 if (!visit_type_bool(v, name, &value, errp)) {
1226 return;
1227 }
1228
1229 satp_map->map = deposit32(satp_map->map, satp, 1, value);
1230 satp_map->init |= 1 << satp;
1231 }
1232
riscv_add_satp_mode_properties(Object * obj)1233 void riscv_add_satp_mode_properties(Object *obj)
1234 {
1235 RISCVCPU *cpu = RISCV_CPU(obj);
1236
1237 if (cpu->env.misa_mxl == MXL_RV32) {
1238 object_property_add(obj, "sv32", "bool", cpu_riscv_get_satp,
1239 cpu_riscv_set_satp, NULL, &cpu->cfg.satp_mode);
1240 } else {
1241 object_property_add(obj, "sv39", "bool", cpu_riscv_get_satp,
1242 cpu_riscv_set_satp, NULL, &cpu->cfg.satp_mode);
1243 object_property_add(obj, "sv48", "bool", cpu_riscv_get_satp,
1244 cpu_riscv_set_satp, NULL, &cpu->cfg.satp_mode);
1245 object_property_add(obj, "sv57", "bool", cpu_riscv_get_satp,
1246 cpu_riscv_set_satp, NULL, &cpu->cfg.satp_mode);
1247 object_property_add(obj, "sv64", "bool", cpu_riscv_get_satp,
1248 cpu_riscv_set_satp, NULL, &cpu->cfg.satp_mode);
1249 }
1250 }
1251
riscv_cpu_set_irq(void * opaque,int irq,int level)1252 static void riscv_cpu_set_irq(void *opaque, int irq, int level)
1253 {
1254 RISCVCPU *cpu = RISCV_CPU(opaque);
1255 CPURISCVState *env = &cpu->env;
1256
1257 if (irq < IRQ_LOCAL_MAX) {
1258 switch (irq) {
1259 case IRQ_U_SOFT:
1260 case IRQ_S_SOFT:
1261 case IRQ_VS_SOFT:
1262 case IRQ_M_SOFT:
1263 case IRQ_U_TIMER:
1264 case IRQ_S_TIMER:
1265 case IRQ_VS_TIMER:
1266 case IRQ_M_TIMER:
1267 case IRQ_U_EXT:
1268 case IRQ_VS_EXT:
1269 case IRQ_M_EXT:
1270 if (kvm_enabled()) {
1271 kvm_riscv_set_irq(cpu, irq, level);
1272 } else {
1273 riscv_cpu_update_mip(env, 1 << irq, BOOL_TO_MASK(level));
1274 }
1275 break;
1276 case IRQ_S_EXT:
1277 if (kvm_enabled()) {
1278 kvm_riscv_set_irq(cpu, irq, level);
1279 } else {
1280 env->external_seip = level;
1281 riscv_cpu_update_mip(env, 1 << irq,
1282 BOOL_TO_MASK(level | env->software_seip));
1283 }
1284 break;
1285 default:
1286 g_assert_not_reached();
1287 }
1288 } else if (irq < (IRQ_LOCAL_MAX + IRQ_LOCAL_GUEST_MAX)) {
1289 /* Require H-extension for handling guest local interrupts */
1290 if (!riscv_has_ext(env, RVH)) {
1291 g_assert_not_reached();
1292 }
1293
1294 /* Compute bit position in HGEIP CSR */
1295 irq = irq - IRQ_LOCAL_MAX + 1;
1296 if (env->geilen < irq) {
1297 g_assert_not_reached();
1298 }
1299
1300 /* Update HGEIP CSR */
1301 env->hgeip &= ~((target_ulong)1 << irq);
1302 if (level) {
1303 env->hgeip |= (target_ulong)1 << irq;
1304 }
1305
1306 /* Update mip.SGEIP bit */
1307 riscv_cpu_update_mip(env, MIP_SGEIP,
1308 BOOL_TO_MASK(!!(env->hgeie & env->hgeip)));
1309 } else {
1310 g_assert_not_reached();
1311 }
1312 }
1313 #endif /* CONFIG_USER_ONLY */
1314
riscv_cpu_is_dynamic(Object * cpu_obj)1315 static bool riscv_cpu_is_dynamic(Object *cpu_obj)
1316 {
1317 return object_dynamic_cast(cpu_obj, TYPE_RISCV_DYNAMIC_CPU) != NULL;
1318 }
1319
riscv_cpu_post_init(Object * obj)1320 static void riscv_cpu_post_init(Object *obj)
1321 {
1322 accel_cpu_instance_init(CPU(obj));
1323 }
1324
riscv_cpu_init(Object * obj)1325 static void riscv_cpu_init(Object *obj)
1326 {
1327 RISCVCPUClass *mcc = RISCV_CPU_GET_CLASS(obj);
1328 RISCVCPU *cpu = RISCV_CPU(obj);
1329 CPURISCVState *env = &cpu->env;
1330
1331 env->misa_mxl = mcc->misa_mxl_max;
1332
1333 #ifndef CONFIG_USER_ONLY
1334 qdev_init_gpio_in(DEVICE(obj), riscv_cpu_set_irq,
1335 IRQ_LOCAL_MAX + IRQ_LOCAL_GUEST_MAX);
1336 #endif /* CONFIG_USER_ONLY */
1337
1338 general_user_opts = g_hash_table_new(g_str_hash, g_str_equal);
1339
1340 /*
1341 * The timer and performance counters extensions were supported
1342 * in QEMU before they were added as discrete extensions in the
1343 * ISA. To keep compatibility we'll always default them to 'true'
1344 * for all CPUs. Each accelerator will decide what to do when
1345 * users disable them.
1346 */
1347 RISCV_CPU(obj)->cfg.ext_zicntr = true;
1348 RISCV_CPU(obj)->cfg.ext_zihpm = true;
1349
1350 /* Default values for non-bool cpu properties */
1351 cpu->cfg.pmu_mask = MAKE_64BIT_MASK(3, 16);
1352 cpu->cfg.vlenb = 128 >> 3;
1353 cpu->cfg.elen = 64;
1354 cpu->cfg.cbom_blocksize = 64;
1355 cpu->cfg.cbop_blocksize = 64;
1356 cpu->cfg.cboz_blocksize = 64;
1357 cpu->env.vext_ver = VEXT_VERSION_1_00_0;
1358 }
1359
riscv_bare_cpu_init(Object * obj)1360 static void riscv_bare_cpu_init(Object *obj)
1361 {
1362 RISCVCPU *cpu = RISCV_CPU(obj);
1363
1364 /*
1365 * Bare CPUs do not inherit the timer and performance
1366 * counters from the parent class (see riscv_cpu_init()
1367 * for info on why the parent enables them).
1368 *
1369 * Users have to explicitly enable these counters for
1370 * bare CPUs.
1371 */
1372 cpu->cfg.ext_zicntr = false;
1373 cpu->cfg.ext_zihpm = false;
1374
1375 /* Set to QEMU's first supported priv version */
1376 cpu->env.priv_ver = PRIV_VERSION_1_10_0;
1377
1378 /*
1379 * Support all available satp_mode settings. The default
1380 * value will be set to MBARE if the user doesn't set
1381 * satp_mode manually (see set_satp_mode_default()).
1382 */
1383 #ifndef CONFIG_USER_ONLY
1384 set_satp_mode_max_supported(cpu, VM_1_10_SV64);
1385 #endif
1386 }
1387
1388 typedef struct misa_ext_info {
1389 const char *name;
1390 const char *description;
1391 } MISAExtInfo;
1392
1393 #define MISA_INFO_IDX(_bit) \
1394 __builtin_ctz(_bit)
1395
1396 #define MISA_EXT_INFO(_bit, _propname, _descr) \
1397 [MISA_INFO_IDX(_bit)] = {.name = _propname, .description = _descr}
1398
1399 static const MISAExtInfo misa_ext_info_arr[] = {
1400 MISA_EXT_INFO(RVA, "a", "Atomic instructions"),
1401 MISA_EXT_INFO(RVC, "c", "Compressed instructions"),
1402 MISA_EXT_INFO(RVD, "d", "Double-precision float point"),
1403 MISA_EXT_INFO(RVF, "f", "Single-precision float point"),
1404 MISA_EXT_INFO(RVI, "i", "Base integer instruction set"),
1405 MISA_EXT_INFO(RVE, "e", "Base integer instruction set (embedded)"),
1406 MISA_EXT_INFO(RVM, "m", "Integer multiplication and division"),
1407 MISA_EXT_INFO(RVS, "s", "Supervisor-level instructions"),
1408 MISA_EXT_INFO(RVU, "u", "User-level instructions"),
1409 MISA_EXT_INFO(RVH, "h", "Hypervisor"),
1410 MISA_EXT_INFO(RVJ, "x-j", "Dynamic translated languages"),
1411 MISA_EXT_INFO(RVV, "v", "Vector operations"),
1412 MISA_EXT_INFO(RVG, "g", "General purpose (IMAFD_Zicsr_Zifencei)"),
1413 MISA_EXT_INFO(RVB, "b", "Bit manipulation (Zba_Zbb_Zbs)")
1414 };
1415
riscv_cpu_validate_misa_mxl(RISCVCPUClass * mcc)1416 static void riscv_cpu_validate_misa_mxl(RISCVCPUClass *mcc)
1417 {
1418 CPUClass *cc = CPU_CLASS(mcc);
1419
1420 /* Validate that MISA_MXL is set properly. */
1421 switch (mcc->misa_mxl_max) {
1422 #ifdef TARGET_RISCV64
1423 case MXL_RV64:
1424 case MXL_RV128:
1425 cc->gdb_core_xml_file = "riscv-64bit-cpu.xml";
1426 break;
1427 #endif
1428 case MXL_RV32:
1429 cc->gdb_core_xml_file = "riscv-32bit-cpu.xml";
1430 break;
1431 default:
1432 g_assert_not_reached();
1433 }
1434 }
1435
riscv_validate_misa_info_idx(uint32_t bit)1436 static int riscv_validate_misa_info_idx(uint32_t bit)
1437 {
1438 int idx;
1439
1440 /*
1441 * Our lowest valid input (RVA) is 1 and
1442 * __builtin_ctz() is UB with zero.
1443 */
1444 g_assert(bit != 0);
1445 idx = MISA_INFO_IDX(bit);
1446
1447 g_assert(idx < ARRAY_SIZE(misa_ext_info_arr));
1448 return idx;
1449 }
1450
riscv_get_misa_ext_name(uint32_t bit)1451 const char *riscv_get_misa_ext_name(uint32_t bit)
1452 {
1453 int idx = riscv_validate_misa_info_idx(bit);
1454 const char *val = misa_ext_info_arr[idx].name;
1455
1456 g_assert(val != NULL);
1457 return val;
1458 }
1459
riscv_get_misa_ext_description(uint32_t bit)1460 const char *riscv_get_misa_ext_description(uint32_t bit)
1461 {
1462 int idx = riscv_validate_misa_info_idx(bit);
1463 const char *val = misa_ext_info_arr[idx].description;
1464
1465 g_assert(val != NULL);
1466 return val;
1467 }
1468
1469 #define MULTI_EXT_CFG_BOOL(_name, _prop, _defval) \
1470 {.name = _name, .offset = CPU_CFG_OFFSET(_prop), \
1471 .enabled = _defval}
1472
1473 const RISCVCPUMultiExtConfig riscv_cpu_extensions[] = {
1474 /* Defaults for standard extensions */
1475 MULTI_EXT_CFG_BOOL("sscofpmf", ext_sscofpmf, false),
1476 MULTI_EXT_CFG_BOOL("smcntrpmf", ext_smcntrpmf, false),
1477 MULTI_EXT_CFG_BOOL("zifencei", ext_zifencei, true),
1478 MULTI_EXT_CFG_BOOL("zicsr", ext_zicsr, true),
1479 MULTI_EXT_CFG_BOOL("zihintntl", ext_zihintntl, true),
1480 MULTI_EXT_CFG_BOOL("zihintpause", ext_zihintpause, true),
1481 MULTI_EXT_CFG_BOOL("zimop", ext_zimop, false),
1482 MULTI_EXT_CFG_BOOL("zcmop", ext_zcmop, false),
1483 MULTI_EXT_CFG_BOOL("zacas", ext_zacas, false),
1484 MULTI_EXT_CFG_BOOL("zama16b", ext_zama16b, false),
1485 MULTI_EXT_CFG_BOOL("zabha", ext_zabha, false),
1486 MULTI_EXT_CFG_BOOL("zaamo", ext_zaamo, false),
1487 MULTI_EXT_CFG_BOOL("zalrsc", ext_zalrsc, false),
1488 MULTI_EXT_CFG_BOOL("zawrs", ext_zawrs, true),
1489 MULTI_EXT_CFG_BOOL("zfa", ext_zfa, true),
1490 MULTI_EXT_CFG_BOOL("zfbfmin", ext_zfbfmin, false),
1491 MULTI_EXT_CFG_BOOL("zfh", ext_zfh, false),
1492 MULTI_EXT_CFG_BOOL("zfhmin", ext_zfhmin, false),
1493 MULTI_EXT_CFG_BOOL("zve32f", ext_zve32f, false),
1494 MULTI_EXT_CFG_BOOL("zve32x", ext_zve32x, false),
1495 MULTI_EXT_CFG_BOOL("zve64f", ext_zve64f, false),
1496 MULTI_EXT_CFG_BOOL("zve64d", ext_zve64d, false),
1497 MULTI_EXT_CFG_BOOL("zve64x", ext_zve64x, false),
1498 MULTI_EXT_CFG_BOOL("zvfbfmin", ext_zvfbfmin, false),
1499 MULTI_EXT_CFG_BOOL("zvfbfwma", ext_zvfbfwma, false),
1500 MULTI_EXT_CFG_BOOL("zvfh", ext_zvfh, false),
1501 MULTI_EXT_CFG_BOOL("zvfhmin", ext_zvfhmin, false),
1502 MULTI_EXT_CFG_BOOL("sstc", ext_sstc, true),
1503
1504 MULTI_EXT_CFG_BOOL("smaia", ext_smaia, false),
1505 MULTI_EXT_CFG_BOOL("smepmp", ext_smepmp, false),
1506 MULTI_EXT_CFG_BOOL("smstateen", ext_smstateen, false),
1507 MULTI_EXT_CFG_BOOL("ssaia", ext_ssaia, false),
1508 MULTI_EXT_CFG_BOOL("svade", ext_svade, false),
1509 MULTI_EXT_CFG_BOOL("svadu", ext_svadu, true),
1510 MULTI_EXT_CFG_BOOL("svinval", ext_svinval, false),
1511 MULTI_EXT_CFG_BOOL("svnapot", ext_svnapot, false),
1512 MULTI_EXT_CFG_BOOL("svpbmt", ext_svpbmt, false),
1513
1514 MULTI_EXT_CFG_BOOL("zicntr", ext_zicntr, true),
1515 MULTI_EXT_CFG_BOOL("zihpm", ext_zihpm, true),
1516
1517 MULTI_EXT_CFG_BOOL("zba", ext_zba, true),
1518 MULTI_EXT_CFG_BOOL("zbb", ext_zbb, true),
1519 MULTI_EXT_CFG_BOOL("zbc", ext_zbc, true),
1520 MULTI_EXT_CFG_BOOL("zbkb", ext_zbkb, false),
1521 MULTI_EXT_CFG_BOOL("zbkc", ext_zbkc, false),
1522 MULTI_EXT_CFG_BOOL("zbkx", ext_zbkx, false),
1523 MULTI_EXT_CFG_BOOL("zbs", ext_zbs, true),
1524 MULTI_EXT_CFG_BOOL("zk", ext_zk, false),
1525 MULTI_EXT_CFG_BOOL("zkn", ext_zkn, false),
1526 MULTI_EXT_CFG_BOOL("zknd", ext_zknd, false),
1527 MULTI_EXT_CFG_BOOL("zkne", ext_zkne, false),
1528 MULTI_EXT_CFG_BOOL("zknh", ext_zknh, false),
1529 MULTI_EXT_CFG_BOOL("zkr", ext_zkr, false),
1530 MULTI_EXT_CFG_BOOL("zks", ext_zks, false),
1531 MULTI_EXT_CFG_BOOL("zksed", ext_zksed, false),
1532 MULTI_EXT_CFG_BOOL("zksh", ext_zksh, false),
1533 MULTI_EXT_CFG_BOOL("zkt", ext_zkt, false),
1534 MULTI_EXT_CFG_BOOL("ztso", ext_ztso, false),
1535
1536 MULTI_EXT_CFG_BOOL("zdinx", ext_zdinx, false),
1537 MULTI_EXT_CFG_BOOL("zfinx", ext_zfinx, false),
1538 MULTI_EXT_CFG_BOOL("zhinx", ext_zhinx, false),
1539 MULTI_EXT_CFG_BOOL("zhinxmin", ext_zhinxmin, false),
1540
1541 MULTI_EXT_CFG_BOOL("zicbom", ext_zicbom, true),
1542 MULTI_EXT_CFG_BOOL("zicbop", ext_zicbop, true),
1543 MULTI_EXT_CFG_BOOL("zicboz", ext_zicboz, true),
1544
1545 MULTI_EXT_CFG_BOOL("zmmul", ext_zmmul, false),
1546
1547 MULTI_EXT_CFG_BOOL("zca", ext_zca, false),
1548 MULTI_EXT_CFG_BOOL("zcb", ext_zcb, false),
1549 MULTI_EXT_CFG_BOOL("zcd", ext_zcd, false),
1550 MULTI_EXT_CFG_BOOL("zce", ext_zce, false),
1551 MULTI_EXT_CFG_BOOL("zcf", ext_zcf, false),
1552 MULTI_EXT_CFG_BOOL("zcmp", ext_zcmp, false),
1553 MULTI_EXT_CFG_BOOL("zcmt", ext_zcmt, false),
1554 MULTI_EXT_CFG_BOOL("zicond", ext_zicond, false),
1555
1556 /* Vector cryptography extensions */
1557 MULTI_EXT_CFG_BOOL("zvbb", ext_zvbb, false),
1558 MULTI_EXT_CFG_BOOL("zvbc", ext_zvbc, false),
1559 MULTI_EXT_CFG_BOOL("zvkb", ext_zvkb, false),
1560 MULTI_EXT_CFG_BOOL("zvkg", ext_zvkg, false),
1561 MULTI_EXT_CFG_BOOL("zvkned", ext_zvkned, false),
1562 MULTI_EXT_CFG_BOOL("zvknha", ext_zvknha, false),
1563 MULTI_EXT_CFG_BOOL("zvknhb", ext_zvknhb, false),
1564 MULTI_EXT_CFG_BOOL("zvksed", ext_zvksed, false),
1565 MULTI_EXT_CFG_BOOL("zvksh", ext_zvksh, false),
1566 MULTI_EXT_CFG_BOOL("zvkt", ext_zvkt, false),
1567 MULTI_EXT_CFG_BOOL("zvkn", ext_zvkn, false),
1568 MULTI_EXT_CFG_BOOL("zvknc", ext_zvknc, false),
1569 MULTI_EXT_CFG_BOOL("zvkng", ext_zvkng, false),
1570 MULTI_EXT_CFG_BOOL("zvks", ext_zvks, false),
1571 MULTI_EXT_CFG_BOOL("zvksc", ext_zvksc, false),
1572 MULTI_EXT_CFG_BOOL("zvksg", ext_zvksg, false),
1573
1574 DEFINE_PROP_END_OF_LIST(),
1575 };
1576
1577 const RISCVCPUMultiExtConfig riscv_cpu_vendor_exts[] = {
1578 MULTI_EXT_CFG_BOOL("xtheadba", ext_xtheadba, false),
1579 MULTI_EXT_CFG_BOOL("xtheadbb", ext_xtheadbb, false),
1580 MULTI_EXT_CFG_BOOL("xtheadbs", ext_xtheadbs, false),
1581 MULTI_EXT_CFG_BOOL("xtheadcmo", ext_xtheadcmo, false),
1582 MULTI_EXT_CFG_BOOL("xtheadcondmov", ext_xtheadcondmov, false),
1583 MULTI_EXT_CFG_BOOL("xtheadfmemidx", ext_xtheadfmemidx, false),
1584 MULTI_EXT_CFG_BOOL("xtheadfmv", ext_xtheadfmv, false),
1585 MULTI_EXT_CFG_BOOL("xtheadmac", ext_xtheadmac, false),
1586 MULTI_EXT_CFG_BOOL("xtheadmemidx", ext_xtheadmemidx, false),
1587 MULTI_EXT_CFG_BOOL("xtheadmempair", ext_xtheadmempair, false),
1588 MULTI_EXT_CFG_BOOL("xtheadsync", ext_xtheadsync, false),
1589 MULTI_EXT_CFG_BOOL("xventanacondops", ext_XVentanaCondOps, false),
1590
1591 DEFINE_PROP_END_OF_LIST(),
1592 };
1593
1594 /* These are experimental so mark with 'x-' */
1595 const RISCVCPUMultiExtConfig riscv_cpu_experimental_exts[] = {
1596 DEFINE_PROP_END_OF_LIST(),
1597 };
1598
1599 /*
1600 * 'Named features' is the name we give to extensions that we
1601 * don't want to expose to users. They are either immutable
1602 * (always enabled/disable) or they'll vary depending on
1603 * the resulting CPU state. They have riscv,isa strings
1604 * and priv_ver like regular extensions.
1605 */
1606 const RISCVCPUMultiExtConfig riscv_cpu_named_features[] = {
1607 MULTI_EXT_CFG_BOOL("zic64b", ext_zic64b, true),
1608
1609 DEFINE_PROP_END_OF_LIST(),
1610 };
1611
1612 /* Deprecated entries marked for future removal */
1613 const RISCVCPUMultiExtConfig riscv_cpu_deprecated_exts[] = {
1614 MULTI_EXT_CFG_BOOL("Zifencei", ext_zifencei, true),
1615 MULTI_EXT_CFG_BOOL("Zicsr", ext_zicsr, true),
1616 MULTI_EXT_CFG_BOOL("Zihintntl", ext_zihintntl, true),
1617 MULTI_EXT_CFG_BOOL("Zihintpause", ext_zihintpause, true),
1618 MULTI_EXT_CFG_BOOL("Zawrs", ext_zawrs, true),
1619 MULTI_EXT_CFG_BOOL("Zfa", ext_zfa, true),
1620 MULTI_EXT_CFG_BOOL("Zfh", ext_zfh, false),
1621 MULTI_EXT_CFG_BOOL("Zfhmin", ext_zfhmin, false),
1622 MULTI_EXT_CFG_BOOL("Zve32f", ext_zve32f, false),
1623 MULTI_EXT_CFG_BOOL("Zve64f", ext_zve64f, false),
1624 MULTI_EXT_CFG_BOOL("Zve64d", ext_zve64d, false),
1625
1626 DEFINE_PROP_END_OF_LIST(),
1627 };
1628
cpu_set_prop_err(RISCVCPU * cpu,const char * propname,Error ** errp)1629 static void cpu_set_prop_err(RISCVCPU *cpu, const char *propname,
1630 Error **errp)
1631 {
1632 g_autofree char *cpuname = riscv_cpu_get_name(cpu);
1633 error_setg(errp, "CPU '%s' does not allow changing the value of '%s'",
1634 cpuname, propname);
1635 }
1636
prop_pmu_num_set(Object * obj,Visitor * v,const char * name,void * opaque,Error ** errp)1637 static void prop_pmu_num_set(Object *obj, Visitor *v, const char *name,
1638 void *opaque, Error **errp)
1639 {
1640 RISCVCPU *cpu = RISCV_CPU(obj);
1641 uint8_t pmu_num, curr_pmu_num;
1642 uint32_t pmu_mask;
1643
1644 visit_type_uint8(v, name, &pmu_num, errp);
1645
1646 curr_pmu_num = ctpop32(cpu->cfg.pmu_mask);
1647
1648 if (pmu_num != curr_pmu_num && riscv_cpu_is_vendor(obj)) {
1649 cpu_set_prop_err(cpu, name, errp);
1650 error_append_hint(errp, "Current '%s' val: %u\n",
1651 name, curr_pmu_num);
1652 return;
1653 }
1654
1655 if (pmu_num > (RV_MAX_MHPMCOUNTERS - 3)) {
1656 error_setg(errp, "Number of counters exceeds maximum available");
1657 return;
1658 }
1659
1660 if (pmu_num == 0) {
1661 pmu_mask = 0;
1662 } else {
1663 pmu_mask = MAKE_64BIT_MASK(3, pmu_num);
1664 }
1665
1666 warn_report("\"pmu-num\" property is deprecated; use \"pmu-mask\"");
1667 cpu->cfg.pmu_mask = pmu_mask;
1668 cpu_option_add_user_setting("pmu-mask", pmu_mask);
1669 }
1670
1671 static void prop_pmu_num_get(Object *obj, Visitor *v, const char *name,
1672 void *opaque, Error **errp)
1673 {
1674 RISCVCPU *cpu = RISCV_CPU(obj);
1675 uint8_t pmu_num = ctpop32(cpu->cfg.pmu_mask);
1676
1677 visit_type_uint8(v, name, &pmu_num, errp);
1678 }
1679
1680 static const PropertyInfo prop_pmu_num = {
1681 .name = "pmu-num",
1682 .get = prop_pmu_num_get,
1683 .set = prop_pmu_num_set,
1684 };
1685
1686 static void prop_pmu_mask_set(Object *obj, Visitor *v, const char *name,
1687 void *opaque, Error **errp)
1688 {
1689 RISCVCPU *cpu = RISCV_CPU(obj);
1690 uint32_t value;
1691 uint8_t pmu_num;
1692
1693 visit_type_uint32(v, name, &value, errp);
1694
1695 if (value != cpu->cfg.pmu_mask && riscv_cpu_is_vendor(obj)) {
1696 cpu_set_prop_err(cpu, name, errp);
1697 error_append_hint(errp, "Current '%s' val: %x\n",
1698 name, cpu->cfg.pmu_mask);
1699 return;
1700 }
1701
1702 pmu_num = ctpop32(value);
1703
1704 if (pmu_num > (RV_MAX_MHPMCOUNTERS - 3)) {
1705 error_setg(errp, "Number of counters exceeds maximum available");
1706 return;
1707 }
1708
1709 cpu_option_add_user_setting(name, value);
1710 cpu->cfg.pmu_mask = value;
1711 }
1712
1713 static void prop_pmu_mask_get(Object *obj, Visitor *v, const char *name,
1714 void *opaque, Error **errp)
1715 {
1716     uint32_t pmu_mask = RISCV_CPU(obj)->cfg.pmu_mask;
1717 
1718     visit_type_uint32(v, name, &pmu_mask, errp);
1719 }
1720
1721 static const PropertyInfo prop_pmu_mask = {
1722 .name = "pmu-mask",
1723 .get = prop_pmu_mask_get,
1724 .set = prop_pmu_mask_set,
1725 };
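
/*
 * Usage sketch (illustrative, standard QEMU -cpu syntax assumed): a
 * command line such as "-cpu rv64,pmu-mask=0x7fff8" enables sixteen
 * programmable hpmcounters (mask bits 3..18). The deprecated
 * "pmu-num=16" spelling is converted to the same mask by
 * prop_pmu_num_set() via MAKE_64BIT_MASK(3, 16).
 */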
1726
1727 static void prop_mmu_set(Object *obj, Visitor *v, const char *name,
1728 void *opaque, Error **errp)
1729 {
1730 RISCVCPU *cpu = RISCV_CPU(obj);
1731 bool value;
1732
1733 visit_type_bool(v, name, &value, errp);
1734
1735 if (cpu->cfg.mmu != value && riscv_cpu_is_vendor(obj)) {
1736 cpu_set_prop_err(cpu, "mmu", errp);
1737 return;
1738 }
1739
1740 cpu_option_add_user_setting(name, value);
1741 cpu->cfg.mmu = value;
1742 }
1743
1744 static void prop_mmu_get(Object *obj, Visitor *v, const char *name,
1745 void *opaque, Error **errp)
1746 {
1747 bool value = RISCV_CPU(obj)->cfg.mmu;
1748
1749 visit_type_bool(v, name, &value, errp);
1750 }
1751
1752 static const PropertyInfo prop_mmu = {
1753 .name = "mmu",
1754 .get = prop_mmu_get,
1755 .set = prop_mmu_set,
1756 };
1757
1758 static void prop_pmp_set(Object *obj, Visitor *v, const char *name,
1759 void *opaque, Error **errp)
1760 {
1761 RISCVCPU *cpu = RISCV_CPU(obj);
1762 bool value;
1763
1764 visit_type_bool(v, name, &value, errp);
1765
1766 if (cpu->cfg.pmp != value && riscv_cpu_is_vendor(obj)) {
1767 cpu_set_prop_err(cpu, name, errp);
1768 return;
1769 }
1770
1771 cpu_option_add_user_setting(name, value);
1772 cpu->cfg.pmp = value;
1773 }
1774
1775 static void prop_pmp_get(Object *obj, Visitor *v, const char *name,
1776 void *opaque, Error **errp)
1777 {
1778 bool value = RISCV_CPU(obj)->cfg.pmp;
1779
1780 visit_type_bool(v, name, &value, errp);
1781 }
1782
1783 static const PropertyInfo prop_pmp = {
1784 .name = "pmp",
1785 .get = prop_pmp_get,
1786 .set = prop_pmp_set,
1787 };
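
/*
 * Usage sketch (illustrative): "mmu" and "pmp" are plain booleans, so
 * e.g. "-cpu rv64,mmu=false,pmp=false" models a core without address
 * translation or physical memory protection. Vendor CPUs reject any
 * value that differs from their fixed configuration, per the
 * riscv_cpu_is_vendor() checks above.
 */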
1788
1789 static int priv_spec_from_str(const char *priv_spec_str)
1790 {
1791 int priv_version = -1;
1792
1793 if (!g_strcmp0(priv_spec_str, PRIV_VER_1_13_0_STR)) {
1794 priv_version = PRIV_VERSION_1_13_0;
1795 } else if (!g_strcmp0(priv_spec_str, PRIV_VER_1_12_0_STR)) {
1796 priv_version = PRIV_VERSION_1_12_0;
1797 } else if (!g_strcmp0(priv_spec_str, PRIV_VER_1_11_0_STR)) {
1798 priv_version = PRIV_VERSION_1_11_0;
1799 } else if (!g_strcmp0(priv_spec_str, PRIV_VER_1_10_0_STR)) {
1800 priv_version = PRIV_VERSION_1_10_0;
1801 }
1802
1803 return priv_version;
1804 }
1805
1806 const char *priv_spec_to_str(int priv_version)
1807 {
1808 switch (priv_version) {
1809 case PRIV_VERSION_1_10_0:
1810 return PRIV_VER_1_10_0_STR;
1811 case PRIV_VERSION_1_11_0:
1812 return PRIV_VER_1_11_0_STR;
1813 case PRIV_VERSION_1_12_0:
1814 return PRIV_VER_1_12_0_STR;
1815 case PRIV_VERSION_1_13_0:
1816 return PRIV_VER_1_13_0_STR;
1817 default:
1818 return NULL;
1819 }
1820 }
1821
1822 static void prop_priv_spec_set(Object *obj, Visitor *v, const char *name,
1823 void *opaque, Error **errp)
1824 {
1825 RISCVCPU *cpu = RISCV_CPU(obj);
1826 g_autofree char *value = NULL;
1827 int priv_version = -1;
1828
1829 visit_type_str(v, name, &value, errp);
1830
1831 priv_version = priv_spec_from_str(value);
1832 if (priv_version < 0) {
1833 error_setg(errp, "Unsupported privilege spec version '%s'", value);
1834 return;
1835 }
1836
1837 if (priv_version != cpu->env.priv_ver && riscv_cpu_is_vendor(obj)) {
1838 cpu_set_prop_err(cpu, name, errp);
1839 error_append_hint(errp, "Current '%s' val: %s\n", name,
1840 object_property_get_str(obj, name, NULL));
1841 return;
1842 }
1843
1844 cpu_option_add_user_setting(name, priv_version);
1845 cpu->env.priv_ver = priv_version;
1846 }
1847
1848 static void prop_priv_spec_get(Object *obj, Visitor *v, const char *name,
1849 void *opaque, Error **errp)
1850 {
1851 RISCVCPU *cpu = RISCV_CPU(obj);
1852 const char *value = priv_spec_to_str(cpu->env.priv_ver);
1853
1854 visit_type_str(v, name, (char **)&value, errp);
1855 }
1856
1857 static const PropertyInfo prop_priv_spec = {
1858 .name = "priv_spec",
1859 .get = prop_priv_spec_get,
1860 .set = prop_priv_spec_set,
1861 };
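
/*
 * Usage sketch (illustrative, assuming PRIV_VER_1_12_0_STR is spelled
 * "v1.12.0"): "-cpu rv64,priv_spec=v1.12.0" is mapped to
 * PRIV_VERSION_1_12_0 by priv_spec_from_str(); any other string is
 * rejected with "Unsupported privilege spec version".
 */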
1862
1863 static void prop_vext_spec_set(Object *obj, Visitor *v, const char *name,
1864 void *opaque, Error **errp)
1865 {
1866 RISCVCPU *cpu = RISCV_CPU(obj);
1867 g_autofree char *value = NULL;
1868
1869 visit_type_str(v, name, &value, errp);
1870
1871 if (g_strcmp0(value, VEXT_VER_1_00_0_STR) != 0) {
1872 error_setg(errp, "Unsupported vector spec version '%s'", value);
1873 return;
1874 }
1875
1876 cpu_option_add_user_setting(name, VEXT_VERSION_1_00_0);
1877 cpu->env.vext_ver = VEXT_VERSION_1_00_0;
1878 }
1879
1880 static void prop_vext_spec_get(Object *obj, Visitor *v, const char *name,
1881 void *opaque, Error **errp)
1882 {
1883 const char *value = VEXT_VER_1_00_0_STR;
1884
1885 visit_type_str(v, name, (char **)&value, errp);
1886 }
1887
1888 static const PropertyInfo prop_vext_spec = {
1889 .name = "vext_spec",
1890 .get = prop_vext_spec_get,
1891 .set = prop_vext_spec_set,
1892 };
1893
1894 static void prop_vlen_set(Object *obj, Visitor *v, const char *name,
1895 void *opaque, Error **errp)
1896 {
1897 RISCVCPU *cpu = RISCV_CPU(obj);
1898 uint16_t value;
1899
1900 if (!visit_type_uint16(v, name, &value, errp)) {
1901 return;
1902 }
1903
1904 if (!is_power_of_2(value)) {
1905 error_setg(errp, "Vector extension VLEN must be power of 2");
1906 return;
1907 }
1908
1909     if (value != (cpu->cfg.vlenb << 3) && riscv_cpu_is_vendor(obj)) {
1910 cpu_set_prop_err(cpu, name, errp);
1911 error_append_hint(errp, "Current '%s' val: %u\n",
1912 name, cpu->cfg.vlenb << 3);
1913 return;
1914 }
1915
1916 cpu_option_add_user_setting(name, value);
1917 cpu->cfg.vlenb = value >> 3;
1918 }
1919
1920 static void prop_vlen_get(Object *obj, Visitor *v, const char *name,
1921 void *opaque, Error **errp)
1922 {
1923 uint16_t value = RISCV_CPU(obj)->cfg.vlenb << 3;
1924
1925 visit_type_uint16(v, name, &value, errp);
1926 }
1927
1928 static const PropertyInfo prop_vlen = {
1929 .name = "vlen",
1930 .get = prop_vlen_get,
1931 .set = prop_vlen_set,
1932 };
1933
1934 static void prop_elen_set(Object *obj, Visitor *v, const char *name,
1935 void *opaque, Error **errp)
1936 {
1937 RISCVCPU *cpu = RISCV_CPU(obj);
1938 uint16_t value;
1939
1940 if (!visit_type_uint16(v, name, &value, errp)) {
1941 return;
1942 }
1943
1944 if (!is_power_of_2(value)) {
1945 error_setg(errp, "Vector extension ELEN must be power of 2");
1946 return;
1947 }
1948
1949 if (value != cpu->cfg.elen && riscv_cpu_is_vendor(obj)) {
1950 cpu_set_prop_err(cpu, name, errp);
1951 error_append_hint(errp, "Current '%s' val: %u\n",
1952 name, cpu->cfg.elen);
1953 return;
1954 }
1955
1956 cpu_option_add_user_setting(name, value);
1957 cpu->cfg.elen = value;
1958 }
1959
1960 static void prop_elen_get(Object *obj, Visitor *v, const char *name,
1961 void *opaque, Error **errp)
1962 {
1963 uint16_t value = RISCV_CPU(obj)->cfg.elen;
1964
1965 visit_type_uint16(v, name, &value, errp);
1966 }
1967
1968 static const PropertyInfo prop_elen = {
1969 .name = "elen",
1970 .get = prop_elen_get,
1971 .set = prop_elen_set,
1972 };
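
/*
 * Usage sketch (illustrative): "-cpu rv64,v=true,vlen=256,elen=64"
 * requests 256-bit vector registers with 64-bit elements. "vlen" is
 * stored internally in bytes, so 256 becomes cfg.vlenb == 32; both
 * values must be powers of two per the setters above.
 */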
1973
1974 static void prop_cbom_blksize_set(Object *obj, Visitor *v, const char *name,
1975 void *opaque, Error **errp)
1976 {
1977 RISCVCPU *cpu = RISCV_CPU(obj);
1978 uint16_t value;
1979
1980 if (!visit_type_uint16(v, name, &value, errp)) {
1981 return;
1982 }
1983
1984 if (value != cpu->cfg.cbom_blocksize && riscv_cpu_is_vendor(obj)) {
1985 cpu_set_prop_err(cpu, name, errp);
1986 error_append_hint(errp, "Current '%s' val: %u\n",
1987 name, cpu->cfg.cbom_blocksize);
1988 return;
1989 }
1990
1991 cpu_option_add_user_setting(name, value);
1992 cpu->cfg.cbom_blocksize = value;
1993 }
1994
1995 static void prop_cbom_blksize_get(Object *obj, Visitor *v, const char *name,
1996 void *opaque, Error **errp)
1997 {
1998 uint16_t value = RISCV_CPU(obj)->cfg.cbom_blocksize;
1999
2000 visit_type_uint16(v, name, &value, errp);
2001 }
2002
2003 static const PropertyInfo prop_cbom_blksize = {
2004 .name = "cbom_blocksize",
2005 .get = prop_cbom_blksize_get,
2006 .set = prop_cbom_blksize_set,
2007 };
2008
2009 static void prop_cbop_blksize_set(Object *obj, Visitor *v, const char *name,
2010 void *opaque, Error **errp)
2011 {
2012 RISCVCPU *cpu = RISCV_CPU(obj);
2013 uint16_t value;
2014
2015 if (!visit_type_uint16(v, name, &value, errp)) {
2016 return;
2017 }
2018
2019 if (value != cpu->cfg.cbop_blocksize && riscv_cpu_is_vendor(obj)) {
2020 cpu_set_prop_err(cpu, name, errp);
2021 error_append_hint(errp, "Current '%s' val: %u\n",
2022 name, cpu->cfg.cbop_blocksize);
2023 return;
2024 }
2025
2026 cpu_option_add_user_setting(name, value);
2027 cpu->cfg.cbop_blocksize = value;
2028 }
2029
2030 static void prop_cbop_blksize_get(Object *obj, Visitor *v, const char *name,
2031 void *opaque, Error **errp)
2032 {
2033 uint16_t value = RISCV_CPU(obj)->cfg.cbop_blocksize;
2034
2035 visit_type_uint16(v, name, &value, errp);
2036 }
2037
2038 static const PropertyInfo prop_cbop_blksize = {
2039 .name = "cbop_blocksize",
2040 .get = prop_cbop_blksize_get,
2041 .set = prop_cbop_blksize_set,
2042 };
2043
2044 static void prop_cboz_blksize_set(Object *obj, Visitor *v, const char *name,
2045 void *opaque, Error **errp)
2046 {
2047 RISCVCPU *cpu = RISCV_CPU(obj);
2048 uint16_t value;
2049
2050 if (!visit_type_uint16(v, name, &value, errp)) {
2051 return;
2052 }
2053
2054 if (value != cpu->cfg.cboz_blocksize && riscv_cpu_is_vendor(obj)) {
2055 cpu_set_prop_err(cpu, name, errp);
2056 error_append_hint(errp, "Current '%s' val: %u\n",
2057 name, cpu->cfg.cboz_blocksize);
2058 return;
2059 }
2060
2061 cpu_option_add_user_setting(name, value);
2062 cpu->cfg.cboz_blocksize = value;
2063 }
2064
2065 static void prop_cboz_blksize_get(Object *obj, Visitor *v, const char *name,
2066 void *opaque, Error **errp)
2067 {
2068 uint16_t value = RISCV_CPU(obj)->cfg.cboz_blocksize;
2069
2070 visit_type_uint16(v, name, &value, errp);
2071 }
2072
2073 static const PropertyInfo prop_cboz_blksize = {
2074 .name = "cboz_blocksize",
2075 .get = prop_cboz_blksize_get,
2076 .set = prop_cboz_blksize_set,
2077 };
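
/*
 * Usage sketch (illustrative): the three cache-block properties take a
 * block size in bytes, e.g. "-cpu rv64,cbom_blocksize=64,cboz_blocksize=64"
 * for the Zicbom/Zicboz block operations. As with the other knobs,
 * vendor CPUs only accept the value they already advertise.
 */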
2078
2079 static void prop_mvendorid_set(Object *obj, Visitor *v, const char *name,
2080 void *opaque, Error **errp)
2081 {
2082 bool dynamic_cpu = riscv_cpu_is_dynamic(obj);
2083 RISCVCPU *cpu = RISCV_CPU(obj);
2084 uint32_t prev_val = cpu->cfg.mvendorid;
2085 uint32_t value;
2086
2087 if (!visit_type_uint32(v, name, &value, errp)) {
2088 return;
2089 }
2090
2091 if (!dynamic_cpu && prev_val != value) {
2092 error_setg(errp, "Unable to change %s mvendorid (0x%x)",
2093 object_get_typename(obj), prev_val);
2094 return;
2095 }
2096
2097 cpu->cfg.mvendorid = value;
2098 }
2099
2100 static void prop_mvendorid_get(Object *obj, Visitor *v, const char *name,
2101 void *opaque, Error **errp)
2102 {
2103 uint32_t value = RISCV_CPU(obj)->cfg.mvendorid;
2104
2105 visit_type_uint32(v, name, &value, errp);
2106 }
2107
2108 static const PropertyInfo prop_mvendorid = {
2109 .name = "mvendorid",
2110 .get = prop_mvendorid_get,
2111 .set = prop_mvendorid_set,
2112 };
2113
2114 static void prop_mimpid_set(Object *obj, Visitor *v, const char *name,
2115 void *opaque, Error **errp)
2116 {
2117 bool dynamic_cpu = riscv_cpu_is_dynamic(obj);
2118 RISCVCPU *cpu = RISCV_CPU(obj);
2119 uint64_t prev_val = cpu->cfg.mimpid;
2120 uint64_t value;
2121
2122 if (!visit_type_uint64(v, name, &value, errp)) {
2123 return;
2124 }
2125
2126 if (!dynamic_cpu && prev_val != value) {
2127 error_setg(errp, "Unable to change %s mimpid (0x%" PRIu64 ")",
2128 object_get_typename(obj), prev_val);
2129 return;
2130 }
2131
2132 cpu->cfg.mimpid = value;
2133 }
2134
2135 static void prop_mimpid_get(Object *obj, Visitor *v, const char *name,
2136 void *opaque, Error **errp)
2137 {
2138 uint64_t value = RISCV_CPU(obj)->cfg.mimpid;
2139
2140 visit_type_uint64(v, name, &value, errp);
2141 }
2142
2143 static const PropertyInfo prop_mimpid = {
2144 .name = "mimpid",
2145 .get = prop_mimpid_get,
2146 .set = prop_mimpid_set,
2147 };
2148
2149 static void prop_marchid_set(Object *obj, Visitor *v, const char *name,
2150 void *opaque, Error **errp)
2151 {
2152 bool dynamic_cpu = riscv_cpu_is_dynamic(obj);
2153 RISCVCPU *cpu = RISCV_CPU(obj);
2154 uint64_t prev_val = cpu->cfg.marchid;
2155 uint64_t value, invalid_val;
2156 uint32_t mxlen = 0;
2157
2158 if (!visit_type_uint64(v, name, &value, errp)) {
2159 return;
2160 }
2161
2162 if (!dynamic_cpu && prev_val != value) {
2163 error_setg(errp, "Unable to change %s marchid (0x%" PRIu64 ")",
2164 object_get_typename(obj), prev_val);
2165 return;
2166 }
2167
2168 switch (riscv_cpu_mxl(&cpu->env)) {
2169 case MXL_RV32:
2170 mxlen = 32;
2171 break;
2172 case MXL_RV64:
2173 case MXL_RV128:
2174 mxlen = 64;
2175 break;
2176 default:
2177 g_assert_not_reached();
2178 }
2179
2180     invalid_val = 1ULL << (mxlen - 1);
2181
2182 if (value == invalid_val) {
2183 error_setg(errp, "Unable to set marchid with MSB (%u) bit set "
2184 "and the remaining bits zero", mxlen);
2185 return;
2186 }
2187
2188 cpu->cfg.marchid = value;
2189 }
2190
2191 static void prop_marchid_get(Object *obj, Visitor *v, const char *name,
2192 void *opaque, Error **errp)
2193 {
2194 uint64_t value = RISCV_CPU(obj)->cfg.marchid;
2195
2196 visit_type_uint64(v, name, &value, errp);
2197 }
2198
2199 static const PropertyInfo prop_marchid = {
2200 .name = "marchid",
2201 .get = prop_marchid_get,
2202 .set = prop_marchid_set,
2203 };
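
/*
 * Usage sketch (illustrative, values made up): on a dynamic CPU the
 * machine identity CSRs can be overridden, e.g.
 * "-cpu rv64,mvendorid=0x123,mimpid=0x1,marchid=0x2". Vendor CPUs
 * refuse any change, and a marchid with only the MSB set is rejected
 * by prop_marchid_set() above.
 */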
2204
2205 /*
2206 * RVA22U64 defines some 'named features' that are cache
2207 * related: Za64rs, Zic64b, Ziccif, Ziccrse, Ziccamoa
2208 * and Zicclsm. They are always implemented in TCG and
2209  * don't need to be manually enabled by the profile.
2210 */
2211 static RISCVCPUProfile RVA22U64 = {
2212 .parent = NULL,
2213 .name = "rva22u64",
2214 .misa_ext = RVI | RVM | RVA | RVF | RVD | RVC | RVU,
2215 .priv_spec = RISCV_PROFILE_ATTR_UNUSED,
2216 .satp_mode = RISCV_PROFILE_ATTR_UNUSED,
2217 .ext_offsets = {
2218 CPU_CFG_OFFSET(ext_zicsr), CPU_CFG_OFFSET(ext_zihintpause),
2219 CPU_CFG_OFFSET(ext_zba), CPU_CFG_OFFSET(ext_zbb),
2220 CPU_CFG_OFFSET(ext_zbs), CPU_CFG_OFFSET(ext_zfhmin),
2221 CPU_CFG_OFFSET(ext_zkt), CPU_CFG_OFFSET(ext_zicntr),
2222 CPU_CFG_OFFSET(ext_zihpm), CPU_CFG_OFFSET(ext_zicbom),
2223 CPU_CFG_OFFSET(ext_zicbop), CPU_CFG_OFFSET(ext_zicboz),
2224
2225 /* mandatory named features for this profile */
2226 CPU_CFG_OFFSET(ext_zic64b),
2227
2228 RISCV_PROFILE_EXT_LIST_END
2229 }
2230 };
2231
2232 /*
2233 * As with RVA22U64, RVA22S64 also defines 'named features'.
2234 *
2235  * Cache-related features that we consider enabled since we don't
2236  * implement a cache: Ssccptr
2237 *
2238 * Other named features that we already implement: Sstvecd, Sstvala,
2239 * Sscounterenw
2240 *
2241  * The remaining features/extensions come from RVA22U64.
2242 */
2243 static RISCVCPUProfile RVA22S64 = {
2244 .parent = &RVA22U64,
2245 .name = "rva22s64",
2246 .misa_ext = RVS,
2247 .priv_spec = PRIV_VERSION_1_12_0,
2248 .satp_mode = VM_1_10_SV39,
2249 .ext_offsets = {
2250 /* rva22s64 exts */
2251 CPU_CFG_OFFSET(ext_zifencei), CPU_CFG_OFFSET(ext_svpbmt),
2252 CPU_CFG_OFFSET(ext_svinval), CPU_CFG_OFFSET(ext_svade),
2253
2254 RISCV_PROFILE_EXT_LIST_END
2255 }
2256 };
2257
2258 RISCVCPUProfile *riscv_profiles[] = {
2259 &RVA22U64,
2260 &RVA22S64,
2261 NULL,
2262 };
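
/*
 * Usage sketch (illustrative): "-cpu rva22s64" instantiates the profile
 * CPU defined further down; RVA22S64 inherits the RVA22U64 mandatory
 * extensions through its .parent pointer and additionally mandates RVS,
 * Sv39 and the rva22s64-specific extensions listed above.
 */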
2263
2264 static RISCVCPUImpliedExtsRule RVA_IMPLIED = {
2265 .is_misa = true,
2266 .ext = RVA,
2267 .implied_multi_exts = {
2268 CPU_CFG_OFFSET(ext_zalrsc), CPU_CFG_OFFSET(ext_zaamo),
2269
2270 RISCV_IMPLIED_EXTS_RULE_END
2271 },
2272 };
2273
2274 static RISCVCPUImpliedExtsRule RVD_IMPLIED = {
2275 .is_misa = true,
2276 .ext = RVD,
2277 .implied_misa_exts = RVF,
2278 .implied_multi_exts = { RISCV_IMPLIED_EXTS_RULE_END },
2279 };
2280
2281 static RISCVCPUImpliedExtsRule RVF_IMPLIED = {
2282 .is_misa = true,
2283 .ext = RVF,
2284 .implied_multi_exts = {
2285 CPU_CFG_OFFSET(ext_zicsr),
2286
2287 RISCV_IMPLIED_EXTS_RULE_END
2288 },
2289 };
2290
2291 static RISCVCPUImpliedExtsRule RVM_IMPLIED = {
2292 .is_misa = true,
2293 .ext = RVM,
2294 .implied_multi_exts = {
2295 CPU_CFG_OFFSET(ext_zmmul),
2296
2297 RISCV_IMPLIED_EXTS_RULE_END
2298 },
2299 };
2300
2301 static RISCVCPUImpliedExtsRule RVV_IMPLIED = {
2302 .is_misa = true,
2303 .ext = RVV,
2304 .implied_multi_exts = {
2305 CPU_CFG_OFFSET(ext_zve64d),
2306
2307 RISCV_IMPLIED_EXTS_RULE_END
2308 },
2309 };
2310
2311 static RISCVCPUImpliedExtsRule ZCB_IMPLIED = {
2312 .ext = CPU_CFG_OFFSET(ext_zcb),
2313 .implied_multi_exts = {
2314 CPU_CFG_OFFSET(ext_zca),
2315
2316 RISCV_IMPLIED_EXTS_RULE_END
2317 },
2318 };
2319
2320 static RISCVCPUImpliedExtsRule ZCD_IMPLIED = {
2321 .ext = CPU_CFG_OFFSET(ext_zcd),
2322 .implied_misa_exts = RVD,
2323 .implied_multi_exts = {
2324 CPU_CFG_OFFSET(ext_zca),
2325
2326 RISCV_IMPLIED_EXTS_RULE_END
2327 },
2328 };
2329
2330 static RISCVCPUImpliedExtsRule ZCE_IMPLIED = {
2331 .ext = CPU_CFG_OFFSET(ext_zce),
2332 .implied_multi_exts = {
2333 CPU_CFG_OFFSET(ext_zcb), CPU_CFG_OFFSET(ext_zcmp),
2334 CPU_CFG_OFFSET(ext_zcmt),
2335
2336 RISCV_IMPLIED_EXTS_RULE_END
2337 },
2338 };
2339
2340 static RISCVCPUImpliedExtsRule ZCF_IMPLIED = {
2341 .ext = CPU_CFG_OFFSET(ext_zcf),
2342 .implied_misa_exts = RVF,
2343 .implied_multi_exts = {
2344 CPU_CFG_OFFSET(ext_zca),
2345
2346 RISCV_IMPLIED_EXTS_RULE_END
2347 },
2348 };
2349
2350 static RISCVCPUImpliedExtsRule ZCMP_IMPLIED = {
2351 .ext = CPU_CFG_OFFSET(ext_zcmp),
2352 .implied_multi_exts = {
2353 CPU_CFG_OFFSET(ext_zca),
2354
2355 RISCV_IMPLIED_EXTS_RULE_END
2356 },
2357 };
2358
2359 static RISCVCPUImpliedExtsRule ZCMT_IMPLIED = {
2360 .ext = CPU_CFG_OFFSET(ext_zcmt),
2361 .implied_multi_exts = {
2362 CPU_CFG_OFFSET(ext_zca), CPU_CFG_OFFSET(ext_zicsr),
2363
2364 RISCV_IMPLIED_EXTS_RULE_END
2365 },
2366 };
2367
2368 static RISCVCPUImpliedExtsRule ZDINX_IMPLIED = {
2369 .ext = CPU_CFG_OFFSET(ext_zdinx),
2370 .implied_multi_exts = {
2371 CPU_CFG_OFFSET(ext_zfinx),
2372
2373 RISCV_IMPLIED_EXTS_RULE_END
2374 },
2375 };
2376
2377 static RISCVCPUImpliedExtsRule ZFA_IMPLIED = {
2378 .ext = CPU_CFG_OFFSET(ext_zfa),
2379 .implied_misa_exts = RVF,
2380 .implied_multi_exts = { RISCV_IMPLIED_EXTS_RULE_END },
2381 };
2382
2383 static RISCVCPUImpliedExtsRule ZFBFMIN_IMPLIED = {
2384 .ext = CPU_CFG_OFFSET(ext_zfbfmin),
2385 .implied_misa_exts = RVF,
2386 .implied_multi_exts = { RISCV_IMPLIED_EXTS_RULE_END },
2387 };
2388
2389 static RISCVCPUImpliedExtsRule ZFH_IMPLIED = {
2390 .ext = CPU_CFG_OFFSET(ext_zfh),
2391 .implied_multi_exts = {
2392 CPU_CFG_OFFSET(ext_zfhmin),
2393
2394 RISCV_IMPLIED_EXTS_RULE_END
2395 },
2396 };
2397
2398 static RISCVCPUImpliedExtsRule ZFHMIN_IMPLIED = {
2399 .ext = CPU_CFG_OFFSET(ext_zfhmin),
2400 .implied_misa_exts = RVF,
2401 .implied_multi_exts = { RISCV_IMPLIED_EXTS_RULE_END },
2402 };
2403
2404 static RISCVCPUImpliedExtsRule ZFINX_IMPLIED = {
2405 .ext = CPU_CFG_OFFSET(ext_zfinx),
2406 .implied_multi_exts = {
2407 CPU_CFG_OFFSET(ext_zicsr),
2408
2409 RISCV_IMPLIED_EXTS_RULE_END
2410 },
2411 };
2412
2413 static RISCVCPUImpliedExtsRule ZHINX_IMPLIED = {
2414 .ext = CPU_CFG_OFFSET(ext_zhinx),
2415 .implied_multi_exts = {
2416 CPU_CFG_OFFSET(ext_zhinxmin),
2417
2418 RISCV_IMPLIED_EXTS_RULE_END
2419 },
2420 };
2421
2422 static RISCVCPUImpliedExtsRule ZHINXMIN_IMPLIED = {
2423 .ext = CPU_CFG_OFFSET(ext_zhinxmin),
2424 .implied_multi_exts = {
2425 CPU_CFG_OFFSET(ext_zfinx),
2426
2427 RISCV_IMPLIED_EXTS_RULE_END
2428 },
2429 };
2430
2431 static RISCVCPUImpliedExtsRule ZICNTR_IMPLIED = {
2432 .ext = CPU_CFG_OFFSET(ext_zicntr),
2433 .implied_multi_exts = {
2434 CPU_CFG_OFFSET(ext_zicsr),
2435
2436 RISCV_IMPLIED_EXTS_RULE_END
2437 },
2438 };
2439
2440 static RISCVCPUImpliedExtsRule ZIHPM_IMPLIED = {
2441 .ext = CPU_CFG_OFFSET(ext_zihpm),
2442 .implied_multi_exts = {
2443 CPU_CFG_OFFSET(ext_zicsr),
2444
2445 RISCV_IMPLIED_EXTS_RULE_END
2446 },
2447 };
2448
2449 static RISCVCPUImpliedExtsRule ZK_IMPLIED = {
2450 .ext = CPU_CFG_OFFSET(ext_zk),
2451 .implied_multi_exts = {
2452 CPU_CFG_OFFSET(ext_zkn), CPU_CFG_OFFSET(ext_zkr),
2453 CPU_CFG_OFFSET(ext_zkt),
2454
2455 RISCV_IMPLIED_EXTS_RULE_END
2456 },
2457 };
2458
2459 static RISCVCPUImpliedExtsRule ZKN_IMPLIED = {
2460 .ext = CPU_CFG_OFFSET(ext_zkn),
2461 .implied_multi_exts = {
2462 CPU_CFG_OFFSET(ext_zbkb), CPU_CFG_OFFSET(ext_zbkc),
2463 CPU_CFG_OFFSET(ext_zbkx), CPU_CFG_OFFSET(ext_zkne),
2464 CPU_CFG_OFFSET(ext_zknd), CPU_CFG_OFFSET(ext_zknh),
2465
2466 RISCV_IMPLIED_EXTS_RULE_END
2467 },
2468 };
2469
2470 static RISCVCPUImpliedExtsRule ZKS_IMPLIED = {
2471 .ext = CPU_CFG_OFFSET(ext_zks),
2472 .implied_multi_exts = {
2473 CPU_CFG_OFFSET(ext_zbkb), CPU_CFG_OFFSET(ext_zbkc),
2474 CPU_CFG_OFFSET(ext_zbkx), CPU_CFG_OFFSET(ext_zksed),
2475 CPU_CFG_OFFSET(ext_zksh),
2476
2477 RISCV_IMPLIED_EXTS_RULE_END
2478 },
2479 };
2480
2481 static RISCVCPUImpliedExtsRule ZVBB_IMPLIED = {
2482 .ext = CPU_CFG_OFFSET(ext_zvbb),
2483 .implied_multi_exts = {
2484 CPU_CFG_OFFSET(ext_zvkb),
2485
2486 RISCV_IMPLIED_EXTS_RULE_END
2487 },
2488 };
2489
2490 static RISCVCPUImpliedExtsRule ZVE32F_IMPLIED = {
2491 .ext = CPU_CFG_OFFSET(ext_zve32f),
2492 .implied_misa_exts = RVF,
2493 .implied_multi_exts = {
2494 CPU_CFG_OFFSET(ext_zve32x),
2495
2496 RISCV_IMPLIED_EXTS_RULE_END
2497 },
2498 };
2499
2500 static RISCVCPUImpliedExtsRule ZVE32X_IMPLIED = {
2501 .ext = CPU_CFG_OFFSET(ext_zve32x),
2502 .implied_multi_exts = {
2503 CPU_CFG_OFFSET(ext_zicsr),
2504
2505 RISCV_IMPLIED_EXTS_RULE_END
2506 },
2507 };
2508
2509 static RISCVCPUImpliedExtsRule ZVE64D_IMPLIED = {
2510 .ext = CPU_CFG_OFFSET(ext_zve64d),
2511 .implied_misa_exts = RVD,
2512 .implied_multi_exts = {
2513 CPU_CFG_OFFSET(ext_zve64f),
2514
2515 RISCV_IMPLIED_EXTS_RULE_END
2516 },
2517 };
2518
2519 static RISCVCPUImpliedExtsRule ZVE64F_IMPLIED = {
2520 .ext = CPU_CFG_OFFSET(ext_zve64f),
2521 .implied_misa_exts = RVF,
2522 .implied_multi_exts = {
2523 CPU_CFG_OFFSET(ext_zve32f), CPU_CFG_OFFSET(ext_zve64x),
2524
2525 RISCV_IMPLIED_EXTS_RULE_END
2526 },
2527 };
2528
2529 static RISCVCPUImpliedExtsRule ZVE64X_IMPLIED = {
2530 .ext = CPU_CFG_OFFSET(ext_zve64x),
2531 .implied_multi_exts = {
2532 CPU_CFG_OFFSET(ext_zve32x),
2533
2534 RISCV_IMPLIED_EXTS_RULE_END
2535 },
2536 };
2537
2538 static RISCVCPUImpliedExtsRule ZVFBFMIN_IMPLIED = {
2539 .ext = CPU_CFG_OFFSET(ext_zvfbfmin),
2540 .implied_multi_exts = {
2541 CPU_CFG_OFFSET(ext_zve32f),
2542
2543 RISCV_IMPLIED_EXTS_RULE_END
2544 },
2545 };
2546
2547 static RISCVCPUImpliedExtsRule ZVFBFWMA_IMPLIED = {
2548 .ext = CPU_CFG_OFFSET(ext_zvfbfwma),
2549 .implied_multi_exts = {
2550 CPU_CFG_OFFSET(ext_zvfbfmin), CPU_CFG_OFFSET(ext_zfbfmin),
2551
2552 RISCV_IMPLIED_EXTS_RULE_END
2553 },
2554 };
2555
2556 static RISCVCPUImpliedExtsRule ZVFH_IMPLIED = {
2557 .ext = CPU_CFG_OFFSET(ext_zvfh),
2558 .implied_multi_exts = {
2559 CPU_CFG_OFFSET(ext_zvfhmin), CPU_CFG_OFFSET(ext_zfhmin),
2560
2561 RISCV_IMPLIED_EXTS_RULE_END
2562 },
2563 };
2564
2565 static RISCVCPUImpliedExtsRule ZVFHMIN_IMPLIED = {
2566 .ext = CPU_CFG_OFFSET(ext_zvfhmin),
2567 .implied_multi_exts = {
2568 CPU_CFG_OFFSET(ext_zve32f),
2569
2570 RISCV_IMPLIED_EXTS_RULE_END
2571 },
2572 };
2573
2574 static RISCVCPUImpliedExtsRule ZVKN_IMPLIED = {
2575 .ext = CPU_CFG_OFFSET(ext_zvkn),
2576 .implied_multi_exts = {
2577 CPU_CFG_OFFSET(ext_zvkned), CPU_CFG_OFFSET(ext_zvknhb),
2578 CPU_CFG_OFFSET(ext_zvkb), CPU_CFG_OFFSET(ext_zvkt),
2579
2580 RISCV_IMPLIED_EXTS_RULE_END
2581 },
2582 };
2583
2584 static RISCVCPUImpliedExtsRule ZVKNC_IMPLIED = {
2585 .ext = CPU_CFG_OFFSET(ext_zvknc),
2586 .implied_multi_exts = {
2587 CPU_CFG_OFFSET(ext_zvkn), CPU_CFG_OFFSET(ext_zvbc),
2588
2589 RISCV_IMPLIED_EXTS_RULE_END
2590 },
2591 };
2592
2593 static RISCVCPUImpliedExtsRule ZVKNG_IMPLIED = {
2594 .ext = CPU_CFG_OFFSET(ext_zvkng),
2595 .implied_multi_exts = {
2596 CPU_CFG_OFFSET(ext_zvkn), CPU_CFG_OFFSET(ext_zvkg),
2597
2598 RISCV_IMPLIED_EXTS_RULE_END
2599 },
2600 };
2601
2602 static RISCVCPUImpliedExtsRule ZVKNHB_IMPLIED = {
2603 .ext = CPU_CFG_OFFSET(ext_zvknhb),
2604 .implied_multi_exts = {
2605 CPU_CFG_OFFSET(ext_zve64x),
2606
2607 RISCV_IMPLIED_EXTS_RULE_END
2608 },
2609 };
2610
2611 static RISCVCPUImpliedExtsRule ZVKS_IMPLIED = {
2612 .ext = CPU_CFG_OFFSET(ext_zvks),
2613 .implied_multi_exts = {
2614 CPU_CFG_OFFSET(ext_zvksed), CPU_CFG_OFFSET(ext_zvksh),
2615 CPU_CFG_OFFSET(ext_zvkb), CPU_CFG_OFFSET(ext_zvkt),
2616
2617 RISCV_IMPLIED_EXTS_RULE_END
2618 },
2619 };
2620
2621 static RISCVCPUImpliedExtsRule ZVKSC_IMPLIED = {
2622 .ext = CPU_CFG_OFFSET(ext_zvksc),
2623 .implied_multi_exts = {
2624 CPU_CFG_OFFSET(ext_zvks), CPU_CFG_OFFSET(ext_zvbc),
2625
2626 RISCV_IMPLIED_EXTS_RULE_END
2627 },
2628 };
2629
2630 static RISCVCPUImpliedExtsRule ZVKSG_IMPLIED = {
2631 .ext = CPU_CFG_OFFSET(ext_zvksg),
2632 .implied_multi_exts = {
2633 CPU_CFG_OFFSET(ext_zvks), CPU_CFG_OFFSET(ext_zvkg),
2634
2635 RISCV_IMPLIED_EXTS_RULE_END
2636 },
2637 };
2638
2639 RISCVCPUImpliedExtsRule *riscv_misa_ext_implied_rules[] = {
2640 &RVA_IMPLIED, &RVD_IMPLIED, &RVF_IMPLIED,
2641 &RVM_IMPLIED, &RVV_IMPLIED, NULL
2642 };
2643
2644 RISCVCPUImpliedExtsRule *riscv_multi_ext_implied_rules[] = {
2645 &ZCB_IMPLIED, &ZCD_IMPLIED, &ZCE_IMPLIED,
2646 &ZCF_IMPLIED, &ZCMP_IMPLIED, &ZCMT_IMPLIED,
2647 &ZDINX_IMPLIED, &ZFA_IMPLIED, &ZFBFMIN_IMPLIED,
2648 &ZFH_IMPLIED, &ZFHMIN_IMPLIED, &ZFINX_IMPLIED,
2649 &ZHINX_IMPLIED, &ZHINXMIN_IMPLIED, &ZICNTR_IMPLIED,
2650 &ZIHPM_IMPLIED, &ZK_IMPLIED, &ZKN_IMPLIED,
2651 &ZKS_IMPLIED, &ZVBB_IMPLIED, &ZVE32F_IMPLIED,
2652 &ZVE32X_IMPLIED, &ZVE64D_IMPLIED, &ZVE64F_IMPLIED,
2653 &ZVE64X_IMPLIED, &ZVFBFMIN_IMPLIED, &ZVFBFWMA_IMPLIED,
2654 &ZVFH_IMPLIED, &ZVFHMIN_IMPLIED, &ZVKN_IMPLIED,
2655 &ZVKNC_IMPLIED, &ZVKNG_IMPLIED, &ZVKNHB_IMPLIED,
2656 &ZVKS_IMPLIED, &ZVKSC_IMPLIED, &ZVKSG_IMPLIED,
2657 NULL
2658 };
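
/*
 * Illustration (not exhaustive): enabling Zce expands through
 * ZCE_IMPLIED into Zcb, Zcmp and Zcmt; Zcb in turn implies Zca via
 * ZCB_IMPLIED. The code that consumes these tables at realize time
 * (see tcg/tcg-cpu.c) is expected to apply the rules until no new
 * extension gets enabled.
 */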
2659
2660 static Property riscv_cpu_properties[] = {
2661 DEFINE_PROP_BOOL("debug", RISCVCPU, cfg.debug, true),
2662
2663 {.name = "pmu-mask", .info = &prop_pmu_mask},
2664 {.name = "pmu-num", .info = &prop_pmu_num}, /* Deprecated */
2665
2666 {.name = "mmu", .info = &prop_mmu},
2667 {.name = "pmp", .info = &prop_pmp},
2668
2669 {.name = "priv_spec", .info = &prop_priv_spec},
2670 {.name = "vext_spec", .info = &prop_vext_spec},
2671
2672 {.name = "vlen", .info = &prop_vlen},
2673 {.name = "elen", .info = &prop_elen},
2674
2675 {.name = "cbom_blocksize", .info = &prop_cbom_blksize},
2676 {.name = "cbop_blocksize", .info = &prop_cbop_blksize},
2677 {.name = "cboz_blocksize", .info = &prop_cboz_blksize},
2678
2679 {.name = "mvendorid", .info = &prop_mvendorid},
2680 {.name = "mimpid", .info = &prop_mimpid},
2681 {.name = "marchid", .info = &prop_marchid},
2682
2683 #ifndef CONFIG_USER_ONLY
2684 DEFINE_PROP_UINT64("resetvec", RISCVCPU, env.resetvec, DEFAULT_RSTVEC),
2685 #endif
2686
2687 DEFINE_PROP_BOOL("short-isa-string", RISCVCPU, cfg.short_isa_string, false),
2688
2689 DEFINE_PROP_BOOL("rvv_ta_all_1s", RISCVCPU, cfg.rvv_ta_all_1s, false),
2690 DEFINE_PROP_BOOL("rvv_ma_all_1s", RISCVCPU, cfg.rvv_ma_all_1s, false),
2691
2692 /*
2693 * write_misa() is marked as experimental for now so mark
2694      * it with 'x-' and default to 'false'.
2695 */
2696 DEFINE_PROP_BOOL("x-misa-w", RISCVCPU, cfg.misa_w, false),
2697 DEFINE_PROP_END_OF_LIST(),
2698 };
2699
2700 #if defined(TARGET_RISCV64)
2701 static void rva22u64_profile_cpu_init(Object *obj)
2702 {
2703 rv64i_bare_cpu_init(obj);
2704
2705 RVA22U64.enabled = true;
2706 }
2707
2708 static void rva22s64_profile_cpu_init(Object *obj)
2709 {
2710 rv64i_bare_cpu_init(obj);
2711
2712 RVA22S64.enabled = true;
2713 }
2714 #endif
2715
2716 static const gchar *riscv_gdb_arch_name(CPUState *cs)
2717 {
2718 RISCVCPU *cpu = RISCV_CPU(cs);
2719 CPURISCVState *env = &cpu->env;
2720
2721 switch (riscv_cpu_mxl(env)) {
2722 case MXL_RV32:
2723 return "riscv:rv32";
2724 case MXL_RV64:
2725 case MXL_RV128:
2726 return "riscv:rv64";
2727 default:
2728 g_assert_not_reached();
2729 }
2730 }
2731
2732 #ifndef CONFIG_USER_ONLY
2733 static int64_t riscv_get_arch_id(CPUState *cs)
2734 {
2735 RISCVCPU *cpu = RISCV_CPU(cs);
2736
2737 return cpu->env.mhartid;
2738 }
2739
2740 #include "hw/core/sysemu-cpu-ops.h"
2741
2742 static const struct SysemuCPUOps riscv_sysemu_ops = {
2743 .get_phys_page_debug = riscv_cpu_get_phys_page_debug,
2744 .write_elf64_note = riscv_cpu_write_elf64_note,
2745 .write_elf32_note = riscv_cpu_write_elf32_note,
2746 .legacy_vmsd = &vmstate_riscv_cpu,
2747 };
2748 #endif
2749
2750 static void riscv_cpu_common_class_init(ObjectClass *c, void *data)
2751 {
2752 RISCVCPUClass *mcc = RISCV_CPU_CLASS(c);
2753 CPUClass *cc = CPU_CLASS(c);
2754 DeviceClass *dc = DEVICE_CLASS(c);
2755 ResettableClass *rc = RESETTABLE_CLASS(c);
2756
2757 device_class_set_parent_realize(dc, riscv_cpu_realize,
2758 &mcc->parent_realize);
2759
2760 resettable_class_set_parent_phases(rc, NULL, riscv_cpu_reset_hold, NULL,
2761 &mcc->parent_phases);
2762
2763 cc->class_by_name = riscv_cpu_class_by_name;
2764 cc->has_work = riscv_cpu_has_work;
2765 cc->mmu_index = riscv_cpu_mmu_index;
2766 cc->dump_state = riscv_cpu_dump_state;
2767 cc->set_pc = riscv_cpu_set_pc;
2768 cc->get_pc = riscv_cpu_get_pc;
2769 cc->gdb_read_register = riscv_cpu_gdb_read_register;
2770 cc->gdb_write_register = riscv_cpu_gdb_write_register;
2771 cc->gdb_stop_before_watchpoint = true;
2772 cc->disas_set_info = riscv_cpu_disas_set_info;
2773 #ifndef CONFIG_USER_ONLY
2774 cc->sysemu_ops = &riscv_sysemu_ops;
2775 cc->get_arch_id = riscv_get_arch_id;
2776 #endif
2777 cc->gdb_arch_name = riscv_gdb_arch_name;
2778
2779 device_class_set_props(dc, riscv_cpu_properties);
2780 }
2781
2782 static void riscv_cpu_class_init(ObjectClass *c, void *data)
2783 {
2784 RISCVCPUClass *mcc = RISCV_CPU_CLASS(c);
2785
2786 mcc->misa_mxl_max = (uint32_t)(uintptr_t)data;
2787 riscv_cpu_validate_misa_mxl(mcc);
2788 }
2789
2790 static void riscv_isa_string_ext(RISCVCPU *cpu, char **isa_str,
2791 int max_str_len)
2792 {
2793 const RISCVIsaExtData *edata;
2794 char *old = *isa_str;
2795 char *new = *isa_str;
2796
2797 for (edata = isa_edata_arr; edata && edata->name; edata++) {
2798 if (isa_ext_is_enabled(cpu, edata->ext_enable_offset)) {
2799 new = g_strconcat(old, "_", edata->name, NULL);
2800 g_free(old);
2801 old = new;
2802 }
2803 }
2804
2805 *isa_str = new;
2806 }
2807
2808 char *riscv_isa_string(RISCVCPU *cpu)
2809 {
2810 RISCVCPUClass *mcc = RISCV_CPU_GET_CLASS(cpu);
2811 int i;
2812 const size_t maxlen = sizeof("rv128") + sizeof(riscv_single_letter_exts);
2813 char *isa_str = g_new(char, maxlen);
2814 int xlen = riscv_cpu_max_xlen(mcc);
2815 char *p = isa_str + snprintf(isa_str, maxlen, "rv%d", xlen);
2816
2817 for (i = 0; i < sizeof(riscv_single_letter_exts) - 1; i++) {
2818 if (cpu->env.misa_ext & RV(riscv_single_letter_exts[i])) {
2819 *p++ = qemu_tolower(riscv_single_letter_exts[i]);
2820 }
2821 }
2822 *p = '\0';
2823 if (!cpu->cfg.short_isa_string) {
2824 riscv_isa_string_ext(cpu, &isa_str, maxlen);
2825 }
2826 return isa_str;
2827 }
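
/*
 * Example (illustrative): a CPU with misa IMAFDC plus Zicsr and
 * Zifencei enabled would yield something like
 * "rv64imafdc_zicsr_zifencei": the base and single-letter extensions
 * come first, then riscv_isa_string_ext() appends every enabled
 * multi-letter extension from isa_edata_arr with '_' separators.
 */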
2828
2829 #ifndef CONFIG_USER_ONLY
2830 static char **riscv_isa_extensions_list(RISCVCPU *cpu, int *count)
2831 {
2832 int maxlen = ARRAY_SIZE(riscv_single_letter_exts) + ARRAY_SIZE(isa_edata_arr);
2833 char **extensions = g_new(char *, maxlen);
2834
2835 for (int i = 0; i < sizeof(riscv_single_letter_exts) - 1; i++) {
2836 if (cpu->env.misa_ext & RV(riscv_single_letter_exts[i])) {
2837 extensions[*count] = g_new(char, 2);
2838 snprintf(extensions[*count], 2, "%c",
2839 qemu_tolower(riscv_single_letter_exts[i]));
2840 (*count)++;
2841 }
2842 }
2843
2844 for (const RISCVIsaExtData *edata = isa_edata_arr; edata->name; edata++) {
2845 if (isa_ext_is_enabled(cpu, edata->ext_enable_offset)) {
2846 extensions[*count] = g_strdup(edata->name);
2847 (*count)++;
2848 }
2849 }
2850
2851 return extensions;
2852 }
2853
2854 void riscv_isa_write_fdt(RISCVCPU *cpu, void *fdt, char *nodename)
2855 {
2856 RISCVCPUClass *mcc = RISCV_CPU_GET_CLASS(cpu);
2857 const size_t maxlen = sizeof("rv128i");
2858 g_autofree char *isa_base = g_new(char, maxlen);
2859 g_autofree char *riscv_isa;
2860 char **isa_extensions;
2861 int count = 0;
2862 int xlen = riscv_cpu_max_xlen(mcc);
2863
2864 riscv_isa = riscv_isa_string(cpu);
2865 qemu_fdt_setprop_string(fdt, nodename, "riscv,isa", riscv_isa);
2866
2867 snprintf(isa_base, maxlen, "rv%di", xlen);
2868 qemu_fdt_setprop_string(fdt, nodename, "riscv,isa-base", isa_base);
2869
2870 isa_extensions = riscv_isa_extensions_list(cpu, &count);
2871 qemu_fdt_setprop_string_array(fdt, nodename, "riscv,isa-extensions",
2872 isa_extensions, count);
2873
2874 for (int i = 0; i < count; i++) {
2875 g_free(isa_extensions[i]);
2876 }
2877
2878 g_free(isa_extensions);
2879 }
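
/*
 * Example (illustrative) of the resulting device tree fragment for an
 * rv64 imafdc CPU node:
 *
 *     riscv,isa = "rv64imafdc_zicsr_zifencei";
 *     riscv,isa-base = "rv64i";
 *     riscv,isa-extensions = "i", "m", "a", "f", "d", "c",
 *                            "zicsr", "zifencei";
 */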
2880 #endif
2881
2882 #define DEFINE_CPU(type_name, misa_mxl_max, initfn) \
2883 { \
2884 .name = (type_name), \
2885 .parent = TYPE_RISCV_CPU, \
2886 .instance_init = (initfn), \
2887 .class_init = riscv_cpu_class_init, \
2888 .class_data = (void *)(misa_mxl_max) \
2889 }
2890
2891 #define DEFINE_DYNAMIC_CPU(type_name, misa_mxl_max, initfn) \
2892 { \
2893 .name = (type_name), \
2894 .parent = TYPE_RISCV_DYNAMIC_CPU, \
2895 .instance_init = (initfn), \
2896 .class_init = riscv_cpu_class_init, \
2897 .class_data = (void *)(misa_mxl_max) \
2898 }
2899
2900 #define DEFINE_VENDOR_CPU(type_name, misa_mxl_max, initfn) \
2901 { \
2902 .name = (type_name), \
2903 .parent = TYPE_RISCV_VENDOR_CPU, \
2904 .instance_init = (initfn), \
2905 .class_init = riscv_cpu_class_init, \
2906 .class_data = (void *)(misa_mxl_max) \
2907 }
2908
2909 #define DEFINE_BARE_CPU(type_name, misa_mxl_max, initfn) \
2910 { \
2911 .name = (type_name), \
2912 .parent = TYPE_RISCV_BARE_CPU, \
2913 .instance_init = (initfn), \
2914 .class_init = riscv_cpu_class_init, \
2915 .class_data = (void *)(misa_mxl_max) \
2916 }
2917
2918 #define DEFINE_PROFILE_CPU(type_name, misa_mxl_max, initfn) \
2919 { \
2920 .name = (type_name), \
2921 .parent = TYPE_RISCV_BARE_CPU, \
2922 .instance_init = (initfn), \
2923 .class_init = riscv_cpu_class_init, \
2924 .class_data = (void *)(misa_mxl_max) \
2925 }
2926
2927 static const TypeInfo riscv_cpu_type_infos[] = {
2928 {
2929 .name = TYPE_RISCV_CPU,
2930 .parent = TYPE_CPU,
2931 .instance_size = sizeof(RISCVCPU),
2932 .instance_align = __alignof(RISCVCPU),
2933 .instance_init = riscv_cpu_init,
2934 .instance_post_init = riscv_cpu_post_init,
2935 .abstract = true,
2936 .class_size = sizeof(RISCVCPUClass),
2937 .class_init = riscv_cpu_common_class_init,
2938 },
2939 {
2940 .name = TYPE_RISCV_DYNAMIC_CPU,
2941 .parent = TYPE_RISCV_CPU,
2942 .abstract = true,
2943 },
2944 {
2945 .name = TYPE_RISCV_VENDOR_CPU,
2946 .parent = TYPE_RISCV_CPU,
2947 .abstract = true,
2948 },
2949 {
2950 .name = TYPE_RISCV_BARE_CPU,
2951 .parent = TYPE_RISCV_CPU,
2952 .instance_init = riscv_bare_cpu_init,
2953 .abstract = true,
2954 },
2955 #if defined(TARGET_RISCV32)
2956 DEFINE_DYNAMIC_CPU(TYPE_RISCV_CPU_ANY, MXL_RV32, riscv_any_cpu_init),
2957 DEFINE_DYNAMIC_CPU(TYPE_RISCV_CPU_MAX, MXL_RV32, riscv_max_cpu_init),
2958 DEFINE_DYNAMIC_CPU(TYPE_RISCV_CPU_BASE32, MXL_RV32, rv32_base_cpu_init),
2959 DEFINE_VENDOR_CPU(TYPE_RISCV_CPU_IBEX, MXL_RV32, rv32_ibex_cpu_init),
2960 DEFINE_VENDOR_CPU(TYPE_RISCV_CPU_SIFIVE_E31, MXL_RV32, rv32_sifive_e_cpu_init),
2961 DEFINE_VENDOR_CPU(TYPE_RISCV_CPU_SIFIVE_E34, MXL_RV32, rv32_imafcu_nommu_cpu_init),
2962 DEFINE_VENDOR_CPU(TYPE_RISCV_CPU_SIFIVE_U34, MXL_RV32, rv32_sifive_u_cpu_init),
2963 DEFINE_BARE_CPU(TYPE_RISCV_CPU_RV32I, MXL_RV32, rv32i_bare_cpu_init),
2964 DEFINE_BARE_CPU(TYPE_RISCV_CPU_RV32E, MXL_RV32, rv32e_bare_cpu_init),
2965 #elif defined(TARGET_RISCV64)
2966 DEFINE_DYNAMIC_CPU(TYPE_RISCV_CPU_ANY, MXL_RV64, riscv_any_cpu_init),
2967 DEFINE_DYNAMIC_CPU(TYPE_RISCV_CPU_MAX, MXL_RV64, riscv_max_cpu_init),
2968 DEFINE_DYNAMIC_CPU(TYPE_RISCV_CPU_BASE64, MXL_RV64, rv64_base_cpu_init),
2969 DEFINE_VENDOR_CPU(TYPE_RISCV_CPU_SIFIVE_E51, MXL_RV64, rv64_sifive_e_cpu_init),
2970 DEFINE_VENDOR_CPU(TYPE_RISCV_CPU_SIFIVE_U54, MXL_RV64, rv64_sifive_u_cpu_init),
2971 DEFINE_VENDOR_CPU(TYPE_RISCV_CPU_SHAKTI_C, MXL_RV64, rv64_sifive_u_cpu_init),
2972 DEFINE_VENDOR_CPU(TYPE_RISCV_CPU_THEAD_C906, MXL_RV64, rv64_thead_c906_cpu_init),
2973 DEFINE_VENDOR_CPU(TYPE_RISCV_CPU_VEYRON_V1, MXL_RV64, rv64_veyron_v1_cpu_init),
2974 #ifdef CONFIG_TCG
2975 DEFINE_DYNAMIC_CPU(TYPE_RISCV_CPU_BASE128, MXL_RV128, rv128_base_cpu_init),
2976 #endif /* CONFIG_TCG */
2977 DEFINE_BARE_CPU(TYPE_RISCV_CPU_RV64I, MXL_RV64, rv64i_bare_cpu_init),
2978 DEFINE_BARE_CPU(TYPE_RISCV_CPU_RV64E, MXL_RV64, rv64e_bare_cpu_init),
2979 DEFINE_PROFILE_CPU(TYPE_RISCV_CPU_RVA22U64, MXL_RV64, rva22u64_profile_cpu_init),
2980 DEFINE_PROFILE_CPU(TYPE_RISCV_CPU_RVA22S64, MXL_RV64, rva22s64_profile_cpu_init),
2981 #endif /* TARGET_RISCV64 */
2982 };
2983
2984 DEFINE_TYPES(riscv_cpu_type_infos)
2985