/*
 * RISC-V VMState Description
 *
 * Copyright (c) 2020 Huawei Technologies Co., Ltd
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms and conditions of the GNU General Public License,
 * version 2 or later, as published by the Free Software Foundation.
 *
 * This program is distributed in the hope it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
 * more details.
 *
 * You should have received a copy of the GNU General Public License along with
 * this program. If not, see <http://www.gnu.org/licenses/>.
 */

#include "qemu/osdep.h"
#include "cpu.h"
#include "qemu/error-report.h"
#include "sysemu/kvm.h"
#include "migration/cpu.h"
#include "sysemu/cpu-timers.h"
#include "debug.h"

static bool pmp_needed(void *opaque)
{
    RISCVCPU *cpu = opaque;

    return cpu->cfg.pmp;
}

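/*
 * The in-core PMP rule cache is derived from the migrated addr/cfg
 * registers, so it has to be rebuilt once those have been loaded.
 */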
static int pmp_post_load(void *opaque, int version_id)
{
    RISCVCPU *cpu = opaque;
    CPURISCVState *env = &cpu->env;
    int i;

    for (i = 0; i < MAX_RISCV_PMPS; i++) {
        pmp_update_rule_addr(env, i);
    }
    pmp_update_rule_nums(env);

    return 0;
}

static const VMStateDescription vmstate_pmp_entry = {
    .name = "cpu/pmp/entry",
    .version_id = 1,
    .minimum_version_id = 1,
    .fields = (const VMStateField[]) {
        VMSTATE_UINTTL(addr_reg, pmp_entry_t),
        VMSTATE_UINT8(cfg_reg, pmp_entry_t),
        VMSTATE_END_OF_LIST()
    }
};

static const VMStateDescription vmstate_pmp = {
    .name = "cpu/pmp",
    .version_id = 1,
    .minimum_version_id = 1,
    .needed = pmp_needed,
    .post_load = pmp_post_load,
    .fields = (const VMStateField[]) {
        VMSTATE_STRUCT_ARRAY(env.pmp_state.pmp, RISCVCPU, MAX_RISCV_PMPS,
                             0, vmstate_pmp_entry, pmp_entry_t),
        VMSTATE_END_OF_LIST()
    }
};

static bool hyper_needed(void *opaque)
{
    RISCVCPU *cpu = opaque;
    CPURISCVState *env = &cpu->env;

    return riscv_has_ext(env, RVH);
}

static const VMStateDescription vmstate_hyper = {
    .name = "cpu/hyper",
    .version_id = 4,
    .minimum_version_id = 4,
    .needed = hyper_needed,
    .fields = (const VMStateField[]) {
        VMSTATE_UINTTL(env.hstatus, RISCVCPU),
        VMSTATE_UINTTL(env.hedeleg, RISCVCPU),
        VMSTATE_UINT64(env.hideleg, RISCVCPU),
        VMSTATE_UINT32(env.hcounteren, RISCVCPU),
        VMSTATE_UINTTL(env.htval, RISCVCPU),
        VMSTATE_UINTTL(env.htinst, RISCVCPU),
        VMSTATE_UINTTL(env.hgatp, RISCVCPU),
        VMSTATE_UINTTL(env.hgeie, RISCVCPU),
        VMSTATE_UINTTL(env.hgeip, RISCVCPU),
        VMSTATE_UINT64(env.hvien, RISCVCPU),
        VMSTATE_UINT64(env.hvip, RISCVCPU),
        VMSTATE_UINT64(env.htimedelta, RISCVCPU),
        VMSTATE_UINT64(env.vstimecmp, RISCVCPU),

        VMSTATE_UINTTL(env.hvictl, RISCVCPU),
        VMSTATE_UINT8_ARRAY(env.hviprio, RISCVCPU, 64),

        VMSTATE_UINT64(env.vsstatus, RISCVCPU),
        VMSTATE_UINTTL(env.vstvec, RISCVCPU),
        VMSTATE_UINTTL(env.vsscratch, RISCVCPU),
        VMSTATE_UINTTL(env.vsepc, RISCVCPU),
        VMSTATE_UINTTL(env.vscause, RISCVCPU),
        VMSTATE_UINTTL(env.vstval, RISCVCPU),
        VMSTATE_UINTTL(env.vsatp, RISCVCPU),
        VMSTATE_UINTTL(env.vsiselect, RISCVCPU),
        VMSTATE_UINT64(env.vsie, RISCVCPU),

        VMSTATE_UINTTL(env.mtval2, RISCVCPU),
        VMSTATE_UINTTL(env.mtinst, RISCVCPU),

        VMSTATE_UINTTL(env.stvec_hs, RISCVCPU),
        VMSTATE_UINTTL(env.sscratch_hs, RISCVCPU),
        VMSTATE_UINTTL(env.sepc_hs, RISCVCPU),
        VMSTATE_UINTTL(env.scause_hs, RISCVCPU),
        VMSTATE_UINTTL(env.stval_hs, RISCVCPU),
        VMSTATE_UINTTL(env.satp_hs, RISCVCPU),
        VMSTATE_UINT64(env.mstatus_hs, RISCVCPU),

        VMSTATE_END_OF_LIST()
    }
};

static bool vector_needed(void *opaque)
{
    RISCVCPU *cpu = opaque;
    CPURISCVState *env = &cpu->env;

    return riscv_has_ext(env, RVV);
}

static const VMStateDescription vmstate_vector = {
    .name = "cpu/vector",
    .version_id = 2,
    .minimum_version_id = 2,
    .needed = vector_needed,
    .fields = (const VMStateField[]) {
        VMSTATE_UINT64_ARRAY(env.vreg, RISCVCPU, 32 * RV_VLEN_MAX / 64),
        VMSTATE_UINTTL(env.vxrm, RISCVCPU),
        VMSTATE_UINTTL(env.vxsat, RISCVCPU),
        VMSTATE_UINTTL(env.vl, RISCVCPU),
        VMSTATE_UINTTL(env.vstart, RISCVCPU),
        VMSTATE_UINTTL(env.vtype, RISCVCPU),
        VMSTATE_BOOL(env.vill, RISCVCPU),
        VMSTATE_END_OF_LIST()
    }
};

static bool pointermasking_needed(void *opaque)
{
    RISCVCPU *cpu = opaque;
    CPURISCVState *env = &cpu->env;

    return riscv_has_ext(env, RVJ);
}

static const VMStateDescription vmstate_pointermasking = {
    .name = "cpu/pointer_masking",
    .version_id = 1,
    .minimum_version_id = 1,
    .needed = pointermasking_needed,
    .fields = (const VMStateField[]) {
        VMSTATE_UINTTL(env.mmte, RISCVCPU),
        VMSTATE_UINTTL(env.mpmmask, RISCVCPU),
        VMSTATE_UINTTL(env.mpmbase, RISCVCPU),
        VMSTATE_UINTTL(env.spmmask, RISCVCPU),
        VMSTATE_UINTTL(env.spmbase, RISCVCPU),
        VMSTATE_UINTTL(env.upmmask, RISCVCPU),
        VMSTATE_UINTTL(env.upmbase, RISCVCPU),

        VMSTATE_END_OF_LIST()
    }
};

static bool rv128_needed(void *opaque)
{
    RISCVCPUClass *mcc = RISCV_CPU_GET_CLASS(opaque);

    return mcc->misa_mxl_max == MXL_RV128;
}

static const VMStateDescription vmstate_rv128 = {
    .name = "cpu/rv128",
    .version_id = 1,
    .minimum_version_id = 1,
    .needed = rv128_needed,
    .fields = (const VMStateField[]) {
        VMSTATE_UINTTL_ARRAY(env.gprh, RISCVCPU, 32),
        VMSTATE_UINT64(env.mscratchh, RISCVCPU),
        VMSTATE_UINT64(env.sscratchh, RISCVCPU),
        VMSTATE_END_OF_LIST()
    }
};

#ifdef CONFIG_KVM
static bool kvmtimer_needed(void *opaque)
{
    return kvm_enabled();
}

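/*
 * Flag the restored timer values as dirty so they are pushed back to
 * KVM instead of being left stale in the kernel.
 */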
static int cpu_kvmtimer_post_load(void *opaque, int version_id)
{
    RISCVCPU *cpu = opaque;
    CPURISCVState *env = &cpu->env;

    env->kvm_timer_dirty = true;
    return 0;
}

static const VMStateDescription vmstate_kvmtimer = {
    .name = "cpu/kvmtimer",
    .version_id = 1,
    .minimum_version_id = 1,
    .needed = kvmtimer_needed,
    .post_load = cpu_kvmtimer_post_load,
    .fields = (const VMStateField[]) {
        VMSTATE_UINT64(env.kvm_timer_time, RISCVCPU),
        VMSTATE_UINT64(env.kvm_timer_compare, RISCVCPU),
        VMSTATE_UINT64(env.kvm_timer_state, RISCVCPU),
        VMSTATE_END_OF_LIST()
    }
};
#endif

static bool debug_needed(void *opaque)
{
    RISCVCPU *cpu = opaque;

    return cpu->cfg.debug;
}

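/*
 * itrigger_enabled is not migrated; re-derive it from the restored
 * trigger CSRs. It is only relevant when icount is in use.
 */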
static int debug_post_load(void *opaque, int version_id)
{
    RISCVCPU *cpu = opaque;
    CPURISCVState *env = &cpu->env;

    if (icount_enabled()) {
        env->itrigger_enabled = riscv_itrigger_enabled(env);
    }

    return 0;
}

static const VMStateDescription vmstate_debug = {
    .name = "cpu/debug",
    .version_id = 2,
    .minimum_version_id = 2,
    .needed = debug_needed,
    .post_load = debug_post_load,
    .fields = (const VMStateField[]) {
        VMSTATE_UINTTL(env.trigger_cur, RISCVCPU),
        VMSTATE_UINTTL_ARRAY(env.tdata1, RISCVCPU, RV_MAX_TRIGGERS),
        VMSTATE_UINTTL_ARRAY(env.tdata2, RISCVCPU, RV_MAX_TRIGGERS),
        VMSTATE_UINTTL_ARRAY(env.tdata3, RISCVCPU, RV_MAX_TRIGGERS),
        VMSTATE_END_OF_LIST()
    }
};

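/*
 * Recompute state that is derived from the CSRs restored above:
 * the effective XLEN and the current pointer-masking settings.
 */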
static int riscv_cpu_post_load(void *opaque, int version_id)
{
    RISCVCPU *cpu = opaque;
    CPURISCVState *env = &cpu->env;

    env->xl = cpu_recompute_xl(env);
    riscv_cpu_update_mask(env);
    return 0;
}

static bool smstateen_needed(void *opaque)
{
    RISCVCPU *cpu = opaque;

    return cpu->cfg.ext_smstateen;
}

static const VMStateDescription vmstate_smstateen = {
    .name = "cpu/smtateen",
    .version_id = 1,
    .minimum_version_id = 1,
    .needed = smstateen_needed,
    .fields = (const VMStateField[]) {
        VMSTATE_UINT64_ARRAY(env.mstateen, RISCVCPU, 4),
        VMSTATE_UINT64_ARRAY(env.hstateen, RISCVCPU, 4),
        VMSTATE_UINT64_ARRAY(env.sstateen, RISCVCPU, 4),
        VMSTATE_END_OF_LIST()
    }
};

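/* The menvcfg/senvcfg/henvcfg CSRs were introduced in priv spec v1.12. */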
static bool envcfg_needed(void *opaque)
{
    RISCVCPU *cpu = opaque;
    CPURISCVState *env = &cpu->env;

    return (env->priv_ver >= PRIV_VERSION_1_12_0 ? 1 : 0);
}

static const VMStateDescription vmstate_envcfg = {
    .name = "cpu/envcfg",
    .version_id = 1,
    .minimum_version_id = 1,
    .needed = envcfg_needed,
    .fields = (const VMStateField[]) {
        VMSTATE_UINT64(env.menvcfg, RISCVCPU),
        VMSTATE_UINTTL(env.senvcfg, RISCVCPU),
        VMSTATE_UINT64(env.henvcfg, RISCVCPU),
        VMSTATE_END_OF_LIST()
    }
};

static bool pmu_needed(void *opaque)
{
    RISCVCPU *cpu = opaque;

    return (cpu->cfg.pmu_mask > 0);
}

static const VMStateDescription vmstate_pmu_ctr_state = {
    .name = "cpu/pmu",
    .version_id = 2,
    .minimum_version_id = 2,
    .needed = pmu_needed,
    .fields = (const VMStateField[]) {
        VMSTATE_UINTTL(mhpmcounter_val, PMUCTRState),
        VMSTATE_UINTTL(mhpmcounterh_val, PMUCTRState),
        VMSTATE_UINTTL(mhpmcounter_prev, PMUCTRState),
        VMSTATE_UINTTL(mhpmcounterh_prev, PMUCTRState),
        VMSTATE_END_OF_LIST()
    }
};

static bool jvt_needed(void *opaque)
{
    RISCVCPU *cpu = opaque;

    return cpu->cfg.ext_zcmt;
}

static const VMStateDescription vmstate_jvt = {
    .name = "cpu/jvt",
    .version_id = 1,
    .minimum_version_id = 1,
    .needed = jvt_needed,
    .fields = (const VMStateField[]) {
        VMSTATE_UINTTL(env.jvt, RISCVCPU),
        VMSTATE_END_OF_LIST()
    }
};

static bool elp_needed(void *opaque)
{
    RISCVCPU *cpu = opaque;

    return cpu->cfg.ext_zicfilp;
}

static const VMStateDescription vmstate_elp = {
    .name = "cpu/elp",
    .version_id = 1,
    .minimum_version_id = 1,
    .needed = elp_needed,
    .fields = (const VMStateField[]) {
        VMSTATE_BOOL(env.elp, RISCVCPU),
        VMSTATE_END_OF_LIST()
    }
};

static bool ssp_needed(void *opaque)
{
    RISCVCPU *cpu = opaque;

    return cpu->cfg.ext_zicfiss;
}

static const VMStateDescription vmstate_ssp = {
    .name = "cpu/ssp",
    .version_id = 1,
    .minimum_version_id = 1,
    .needed = ssp_needed,
    .fields = (const VMStateField[]) {
        VMSTATE_UINTTL(env.ssp, RISCVCPU),
        VMSTATE_END_OF_LIST()
    }
};

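/*
 * Top-level CPU migration state. Each subsection below is only sent
 * when its .needed callback returns true on the source.
 */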
const VMStateDescription vmstate_riscv_cpu = {
    .name = "cpu",
    .version_id = 10,
    .minimum_version_id = 10,
    .post_load = riscv_cpu_post_load,
    .fields = (const VMStateField[]) {
        VMSTATE_UINTTL_ARRAY(env.gpr, RISCVCPU, 32),
        VMSTATE_UINT64_ARRAY(env.fpr, RISCVCPU, 32),
        VMSTATE_UINT8_ARRAY(env.miprio, RISCVCPU, 64),
        VMSTATE_UINT8_ARRAY(env.siprio, RISCVCPU, 64),
        VMSTATE_UINTTL(env.pc, RISCVCPU),
        VMSTATE_UINTTL(env.load_res, RISCVCPU),
        VMSTATE_UINTTL(env.load_val, RISCVCPU),
        VMSTATE_UINTTL(env.frm, RISCVCPU),
        VMSTATE_UINTTL(env.badaddr, RISCVCPU),
        VMSTATE_UINTTL(env.guest_phys_fault_addr, RISCVCPU),
        VMSTATE_UINTTL(env.priv_ver, RISCVCPU),
        VMSTATE_UINTTL(env.vext_ver, RISCVCPU),
        VMSTATE_UINT32(env.misa_mxl, RISCVCPU),
        VMSTATE_UINT32(env.misa_ext, RISCVCPU),
        VMSTATE_UNUSED(4),
        VMSTATE_UINT32(env.misa_ext_mask, RISCVCPU),
        VMSTATE_UINTTL(env.priv, RISCVCPU),
        VMSTATE_BOOL(env.virt_enabled, RISCVCPU),
        VMSTATE_UINT64(env.resetvec, RISCVCPU),
        VMSTATE_UINTTL(env.mhartid, RISCVCPU),
        VMSTATE_UINT64(env.mstatus, RISCVCPU),
        VMSTATE_UINT64(env.mip, RISCVCPU),
        VMSTATE_UINT64(env.miclaim, RISCVCPU),
        VMSTATE_UINT64(env.mie, RISCVCPU),
        VMSTATE_UINT64(env.mvien, RISCVCPU),
        VMSTATE_UINT64(env.mvip, RISCVCPU),
        VMSTATE_UINT64(env.sie, RISCVCPU),
        VMSTATE_UINT64(env.mideleg, RISCVCPU),
        VMSTATE_UINTTL(env.satp, RISCVCPU),
        VMSTATE_UINTTL(env.stval, RISCVCPU),
        VMSTATE_UINTTL(env.medeleg, RISCVCPU),
        VMSTATE_UINTTL(env.stvec, RISCVCPU),
        VMSTATE_UINTTL(env.sepc, RISCVCPU),
        VMSTATE_UINTTL(env.scause, RISCVCPU),
        VMSTATE_UINTTL(env.mtvec, RISCVCPU),
        VMSTATE_UINTTL(env.mepc, RISCVCPU),
        VMSTATE_UINTTL(env.mcause, RISCVCPU),
        VMSTATE_UINTTL(env.mtval, RISCVCPU),
        VMSTATE_UINTTL(env.miselect, RISCVCPU),
        VMSTATE_UINTTL(env.siselect, RISCVCPU),
        VMSTATE_UINT32(env.scounteren, RISCVCPU),
        VMSTATE_UINT32(env.mcounteren, RISCVCPU),
        VMSTATE_UINT32(env.mcountinhibit, RISCVCPU),
        VMSTATE_STRUCT_ARRAY(env.pmu_ctrs, RISCVCPU, RV_MAX_MHPMCOUNTERS, 0,
                             vmstate_pmu_ctr_state, PMUCTRState),
        VMSTATE_UINTTL_ARRAY(env.mhpmevent_val, RISCVCPU, RV_MAX_MHPMEVENTS),
        VMSTATE_UINTTL_ARRAY(env.mhpmeventh_val, RISCVCPU, RV_MAX_MHPMEVENTS),
        VMSTATE_UINTTL(env.sscratch, RISCVCPU),
        VMSTATE_UINTTL(env.mscratch, RISCVCPU),
        VMSTATE_UINT64(env.stimecmp, RISCVCPU),

        VMSTATE_END_OF_LIST()
    },
    .subsections = (const VMStateDescription * const []) {
        &vmstate_pmp,
        &vmstate_hyper,
        &vmstate_vector,
        &vmstate_pointermasking,
        &vmstate_rv128,
#ifdef CONFIG_KVM
        &vmstate_kvmtimer,
#endif
        &vmstate_envcfg,
        &vmstate_debug,
        &vmstate_smstateen,
        &vmstate_jvt,
        &vmstate_elp,
        &vmstate_ssp,
        NULL
    }
};