/*
 * RISC-V VMState Description
 *
 * Copyright (c) 2020 Huawei Technologies Co., Ltd
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms and conditions of the GNU General Public License,
 * version 2 or later, as published by the Free Software Foundation.
 *
 * This program is distributed in the hope it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
 * more details.
 *
 * You should have received a copy of the GNU General Public License along with
 * this program. If not, see <http://www.gnu.org/licenses/>.
 */

#include "qemu/osdep.h"
#include "cpu.h"
#include "qemu/error-report.h"
#include "sysemu/kvm.h"
#include "migration/cpu.h"
#include "sysemu/cpu-timers.h"
#include "debug.h"

static bool pmp_needed(void *opaque)
{
    RISCVCPU *cpu = opaque;

    return cpu->cfg.pmp;
}

static int pmp_post_load(void *opaque, int version_id)
{
    RISCVCPU *cpu = opaque;
    CPURISCVState *env = &cpu->env;
    int i;

    for (i = 0; i < MAX_RISCV_PMPS; i++) {
        pmp_update_rule_addr(env, i);
    }
    pmp_update_rule_nums(env);

    return 0;
}

static const VMStateDescription vmstate_pmp_entry = {
    .name = "cpu/pmp/entry",
    .version_id = 1,
    .minimum_version_id = 1,
    .fields = (VMStateField[]) {
        VMSTATE_UINTTL(addr_reg, pmp_entry_t),
        VMSTATE_UINT8(cfg_reg, pmp_entry_t),
        VMSTATE_END_OF_LIST()
    }
};

static const VMStateDescription vmstate_pmp = {
    .name = "cpu/pmp",
    .version_id = 1,
    .minimum_version_id = 1,
    .needed = pmp_needed,
    .post_load = pmp_post_load,
    .fields = (VMStateField[]) {
        VMSTATE_STRUCT_ARRAY(env.pmp_state.pmp, RISCVCPU, MAX_RISCV_PMPS,
                             0, vmstate_pmp_entry, pmp_entry_t),
        VMSTATE_END_OF_LIST()
    }
};

static bool hyper_needed(void *opaque)
{
    RISCVCPU *cpu = opaque;
    CPURISCVState *env = &cpu->env;

    return riscv_has_ext(env, RVH);
}

static const VMStateDescription vmstate_hyper = {
    .name = "cpu/hyper",
    .version_id = 2,
    .minimum_version_id = 2,
    .needed = hyper_needed,
    .fields = (VMStateField[]) {
        VMSTATE_UINTTL(env.hstatus, RISCVCPU),
        VMSTATE_UINTTL(env.hedeleg, RISCVCPU),
        VMSTATE_UINT64(env.hideleg, RISCVCPU),
        VMSTATE_UINTTL(env.hcounteren, RISCVCPU),
        VMSTATE_UINTTL(env.htval, RISCVCPU),
        VMSTATE_UINTTL(env.htinst, RISCVCPU),
        VMSTATE_UINTTL(env.hgatp, RISCVCPU),
        VMSTATE_UINTTL(env.hgeie, RISCVCPU),
        VMSTATE_UINTTL(env.hgeip, RISCVCPU),
        VMSTATE_UINT64(env.htimedelta, RISCVCPU),
        VMSTATE_UINT64(env.vstimecmp, RISCVCPU),

        VMSTATE_UINTTL(env.hvictl, RISCVCPU),
        VMSTATE_UINT8_ARRAY(env.hviprio, RISCVCPU, 64),

        VMSTATE_UINT64(env.vsstatus, RISCVCPU),
        VMSTATE_UINTTL(env.vstvec, RISCVCPU),
        VMSTATE_UINTTL(env.vsscratch, RISCVCPU),
        VMSTATE_UINTTL(env.vsepc, RISCVCPU),
        VMSTATE_UINTTL(env.vscause, RISCVCPU),
        VMSTATE_UINTTL(env.vstval, RISCVCPU),
        VMSTATE_UINTTL(env.vsatp, RISCVCPU),
        VMSTATE_UINTTL(env.vsiselect, RISCVCPU),

        VMSTATE_UINTTL(env.mtval2, RISCVCPU),
        VMSTATE_UINTTL(env.mtinst, RISCVCPU),

        VMSTATE_UINTTL(env.stvec_hs, RISCVCPU),
        VMSTATE_UINTTL(env.sscratch_hs, RISCVCPU),
        VMSTATE_UINTTL(env.sepc_hs, RISCVCPU),
        VMSTATE_UINTTL(env.scause_hs, RISCVCPU),
        VMSTATE_UINTTL(env.stval_hs, RISCVCPU),
        VMSTATE_UINTTL(env.satp_hs, RISCVCPU),
        VMSTATE_UINT64(env.mstatus_hs, RISCVCPU),

        VMSTATE_END_OF_LIST()
    }
};

static bool vector_needed(void *opaque)
{
    RISCVCPU *cpu = opaque;
    CPURISCVState *env = &cpu->env;

    return riscv_has_ext(env, RVV);
}

static const VMStateDescription vmstate_vector = {
    .name = "cpu/vector",
    .version_id = 2,
    .minimum_version_id = 2,
    .needed = vector_needed,
    .fields = (VMStateField[]) {
        VMSTATE_UINT64_ARRAY(env.vreg, RISCVCPU, 32 * RV_VLEN_MAX / 64),
        VMSTATE_UINTTL(env.vxrm, RISCVCPU),
        VMSTATE_UINTTL(env.vxsat, RISCVCPU),
        VMSTATE_UINTTL(env.vl, RISCVCPU),
        VMSTATE_UINTTL(env.vstart, RISCVCPU),
        VMSTATE_UINTTL(env.vtype, RISCVCPU),
        VMSTATE_BOOL(env.vill, RISCVCPU),
        VMSTATE_END_OF_LIST()
    }
};

static bool pointermasking_needed(void *opaque)
{
    RISCVCPU *cpu = opaque;
    CPURISCVState *env = &cpu->env;

    return riscv_has_ext(env, RVJ);
}

static const VMStateDescription vmstate_pointermasking = {
    .name = "cpu/pointer_masking",
    .version_id = 1,
    .minimum_version_id = 1,
    .needed = pointermasking_needed,
    .fields = (VMStateField[]) {
        VMSTATE_UINTTL(env.mmte, RISCVCPU),
        VMSTATE_UINTTL(env.mpmmask, RISCVCPU),
        VMSTATE_UINTTL(env.mpmbase, RISCVCPU),
        VMSTATE_UINTTL(env.spmmask, RISCVCPU),
        VMSTATE_UINTTL(env.spmbase, RISCVCPU),
        VMSTATE_UINTTL(env.upmmask, RISCVCPU),
        VMSTATE_UINTTL(env.upmbase, RISCVCPU),

        VMSTATE_END_OF_LIST()
    }
};

static bool rv128_needed(void *opaque)
{
    RISCVCPU *cpu = opaque;
    CPURISCVState *env = &cpu->env;

    return env->misa_mxl_max == MXL_RV128;
}

static const VMStateDescription vmstate_rv128 = {
    .name = "cpu/rv128",
    .version_id = 1,
    .minimum_version_id = 1,
    .needed = rv128_needed,
    .fields = (VMStateField[]) {
        VMSTATE_UINTTL_ARRAY(env.gprh, RISCVCPU, 32),
        VMSTATE_UINT64(env.mscratchh, RISCVCPU),
        VMSTATE_UINT64(env.sscratchh, RISCVCPU),
        VMSTATE_END_OF_LIST()
    }
};

#ifdef CONFIG_KVM
static bool kvmtimer_needed(void *opaque)
{
    return kvm_enabled();
}

static int cpu_kvmtimer_post_load(void *opaque, int version_id)
{
    RISCVCPU *cpu = opaque;
    CPURISCVState *env = &cpu->env;

    env->kvm_timer_dirty = true;
    return 0;
}

static const VMStateDescription vmstate_kvmtimer = {
    .name = "cpu/kvmtimer",
    .version_id = 1,
    .minimum_version_id = 1,
    .needed = kvmtimer_needed,
    .post_load = cpu_kvmtimer_post_load,
    .fields = (VMStateField[]) {
        VMSTATE_UINT64(env.kvm_timer_time, RISCVCPU),
        VMSTATE_UINT64(env.kvm_timer_compare, RISCVCPU),
        VMSTATE_UINT64(env.kvm_timer_state, RISCVCPU),
        VMSTATE_END_OF_LIST()
    }
};
#endif

static bool debug_needed(void *opaque)
{
    RISCVCPU *cpu = opaque;

    return cpu->cfg.debug;
}

static int debug_post_load(void *opaque, int version_id)
{
    RISCVCPU *cpu = opaque;
    CPURISCVState *env = &cpu->env;

    if (icount_enabled()) {
        env->itrigger_enabled = riscv_itrigger_enabled(env);
    }

    return 0;
}

static const VMStateDescription vmstate_debug = {
    .name = "cpu/debug",
    .version_id = 2,
    .minimum_version_id = 2,
    .needed = debug_needed,
    .post_load = debug_post_load,
    .fields = (VMStateField[]) {
        VMSTATE_UINTTL(env.trigger_cur, RISCVCPU),
        VMSTATE_UINTTL_ARRAY(env.tdata1, RISCVCPU, RV_MAX_TRIGGERS),
        VMSTATE_UINTTL_ARRAY(env.tdata2, RISCVCPU, RV_MAX_TRIGGERS),
        VMSTATE_UINTTL_ARRAY(env.tdata3, RISCVCPU, RV_MAX_TRIGGERS),
        VMSTATE_END_OF_LIST()
    }
};

static int riscv_cpu_post_load(void *opaque, int version_id)
{
    RISCVCPU *cpu = opaque;
    CPURISCVState *env = &cpu->env;

    env->xl = cpu_recompute_xl(env);
    riscv_cpu_update_mask(env);
    return 0;
}

static bool smstateen_needed(void *opaque)
{
    RISCVCPU *cpu = opaque;

    return cpu->cfg.ext_smstateen;
}

static const VMStateDescription vmstate_smstateen = {
    .name = "cpu/smtateen",
    .version_id = 1,
    .minimum_version_id = 1,
    .needed = smstateen_needed,
    .fields = (VMStateField[]) {
        VMSTATE_UINT64_ARRAY(env.mstateen, RISCVCPU, 4),
        VMSTATE_UINT64_ARRAY(env.hstateen, RISCVCPU, 4),
        VMSTATE_UINT64_ARRAY(env.sstateen, RISCVCPU, 4),
        VMSTATE_END_OF_LIST()
    }
};

static bool envcfg_needed(void *opaque)
{
    RISCVCPU *cpu = opaque;
    CPURISCVState *env = &cpu->env;

    return (env->priv_ver >= PRIV_VERSION_1_12_0 ? 1 : 0);
}

static const VMStateDescription vmstate_envcfg = {
    .name = "cpu/envcfg",
    .version_id = 1,
    .minimum_version_id = 1,
    .needed = envcfg_needed,
    .fields = (VMStateField[]) {
        VMSTATE_UINT64(env.menvcfg, RISCVCPU),
        VMSTATE_UINTTL(env.senvcfg, RISCVCPU),
        VMSTATE_UINT64(env.henvcfg, RISCVCPU),
        VMSTATE_END_OF_LIST()
    }
};

static bool pmu_needed(void *opaque)
{
    RISCVCPU *cpu = opaque;

    return cpu->cfg.pmu_num;
}

static const VMStateDescription vmstate_pmu_ctr_state = {
    .name = "cpu/pmu",
    .version_id = 1,
    .minimum_version_id = 1,
    .needed = pmu_needed,
    .fields = (VMStateField[]) {
        VMSTATE_UINTTL(mhpmcounter_val, PMUCTRState),
        VMSTATE_UINTTL(mhpmcounterh_val, PMUCTRState),
        VMSTATE_UINTTL(mhpmcounter_prev, PMUCTRState),
        VMSTATE_UINTTL(mhpmcounterh_prev, PMUCTRState),
        VMSTATE_BOOL(started, PMUCTRState),
        VMSTATE_END_OF_LIST()
    }
};

static bool jvt_needed(void *opaque)
{
    RISCVCPU *cpu = opaque;

    return cpu->cfg.ext_zcmt;
}

static const VMStateDescription vmstate_jvt = {
    .name = "cpu/jvt",
    .version_id = 1,
    .minimum_version_id = 1,
    .needed = jvt_needed,
    .fields = (VMStateField[]) {
        VMSTATE_UINTTL(env.jvt, RISCVCPU),
        VMSTATE_END_OF_LIST()
    }
};

const VMStateDescription vmstate_riscv_cpu = {
    .name = "cpu",
    .version_id = 8,
    .minimum_version_id = 8,
    .post_load = riscv_cpu_post_load,
    .fields = (VMStateField[]) {
        VMSTATE_UINTTL_ARRAY(env.gpr, RISCVCPU, 32),
        VMSTATE_UINT64_ARRAY(env.fpr, RISCVCPU, 32),
        VMSTATE_UINT8_ARRAY(env.miprio, RISCVCPU, 64),
        VMSTATE_UINT8_ARRAY(env.siprio, RISCVCPU, 64),
        VMSTATE_UINTTL(env.pc, RISCVCPU),
        VMSTATE_UINTTL(env.load_res, RISCVCPU),
        VMSTATE_UINTTL(env.load_val, RISCVCPU),
        VMSTATE_UINTTL(env.frm, RISCVCPU),
        VMSTATE_UINTTL(env.badaddr, RISCVCPU),
        VMSTATE_UINTTL(env.guest_phys_fault_addr, RISCVCPU),
        VMSTATE_UINTTL(env.priv_ver, RISCVCPU),
        VMSTATE_UINTTL(env.vext_ver, RISCVCPU),
        VMSTATE_UINT32(env.misa_mxl, RISCVCPU),
        VMSTATE_UINT32(env.misa_ext, RISCVCPU),
        VMSTATE_UINT32(env.misa_mxl_max, RISCVCPU),
        VMSTATE_UINT32(env.misa_ext_mask, RISCVCPU),
        VMSTATE_UINTTL(env.priv, RISCVCPU),
        VMSTATE_BOOL(env.virt_enabled, RISCVCPU),
        VMSTATE_UINT64(env.resetvec, RISCVCPU),
        VMSTATE_UINTTL(env.mhartid, RISCVCPU),
        VMSTATE_UINT64(env.mstatus, RISCVCPU),
        VMSTATE_UINT64(env.mip, RISCVCPU),
        VMSTATE_UINT64(env.miclaim, RISCVCPU),
        VMSTATE_UINT64(env.mie, RISCVCPU),
        VMSTATE_UINT64(env.mideleg, RISCVCPU),
        VMSTATE_UINTTL(env.satp, RISCVCPU),
        VMSTATE_UINTTL(env.stval, RISCVCPU),
        VMSTATE_UINTTL(env.medeleg, RISCVCPU),
        VMSTATE_UINTTL(env.stvec, RISCVCPU),
        VMSTATE_UINTTL(env.sepc, RISCVCPU),
        VMSTATE_UINTTL(env.scause, RISCVCPU),
        VMSTATE_UINTTL(env.mtvec, RISCVCPU),
        VMSTATE_UINTTL(env.mepc, RISCVCPU),
        VMSTATE_UINTTL(env.mcause, RISCVCPU),
        VMSTATE_UINTTL(env.mtval, RISCVCPU),
        VMSTATE_UINTTL(env.miselect, RISCVCPU),
        VMSTATE_UINTTL(env.siselect, RISCVCPU),
        VMSTATE_UINTTL(env.scounteren, RISCVCPU),
        VMSTATE_UINTTL(env.mcounteren, RISCVCPU),
        VMSTATE_UINTTL(env.mcountinhibit, RISCVCPU),
        VMSTATE_STRUCT_ARRAY(env.pmu_ctrs, RISCVCPU, RV_MAX_MHPMCOUNTERS, 0,
                             vmstate_pmu_ctr_state, PMUCTRState),
        VMSTATE_UINTTL_ARRAY(env.mhpmevent_val, RISCVCPU, RV_MAX_MHPMEVENTS),
        VMSTATE_UINTTL_ARRAY(env.mhpmeventh_val, RISCVCPU, RV_MAX_MHPMEVENTS),
        VMSTATE_UINTTL(env.sscratch, RISCVCPU),
        VMSTATE_UINTTL(env.mscratch, RISCVCPU),
        VMSTATE_UINT64(env.stimecmp, RISCVCPU),

        VMSTATE_END_OF_LIST()
    },
    .subsections = (const VMStateDescription * []) {
        &vmstate_pmp,
        &vmstate_hyper,
        &vmstate_vector,
        &vmstate_pointermasking,
        &vmstate_rv128,
#ifdef CONFIG_KVM
        &vmstate_kvmtimer,
#endif
        &vmstate_envcfg,
        &vmstate_debug,
        &vmstate_smstateen,
        &vmstate_jvt,
        NULL
    }
};
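
/*
 * Illustrative sketch, not part of the original file: each subsection
 * above is only put on the migration stream when its .needed callback
 * returns true on the source, which is how optional state (PMP, the
 * hypervisor CSRs, vector state, ...) stays backward compatible.  For an
 * ordinary qdev device, a VMStateDescription like these would typically
 * be attached in class_init as sketched below; the device and vmsd names
 * here are hypothetical, and CPU state such as vmstate_riscv_cpu is
 * instead wired up by the CPU class code in target/riscv/cpu.c.
 */
#if 0
static void example_device_class_init(ObjectClass *klass, void *data)
{
    DeviceClass *dc = DEVICE_CLASS(klass);

    /* Hypothetical vmsd for a hypothetical device, shown for illustration. */
    dc->vmsd = &vmstate_example_device;
}
#endif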