/*
 * RISC-V VMState Description
 *
 * Copyright (c) 2020 Huawei Technologies Co., Ltd
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms and conditions of the GNU General Public License,
 * version 2 or later, as published by the Free Software Foundation.
 *
 * This program is distributed in the hope it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
 * more details.
 *
 * You should have received a copy of the GNU General Public License along with
 * this program. If not, see <http://www.gnu.org/licenses/>.
 */

#include "qemu/osdep.h"
#include "cpu.h"
#include "qemu/error-report.h"
#include "sysemu/kvm.h"
#include "migration/cpu.h"

static bool pmp_needed(void *opaque)
{
    RISCVCPU *cpu = opaque;
    CPURISCVState *env = &cpu->env;

    return riscv_feature(env, RISCV_FEATURE_PMP);
}

/*
 * The per-entry PMP rules are cached state derived from the pmpaddr/pmpcfg
 * values, so recompute them after the raw registers have been restored.
 */
static int pmp_post_load(void *opaque, int version_id)
{
    RISCVCPU *cpu = opaque;
    CPURISCVState *env = &cpu->env;
    int i;

    for (i = 0; i < MAX_RISCV_PMPS; i++) {
        pmp_update_rule_addr(env, i);
    }
    pmp_update_rule_nums(env);

    return 0;
}

static const VMStateDescription vmstate_pmp_entry = {
    .name = "cpu/pmp/entry",
    .version_id = 1,
    .minimum_version_id = 1,
    .fields = (VMStateField[]) {
        VMSTATE_UINTTL(addr_reg, pmp_entry_t),
        VMSTATE_UINT8(cfg_reg, pmp_entry_t),
        VMSTATE_END_OF_LIST()
    }
};

static const VMStateDescription vmstate_pmp = {
    .name = "cpu/pmp",
    .version_id = 1,
    .minimum_version_id = 1,
    .needed = pmp_needed,
    .post_load = pmp_post_load,
    .fields = (VMStateField[]) {
        VMSTATE_STRUCT_ARRAY(env.pmp_state.pmp, RISCVCPU, MAX_RISCV_PMPS,
                             0, vmstate_pmp_entry, pmp_entry_t),
        VMSTATE_END_OF_LIST()
    }
};

static bool hyper_needed(void *opaque)
{
    RISCVCPU *cpu = opaque;
    CPURISCVState *env = &cpu->env;

    return riscv_has_ext(env, RVH);
}

static const VMStateDescription vmstate_hyper = {
    .name = "cpu/hyper",
    .version_id = 1,
    .minimum_version_id = 1,
    .needed = hyper_needed,
    .fields = (VMStateField[]) {
        VMSTATE_UINTTL(env.hstatus, RISCVCPU),
        VMSTATE_UINTTL(env.hedeleg, RISCVCPU),
        VMSTATE_UINTTL(env.hideleg, RISCVCPU),
        VMSTATE_UINTTL(env.hcounteren, RISCVCPU),
        VMSTATE_UINTTL(env.htval, RISCVCPU),
        VMSTATE_UINTTL(env.htinst, RISCVCPU),
        VMSTATE_UINTTL(env.hgatp, RISCVCPU),
        VMSTATE_UINT64(env.htimedelta, RISCVCPU),

        VMSTATE_UINT64(env.vsstatus, RISCVCPU),
        VMSTATE_UINTTL(env.vstvec, RISCVCPU),
        VMSTATE_UINTTL(env.vsscratch, RISCVCPU),
        VMSTATE_UINTTL(env.vsepc, RISCVCPU),
        VMSTATE_UINTTL(env.vscause, RISCVCPU),
        VMSTATE_UINTTL(env.vstval, RISCVCPU),
        VMSTATE_UINTTL(env.vsatp, RISCVCPU),

        VMSTATE_UINTTL(env.mtval2, RISCVCPU),
        VMSTATE_UINTTL(env.mtinst, RISCVCPU),

        VMSTATE_UINTTL(env.stvec_hs, RISCVCPU),
        VMSTATE_UINTTL(env.sscratch_hs, RISCVCPU),
        VMSTATE_UINTTL(env.sepc_hs, RISCVCPU),
        VMSTATE_UINTTL(env.scause_hs, RISCVCPU),
        VMSTATE_UINTTL(env.stval_hs, RISCVCPU),
        VMSTATE_UINTTL(env.satp_hs, RISCVCPU),
        VMSTATE_UINT64(env.mstatus_hs, RISCVCPU),

        VMSTATE_END_OF_LIST()
    }
};

static bool vector_needed(void *opaque)
{
    RISCVCPU *cpu = opaque;
    CPURISCVState *env = &cpu->env;

    return riscv_has_ext(env, RVV);
}

static const VMStateDescription vmstate_vector = {
    .name = "cpu/vector",
    .version_id = 1,
    .minimum_version_id = 1,
    .needed = vector_needed,
    .fields = (VMStateField[]) {
        VMSTATE_UINT64_ARRAY(env.vreg, RISCVCPU, 32 * RV_VLEN_MAX / 64),
        VMSTATE_UINTTL(env.vxrm, RISCVCPU),
        VMSTATE_UINTTL(env.vxsat, RISCVCPU),
        VMSTATE_UINTTL(env.vl, RISCVCPU),
        VMSTATE_UINTTL(env.vstart, RISCVCPU),
        VMSTATE_UINTTL(env.vtype, RISCVCPU),
        VMSTATE_END_OF_LIST()
    }
};

static bool pointermasking_needed(void *opaque)
{
    RISCVCPU *cpu = opaque;
    CPURISCVState *env = &cpu->env;

    return riscv_has_ext(env, RVJ);
}

static const VMStateDescription vmstate_pointermasking = {
    .name = "cpu/pointer_masking",
    .version_id = 1,
    .minimum_version_id = 1,
    .needed = pointermasking_needed,
    .fields = (VMStateField[]) {
        VMSTATE_UINTTL(env.mmte, RISCVCPU),
        VMSTATE_UINTTL(env.mpmmask, RISCVCPU),
        VMSTATE_UINTTL(env.mpmbase, RISCVCPU),
        VMSTATE_UINTTL(env.spmmask, RISCVCPU),
        VMSTATE_UINTTL(env.spmbase, RISCVCPU),
        VMSTATE_UINTTL(env.upmmask, RISCVCPU),
        VMSTATE_UINTTL(env.upmbase, RISCVCPU),

        VMSTATE_END_OF_LIST()
    }
};

static bool rv128_needed(void *opaque)
{
    RISCVCPU *cpu = opaque;
    CPURISCVState *env = &cpu->env;

    return env->misa_mxl_max == MXL_RV128;
}

static const VMStateDescription vmstate_rv128 = {
    .name = "cpu/rv128",
    .version_id = 1,
    .minimum_version_id = 1,
    .needed = rv128_needed,
    .fields = (VMStateField[]) {
        VMSTATE_UINTTL_ARRAY(env.gprh, RISCVCPU, 32),
        VMSTATE_UINT64(env.mscratchh, RISCVCPU),
        VMSTATE_UINT64(env.sscratchh, RISCVCPU),
        VMSTATE_END_OF_LIST()
    }
};

/*
 * Main CPU VMState. Optional per-feature state (PMP, H, V, J, RV128) is
 * carried in the subsections below, gated by their .needed callbacks.
 */
const VMStateDescription vmstate_riscv_cpu = {
    .name = "cpu",
    .version_id = 3,
    .minimum_version_id = 3,
    .fields = (VMStateField[]) {
        VMSTATE_UINTTL_ARRAY(env.gpr, RISCVCPU, 32),
        VMSTATE_UINT64_ARRAY(env.fpr, RISCVCPU, 32),
        VMSTATE_UINTTL(env.pc, RISCVCPU),
        VMSTATE_UINTTL(env.load_res, RISCVCPU),
        VMSTATE_UINTTL(env.load_val, RISCVCPU),
        VMSTATE_UINTTL(env.frm, RISCVCPU),
        VMSTATE_UINTTL(env.badaddr, RISCVCPU),
        VMSTATE_UINTTL(env.guest_phys_fault_addr, RISCVCPU),
        VMSTATE_UINTTL(env.priv_ver, RISCVCPU),
        VMSTATE_UINTTL(env.vext_ver, RISCVCPU),
        VMSTATE_UINT32(env.misa_mxl, RISCVCPU),
        VMSTATE_UINT32(env.misa_ext, RISCVCPU),
        VMSTATE_UINT32(env.misa_mxl_max, RISCVCPU),
        VMSTATE_UINT32(env.misa_ext_mask, RISCVCPU),
        VMSTATE_UINT32(env.features, RISCVCPU),
        VMSTATE_UINTTL(env.priv, RISCVCPU),
        VMSTATE_UINTTL(env.virt, RISCVCPU),
        VMSTATE_UINTTL(env.resetvec, RISCVCPU),
        VMSTATE_UINTTL(env.mhartid, RISCVCPU),
        VMSTATE_UINT64(env.mstatus, RISCVCPU),
        VMSTATE_UINTTL(env.mip, RISCVCPU),
        VMSTATE_UINT32(env.miclaim, RISCVCPU),
        VMSTATE_UINTTL(env.mie, RISCVCPU),
        VMSTATE_UINTTL(env.mideleg, RISCVCPU),
        VMSTATE_UINTTL(env.satp, RISCVCPU),
        VMSTATE_UINTTL(env.stval, RISCVCPU),
        VMSTATE_UINTTL(env.medeleg, RISCVCPU),
        VMSTATE_UINTTL(env.stvec, RISCVCPU),
        VMSTATE_UINTTL(env.sepc, RISCVCPU),
        VMSTATE_UINTTL(env.scause, RISCVCPU),
        VMSTATE_UINTTL(env.mtvec, RISCVCPU),
        VMSTATE_UINTTL(env.mepc, RISCVCPU),
        VMSTATE_UINTTL(env.mcause, RISCVCPU),
        VMSTATE_UINTTL(env.mtval, RISCVCPU),
        VMSTATE_UINTTL(env.scounteren, RISCVCPU),
        VMSTATE_UINTTL(env.mcounteren, RISCVCPU),
        VMSTATE_UINTTL(env.sscratch, RISCVCPU),
        VMSTATE_UINTTL(env.mscratch, RISCVCPU),
        VMSTATE_UINT64(env.mfromhost, RISCVCPU),
        VMSTATE_UINT64(env.mtohost, RISCVCPU),
        VMSTATE_UINT64(env.timecmp, RISCVCPU),

        VMSTATE_END_OF_LIST()
    },
    .subsections = (const VMStateDescription * []) {
        &vmstate_pmp,
        &vmstate_hyper,
        &vmstate_vector,
        &vmstate_pointermasking,
        &vmstate_rv128,
        NULL
    }
};