/*
 * RISC-V Emulation Helpers for QEMU.
 *
 * Copyright (c) 2016-2017 Sagar Karandikar, sagark@eecs.berkeley.edu
 * Copyright (c) 2017-2018 SiFive, Inc.
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms and conditions of the GNU General Public License,
 * version 2 or later, as published by the Free Software Foundation.
 *
 * This program is distributed in the hope it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
 * more details.
 *
 * You should have received a copy of the GNU General Public License along with
 * this program. If not, see <http://www.gnu.org/licenses/>.
 */

#include "qemu/osdep.h"
#include "qemu/log.h"
#include "cpu.h"
#include "qemu/main-loop.h"
#include "exec/exec-all.h"
#include "exec/helper-proto.h"

/* Exceptions processing helpers */
void QEMU_NORETURN riscv_raise_exception(CPURISCVState *env,
                                         uint32_t exception, uintptr_t pc)
{
    CPUState *cs = env_cpu(env);
    cs->exception_index = exception;
    cpu_loop_exit_restore(cs, pc);
}

void helper_raise_exception(CPURISCVState *env, uint32_t exception)
{
    riscv_raise_exception(env, exception, 0);
}

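/*
 * CSR access helpers.
 *
 * riscv_csrrw() takes the new value and a write mask as its last two
 * arguments: csrrw writes src with an all-ones mask, csrrs sets the bits
 * in src, and csrrc clears them.  When rs1 is x0 (rs1_pass == 0) the mask
 * is forced to zero so the access is a pure read with no write side
 * effects.  A negative return value is the negated RISC-V exception cause
 * and is re-raised via riscv_raise_exception().
 */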
target_ulong helper_csrrw(CPURISCVState *env, target_ulong src,
        target_ulong csr)
{
    target_ulong val = 0;
    int ret = riscv_csrrw(env, csr, &val, src, -1);

    if (ret < 0) {
        riscv_raise_exception(env, -ret, GETPC());
    }
    return val;
}

target_ulong helper_csrrs(CPURISCVState *env, target_ulong src,
        target_ulong csr, target_ulong rs1_pass)
{
    target_ulong val = 0;
    int ret = riscv_csrrw(env, csr, &val, -1, rs1_pass ? src : 0);

    if (ret < 0) {
        riscv_raise_exception(env, -ret, GETPC());
    }
    return val;
}

target_ulong helper_csrrc(CPURISCVState *env, target_ulong src,
        target_ulong csr, target_ulong rs1_pass)
{
    target_ulong val = 0;
    int ret = riscv_csrrw(env, csr, &val, 0, rs1_pass ? src : 0);

    if (ret < 0) {
        riscv_raise_exception(env, -ret, GETPC());
    }
    return val;
}

#ifndef CONFIG_USER_ONLY

target_ulong helper_sret(CPURISCVState *env, target_ulong cpu_pc_deb)
{
    uint64_t mstatus;
    target_ulong prev_priv, prev_virt;

    if (!(env->priv >= PRV_S)) {
        riscv_raise_exception(env, RISCV_EXCP_ILLEGAL_INST, GETPC());
    }

    target_ulong retpc = env->sepc;
    if (!riscv_has_ext(env, RVC) && (retpc & 0x3)) {
        riscv_raise_exception(env, RISCV_EXCP_INST_ADDR_MIS, GETPC());
    }

    if (get_field(env->mstatus, MSTATUS_TSR) && !(env->priv >= PRV_M)) {
        riscv_raise_exception(env, RISCV_EXCP_ILLEGAL_INST, GETPC());
    }

    if (riscv_has_ext(env, RVH) && riscv_cpu_virt_enabled(env) &&
        get_field(env->hstatus, HSTATUS_VTSR)) {
        riscv_raise_exception(env, RISCV_EXCP_VIRT_INSTRUCTION_FAULT, GETPC());
    }

    mstatus = env->mstatus;

    if (riscv_has_ext(env, RVH) && !riscv_cpu_virt_enabled(env)) {
        /* We support the Hypervisor extension and virtualisation is disabled */
        target_ulong hstatus = env->hstatus;

        prev_priv = get_field(mstatus, MSTATUS_SPP);
        prev_virt = get_field(hstatus, HSTATUS_SPV);

        hstatus = set_field(hstatus, HSTATUS_SPV, 0);
        mstatus = set_field(mstatus, MSTATUS_SPP, 0);
        mstatus = set_field(mstatus, SSTATUS_SIE,
                            get_field(mstatus, SSTATUS_SPIE));
        mstatus = set_field(mstatus, SSTATUS_SPIE, 1);

        env->mstatus = mstatus;
        env->hstatus = hstatus;

        if (prev_virt) {
            riscv_cpu_swap_hypervisor_regs(env);
        }

        riscv_cpu_set_virt_enabled(env, prev_virt);
    } else {
        prev_priv = get_field(mstatus, MSTATUS_SPP);

        mstatus = set_field(mstatus, MSTATUS_SIE,
                            get_field(mstatus, MSTATUS_SPIE));
        mstatus = set_field(mstatus, MSTATUS_SPIE, 1);
        mstatus = set_field(mstatus, MSTATUS_SPP, PRV_U);
        env->mstatus = mstatus;
    }

    riscv_cpu_set_mode(env, prev_priv);

    return retpc;
}

target_ulong helper_mret(CPURISCVState *env, target_ulong cpu_pc_deb)
{
    if (!(env->priv >= PRV_M)) {
        riscv_raise_exception(env, RISCV_EXCP_ILLEGAL_INST, GETPC());
    }

    target_ulong retpc = env->mepc;
    if (!riscv_has_ext(env, RVC) && (retpc & 0x3)) {
        riscv_raise_exception(env, RISCV_EXCP_INST_ADDR_MIS, GETPC());
    }

    uint64_t mstatus = env->mstatus;
    target_ulong prev_priv = get_field(mstatus, MSTATUS_MPP);
    target_ulong prev_virt = get_field(env->mstatus, MSTATUS_MPV);
    mstatus = set_field(mstatus, MSTATUS_MIE,
                        get_field(mstatus, MSTATUS_MPIE));
    mstatus = set_field(mstatus, MSTATUS_MPIE, 1);
    mstatus = set_field(mstatus, MSTATUS_MPP, PRV_U);
    mstatus = set_field(mstatus, MSTATUS_MPV, 0);
    env->mstatus = mstatus;
    riscv_cpu_set_mode(env, prev_priv);

    if (riscv_has_ext(env, RVH)) {
        if (prev_virt) {
            riscv_cpu_swap_hypervisor_regs(env);
        }

        riscv_cpu_set_virt_enabled(env, prev_virt);
    }

    return retpc;
}

void helper_wfi(CPURISCVState *env)
{
    CPUState *cs = env_cpu(env);

    if ((env->priv == PRV_S &&
        get_field(env->mstatus, MSTATUS_TW)) ||
        riscv_cpu_virt_enabled(env)) {
        riscv_raise_exception(env, RISCV_EXCP_VIRT_INSTRUCTION_FAULT, GETPC());
    } else {
        cs->halted = 1;
        cs->exception_index = EXCP_HLT;
        cpu_loop_exit(cs);
    }
}

void helper_tlb_flush(CPURISCVState *env)
{
    CPUState *cs = env_cpu(env);
    if (!(env->priv >= PRV_S) ||
        (env->priv == PRV_S &&
         get_field(env->mstatus, MSTATUS_TVM))) {
        riscv_raise_exception(env, RISCV_EXCP_ILLEGAL_INST, GETPC());
    } else if (riscv_has_ext(env, RVH) && riscv_cpu_virt_enabled(env) &&
               get_field(env->hstatus, HSTATUS_VTVM)) {
        riscv_raise_exception(env, RISCV_EXCP_VIRT_INSTRUCTION_FAULT, GETPC());
    } else {
        tlb_flush(cs);
    }
}

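/*
 * Hypervisor TLB flush helpers, used to implement the hypervisor
 * memory-management fences (HFENCE.VVMA / HFENCE.GVMA).  QEMU does not
 * track the two translation stages separately here, so once the
 * privilege checks pass both helpers conservatively flush the entire
 * TLB.
 */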
void helper_hyp_tlb_flush(CPURISCVState *env)
{
    CPUState *cs = env_cpu(env);

    if (env->priv == PRV_S && riscv_cpu_virt_enabled(env)) {
        riscv_raise_exception(env, RISCV_EXCP_VIRT_INSTRUCTION_FAULT, GETPC());
    }

    if (env->priv == PRV_M ||
        (env->priv == PRV_S && !riscv_cpu_virt_enabled(env))) {
        tlb_flush(cs);
        return;
    }

    riscv_raise_exception(env, RISCV_EXCP_ILLEGAL_INST, GETPC());
}

void helper_hyp_gvma_tlb_flush(CPURISCVState *env)
{
    if (env->priv == PRV_S && !riscv_cpu_virt_enabled(env) &&
        get_field(env->mstatus, MSTATUS_TVM)) {
        riscv_raise_exception(env, RISCV_EXCP_ILLEGAL_INST, GETPC());
    }

    helper_hyp_tlb_flush(env);
}

target_ulong helper_hyp_load(CPURISCVState *env, target_ulong address,
                             target_ulong attrs, target_ulong memop)
{
    if (env->priv == PRV_M ||
        (env->priv == PRV_S && !riscv_cpu_virt_enabled(env)) ||
        (env->priv == PRV_U && !riscv_cpu_virt_enabled(env) &&
            get_field(env->hstatus, HSTATUS_HU))) {
        target_ulong pte;
        int mmu_idx = cpu_mmu_index(env, false) | TB_FLAGS_PRIV_HYP_ACCESS_MASK;

        switch (memop) {
        case MO_SB:
            pte = cpu_ldsb_mmuidx_ra(env, address, mmu_idx, GETPC());
            break;
        case MO_UB:
            pte = cpu_ldub_mmuidx_ra(env, address, mmu_idx, GETPC());
            break;
        case MO_TESW:
            pte = cpu_ldsw_mmuidx_ra(env, address, mmu_idx, GETPC());
            break;
        case MO_TEUW:
            pte = cpu_lduw_mmuidx_ra(env, address, mmu_idx, GETPC());
            break;
        case MO_TESL:
            pte = cpu_ldl_mmuidx_ra(env, address, mmu_idx, GETPC());
            break;
        case MO_TEUL:
            pte = cpu_ldl_mmuidx_ra(env, address, mmu_idx, GETPC());
            break;
        case MO_TEQ:
            pte = cpu_ldq_mmuidx_ra(env, address, mmu_idx, GETPC());
            break;
        default:
            g_assert_not_reached();
        }

        return pte;
    }

    if (riscv_cpu_virt_enabled(env)) {
        riscv_raise_exception(env, RISCV_EXCP_VIRT_INSTRUCTION_FAULT, GETPC());
    } else {
        riscv_raise_exception(env, RISCV_EXCP_ILLEGAL_INST, GETPC());
    }
    return 0;
}

void helper_hyp_store(CPURISCVState *env, target_ulong address,
                      target_ulong val, target_ulong attrs, target_ulong memop)
{
    if (env->priv == PRV_M ||
        (env->priv == PRV_S && !riscv_cpu_virt_enabled(env)) ||
        (env->priv == PRV_U && !riscv_cpu_virt_enabled(env) &&
            get_field(env->hstatus, HSTATUS_HU))) {
        int mmu_idx = cpu_mmu_index(env, false) | TB_FLAGS_PRIV_HYP_ACCESS_MASK;

        switch (memop) {
        case MO_SB:
        case MO_UB:
            cpu_stb_mmuidx_ra(env, address, val, mmu_idx, GETPC());
            break;
        case MO_TESW:
        case MO_TEUW:
            cpu_stw_mmuidx_ra(env, address, val, mmu_idx, GETPC());
            break;
        case MO_TESL:
        case MO_TEUL:
            cpu_stl_mmuidx_ra(env, address, val, mmu_idx, GETPC());
            break;
        case MO_TEQ:
            cpu_stq_mmuidx_ra(env, address, val, mmu_idx, GETPC());
            break;
        default:
            g_assert_not_reached();
        }

        return;
    }

    if (riscv_cpu_virt_enabled(env)) {
        riscv_raise_exception(env, RISCV_EXCP_VIRT_INSTRUCTION_FAULT, GETPC());
    } else {
        riscv_raise_exception(env, RISCV_EXCP_ILLEGAL_INST, GETPC());
    }
}

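/*
 * Hypervisor virtual-machine load with execute permission (the HLVX.HU
 * and HLVX.WU forms).  Only unsigned 16-bit and 32-bit accesses exist,
 * so the switch below only needs to handle MO_TEUW and MO_TEUL.
 */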
target_ulong helper_hyp_x_load(CPURISCVState *env, target_ulong address,
                               target_ulong attrs, target_ulong memop)
{
    if (env->priv == PRV_M ||
        (env->priv == PRV_S && !riscv_cpu_virt_enabled(env)) ||
        (env->priv == PRV_U && !riscv_cpu_virt_enabled(env) &&
            get_field(env->hstatus, HSTATUS_HU))) {
        target_ulong pte;
        int mmu_idx = cpu_mmu_index(env, false) | TB_FLAGS_PRIV_HYP_ACCESS_MASK;

        switch (memop) {
        case MO_TEUW:
            pte = cpu_lduw_mmuidx_ra(env, address, mmu_idx, GETPC());
            break;
        case MO_TEUL:
            pte = cpu_ldl_mmuidx_ra(env, address, mmu_idx, GETPC());
            break;
        default:
            g_assert_not_reached();
        }

        return pte;
    }

    if (riscv_cpu_virt_enabled(env)) {
        riscv_raise_exception(env, RISCV_EXCP_VIRT_INSTRUCTION_FAULT, GETPC());
    } else {
        riscv_raise_exception(env, RISCV_EXCP_ILLEGAL_INST, GETPC());
    }
    return 0;
}

#endif /* !CONFIG_USER_ONLY */