/*
 * RISC-V Emulation Helpers for QEMU.
 *
 * Copyright (c) 2016-2017 Sagar Karandikar, sagark@eecs.berkeley.edu
 * Copyright (c) 2017-2018 SiFive, Inc.
 * Copyright (c) 2022 VRULL GmbH
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms and conditions of the GNU General Public License,
 * version 2 or later, as published by the Free Software Foundation.
 *
 * This program is distributed in the hope it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
 * more details.
 *
 * You should have received a copy of the GNU General Public License along with
 * this program.  If not, see <http://www.gnu.org/licenses/>.
 */

#include "qemu/osdep.h"
#include "cpu.h"
#include "qemu/main-loop.h"
#include "exec/exec-all.h"
#include "exec/helper-proto.h"

/* Exceptions processing helpers */
G_NORETURN void riscv_raise_exception(CPURISCVState *env,
                                      uint32_t exception, uintptr_t pc)
{
    CPUState *cs = env_cpu(env);
    cs->exception_index = exception;
    cpu_loop_exit_restore(cs, pc);
}

void helper_raise_exception(CPURISCVState *env, uint32_t exception)
{
    riscv_raise_exception(env, exception, 0);
}

target_ulong helper_csrr(CPURISCVState *env, int csr)
{
    /*
     * The seed CSR must be accessed with a read-write instruction. A
     * read-only instruction such as CSRRS/CSRRC with rs1=x0 or CSRRSI/
     * CSRRCI with uimm=0 will raise an illegal instruction exception.
     */
    if (csr == CSR_SEED) {
        riscv_raise_exception(env, RISCV_EXCP_ILLEGAL_INST, GETPC());
    }

    target_ulong val = 0;
    RISCVException ret = riscv_csrrw(env, csr, &val, 0, 0);

    if (ret != RISCV_EXCP_NONE) {
        riscv_raise_exception(env, ret, GETPC());
    }
    return val;
}

void helper_csrw(CPURISCVState *env, int csr, target_ulong src)
{
    target_ulong mask = env->xl == MXL_RV32 ? UINT32_MAX : (target_ulong)-1;
    RISCVException ret = riscv_csrrw(env, csr, NULL, src, mask);

    if (ret != RISCV_EXCP_NONE) {
        riscv_raise_exception(env, ret, GETPC());
    }
}

target_ulong helper_csrrw(CPURISCVState *env, int csr,
                          target_ulong src, target_ulong write_mask)
{
    target_ulong val = 0;
    RISCVException ret = riscv_csrrw(env, csr, &val, src, write_mask);

    if (ret != RISCV_EXCP_NONE) {
        riscv_raise_exception(env, ret, GETPC());
    }
    return val;
}

target_ulong helper_csrr_i128(CPURISCVState *env, int csr)
{
    Int128 rv = int128_zero();
    RISCVException ret = riscv_csrrw_i128(env, csr, &rv,
                                          int128_zero(),
                                          int128_zero());

    if (ret != RISCV_EXCP_NONE) {
        riscv_raise_exception(env, ret, GETPC());
    }

    env->retxh = int128_gethi(rv);
    return int128_getlo(rv);
}

void helper_csrw_i128(CPURISCVState *env, int csr,
                      target_ulong srcl, target_ulong srch)
{
    RISCVException ret = riscv_csrrw_i128(env, csr, NULL,
                                          int128_make128(srcl, srch),
                                          UINT128_MAX);

    if (ret != RISCV_EXCP_NONE) {
        riscv_raise_exception(env, ret, GETPC());
    }
}

target_ulong helper_csrrw_i128(CPURISCVState *env, int csr,
                               target_ulong srcl, target_ulong srch,
                               target_ulong maskl, target_ulong maskh)
{
    Int128 rv = int128_zero();
    RISCVException ret = riscv_csrrw_i128(env, csr, &rv,
                                          int128_make128(srcl, srch),
                                          int128_make128(maskl, maskh));

    if (ret != RISCV_EXCP_NONE) {
        riscv_raise_exception(env, ret, GETPC());
    }

    env->retxh = int128_gethi(rv);
    return int128_getlo(rv);
}
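/*
 * Illustrative sketch, not part of the original file: the 128-bit CSR
 * helpers above return the low half of the result directly and leave
 * the high half in env->retxh. A hypothetical consumer would reassemble
 * the full value like this (csr_i128_result is an editorial example,
 * not a QEMU API):
 */
static inline Int128 csr_i128_result(CPURISCVState *env, target_ulong lo)
{
    /* Combine the returned low half with the high half in env->retxh. */
    return int128_make128(lo, env->retxh);
}
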
/*
 * check_zicbo_envcfg
 *
 * Raise virtual exceptions and illegal instruction exceptions for
 * Zicbo[mz] instructions based on the settings of [mhs]envcfg as
 * specified in section 2.5.1 of the CMO specification.
 */
static void check_zicbo_envcfg(CPURISCVState *env, target_ulong envbits,
                               uintptr_t ra)
{
#ifndef CONFIG_USER_ONLY
    if ((env->priv < PRV_M) && !get_field(env->menvcfg, envbits)) {
        riscv_raise_exception(env, RISCV_EXCP_ILLEGAL_INST, ra);
    }

    if (riscv_cpu_virt_enabled(env) &&
        (((env->priv < PRV_H) && !get_field(env->henvcfg, envbits)) ||
         ((env->priv < PRV_S) && !get_field(env->senvcfg, envbits)))) {
        riscv_raise_exception(env, RISCV_EXCP_VIRT_INSTRUCTION_FAULT, ra);
    }

    if ((env->priv < PRV_S) && !get_field(env->senvcfg, envbits)) {
        riscv_raise_exception(env, RISCV_EXCP_ILLEGAL_INST, ra);
    }
#endif
}
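/*
 * Illustrative sketch, an assumption rather than original code: the CBO
 * helpers below align the target address down to its cache block by
 * clearing the low bits, which works because the cboz/cbom block sizes
 * are powers of two. For a 64-byte block: 0x12345 & ~(64 - 1) == 0x12340.
 * A stand-alone version of that computation:
 */
static inline target_ulong cbo_align_down(target_ulong addr, uint16_t blksz)
{
    /* blksz is assumed to be a power of two, as the CBO block sizes are. */
    return addr & ~(target_ulong)(blksz - 1);
}
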
void helper_cbo_zero(CPURISCVState *env, target_ulong address)
{
    RISCVCPU *cpu = env_archcpu(env);
    uint16_t cbozlen = cpu->cfg.cboz_blocksize;
    int mmu_idx = cpu_mmu_index(env, false);
    uintptr_t ra = GETPC();
    void *mem;

    check_zicbo_envcfg(env, MENVCFG_CBZE, ra);

    /* Mask off low-bits to align-down to the cache-block. */
    address &= ~(cbozlen - 1);

    /*
     * cbo.zero requires MMU_DATA_STORE access. Do a probe_write()
     * to raise any exceptions, including PMP.
     */
    mem = probe_write(env, address, cbozlen, mmu_idx, ra);

    if (likely(mem)) {
        memset(mem, 0, cbozlen);
    } else {
        /*
         * This means that we're dealing with an I/O page. Section 4.2
         * of cmobase v1.0.1 says:
         *
         * "Cache-block zero instructions store zeros independently
         * of whether data from the underlying memory locations are
         * cacheable."
         *
         * Write zeros to [address, address + cbozlen) even though
         * this isn't a RAM page.
         */
        for (int i = 0; i < cbozlen; i++) {
            cpu_stb_mmuidx_ra(env, address + i, 0, mmu_idx, ra);
        }
    }
}

/*
 * check_zicbom_access
 *
 * Check access permissions (LOAD, STORE or FETCH as specified in
 * section 2.5.2 of the CMO specification) for Zicbom, raising
 * either store page-fault (non-virtualized) or store guest-page
 * fault (virtualized).
 */
static void check_zicbom_access(CPURISCVState *env,
                                target_ulong address,
                                uintptr_t ra)
{
    RISCVCPU *cpu = env_archcpu(env);
    int mmu_idx = cpu_mmu_index(env, false);
    uint16_t cbomlen = cpu->cfg.cbom_blocksize;
    void *phost;
    int ret;

    /* Mask off low-bits to align-down to the cache-block. */
    address &= ~(cbomlen - 1);

    /*
     * Section 2.5.2 of cmobase v1.0.1:
     *
     * "A cache-block management instruction is permitted to
     * access the specified cache block whenever a load instruction
     * or store instruction is permitted to access the corresponding
     * physical addresses. If neither a load instruction nor store
     * instruction is permitted to access the physical addresses,
     * but an instruction fetch is permitted to access the physical
     * addresses, whether a cache-block management instruction is
     * permitted to access the cache block is UNSPECIFIED."
     */
    ret = probe_access_flags(env, address, cbomlen, MMU_DATA_LOAD,
                             mmu_idx, true, &phost, ra);
    if (ret != TLB_INVALID_MASK) {
        /* Success: readable */
        return;
    }

    /*
     * Since not readable, must be writable. On failure, a store
     * page fault (non-virtualized) or store guest-page fault
     * (virtualized) will be raised by riscv_cpu_tlb_fill(). PMP
     * exceptions will be caught there as well.
     */
    probe_write(env, address, cbomlen, mmu_idx, ra);
}

void helper_cbo_clean_flush(CPURISCVState *env, target_ulong address)
{
    uintptr_t ra = GETPC();
    check_zicbo_envcfg(env, MENVCFG_CBCFE, ra);
    check_zicbom_access(env, address, ra);

    /* We don't emulate the cache hierarchy, so we're done. */
}

void helper_cbo_inval(CPURISCVState *env, target_ulong address)
{
    uintptr_t ra = GETPC();
    check_zicbo_envcfg(env, MENVCFG_CBIE, ra);
    check_zicbom_access(env, address, ra);

    /* We don't emulate the cache hierarchy, so we're done. */
}
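/*
 * Editorial summary, derived from the helpers above: each CMO
 * instruction class is gated by one field in the [mhs]envcfg registers,
 * checked by check_zicbo_envcfg() before the access itself is probed:
 *
 *   cbo.zero             -> MENVCFG_CBZE  (helper_cbo_zero)
 *   cbo.clean, cbo.flush -> MENVCFG_CBCFE (helper_cbo_clean_flush)
 *   cbo.inval            -> MENVCFG_CBIE  (helper_cbo_inval)
 */
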
#ifndef CONFIG_USER_ONLY

target_ulong helper_sret(CPURISCVState *env)
{
    uint64_t mstatus;
    target_ulong prev_priv, prev_virt;

    if (!(env->priv >= PRV_S)) {
        riscv_raise_exception(env, RISCV_EXCP_ILLEGAL_INST, GETPC());
    }

    target_ulong retpc = env->sepc;
    if (!riscv_has_ext(env, RVC) && (retpc & 0x3)) {
        riscv_raise_exception(env, RISCV_EXCP_INST_ADDR_MIS, GETPC());
    }

    if (get_field(env->mstatus, MSTATUS_TSR) && !(env->priv >= PRV_M)) {
        riscv_raise_exception(env, RISCV_EXCP_ILLEGAL_INST, GETPC());
    }

    if (riscv_has_ext(env, RVH) && riscv_cpu_virt_enabled(env) &&
        get_field(env->hstatus, HSTATUS_VTSR)) {
        riscv_raise_exception(env, RISCV_EXCP_VIRT_INSTRUCTION_FAULT, GETPC());
    }

    mstatus = env->mstatus;
    prev_priv = get_field(mstatus, MSTATUS_SPP);
    mstatus = set_field(mstatus, MSTATUS_SIE,
                        get_field(mstatus, MSTATUS_SPIE));
    mstatus = set_field(mstatus, MSTATUS_SPIE, 1);
    mstatus = set_field(mstatus, MSTATUS_SPP, PRV_U);
    if (env->priv_ver >= PRIV_VERSION_1_12_0) {
        mstatus = set_field(mstatus, MSTATUS_MPRV, 0);
    }
    env->mstatus = mstatus;

    if (riscv_has_ext(env, RVH) && !riscv_cpu_virt_enabled(env)) {
        /* We support Hypervisor extensions and virtualisation is disabled */
        target_ulong hstatus = env->hstatus;

        prev_virt = get_field(hstatus, HSTATUS_SPV);

        hstatus = set_field(hstatus, HSTATUS_SPV, 0);

        env->hstatus = hstatus;

        if (prev_virt) {
            riscv_cpu_swap_hypervisor_regs(env);
        }

        riscv_cpu_set_virt_enabled(env, prev_virt);
    }

    riscv_cpu_set_mode(env, prev_priv);

    return retpc;
}

target_ulong helper_mret(CPURISCVState *env)
{
    if (!(env->priv >= PRV_M)) {
        riscv_raise_exception(env, RISCV_EXCP_ILLEGAL_INST, GETPC());
    }

    target_ulong retpc = env->mepc;
    if (!riscv_has_ext(env, RVC) && (retpc & 0x3)) {
        riscv_raise_exception(env, RISCV_EXCP_INST_ADDR_MIS, GETPC());
    }

    uint64_t mstatus = env->mstatus;
    target_ulong prev_priv = get_field(mstatus, MSTATUS_MPP);

    if (riscv_cpu_cfg(env)->pmp &&
        !pmp_get_num_rules(env) && (prev_priv != PRV_M)) {
        riscv_raise_exception(env, RISCV_EXCP_INST_ACCESS_FAULT, GETPC());
    }

    target_ulong prev_virt = get_field(env->mstatus, MSTATUS_MPV);
    mstatus = set_field(mstatus, MSTATUS_MIE,
                        get_field(mstatus, MSTATUS_MPIE));
    mstatus = set_field(mstatus, MSTATUS_MPIE, 1);
    mstatus = set_field(mstatus, MSTATUS_MPP, PRV_U);
    mstatus = set_field(mstatus, MSTATUS_MPV, 0);
    if ((env->priv_ver >= PRIV_VERSION_1_12_0) && (prev_priv != PRV_M)) {
        mstatus = set_field(mstatus, MSTATUS_MPRV, 0);
    }
    env->mstatus = mstatus;
    riscv_cpu_set_mode(env, prev_priv);

    if (riscv_has_ext(env, RVH)) {
        if (prev_virt) {
            riscv_cpu_swap_hypervisor_regs(env);
        }

        riscv_cpu_set_virt_enabled(env, prev_virt);
    }

    return retpc;
}
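/*
 * Editorial note, summarising the conditions coded in helper_wfi()
 * below: WFI raises an illegal instruction exception when mstatus.TW is
 * set and the hart runs in S/VS-mode (or in U-mode on a machine without
 * S-mode), and also when non-virtualized U-mode executes it on a machine
 * that does implement S-mode. It raises a virtual instruction fault from
 * VU-mode, or from VS-mode with hstatus.VTW set. Otherwise the hart
 * halts until the next interrupt.
 */
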
void helper_wfi(CPURISCVState *env)
{
    CPUState *cs = env_cpu(env);
    bool rvs = riscv_has_ext(env, RVS);
    bool prv_u = env->priv == PRV_U;
    bool prv_s = env->priv == PRV_S;

    if (((prv_s || (!rvs && prv_u)) && get_field(env->mstatus, MSTATUS_TW)) ||
        (rvs && prv_u && !riscv_cpu_virt_enabled(env))) {
        riscv_raise_exception(env, RISCV_EXCP_ILLEGAL_INST, GETPC());
    } else if (riscv_cpu_virt_enabled(env) &&
               (prv_u || (prv_s && get_field(env->hstatus, HSTATUS_VTW)))) {
        riscv_raise_exception(env, RISCV_EXCP_VIRT_INSTRUCTION_FAULT, GETPC());
    } else {
        cs->halted = 1;
        cs->exception_index = EXCP_HLT;
        cpu_loop_exit(cs);
    }
}

void helper_tlb_flush(CPURISCVState *env)
{
    CPUState *cs = env_cpu(env);
    if (!(env->priv >= PRV_S) ||
        (env->priv == PRV_S &&
         get_field(env->mstatus, MSTATUS_TVM))) {
        riscv_raise_exception(env, RISCV_EXCP_ILLEGAL_INST, GETPC());
    } else if (riscv_has_ext(env, RVH) && riscv_cpu_virt_enabled(env) &&
               get_field(env->hstatus, HSTATUS_VTVM)) {
        riscv_raise_exception(env, RISCV_EXCP_VIRT_INSTRUCTION_FAULT, GETPC());
    } else {
        tlb_flush(cs);
    }
}

void helper_tlb_flush_all(CPURISCVState *env)
{
    CPUState *cs = env_cpu(env);
    tlb_flush_all_cpus_synced(cs);
}

void helper_hyp_tlb_flush(CPURISCVState *env)
{
    CPUState *cs = env_cpu(env);

    if (env->priv == PRV_S && riscv_cpu_virt_enabled(env)) {
        riscv_raise_exception(env, RISCV_EXCP_VIRT_INSTRUCTION_FAULT, GETPC());
    }

    if (env->priv == PRV_M ||
        (env->priv == PRV_S && !riscv_cpu_virt_enabled(env))) {
        tlb_flush(cs);
        return;
    }

    riscv_raise_exception(env, RISCV_EXCP_ILLEGAL_INST, GETPC());
}

void helper_hyp_gvma_tlb_flush(CPURISCVState *env)
{
    if (env->priv == PRV_S && !riscv_cpu_virt_enabled(env) &&
        get_field(env->mstatus, MSTATUS_TVM)) {
        riscv_raise_exception(env, RISCV_EXCP_ILLEGAL_INST, GETPC());
    }

    helper_hyp_tlb_flush(env);
}

target_ulong helper_hyp_hlvx_hu(CPURISCVState *env, target_ulong address)
{
    int mmu_idx = cpu_mmu_index(env, true) | TB_FLAGS_PRIV_HYP_ACCESS_MASK;

    return cpu_lduw_mmuidx_ra(env, address, mmu_idx, GETPC());
}

target_ulong helper_hyp_hlvx_wu(CPURISCVState *env, target_ulong address)
{
    int mmu_idx = cpu_mmu_index(env, true) | TB_FLAGS_PRIV_HYP_ACCESS_MASK;

    return cpu_ldl_mmuidx_ra(env, address, mmu_idx, GETPC());
}

#endif /* !CONFIG_USER_ONLY */
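
#ifndef CONFIG_USER_ONLY
/*
 * Illustrative sketch, not part of the original file: HLVX.HU lets a
 * hypervisor read guest instructions from pages that are execute-only,
 * one halfword at a time. Assuming a 2-byte-aligned address, a 32-bit
 * instruction could be assembled from two such loads following the same
 * mmu_idx pattern as the helpers above (hyp_fetch_insn32_sketch is a
 * hypothetical name, not a QEMU API):
 */
static inline uint32_t hyp_fetch_insn32_sketch(CPURISCVState *env,
                                               target_ulong address,
                                               uintptr_t ra)
{
    int mmu_idx = cpu_mmu_index(env, true) | TB_FLAGS_PRIV_HYP_ACCESS_MASK;
    uint32_t lo = cpu_lduw_mmuidx_ra(env, address, mmu_idx, ra);
    uint32_t hi = cpu_lduw_mmuidx_ra(env, address + 2, mmu_idx, ra);

    /* RISC-V instruction parcels are little-endian within the word. */
    return lo | (hi << 16);
}
#endif /* !CONFIG_USER_ONLY */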