/*
 * RISC-V Emulation Helpers for QEMU.
 *
 * Copyright (c) 2016-2017 Sagar Karandikar, sagark@eecs.berkeley.edu
 * Copyright (c) 2017-2018 SiFive, Inc.
 * Copyright (c) 2022 VRULL GmbH
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms and conditions of the GNU General Public License,
 * version 2 or later, as published by the Free Software Foundation.
 *
 * This program is distributed in the hope it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
 * more details.
 *
 * You should have received a copy of the GNU General Public License along with
 * this program.  If not, see <http://www.gnu.org/licenses/>.
 */

#include "qemu/osdep.h"
#include "cpu.h"
#include "qemu/main-loop.h"
#include "exec/exec-all.h"
#include "exec/helper-proto.h"

/* Exceptions processing helpers */

/*
 * Record @exception in the CPU state and exit the current TB via
 * cpu_loop_exit_restore(). @pc is the host return address used to
 * restore guest state (0 when no unwinding is required). Never returns.
 */
G_NORETURN void riscv_raise_exception(CPURISCVState *env,
                                      uint32_t exception, uintptr_t pc)
{
    CPUState *cs = env_cpu(env);
    cs->exception_index = exception;
    cpu_loop_exit_restore(cs, pc);
}

/* TCG helper: raise @exception with no host-PC unwinding (pc = 0). */
void helper_raise_exception(CPURISCVState *env, uint32_t exception)
{
    riscv_raise_exception(env, exception, 0);
}

/* Read-only CSR access: read @csr without performing any write. */
target_ulong helper_csrr(CPURISCVState *env, int csr)
{
    /*
     * The seed CSR must be accessed with a read-write instruction. A
     * read-only instruction such as CSRRS/CSRRC with rs1=x0 or CSRRSI/
     * CSRRCI with uimm=0 will raise an illegal instruction exception.
     */
    if (csr == CSR_SEED) {
        riscv_raise_exception(env, RISCV_EXCP_ILLEGAL_INST, GETPC());
    }

    target_ulong val = 0;
    /* write_mask == 0 requests a pure read from riscv_csrrw(). */
    RISCVException ret = riscv_csrrw(env, csr, &val, 0, 0);

    if (ret != RISCV_EXCP_NONE) {
        riscv_raise_exception(env, ret, GETPC());
    }
    return val;
}

/* Write-only CSR access: write @src to @csr, old value is discarded. */
void helper_csrw(CPURISCVState *env, int csr, target_ulong src)
{
    /* In RV32 mode only the low 32 bits of the CSR are written. */
    target_ulong mask = env->xl == MXL_RV32 ? UINT32_MAX : (target_ulong)-1;
    RISCVException ret = riscv_csrrw(env, csr, NULL, src, mask);

    if (ret != RISCV_EXCP_NONE) {
        riscv_raise_exception(env, ret, GETPC());
    }
}

/*
 * Read-modify-write CSR access: return the old value of @csr and
 * update the bits selected by @write_mask with those of @src.
 */
target_ulong helper_csrrw(CPURISCVState *env, int csr,
                          target_ulong src, target_ulong write_mask)
{
    target_ulong val = 0;
    RISCVException ret = riscv_csrrw(env, csr, &val, src, write_mask);

    if (ret != RISCV_EXCP_NONE) {
        riscv_raise_exception(env, ret, GETPC());
    }
    return val;
}

/*
 * 128-bit CSR read. The low 64 bits are returned; the high 64 bits
 * are passed back to the translator through env->retxh.
 */
target_ulong helper_csrr_i128(CPURISCVState *env, int csr)
{
    Int128 rv = int128_zero();
    /* Zero source and zero mask: a pure read, as in helper_csrr(). */
    RISCVException ret = riscv_csrrw_i128(env, csr, &rv,
                                          int128_zero(),
                                          int128_zero());

    if (ret != RISCV_EXCP_NONE) {
        riscv_raise_exception(env, ret, GETPC());
    }

    env->retxh = int128_gethi(rv);
    return int128_getlo(rv);
}

/* 128-bit CSR write: write {srch:srcl} to @csr, old value discarded. */
void helper_csrw_i128(CPURISCVState *env, int csr,
                      target_ulong srcl, target_ulong srch)
{
    RISCVException ret = riscv_csrrw_i128(env, csr, NULL,
                                          int128_make128(srcl, srch),
                                          UINT128_MAX);

    if (ret != RISCV_EXCP_NONE) {
        riscv_raise_exception(env, ret, GETPC());
    }
}

/*
 * 128-bit CSR read-modify-write: returns the old low 64 bits, the old
 * high 64 bits go through env->retxh. Only bits set in {maskh:maskl}
 * are updated from {srch:srcl}.
 */
target_ulong helper_csrrw_i128(CPURISCVState *env, int csr,
                               target_ulong srcl, target_ulong srch,
                               target_ulong maskl, target_ulong maskh)
{
    Int128 rv = int128_zero();
    RISCVException ret = riscv_csrrw_i128(env, csr, &rv,
                                          int128_make128(srcl, srch),
                                          int128_make128(maskl, maskh));

    if (ret != RISCV_EXCP_NONE) {
        riscv_raise_exception(env, ret, GETPC());
    }

    env->retxh = int128_gethi(rv);
    return int128_getlo(rv);
}


/*
 * check_zicbo_envcfg
 *
 * Raise virtual exceptions and illegal instruction exceptions for
 * Zicbo[mz] instructions based on the settings of [mhs]envcfg as
 * specified in section 2.5.1 of the CMO specification.
 *
 * @envbits is the envcfg enable bit for the instruction being checked
 * (e.g. MENVCFG_CBZE for cbo.zero); the same field offset is applied
 * to menvcfg, henvcfg and senvcfg. The check order matters: the
 * menvcfg (M-mode) check takes priority, then the virtualization
 * checks, then senvcfg.
 */
static void check_zicbo_envcfg(CPURISCVState *env, target_ulong envbits,
                               uintptr_t ra)
{
#ifndef CONFIG_USER_ONLY
    /* Below M-mode, the instruction must be enabled in menvcfg. */
    if ((env->priv < PRV_M) && !get_field(env->menvcfg, envbits)) {
        riscv_raise_exception(env, RISCV_EXCP_ILLEGAL_INST, ra);
    }

    /*
     * When running virtualized, a disable in henvcfg (or senvcfg for
     * VU-mode) yields a virtual instruction fault rather than an
     * illegal instruction exception.
     */
    if (riscv_cpu_virt_enabled(env) &&
        (((env->priv < PRV_H) && !get_field(env->henvcfg, envbits)) ||
         ((env->priv < PRV_S) && !get_field(env->senvcfg, envbits)))) {
        riscv_raise_exception(env, RISCV_EXCP_VIRT_INSTRUCTION_FAULT, ra);
    }

    /* Non-virtualized U-mode: senvcfg disable is an illegal instruction. */
    if ((env->priv < PRV_S) && !get_field(env->senvcfg, envbits)) {
        riscv_raise_exception(env, RISCV_EXCP_ILLEGAL_INST, ra);
    }
#endif
}

/*
 * cbo.zero (Zicboz): zero the cache block containing @address.
 * The address is aligned down to the configured block size first.
 */
void helper_cbo_zero(CPURISCVState *env, target_ulong address)
{
    RISCVCPU *cpu = env_archcpu(env);
    uint16_t cbozlen = cpu->cfg.cboz_blocksize;
    int mmu_idx = cpu_mmu_index(env, false);
    uintptr_t ra = GETPC();
    void *mem;

    check_zicbo_envcfg(env, MENVCFG_CBZE, ra);

    /* Mask off low-bits to align-down to the cache-block. */
    address &= ~(cbozlen - 1);

    /*
     * cbo.zero requires MMU_DATA_STORE access. Do a probe_write()
     * to raise any exceptions, including PMP.
     */
    mem = probe_write(env, address, cbozlen, mmu_idx, ra);

    if (likely(mem)) {
        /* RAM-backed page: host pointer available, zero it directly. */
        memset(mem, 0, cbozlen);
    } else {
        /*
         * This means that we're dealing with an I/O page. Section 4.2
         * of cmobase v1.0.1 says:
         *
         * "Cache-block zero instructions store zeros independently
         * of whether data from the underlying memory locations are
         * cacheable."
         *
         * Write zeros in address + cbozlen regardless of not being
         * a RAM page.
         */
        for (int i = 0; i < cbozlen; i++) {
            cpu_stb_mmuidx_ra(env, address + i, 0, mmu_idx, ra);
        }
    }
}

/*
 * check_zicbom_access
 *
 * Check access permissions (LOAD, STORE or FETCH as specified in
 * section 2.5.2 of the CMO specification) for Zicbom, raising
 * either store page-fault (non-virtualized) or store guest-page
 * fault (virtualized).
 */
static void check_zicbom_access(CPURISCVState *env,
                                target_ulong address,
                                uintptr_t ra)
{
    RISCVCPU *cpu = env_archcpu(env);
    int mmu_idx = cpu_mmu_index(env, false);
    uint16_t cbomlen = cpu->cfg.cbom_blocksize;
    void *phost;
    int ret;

    /* Mask off low-bits to align-down to the cache-block. */
    address &= ~(cbomlen - 1);

    /*
     * Section 2.5.2 of cmobase v1.0.1:
     *
     * "A cache-block management instruction is permitted to
     * access the specified cache block whenever a load instruction
     * or store instruction is permitted to access the corresponding
     * physical addresses. If neither a load instruction nor store
     * instruction is permitted to access the physical addresses,
     * but an instruction fetch is permitted to access the physical
     * addresses, whether a cache-block management instruction is
     * permitted to access the cache block is UNSPECIFIED."
     */
    /* nonfault probe: a read failure falls through to the write probe. */
    ret = probe_access_flags(env, address, cbomlen, MMU_DATA_LOAD,
                             mmu_idx, true, &phost, ra);
    if (ret != TLB_INVALID_MASK) {
        /* Success: readable */
        return;
    }

    /*
     * Since not readable, must be writable. On failure, store
     * fault/store guest amo fault will be raised by
     * riscv_cpu_tlb_fill(). PMP exceptions will be caught
     * there as well.
     */
    probe_write(env, address, cbomlen, mmu_idx, ra);
}

/* cbo.clean / cbo.flush (Zicbom): permission checks only, no cache model. */
void helper_cbo_clean_flush(CPURISCVState *env, target_ulong address)
{
    uintptr_t ra = GETPC();
    check_zicbo_envcfg(env, MENVCFG_CBCFE, ra);
    check_zicbom_access(env, address, ra);

    /* We don't emulate the cache-hierarchy, so we're done. */
}

/* cbo.inval (Zicbom): permission checks only, no cache model. */
void helper_cbo_inval(CPURISCVState *env, target_ulong address)
{
    uintptr_t ra = GETPC();
    check_zicbo_envcfg(env, MENVCFG_CBIE, ra);
    check_zicbom_access(env, address, ra);

    /* We don't emulate the cache-hierarchy, so we're done. */
}

#ifndef CONFIG_USER_ONLY

/*
 * sret: return from an S-mode trap handler. Restores SIE from SPIE,
 * drops to the privilege saved in mstatus.SPP, handles the
 * hypervisor SPV virtualization switch, and returns the resume PC
 * (sepc) for the translator to jump to.
 */
target_ulong helper_sret(CPURISCVState *env)
{
    uint64_t mstatus;
    target_ulong prev_priv, prev_virt;

    /* sret requires at least S-mode privilege. */
    if (!(env->priv >= PRV_S)) {
        riscv_raise_exception(env, RISCV_EXCP_ILLEGAL_INST, GETPC());
    }

    /* Without RVC, the return address must be 4-byte aligned. */
    target_ulong retpc = env->sepc;
    if (!riscv_has_ext(env, RVC) && (retpc & 0x3)) {
        riscv_raise_exception(env, RISCV_EXCP_INST_ADDR_MIS, GETPC());
    }

    /* mstatus.TSR traps sret executed below M-mode. */
    if (get_field(env->mstatus, MSTATUS_TSR) && !(env->priv >= PRV_M)) {
        riscv_raise_exception(env, RISCV_EXCP_ILLEGAL_INST, GETPC());
    }

    /* hstatus.VTSR traps sret executed while virtualized. */
    if (riscv_cpu_virt_enabled(env) && get_field(env->hstatus, HSTATUS_VTSR)) {
        riscv_raise_exception(env, RISCV_EXCP_VIRT_INSTRUCTION_FAULT, GETPC());
    }

    mstatus = env->mstatus;
    prev_priv = get_field(mstatus, MSTATUS_SPP);
    mstatus = set_field(mstatus, MSTATUS_SIE,
                        get_field(mstatus, MSTATUS_SPIE));
    mstatus = set_field(mstatus, MSTATUS_SPIE, 1);
    mstatus = set_field(mstatus, MSTATUS_SPP, PRV_U);
    /* Priv spec 1.12: xret to a mode less privileged than M clears MPRV. */
    if (env->priv_ver >= PRIV_VERSION_1_12_0) {
        mstatus = set_field(mstatus, MSTATUS_MPRV, 0);
    }
    env->mstatus = mstatus;

    if (riscv_has_ext(env, RVH) && !riscv_cpu_virt_enabled(env)) {
        /* We support Hypervisor extensions and virtualisation is disabled */
        target_ulong hstatus = env->hstatus;

        prev_virt = get_field(hstatus, HSTATUS_SPV);

        hstatus = set_field(hstatus, HSTATUS_SPV, 0);

        env->hstatus = hstatus;

        /* Re-entering a guest: swap in the guest's S-mode registers. */
        if (prev_virt) {
            riscv_cpu_swap_hypervisor_regs(env);
        }

        riscv_cpu_set_virt_enabled(env, prev_virt);
    }

    riscv_cpu_set_mode(env, prev_priv);

    return retpc;
}

/*
 * mret: return from an M-mode trap handler. Restores MIE from MPIE,
 * drops to the privilege saved in mstatus.MPP, handles the MPV
 * virtualization switch, and returns the resume PC (mepc).
 */
target_ulong helper_mret(CPURISCVState *env)
{
    /* mret is only legal in M-mode. */
    if (!(env->priv >= PRV_M)) {
        riscv_raise_exception(env, RISCV_EXCP_ILLEGAL_INST, GETPC());
    }

    /* Without RVC, the return address must be 4-byte aligned. */
    target_ulong retpc = env->mepc;
    if (!riscv_has_ext(env, RVC) && (retpc & 0x3)) {
        riscv_raise_exception(env, RISCV_EXCP_INST_ADDR_MIS, GETPC());
    }

    uint64_t mstatus = env->mstatus;
    target_ulong prev_priv = get_field(mstatus, MSTATUS_MPP);

    /*
     * With PMP enabled but no rules configured, no address is
     * executable below M-mode, so returning there must fault.
     */
    if (riscv_cpu_cfg(env)->pmp &&
        !pmp_get_num_rules(env) && (prev_priv != PRV_M)) {
        riscv_raise_exception(env, RISCV_EXCP_INST_ACCESS_FAULT, GETPC());
    }

    /*
     * NOTE(review): prev_virt is taken from MPV regardless of whether
     * prev_priv is M — confirm against the spec that MPV should be
     * treated as 0 when returning to M-mode.
     */
    target_ulong prev_virt = get_field(env->mstatus, MSTATUS_MPV);
    mstatus = set_field(mstatus, MSTATUS_MIE,
                        get_field(mstatus, MSTATUS_MPIE));
    mstatus = set_field(mstatus, MSTATUS_MPIE, 1);
    mstatus = set_field(mstatus, MSTATUS_MPP, PRV_U);
    mstatus = set_field(mstatus, MSTATUS_MPV, 0);
    /* Priv spec 1.12: xret to a mode less privileged than M clears MPRV. */
    if ((env->priv_ver >= PRIV_VERSION_1_12_0) && (prev_priv != PRV_M)) {
        mstatus = set_field(mstatus, MSTATUS_MPRV, 0);
    }
    env->mstatus = mstatus;
    riscv_cpu_set_mode(env, prev_priv);

    if (riscv_has_ext(env, RVH)) {
        /* Re-entering a guest: swap in the guest's S-mode registers. */
        if (prev_virt) {
            riscv_cpu_swap_hypervisor_regs(env);
        }

        riscv_cpu_set_virt_enabled(env, prev_virt);
    }

    return retpc;
}

/*
 * wfi: halt the CPU until an interrupt arrives, unless trapped by
 * mstatus.TW (illegal instruction) or hstatus.VTW / virtualized
 * U-mode (virtual instruction fault).
 */
void helper_wfi(CPURISCVState *env)
{
    CPUState *cs = env_cpu(env);
    bool rvs = riscv_has_ext(env, RVS);
    bool prv_u = env->priv == PRV_U;
    bool prv_s = env->priv == PRV_S;

    if (((prv_s || (!rvs && prv_u)) && get_field(env->mstatus, MSTATUS_TW)) ||
        (rvs && prv_u && !riscv_cpu_virt_enabled(env))) {
        riscv_raise_exception(env, RISCV_EXCP_ILLEGAL_INST, GETPC());
    } else if (riscv_cpu_virt_enabled(env) &&
               (prv_u ||
                (prv_s && get_field(env->hstatus, HSTATUS_VTW)))) {
        riscv_raise_exception(env, RISCV_EXCP_VIRT_INSTRUCTION_FAULT, GETPC());
    } else {
        /* Halt until cpu_interrupt() wakes us up again. */
        cs->halted = 1;
        cs->exception_index = EXCP_HLT;
        cpu_loop_exit(cs);
    }
}

/*
 * sfence.vma: flush the local TLB. Trapped by mstatus.TVM in S-mode
 * and by hstatus.VTVM while virtualized.
 */
void helper_tlb_flush(CPURISCVState *env)
{
    CPUState *cs = env_cpu(env);
    if (!(env->priv >= PRV_S) ||
        (env->priv == PRV_S &&
         get_field(env->mstatus, MSTATUS_TVM))) {
        riscv_raise_exception(env, RISCV_EXCP_ILLEGAL_INST, GETPC());
    } else if (riscv_has_ext(env, RVH) && riscv_cpu_virt_enabled(env) &&
               get_field(env->hstatus, HSTATUS_VTVM)) {
        riscv_raise_exception(env, RISCV_EXCP_VIRT_INSTRUCTION_FAULT, GETPC());
    } else {
        tlb_flush(cs);
    }
}

/* Flush the TLBs of all CPUs, synchronized across vCPUs. */
void helper_tlb_flush_all(CPURISCVState *env)
{
    CPUState *cs = env_cpu(env);
    tlb_flush_all_cpus_synced(cs);
}

/*
 * hfence.vvma: flush guest-virtual translations. Only legal in M-mode
 * or non-virtualized S-mode (HS-mode).
 */
void helper_hyp_tlb_flush(CPURISCVState *env)
{
    CPUState *cs = env_cpu(env);

    /* Executing a hypervisor fence while virtualized is a virtual trap. */
    if (env->priv == PRV_S && riscv_cpu_virt_enabled(env)) {
        riscv_raise_exception(env, RISCV_EXCP_VIRT_INSTRUCTION_FAULT, GETPC());
    }

    if (env->priv == PRV_M ||
        (env->priv == PRV_S && !riscv_cpu_virt_enabled(env))) {
        tlb_flush(cs);
        return;
    }

    riscv_raise_exception(env, RISCV_EXCP_ILLEGAL_INST, GETPC());
}

/*
 * hfence.gvma: flush guest-physical translations. Additionally trapped
 * by mstatus.TVM when executed in HS-mode; otherwise behaves like
 * helper_hyp_tlb_flush().
 */
void helper_hyp_gvma_tlb_flush(CPURISCVState *env)
{
    if (env->priv == PRV_S && !riscv_cpu_virt_enabled(env) &&
        get_field(env->mstatus, MSTATUS_TVM)) {
        riscv_raise_exception(env, RISCV_EXCP_ILLEGAL_INST, GETPC());
    }

    helper_hyp_tlb_flush(env);
}

/*
 * hlvx.hu: hypervisor load of an unsigned halfword from guest memory
 * using the hypervisor-access MMU index (execute-permission semantics
 * are selected via TB_FLAGS_PRIV_HYP_ACCESS_MASK).
 */
target_ulong helper_hyp_hlvx_hu(CPURISCVState *env, target_ulong address)
{
    int mmu_idx = cpu_mmu_index(env, true) | TB_FLAGS_PRIV_HYP_ACCESS_MASK;

    return cpu_lduw_mmuidx_ra(env, address, mmu_idx, GETPC());
}

/*
 * hlvx.wu: hypervisor load of an unsigned word from guest memory,
 * same MMU-index selection as helper_hyp_hlvx_hu().
 */
target_ulong helper_hyp_hlvx_wu(CPURISCVState *env, target_ulong address)
{
    int mmu_idx = cpu_mmu_index(env, true) | TB_FLAGS_PRIV_HYP_ACCESS_MASK;

    return cpu_ldl_mmuidx_ra(env, address, mmu_idx, GETPC());
}

#endif /* !CONFIG_USER_ONLY */