/*
 * RISC-V Emulation Helpers for QEMU.
 *
 * Copyright (c) 2016-2017 Sagar Karandikar, sagark@eecs.berkeley.edu
 * Copyright (c) 2017-2018 SiFive, Inc.
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms and conditions of the GNU General Public License,
 * version 2 or later, as published by the Free Software Foundation.
 *
 * This program is distributed in the hope it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
 * more details.
 *
 * You should have received a copy of the GNU General Public License along with
 * this program. If not, see <http://www.gnu.org/licenses/>.
 */

#include "qemu/osdep.h"
#include "qemu/log.h"
#include "cpu.h"
#include "qemu/main-loop.h"
#include "exec/exec-all.h"
#include "exec/helper-proto.h"

#ifndef CONFIG_USER_ONLY

#if defined(TARGET_RISCV32)
static const char valid_vm_1_09[16] = {
    [VM_1_09_MBARE] = 1,
    [VM_1_09_SV32] = 1,
};
static const char valid_vm_1_10[16] = {
    [VM_1_10_MBARE] = 1,
    [VM_1_10_SV32] = 1
};
#elif defined(TARGET_RISCV64)
static const char valid_vm_1_09[16] = {
    [VM_1_09_MBARE] = 1,
    [VM_1_09_SV39] = 1,
    [VM_1_09_SV48] = 1,
};
static const char valid_vm_1_10[16] = {
    [VM_1_10_MBARE] = 1,
    [VM_1_10_SV39] = 1,
    [VM_1_10_SV48] = 1,
    [VM_1_10_SV57] = 1
};
#endif

static int validate_vm(CPURISCVState *env, target_ulong vm)
{
    return (env->priv_ver >= PRIV_VERSION_1_10_0) ?
        valid_vm_1_10[vm & 0xf] : valid_vm_1_09[vm & 0xf];
}

#endif

/* Exceptions processing helpers */
void QEMU_NORETURN do_raise_exception_err(CPURISCVState *env,
                                          uint32_t exception, uintptr_t pc)
{
    CPUState *cs = CPU(riscv_env_get_cpu(env));
    qemu_log_mask(CPU_LOG_INT, "%s: %d\n", __func__, exception);
    cs->exception_index = exception;
    cpu_loop_exit_restore(cs, pc);
}

void helper_raise_exception(CPURISCVState *env, uint32_t exception)
{
    do_raise_exception_err(env, exception, 0);
}

static void validate_mstatus_fs(CPURISCVState *env, uintptr_t ra)
{
#ifndef CONFIG_USER_ONLY
    if (!(env->mstatus & MSTATUS_FS)) {
        do_raise_exception_err(env, RISCV_EXCP_ILLEGAL_INST, ra);
    }
#endif
}

/*
 * Handle writes to CSRs and any resulting special behavior
 *
 * Adapted from Spike's processor_t::set_csr
 */
void csr_write_helper(CPURISCVState *env, target_ulong val_to_write,
                      target_ulong csrno)
{
#ifndef CONFIG_USER_ONLY
    uint64_t delegable_ints = MIP_SSIP | MIP_STIP | MIP_SEIP;
    uint64_t all_ints = delegable_ints | MIP_MSIP | MIP_MTIP;
#endif

    switch (csrno) {
    case CSR_FFLAGS:
        validate_mstatus_fs(env, GETPC());
        cpu_riscv_set_fflags(env, val_to_write & (FSR_AEXC >> FSR_AEXC_SHIFT));
        break;
    case CSR_FRM:
        validate_mstatus_fs(env, GETPC());
        env->frm = val_to_write & (FSR_RD >> FSR_RD_SHIFT);
        break;
    case CSR_FCSR:
        validate_mstatus_fs(env, GETPC());
        env->frm = (val_to_write & FSR_RD) >> FSR_RD_SHIFT;
        cpu_riscv_set_fflags(env, (val_to_write & FSR_AEXC) >> FSR_AEXC_SHIFT);
        break;
#ifndef CONFIG_USER_ONLY
    case CSR_MSTATUS: {
        target_ulong mstatus = env->mstatus;
        target_ulong mask = 0;
        target_ulong mpp = get_field(val_to_write, MSTATUS_MPP);

        /* flush tlb on mstatus fields that affect VM */
        if (env->priv_ver <= PRIV_VERSION_1_09_1) {
            if ((val_to_write ^ mstatus) & (MSTATUS_MXR | MSTATUS_MPP |
                    MSTATUS_MPRV | MSTATUS_SUM | MSTATUS_VM)) {
                helper_tlb_flush(env);
            }
            mask = MSTATUS_SIE | MSTATUS_SPIE | MSTATUS_MIE | MSTATUS_MPIE |
                MSTATUS_SPP | MSTATUS_FS | MSTATUS_MPRV | MSTATUS_SUM |
                MSTATUS_MPP | MSTATUS_MXR |
                (validate_vm(env, get_field(val_to_write, MSTATUS_VM)) ?
                    MSTATUS_VM : 0);
        }
        if (env->priv_ver >= PRIV_VERSION_1_10_0) {
            if ((val_to_write ^ mstatus) & (MSTATUS_MXR | MSTATUS_MPP |
                    MSTATUS_MPRV | MSTATUS_SUM)) {
                helper_tlb_flush(env);
            }
            mask = MSTATUS_SIE | MSTATUS_SPIE | MSTATUS_MIE | MSTATUS_MPIE |
                MSTATUS_SPP | MSTATUS_FS | MSTATUS_MPRV | MSTATUS_SUM |
                MSTATUS_MPP | MSTATUS_MXR;
        }

        /* silently discard mstatus.mpp writes for unsupported modes */
        if (mpp == PRV_H ||
            (!riscv_has_ext(env, RVS) && mpp == PRV_S) ||
            (!riscv_has_ext(env, RVU) && mpp == PRV_U)) {
            mask &= ~MSTATUS_MPP;
        }

        mstatus = (mstatus & ~mask) | (val_to_write & mask);

        /* Note: this is a workaround for an issue where mstatus.FS
           does not report dirty after floating point operations
           that modify floating point state. This workaround is
           technically compliant with the RISC-V Privileged
           specification as it is legal to return only off or dirty,
           at the expense of extra floating point save/restore. */

        /* FP is always dirty or off */
        if (mstatus & MSTATUS_FS) {
            mstatus |= MSTATUS_FS;
        }

        int dirty = ((mstatus & MSTATUS_FS) == MSTATUS_FS) |
                    ((mstatus & MSTATUS_XS) == MSTATUS_XS);
        mstatus = set_field(mstatus, MSTATUS_SD, dirty);
        env->mstatus = mstatus;
        break;
    }
    case CSR_MIP: {
        /*
         * Since the writable bits in MIP are not set asynchronously by the
         * CLINT, no additional locking is needed for read-modify-write
         * CSR operations
         */
        qemu_mutex_lock_iothread();
        RISCVCPU *cpu = riscv_env_get_cpu(env);
        riscv_cpu_update_mip(cpu, MIP_SSIP | MIP_STIP,
                             (val_to_write & (MIP_SSIP | MIP_STIP)));
        /*
         * csrs, csrc on mip.SEIP is not decomposable into separate read and
         * write steps, so a different implementation is needed
         */
        qemu_mutex_unlock_iothread();
        break;
    }
    case CSR_MIE: {
        env->mie = (env->mie & ~all_ints) |
            (val_to_write & all_ints);
        break;
    }
    case CSR_MIDELEG:
        env->mideleg = (env->mideleg & ~delegable_ints)
                                | (val_to_write & delegable_ints);
        break;
    case CSR_MEDELEG: {
        target_ulong mask = 0;
        mask |= 1ULL << (RISCV_EXCP_INST_ADDR_MIS);
        mask |= 1ULL << (RISCV_EXCP_INST_ACCESS_FAULT);
        mask |= 1ULL << (RISCV_EXCP_ILLEGAL_INST);
        mask |= 1ULL << (RISCV_EXCP_BREAKPOINT);
        mask |= 1ULL << (RISCV_EXCP_LOAD_ADDR_MIS);
        mask |= 1ULL << (RISCV_EXCP_LOAD_ACCESS_FAULT);
        mask |= 1ULL << (RISCV_EXCP_STORE_AMO_ADDR_MIS);
        mask |= 1ULL << (RISCV_EXCP_STORE_AMO_ACCESS_FAULT);
        mask |= 1ULL << (RISCV_EXCP_U_ECALL);
        mask |= 1ULL << (RISCV_EXCP_S_ECALL);
        mask |= 1ULL << (RISCV_EXCP_H_ECALL);
        mask |= 1ULL << (RISCV_EXCP_M_ECALL);
        mask |= 1ULL << (RISCV_EXCP_INST_PAGE_FAULT);
        mask |= 1ULL << (RISCV_EXCP_LOAD_PAGE_FAULT);
        mask |= 1ULL << (RISCV_EXCP_STORE_PAGE_FAULT);
        env->medeleg = (env->medeleg & ~mask)
                                | (val_to_write & mask);
        break;
    }
    case CSR_MINSTRET:
        /* minstret is WARL so unsupported writes are ignored */
        break;
    case CSR_MCYCLE:
        /* mcycle is WARL so unsupported writes are ignored */
        break;
#if defined(TARGET_RISCV32)
    case CSR_MINSTRETH:
        /* minstreth is WARL so unsupported writes are ignored */
        break;
    case CSR_MCYCLEH:
        /* mcycleh is WARL so unsupported writes are ignored */
        break;
#endif
    case CSR_MUCOUNTEREN:
        if (env->priv_ver <= PRIV_VERSION_1_09_1) {
            env->scounteren = val_to_write;
            break;
        } else {
            goto do_illegal;
        }
    case CSR_MSCOUNTEREN:
        if (env->priv_ver <= PRIV_VERSION_1_09_1) {
            env->mcounteren = val_to_write;
            break;
        } else {
            goto do_illegal;
        }
    case CSR_SSTATUS: {
        target_ulong ms = env->mstatus;
        target_ulong mask = SSTATUS_SIE | SSTATUS_SPIE | SSTATUS_UIE
            | SSTATUS_UPIE | SSTATUS_SPP | SSTATUS_FS | SSTATUS_XS
            | SSTATUS_SUM | SSTATUS_SD;
        if (env->priv_ver >= PRIV_VERSION_1_10_0) {
            mask |= SSTATUS_MXR;
        }
        ms = (ms & ~mask) | (val_to_write & mask);
        csr_write_helper(env, ms, CSR_MSTATUS);
        break;
    }
    case CSR_SIP: {
        qemu_mutex_lock_iothread();
        target_ulong next_mip = (env->mip & ~env->mideleg)
                                | (val_to_write & env->mideleg);
        qemu_mutex_unlock_iothread();
        csr_write_helper(env, next_mip, CSR_MIP);
        break;
    }
    case CSR_SIE: {
        target_ulong next_mie = (env->mie & ~env->mideleg)
                                | (val_to_write & env->mideleg);
        csr_write_helper(env, next_mie, CSR_MIE);
        break;
    }
    case CSR_SATP: /* CSR_SPTBR */ {
        if (!riscv_feature(env, RISCV_FEATURE_MMU)) {
            break;
        }
        if (env->priv_ver <= PRIV_VERSION_1_09_1 && (val_to_write ^ env->sptbr))
        {
            helper_tlb_flush(env);
            env->sptbr = val_to_write & (((target_ulong)
                1 << (TARGET_PHYS_ADDR_SPACE_BITS - PGSHIFT)) - 1);
        }
        if (env->priv_ver >= PRIV_VERSION_1_10_0 &&
            validate_vm(env, get_field(val_to_write, SATP_MODE)) &&
            ((val_to_write ^ env->satp) & (SATP_MODE | SATP_ASID | SATP_PPN)))
        {
            helper_tlb_flush(env);
            env->satp = val_to_write;
        }
        break;
    }
    case CSR_SEPC:
        env->sepc = val_to_write;
        break;
    case CSR_STVEC:
        /* bits [1:0] encode mode; 0 = direct, 1 = vectored, >= 2 reserved */
        if ((val_to_write & 3) == 0) {
            env->stvec = val_to_write >> 2 << 2;
        } else {
            qemu_log_mask(LOG_UNIMP,
                          "CSR_STVEC: vectored traps not supported\n");
        }
        break;
    case CSR_SCOUNTEREN:
        if (env->priv_ver >= PRIV_VERSION_1_10_0) {
            env->scounteren = val_to_write;
            break;
        } else {
            goto do_illegal;
        }
    case CSR_SSCRATCH:
        env->sscratch = val_to_write;
        break;
    case CSR_SCAUSE:
        env->scause = val_to_write;
        break;
    case CSR_SBADADDR:
        env->sbadaddr = val_to_write;
        break;
    case CSR_MEPC:
        env->mepc = val_to_write;
        break;
    case CSR_MTVEC:
        /* bits [1:0] indicate mode; 0 = direct, 1 = vectored, >= 2 reserved */
        if ((val_to_write & 3) == 0) {
            env->mtvec = val_to_write >> 2 << 2;
        } else {
            qemu_log_mask(LOG_UNIMP,
                          "CSR_MTVEC: vectored traps not supported\n");
        }
        break;
    case CSR_MCOUNTEREN:
        if (env->priv_ver >= PRIV_VERSION_1_10_0) {
            env->mcounteren = val_to_write;
            break;
        } else {
            goto do_illegal;
        }
    case CSR_MSCRATCH:
        env->mscratch = val_to_write;
        break;
    case CSR_MCAUSE:
        env->mcause = val_to_write;
        break;
    case CSR_MBADADDR:
        env->mbadaddr = val_to_write;
        break;
    case CSR_MISA:
        /* misa is WARL so unsupported writes are ignored */
        break;
    case CSR_PMPCFG0:
    case CSR_PMPCFG1:
    case CSR_PMPCFG2:
    case CSR_PMPCFG3:
        pmpcfg_csr_write(env, csrno - CSR_PMPCFG0, val_to_write);
        break;
    case CSR_PMPADDR0:
    case CSR_PMPADDR1:
    case CSR_PMPADDR2:
    case CSR_PMPADDR3:
    case CSR_PMPADDR4:
    case CSR_PMPADDR5:
    case CSR_PMPADDR6:
    case CSR_PMPADDR7:
    case CSR_PMPADDR8:
    case CSR_PMPADDR9:
    case CSR_PMPADDR10:
    case CSR_PMPADDR11:
    case CSR_PMPADDR12:
    case CSR_PMPADDR13:
    case CSR_PMPADDR14:
    case CSR_PMPADDR15:
        pmpaddr_csr_write(env, csrno - CSR_PMPADDR0, val_to_write);
        break;
#endif
#if !defined(CONFIG_USER_ONLY)
    do_illegal:
#endif
    default:
        do_raise_exception_err(env, RISCV_EXCP_ILLEGAL_INST, GETPC());
    }
}

/*
 * Handle reads to CSRs and any resulting special behavior
 *
 * Adapted from Spike's processor_t::get_csr
 */
target_ulong csr_read_helper(CPURISCVState *env, target_ulong csrno)
{
#ifndef CONFIG_USER_ONLY
    target_ulong ctr_en = env->priv == PRV_U ? env->scounteren :
                          env->priv == PRV_S ? env->mcounteren : -1U;
#else
    target_ulong ctr_en = -1;
#endif
    target_ulong ctr_ok = (ctr_en >> (csrno & 31)) & 1;

    if (csrno >= CSR_HPMCOUNTER3 && csrno <= CSR_HPMCOUNTER31) {
        if (ctr_ok) {
            return 0;
        }
    }
#if defined(TARGET_RISCV32)
    if (csrno >= CSR_HPMCOUNTER3H && csrno <= CSR_HPMCOUNTER31H) {
        if (ctr_ok) {
            return 0;
        }
    }
#endif
    if (csrno >= CSR_MHPMCOUNTER3 && csrno <= CSR_MHPMCOUNTER31) {
        return 0;
    }
#if defined(TARGET_RISCV32)
    if (csrno >= CSR_MHPMCOUNTER3H && csrno <= CSR_MHPMCOUNTER31H) {
        return 0;
    }
#endif
    if (csrno >= CSR_MHPMEVENT3 && csrno <= CSR_MHPMEVENT31) {
        return 0;
    }

    switch (csrno) {
    case CSR_FFLAGS:
        validate_mstatus_fs(env, GETPC());
        return cpu_riscv_get_fflags(env);
    case CSR_FRM:
        validate_mstatus_fs(env, GETPC());
        return env->frm;
    case CSR_FCSR:
        validate_mstatus_fs(env, GETPC());
        return (cpu_riscv_get_fflags(env) << FSR_AEXC_SHIFT)
                | (env->frm << FSR_RD_SHIFT);
    /* rdtime/rdtimeh is trapped and emulated by bbl in system mode */
#ifdef CONFIG_USER_ONLY
    case CSR_TIME:
        return cpu_get_host_ticks();
#if defined(TARGET_RISCV32)
    case CSR_TIMEH:
        return cpu_get_host_ticks() >> 32;
#endif
#endif
    case CSR_INSTRET:
    case CSR_CYCLE:
        if (ctr_ok) {
#if !defined(CONFIG_USER_ONLY)
            if (use_icount) {
                return cpu_get_icount();
            } else {
                return cpu_get_host_ticks();
            }
#else
            return cpu_get_host_ticks();
#endif
        }
        break;
#if defined(TARGET_RISCV32)
    case CSR_INSTRETH:
    case CSR_CYCLEH:
        if (ctr_ok) {
#if !defined(CONFIG_USER_ONLY)
            if (use_icount) {
                return cpu_get_icount() >> 32;
            } else {
                return cpu_get_host_ticks() >> 32;
            }
#else
            return cpu_get_host_ticks() >> 32;
#endif
        }
        break;
#endif
#ifndef CONFIG_USER_ONLY
    case CSR_MINSTRET:
    case CSR_MCYCLE:
        if (use_icount) {
            return cpu_get_icount();
        } else {
            return cpu_get_host_ticks();
        }
    case CSR_MINSTRETH:
    case CSR_MCYCLEH:
#if defined(TARGET_RISCV32)
        if (use_icount) {
            return cpu_get_icount() >> 32;
        } else {
            return cpu_get_host_ticks() >> 32;
        }
#endif
        break;
    case CSR_MUCOUNTEREN:
        if (env->priv_ver <= PRIV_VERSION_1_09_1) {
            return env->scounteren;
        } else {
            break; /* illegal instruction */
        }
    case CSR_MSCOUNTEREN:
        if (env->priv_ver <= PRIV_VERSION_1_09_1) {
            return env->mcounteren;
        } else {
            break; /* illegal instruction */
        }
    case CSR_SSTATUS: {
        target_ulong mask = SSTATUS_SIE | SSTATUS_SPIE | SSTATUS_UIE
            | SSTATUS_UPIE | SSTATUS_SPP | SSTATUS_FS | SSTATUS_XS
            | SSTATUS_SUM | SSTATUS_SD;
        if (env->priv_ver >= PRIV_VERSION_1_10_0) {
            mask |= SSTATUS_MXR;
        }
        return env->mstatus & mask;
    }
    case CSR_SIP: {
        qemu_mutex_lock_iothread();
        target_ulong tmp = env->mip & env->mideleg;
        qemu_mutex_unlock_iothread();
        return tmp;
    }
    case CSR_SIE:
        return env->mie & env->mideleg;
    case CSR_SEPC:
        return env->sepc;
    case CSR_SBADADDR:
        return env->sbadaddr;
    case CSR_STVEC:
        return env->stvec;
    case CSR_SCOUNTEREN:
        if (env->priv_ver >= PRIV_VERSION_1_10_0) {
            return env->scounteren;
        } else {
            break; /* illegal instruction */
        }
    case CSR_SCAUSE:
        return env->scause;
    case CSR_SATP: /* CSR_SPTBR */
        if (!riscv_feature(env, RISCV_FEATURE_MMU)) {
            return 0;
        }
        if (env->priv_ver >= PRIV_VERSION_1_10_0) {
            return env->satp;
        } else {
            return env->sptbr;
        }
    case CSR_SSCRATCH:
        return env->sscratch;
    case CSR_MSTATUS:
        return env->mstatus;
    case CSR_MIP: {
        qemu_mutex_lock_iothread();
        target_ulong tmp = env->mip;
        qemu_mutex_unlock_iothread();
        return tmp;
    }
    case CSR_MIE:
        return env->mie;
    case CSR_MEPC:
        return env->mepc;
    case CSR_MSCRATCH:
        return env->mscratch;
    case CSR_MCAUSE:
        return env->mcause;
    case CSR_MBADADDR:
        return env->mbadaddr;
    case CSR_MISA:
        return env->misa;
    case CSR_MARCHID:
        return 0; /* as spike does */
    case CSR_MIMPID:
        return 0; /* as spike does */
    case CSR_MVENDORID:
        return 0; /* as spike does */
    case CSR_MHARTID:
        return env->mhartid;
    case CSR_MTVEC:
        return env->mtvec;
    case CSR_MCOUNTEREN:
        if (env->priv_ver >= PRIV_VERSION_1_10_0) {
            return env->mcounteren;
        } else {
            break; /* illegal instruction */
        }
    case CSR_MEDELEG:
        return env->medeleg;
    case CSR_MIDELEG:
        return env->mideleg;
    case CSR_PMPCFG0:
    case CSR_PMPCFG1:
    case CSR_PMPCFG2:
    case CSR_PMPCFG3:
        return pmpcfg_csr_read(env, csrno - CSR_PMPCFG0);
    case CSR_PMPADDR0:
    case CSR_PMPADDR1:
    case CSR_PMPADDR2:
    case CSR_PMPADDR3:
    case CSR_PMPADDR4:
    case CSR_PMPADDR5:
    case CSR_PMPADDR6:
    case CSR_PMPADDR7:
    case CSR_PMPADDR8:
    case CSR_PMPADDR9:
    case CSR_PMPADDR10:
    case CSR_PMPADDR11:
    case CSR_PMPADDR12:
    case CSR_PMPADDR13:
    case CSR_PMPADDR14:
    case CSR_PMPADDR15:
        return pmpaddr_csr_read(env, csrno - CSR_PMPADDR0);
#endif
    }
    /* used by e.g. MTIME read */
    do_raise_exception_err(env, RISCV_EXCP_ILLEGAL_INST, GETPC());
}

/*
 * Check that CSR access is allowed.
 *
 * Adapted from Spike's decode.h:validate_csr
 */
static void validate_csr(CPURISCVState *env, uint64_t which,
                         uint64_t write, uintptr_t ra)
{
#ifndef CONFIG_USER_ONLY
    /* CSR address bits [9:8] encode the lowest privilege level that may
       access the CSR; bits [11:10] == 3 marks the CSR as read-only */
    unsigned csr_priv = get_field((which), 0x300);
    unsigned csr_read_only = get_field((which), 0xC00) == 3;
    if (((write) && csr_read_only) || (env->priv < csr_priv)) {
        do_raise_exception_err(env, RISCV_EXCP_ILLEGAL_INST, ra);
    }
#endif
}

target_ulong helper_csrrw(CPURISCVState *env, target_ulong src,
                          target_ulong csr)
{
    validate_csr(env, csr, 1, GETPC());
    uint64_t csr_backup = csr_read_helper(env, csr);
    csr_write_helper(env, src, csr);
    return csr_backup;
}

target_ulong helper_csrrs(CPURISCVState *env, target_ulong src,
                          target_ulong csr, target_ulong rs1_pass)
{
    validate_csr(env, csr, rs1_pass != 0, GETPC());
    uint64_t csr_backup = csr_read_helper(env, csr);
    if (rs1_pass != 0) {
        csr_write_helper(env, src | csr_backup, csr);
    }
    return csr_backup;
}

target_ulong helper_csrrc(CPURISCVState *env, target_ulong src,
                          target_ulong csr, target_ulong rs1_pass)
{
    validate_csr(env, csr, rs1_pass != 0, GETPC());
    uint64_t csr_backup = csr_read_helper(env, csr);
    if (rs1_pass != 0) {
        csr_write_helper(env, (~src) & csr_backup, csr);
    }
    return csr_backup;
}

#ifndef CONFIG_USER_ONLY

target_ulong helper_sret(CPURISCVState *env, target_ulong cpu_pc_deb)
{
    if (!(env->priv >= PRV_S)) {
        do_raise_exception_err(env, RISCV_EXCP_ILLEGAL_INST, GETPC());
    }

    target_ulong retpc = env->sepc;
    if (!riscv_has_ext(env, RVC) && (retpc & 0x3)) {
        do_raise_exception_err(env, RISCV_EXCP_INST_ADDR_MIS, GETPC());
    }

    target_ulong mstatus = env->mstatus;
    target_ulong prev_priv = get_field(mstatus, MSTATUS_SPP);
    mstatus = set_field(mstatus,
        env->priv_ver >= PRIV_VERSION_1_10_0 ?
        MSTATUS_SIE : MSTATUS_UIE << prev_priv,
        get_field(mstatus, MSTATUS_SPIE));
    mstatus = set_field(mstatus, MSTATUS_SPIE, 0);
    mstatus = set_field(mstatus, MSTATUS_SPP, PRV_U);
    riscv_set_mode(env, prev_priv);
    csr_write_helper(env, mstatus, CSR_MSTATUS);

    return retpc;
}

target_ulong helper_mret(CPURISCVState *env, target_ulong cpu_pc_deb)
{
    if (!(env->priv >= PRV_M)) {
        do_raise_exception_err(env, RISCV_EXCP_ILLEGAL_INST, GETPC());
    }

    target_ulong retpc = env->mepc;
    if (!riscv_has_ext(env, RVC) && (retpc & 0x3)) {
        do_raise_exception_err(env, RISCV_EXCP_INST_ADDR_MIS, GETPC());
    }

    target_ulong mstatus = env->mstatus;
    target_ulong prev_priv = get_field(mstatus, MSTATUS_MPP);
    mstatus = set_field(mstatus,
        env->priv_ver >= PRIV_VERSION_1_10_0 ?
        MSTATUS_MIE : MSTATUS_UIE << prev_priv,
        get_field(mstatus, MSTATUS_MPIE));
    mstatus = set_field(mstatus, MSTATUS_MPIE, 0);
    mstatus = set_field(mstatus, MSTATUS_MPP, PRV_U);
    riscv_set_mode(env, prev_priv);
    csr_write_helper(env, mstatus, CSR_MSTATUS);

    return retpc;
}

void helper_wfi(CPURISCVState *env)
{
    CPUState *cs = CPU(riscv_env_get_cpu(env));

    cs->halted = 1;
    cs->exception_index = EXCP_HLT;
    cpu_loop_exit(cs);
}

void helper_tlb_flush(CPURISCVState *env)
{
    RISCVCPU *cpu = riscv_env_get_cpu(env);
    CPUState *cs = CPU(cpu);
    tlb_flush(cs);
}

#endif /* !CONFIG_USER_ONLY */