/*
 * RISC-V Emulation Helpers for QEMU.
 *
 * Copyright (c) 2016-2017 Sagar Karandikar, sagark@eecs.berkeley.edu
 * Copyright (c) 2017-2018 SiFive, Inc.
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms and conditions of the GNU General Public License,
 * version 2 or later, as published by the Free Software Foundation.
 *
 * This program is distributed in the hope it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
 * more details.
 *
 * You should have received a copy of the GNU General Public License along with
 * this program. If not, see <http://www.gnu.org/licenses/>.
 */

#include "qemu/osdep.h"
#include "qemu/log.h"
#include "cpu.h"
#include "qemu/main-loop.h"
#include "exec/exec-all.h"
#include "exec/helper-proto.h"

#ifndef CONFIG_USER_ONLY

#if defined(TARGET_RISCV32)
static const char valid_vm_1_09[16] = {
    [VM_1_09_MBARE] = 1,
    [VM_1_09_SV32] = 1,
};
static const char valid_vm_1_10[16] = {
    [VM_1_10_MBARE] = 1,
    [VM_1_10_SV32] = 1
};
#elif defined(TARGET_RISCV64)
static const char valid_vm_1_09[16] = {
    [VM_1_09_MBARE] = 1,
    [VM_1_09_SV39] = 1,
    [VM_1_09_SV48] = 1,
};
static const char valid_vm_1_10[16] = {
    [VM_1_10_MBARE] = 1,
    [VM_1_10_SV39] = 1,
    [VM_1_10_SV48] = 1,
    [VM_1_10_SV57] = 1
};
#endif

static int validate_vm(CPURISCVState *env, target_ulong vm)
{
    return (env->priv_ver >= PRIV_VERSION_1_10_0) ?
        valid_vm_1_10[vm & 0xf] : valid_vm_1_09[vm & 0xf];
}

#endif

/* Exceptions processing helpers */
void QEMU_NORETURN do_raise_exception_err(CPURISCVState *env,
                                          uint32_t exception, uintptr_t pc)
{
    CPUState *cs = CPU(riscv_env_get_cpu(env));
    qemu_log_mask(CPU_LOG_INT, "%s: %d\n", __func__, exception);
    cs->exception_index = exception;
    cpu_loop_exit_restore(cs, pc);
}

void helper_raise_exception(CPURISCVState *env, uint32_t exception)
{
    do_raise_exception_err(env, exception, 0);
}

static void validate_mstatus_fs(CPURISCVState *env, uintptr_t ra)
{
#ifndef CONFIG_USER_ONLY
    if (!(env->mstatus & MSTATUS_FS)) {
        do_raise_exception_err(env, RISCV_EXCP_ILLEGAL_INST, ra);
    }
#endif
}

/*
 * Handle writes to CSRs and any resulting special behavior
 *
 * Adapted from Spike's processor_t::set_csr
 */
void csr_write_helper(CPURISCVState *env, target_ulong val_to_write,
        target_ulong csrno)
{
#ifndef CONFIG_USER_ONLY
    uint64_t delegable_ints = MIP_SSIP | MIP_STIP | MIP_SEIP | (1 << IRQ_X_COP);
    uint64_t all_ints = delegable_ints | MIP_MSIP | MIP_MTIP;
#endif

    switch (csrno) {
    case CSR_FFLAGS:
        validate_mstatus_fs(env, GETPC());
        cpu_riscv_set_fflags(env, val_to_write & (FSR_AEXC >> FSR_AEXC_SHIFT));
        break;
    case CSR_FRM:
        validate_mstatus_fs(env, GETPC());
        env->frm = val_to_write & (FSR_RD >> FSR_RD_SHIFT);
        break;
    case CSR_FCSR:
        validate_mstatus_fs(env, GETPC());
        env->frm = (val_to_write & FSR_RD) >> FSR_RD_SHIFT;
        cpu_riscv_set_fflags(env, (val_to_write & FSR_AEXC) >> FSR_AEXC_SHIFT);
        break;
#ifndef CONFIG_USER_ONLY
    case CSR_MSTATUS: {
        target_ulong mstatus = env->mstatus;
        target_ulong mask = 0;
        target_ulong mpp = get_field(val_to_write, MSTATUS_MPP);

        /* flush tlb on mstatus fields that affect VM */
        if (env->priv_ver <= PRIV_VERSION_1_09_1) {
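            /*
             * Privileged spec v1.09.1: the translation mode lives in
             * mstatus.VM, so it is included in the TLB-flush check below,
             * and it is only writable (WARL) when validate_vm() accepts
             * the requested mode.
             */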
            if ((val_to_write ^ mstatus) & (MSTATUS_MXR | MSTATUS_MPP |
                    MSTATUS_MPRV | MSTATUS_SUM | MSTATUS_VM)) {
                helper_tlb_flush(env);
            }
            mask = MSTATUS_SIE | MSTATUS_SPIE | MSTATUS_MIE | MSTATUS_MPIE |
                MSTATUS_SPP | MSTATUS_FS | MSTATUS_MPRV | MSTATUS_SUM |
                MSTATUS_MPP | MSTATUS_MXR |
                (validate_vm(env, get_field(val_to_write, MSTATUS_VM)) ?
                    MSTATUS_VM : 0);
        }
        if (env->priv_ver >= PRIV_VERSION_1_10_0) {
            if ((val_to_write ^ mstatus) & (MSTATUS_MXR | MSTATUS_MPP |
                    MSTATUS_MPRV | MSTATUS_SUM)) {
                helper_tlb_flush(env);
            }
            mask = MSTATUS_SIE | MSTATUS_SPIE | MSTATUS_MIE | MSTATUS_MPIE |
                MSTATUS_SPP | MSTATUS_FS | MSTATUS_MPRV | MSTATUS_SUM |
                MSTATUS_MPP | MSTATUS_MXR;
        }

        /* silently discard mstatus.mpp writes for unsupported modes */
        if (mpp == PRV_H ||
            (!riscv_has_ext(env, RVS) && mpp == PRV_S) ||
            (!riscv_has_ext(env, RVU) && mpp == PRV_U)) {
            mask &= ~MSTATUS_MPP;
        }

        mstatus = (mstatus & ~mask) | (val_to_write & mask);

        /* Note: this is a workaround for an issue where mstatus.FS
           does not report dirty after floating point operations
           that modify floating point state. This workaround is
           technically compliant with the RISC-V Privileged
           specification as it is legal to return only off or dirty,
           at the expense of extra floating point save/restore. */

        /* FP is always dirty or off */
        if (mstatus & MSTATUS_FS) {
            mstatus |= MSTATUS_FS;
        }

        int dirty = ((mstatus & MSTATUS_FS) == MSTATUS_FS) |
            ((mstatus & MSTATUS_XS) == MSTATUS_XS);
        mstatus = set_field(mstatus, MSTATUS_SD, dirty);
        env->mstatus = mstatus;
        break;
    }
    case CSR_MIP: {
        /*
         * Since the writeable bits in MIP are not set asynchronously by the
         * CLINT, no additional locking is needed for read-modify-write
         * CSR operations
         */
        qemu_mutex_lock_iothread();
        RISCVCPU *cpu = riscv_env_get_cpu(env);
        riscv_set_local_interrupt(cpu, MIP_SSIP,
                                  (val_to_write & MIP_SSIP) != 0);
        riscv_set_local_interrupt(cpu, MIP_STIP,
                                  (val_to_write & MIP_STIP) != 0);
        /*
         * csrs, csrc on mip.SEIP is not decomposable into separate read and
         * write steps, so a different implementation is needed
         */
        qemu_mutex_unlock_iothread();
        break;
    }
    case CSR_MIE: {
        env->mie = (env->mie & ~all_ints) |
            (val_to_write & all_ints);
        break;
    }
    case CSR_MIDELEG:
        env->mideleg = (env->mideleg & ~delegable_ints)
                                | (val_to_write & delegable_ints);
        break;
    case CSR_MEDELEG: {
        target_ulong mask = 0;
        mask |= 1ULL << (RISCV_EXCP_INST_ADDR_MIS);
        mask |= 1ULL << (RISCV_EXCP_INST_ACCESS_FAULT);
        mask |= 1ULL << (RISCV_EXCP_ILLEGAL_INST);
        mask |= 1ULL << (RISCV_EXCP_BREAKPOINT);
        mask |= 1ULL << (RISCV_EXCP_LOAD_ADDR_MIS);
        mask |= 1ULL << (RISCV_EXCP_LOAD_ACCESS_FAULT);
        mask |= 1ULL << (RISCV_EXCP_STORE_AMO_ADDR_MIS);
        mask |= 1ULL << (RISCV_EXCP_STORE_AMO_ACCESS_FAULT);
        mask |= 1ULL << (RISCV_EXCP_U_ECALL);
        mask |= 1ULL << (RISCV_EXCP_S_ECALL);
        mask |= 1ULL << (RISCV_EXCP_H_ECALL);
        mask |= 1ULL << (RISCV_EXCP_M_ECALL);
        mask |= 1ULL << (RISCV_EXCP_INST_PAGE_FAULT);
        mask |= 1ULL << (RISCV_EXCP_LOAD_PAGE_FAULT);
        mask |= 1ULL << (RISCV_EXCP_STORE_PAGE_FAULT);
        env->medeleg = (env->medeleg & ~mask)
                                | (val_to_write & mask);
        break;
    }
    case CSR_MINSTRET:
        /* minstret is WARL so unsupported writes are ignored */
        break;
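    /*
     * As with minstret above, the remaining machine counters (mcycle and,
     * on RV32, the *h high halves) are WARL, so writes are accepted and
     * silently discarded rather than raising an illegal instruction trap.
     */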
    case CSR_MCYCLE:
        /* mcycle is WARL so unsupported writes are ignored */
        break;
#if defined(TARGET_RISCV32)
    case CSR_MINSTRETH:
        /* minstreth is WARL so unsupported writes are ignored */
        break;
    case CSR_MCYCLEH:
        /* mcycleh is WARL so unsupported writes are ignored */
        break;
#endif
    case CSR_MUCOUNTEREN:
        if (env->priv_ver <= PRIV_VERSION_1_09_1) {
            env->scounteren = val_to_write;
            break;
        } else {
            goto do_illegal;
        }
    case CSR_MSCOUNTEREN:
        if (env->priv_ver <= PRIV_VERSION_1_09_1) {
            env->mcounteren = val_to_write;
            break;
        } else {
            goto do_illegal;
        }
    case CSR_SSTATUS: {
        target_ulong ms = env->mstatus;
        target_ulong mask = SSTATUS_SIE | SSTATUS_SPIE | SSTATUS_UIE
            | SSTATUS_UPIE | SSTATUS_SPP | SSTATUS_FS | SSTATUS_XS
            | SSTATUS_SUM | SSTATUS_SD;
        if (env->priv_ver >= PRIV_VERSION_1_10_0) {
            mask |= SSTATUS_MXR;
        }
        ms = (ms & ~mask) | (val_to_write & mask);
        csr_write_helper(env, ms, CSR_MSTATUS);
        break;
    }
    case CSR_SIP: {
        qemu_mutex_lock_iothread();
        target_ulong next_mip = (env->mip & ~env->mideleg)
                                | (val_to_write & env->mideleg);
        qemu_mutex_unlock_iothread();
        csr_write_helper(env, next_mip, CSR_MIP);
        break;
    }
    case CSR_SIE: {
        target_ulong next_mie = (env->mie & ~env->mideleg)
                                | (val_to_write & env->mideleg);
        csr_write_helper(env, next_mie, CSR_MIE);
        break;
    }
    case CSR_SATP: /* CSR_SPTBR */ {
        if (!riscv_feature(env, RISCV_FEATURE_MMU)) {
            break;
        }
        if (env->priv_ver <= PRIV_VERSION_1_09_1 && (val_to_write ^ env->sptbr))
        {
            helper_tlb_flush(env);
            env->sptbr = val_to_write & (((target_ulong)
                1 << (TARGET_PHYS_ADDR_SPACE_BITS - PGSHIFT)) - 1);
        }
        if (env->priv_ver >= PRIV_VERSION_1_10_0 &&
            validate_vm(env, get_field(val_to_write, SATP_MODE)) &&
            ((val_to_write ^ env->satp) & (SATP_MODE | SATP_ASID | SATP_PPN)))
        {
            helper_tlb_flush(env);
            env->satp = val_to_write;
        }
        break;
    }
    case CSR_SEPC:
        env->sepc = val_to_write;
        break;
    case CSR_STVEC:
        /* bits [1:0] encode mode; 0 = direct, 1 = vectored, >=2 reserved */
        if ((val_to_write & 3) == 0) {
            env->stvec = val_to_write >> 2 << 2;
        } else {
            qemu_log_mask(LOG_UNIMP,
                          "CSR_STVEC: vectored traps not supported\n");
        }
        break;
    case CSR_SCOUNTEREN:
        if (env->priv_ver >= PRIV_VERSION_1_10_0) {
            env->scounteren = val_to_write;
            break;
        } else {
            goto do_illegal;
        }
    case CSR_SSCRATCH:
        env->sscratch = val_to_write;
        break;
    case CSR_SCAUSE:
        env->scause = val_to_write;
        break;
    case CSR_SBADADDR:
        env->sbadaddr = val_to_write;
        break;
    case CSR_MEPC:
        env->mepc = val_to_write;
        break;
    case CSR_MTVEC:
        /* bits [1:0] indicate mode; 0 = direct, 1 = vectored, >=2 reserved */
        if ((val_to_write & 3) == 0) {
            env->mtvec = val_to_write >> 2 << 2;
        } else {
            qemu_log_mask(LOG_UNIMP,
                          "CSR_MTVEC: vectored traps not supported\n");
        }
        break;
    case CSR_MCOUNTEREN:
        if (env->priv_ver >= PRIV_VERSION_1_10_0) {
            env->mcounteren = val_to_write;
            break;
        } else {
            goto do_illegal;
        }
    case CSR_MSCRATCH:
        env->mscratch = val_to_write;
        break;
    case CSR_MCAUSE:
        env->mcause = val_to_write;
        break;
    case CSR_MBADADDR:
        env->mbadaddr = val_to_write;
        break;
    case CSR_MISA:
        /* misa is WARL so unsupported writes are ignored */
        break;
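    /*
     * Physical memory protection is handled by the shared PMP helpers;
     * the offset from the base CSR number selects which pmpcfg/pmpaddr
     * register is accessed.
     */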
    case CSR_PMPCFG0:
    case CSR_PMPCFG1:
    case CSR_PMPCFG2:
    case CSR_PMPCFG3:
        pmpcfg_csr_write(env, csrno - CSR_PMPCFG0, val_to_write);
        break;
    case CSR_PMPADDR0:
    case CSR_PMPADDR1:
    case CSR_PMPADDR2:
    case CSR_PMPADDR3:
    case CSR_PMPADDR4:
    case CSR_PMPADDR5:
    case CSR_PMPADDR6:
    case CSR_PMPADDR7:
    case CSR_PMPADDR8:
    case CSR_PMPADDR9:
    case CSR_PMPADDR10:
    case CSR_PMPADDR11:
    case CSR_PMPADDR12:
    case CSR_PMPADDR13:
    case CSR_PMPADDR14:
    case CSR_PMPADDR15:
        pmpaddr_csr_write(env, csrno - CSR_PMPADDR0, val_to_write);
        break;
#endif
#if !defined(CONFIG_USER_ONLY)
    do_illegal:
#endif
    default:
        do_raise_exception_err(env, RISCV_EXCP_ILLEGAL_INST, GETPC());
    }
}

/*
 * Handle reads to CSRs and any resulting special behavior
 *
 * Adapted from Spike's processor_t::get_csr
 */
target_ulong csr_read_helper(CPURISCVState *env, target_ulong csrno)
{
#ifndef CONFIG_USER_ONLY
    target_ulong ctr_en = env->priv == PRV_U ? env->scounteren :
                          env->priv == PRV_S ? env->mcounteren : -1U;
#else
    target_ulong ctr_en = -1;
#endif
    target_ulong ctr_ok = (ctr_en >> (csrno & 31)) & 1;

    if (csrno >= CSR_HPMCOUNTER3 && csrno <= CSR_HPMCOUNTER31) {
        if (ctr_ok) {
            return 0;
        }
    }
#if defined(TARGET_RISCV32)
    if (csrno >= CSR_HPMCOUNTER3H && csrno <= CSR_HPMCOUNTER31H) {
        if (ctr_ok) {
            return 0;
        }
    }
#endif
    if (csrno >= CSR_MHPMCOUNTER3 && csrno <= CSR_MHPMCOUNTER31) {
        return 0;
    }
#if defined(TARGET_RISCV32)
    if (csrno >= CSR_MHPMCOUNTER3H && csrno <= CSR_MHPMCOUNTER31H) {
        return 0;
    }
#endif
    if (csrno >= CSR_MHPMEVENT3 && csrno <= CSR_MHPMEVENT31) {
        return 0;
    }

    switch (csrno) {
    case CSR_FFLAGS:
        validate_mstatus_fs(env, GETPC());
        return cpu_riscv_get_fflags(env);
    case CSR_FRM:
        validate_mstatus_fs(env, GETPC());
        return env->frm;
    case CSR_FCSR:
        validate_mstatus_fs(env, GETPC());
        return (cpu_riscv_get_fflags(env) << FSR_AEXC_SHIFT)
                | (env->frm << FSR_RD_SHIFT);
    /* rdtime/rdtimeh is trapped and emulated by bbl in system mode */
#ifdef CONFIG_USER_ONLY
    case CSR_TIME:
        return cpu_get_host_ticks();
#if defined(TARGET_RISCV32)
    case CSR_TIMEH:
        return cpu_get_host_ticks() >> 32;
#endif
#endif
    case CSR_INSTRET:
    case CSR_CYCLE:
        if (ctr_ok) {
#if !defined(CONFIG_USER_ONLY)
            if (use_icount) {
                return cpu_get_icount();
            } else {
                return cpu_get_host_ticks();
            }
#else
            return cpu_get_host_ticks();
#endif
        }
        break;
#if defined(TARGET_RISCV32)
    case CSR_INSTRETH:
    case CSR_CYCLEH:
        if (ctr_ok) {
#if !defined(CONFIG_USER_ONLY)
            if (use_icount) {
                return cpu_get_icount() >> 32;
            } else {
                return cpu_get_host_ticks() >> 32;
            }
#else
            return cpu_get_host_ticks() >> 32;
#endif
        }
        break;
#endif
#ifndef CONFIG_USER_ONLY
    case CSR_MINSTRET:
    case CSR_MCYCLE:
        if (use_icount) {
            return cpu_get_icount();
        } else {
            return cpu_get_host_ticks();
        }
    case CSR_MINSTRETH:
    case CSR_MCYCLEH:
#if defined(TARGET_RISCV32)
        if (use_icount) {
            return cpu_get_icount() >> 32;
        } else {
            return cpu_get_host_ticks() >> 32;
        }
#endif
        break;
    case CSR_MUCOUNTEREN:
        if (env->priv_ver <= PRIV_VERSION_1_09_1) {
            return env->scounteren;
        } else {
            break; /* illegal instruction */
        }
    case CSR_MSCOUNTEREN:
        if (env->priv_ver <= PRIV_VERSION_1_09_1) {
            return env->mcounteren;
        } else {
            break; /* illegal instruction */
        }
    case CSR_SSTATUS: {
        target_ulong mask = SSTATUS_SIE | SSTATUS_SPIE | SSTATUS_UIE
            | SSTATUS_UPIE | SSTATUS_SPP | SSTATUS_FS | SSTATUS_XS
            | SSTATUS_SUM | SSTATUS_SD;
        if (env->priv_ver >= PRIV_VERSION_1_10_0) {
            mask |= SSTATUS_MXR;
        }
        return env->mstatus & mask;
    }
    case CSR_SIP: {
        qemu_mutex_lock_iothread();
        target_ulong tmp = env->mip & env->mideleg;
        qemu_mutex_unlock_iothread();
        return tmp;
    }
    case CSR_SIE:
        return env->mie & env->mideleg;
    case CSR_SEPC:
        return env->sepc;
    case CSR_SBADADDR:
        return env->sbadaddr;
    case CSR_STVEC:
        return env->stvec;
    case CSR_SCOUNTEREN:
        if (env->priv_ver >= PRIV_VERSION_1_10_0) {
            return env->scounteren;
        } else {
            break; /* illegal instruction */
        }
    case CSR_SCAUSE:
        return env->scause;
    case CSR_SATP: /* CSR_SPTBR */
        if (!riscv_feature(env, RISCV_FEATURE_MMU)) {
            return 0;
        }
        if (env->priv_ver >= PRIV_VERSION_1_10_0) {
            return env->satp;
        } else {
            return env->sptbr;
        }
    case CSR_SSCRATCH:
        return env->sscratch;
    case CSR_MSTATUS:
        return env->mstatus;
    case CSR_MIP: {
        qemu_mutex_lock_iothread();
        target_ulong tmp = env->mip;
        qemu_mutex_unlock_iothread();
        return tmp;
    }
    case CSR_MIE:
        return env->mie;
    case CSR_MEPC:
        return env->mepc;
    case CSR_MSCRATCH:
        return env->mscratch;
    case CSR_MCAUSE:
        return env->mcause;
    case CSR_MBADADDR:
        return env->mbadaddr;
    case CSR_MISA:
        return env->misa;
    case CSR_MARCHID:
        return 0; /* as spike does */
    case CSR_MIMPID:
        return 0; /* as spike does */
    case CSR_MVENDORID:
        return 0; /* as spike does */
    case CSR_MHARTID:
        return env->mhartid;
    case CSR_MTVEC:
        return env->mtvec;
    case CSR_MCOUNTEREN:
        if (env->priv_ver >= PRIV_VERSION_1_10_0) {
            return env->mcounteren;
        } else {
            break; /* illegal instruction */
        }
    case CSR_MEDELEG:
        return env->medeleg;
    case CSR_MIDELEG:
        return env->mideleg;
    case CSR_PMPCFG0:
    case CSR_PMPCFG1:
    case CSR_PMPCFG2:
    case CSR_PMPCFG3:
        return pmpcfg_csr_read(env, csrno - CSR_PMPCFG0);
    case CSR_PMPADDR0:
    case CSR_PMPADDR1:
    case CSR_PMPADDR2:
    case CSR_PMPADDR3:
    case CSR_PMPADDR4:
    case CSR_PMPADDR5:
    case CSR_PMPADDR6:
    case CSR_PMPADDR7:
    case CSR_PMPADDR8:
    case CSR_PMPADDR9:
    case CSR_PMPADDR10:
    case CSR_PMPADDR11:
    case CSR_PMPADDR12:
    case CSR_PMPADDR13:
    case CSR_PMPADDR14:
    case CSR_PMPADDR15:
        return pmpaddr_csr_read(env, csrno - CSR_PMPADDR0);
#endif
    }
    /* used by e.g. MTIME read */
    do_raise_exception_err(env, RISCV_EXCP_ILLEGAL_INST, GETPC());
}

/*
 * Check that CSR access is allowed.
 *
 * Adapted from Spike's decode.h:validate_csr
 */
static void validate_csr(CPURISCVState *env, uint64_t which,
                         uint64_t write, uintptr_t ra)
{
#ifndef CONFIG_USER_ONLY
    unsigned csr_priv = get_field((which), 0x300);
    unsigned csr_read_only = get_field((which), 0xC00) == 3;
    if (((write) && csr_read_only) || (env->priv < csr_priv)) {
        do_raise_exception_err(env, RISCV_EXCP_ILLEGAL_INST, ra);
    }
#endif
}

target_ulong helper_csrrw(CPURISCVState *env, target_ulong src,
        target_ulong csr)
{
    validate_csr(env, csr, 1, GETPC());
    uint64_t csr_backup = csr_read_helper(env, csr);
    csr_write_helper(env, src, csr);
    return csr_backup;
}

target_ulong helper_csrrs(CPURISCVState *env, target_ulong src,
        target_ulong csr, target_ulong rs1_pass)
{
    validate_csr(env, csr, rs1_pass != 0, GETPC());
    uint64_t csr_backup = csr_read_helper(env, csr);
    if (rs1_pass != 0) {
        csr_write_helper(env, src | csr_backup, csr);
    }
    return csr_backup;
}

target_ulong helper_csrrc(CPURISCVState *env, target_ulong src,
        target_ulong csr, target_ulong rs1_pass)
{
    validate_csr(env, csr, rs1_pass != 0, GETPC());
    uint64_t csr_backup = csr_read_helper(env, csr);
    if (rs1_pass != 0) {
        csr_write_helper(env, (~src) & csr_backup, csr);
    }
    return csr_backup;
}

#ifndef CONFIG_USER_ONLY

/* iothread_mutex must be held */
void riscv_set_local_interrupt(RISCVCPU *cpu, target_ulong mask, int value)
{
    target_ulong old_mip = cpu->env.mip;
    cpu->env.mip = (old_mip & ~mask) | (value ? mask : 0);

    if (cpu->env.mip && !old_mip) {
        cpu_interrupt(CPU(cpu), CPU_INTERRUPT_HARD);
    } else if (!cpu->env.mip && old_mip) {
        cpu_reset_interrupt(CPU(cpu), CPU_INTERRUPT_HARD);
    }
}

void riscv_set_mode(CPURISCVState *env, target_ulong newpriv)
{
    if (newpriv > PRV_M) {
        g_assert_not_reached();
    }
    if (newpriv == PRV_H) {
        newpriv = PRV_U;
    }
    /* tlb_flush is unnecessary as mode is contained in mmu_idx */
    env->priv = newpriv;
}

target_ulong helper_sret(CPURISCVState *env, target_ulong cpu_pc_deb)
{
    if (!(env->priv >= PRV_S)) {
        do_raise_exception_err(env, RISCV_EXCP_ILLEGAL_INST, GETPC());
    }

    target_ulong retpc = env->sepc;
    if (!riscv_has_ext(env, RVC) && (retpc & 0x3)) {
        do_raise_exception_err(env, RISCV_EXCP_INST_ADDR_MIS, GETPC());
    }

    target_ulong mstatus = env->mstatus;
    target_ulong prev_priv = get_field(mstatus, MSTATUS_SPP);
    mstatus = set_field(mstatus,
        env->priv_ver >= PRIV_VERSION_1_10_0 ?
        MSTATUS_SIE : MSTATUS_UIE << prev_priv,
        get_field(mstatus, MSTATUS_SPIE));
    mstatus = set_field(mstatus, MSTATUS_SPIE, 0);
    mstatus = set_field(mstatus, MSTATUS_SPP, PRV_U);
    riscv_set_mode(env, prev_priv);
    csr_write_helper(env, mstatus, CSR_MSTATUS);

    return retpc;
}

target_ulong helper_mret(CPURISCVState *env, target_ulong cpu_pc_deb)
{
    if (!(env->priv >= PRV_M)) {
        do_raise_exception_err(env, RISCV_EXCP_ILLEGAL_INST, GETPC());
    }

    target_ulong retpc = env->mepc;
    if (!riscv_has_ext(env, RVC) && (retpc & 0x3)) {
        do_raise_exception_err(env, RISCV_EXCP_INST_ADDR_MIS, GETPC());
    }

    target_ulong mstatus = env->mstatus;
    target_ulong prev_priv = get_field(mstatus, MSTATUS_MPP);
    mstatus = set_field(mstatus,
        env->priv_ver >= PRIV_VERSION_1_10_0 ?
        MSTATUS_MIE : MSTATUS_UIE << prev_priv,
        get_field(mstatus, MSTATUS_MPIE));
    mstatus = set_field(mstatus, MSTATUS_MPIE, 0);
    mstatus = set_field(mstatus, MSTATUS_MPP, PRV_U);
    riscv_set_mode(env, prev_priv);
    csr_write_helper(env, mstatus, CSR_MSTATUS);

    return retpc;
}


void helper_wfi(CPURISCVState *env)
{
    CPUState *cs = CPU(riscv_env_get_cpu(env));

    cs->halted = 1;
    cs->exception_index = EXCP_HLT;
    cpu_loop_exit(cs);
}

void helper_tlb_flush(CPURISCVState *env)
{
    RISCVCPU *cpu = riscv_env_get_cpu(env);
    CPUState *cs = CPU(cpu);
    tlb_flush(cs);
}

#endif /* !CONFIG_USER_ONLY */