1 /* 2 * RISC-V Control and Status Registers. 3 * 4 * Copyright (c) 2016-2017 Sagar Karandikar, sagark@eecs.berkeley.edu 5 * Copyright (c) 2017-2018 SiFive, Inc. 6 * 7 * This program is free software; you can redistribute it and/or modify it 8 * under the terms and conditions of the GNU General Public License, 9 * version 2 or later, as published by the Free Software Foundation. 10 * 11 * This program is distributed in the hope it will be useful, but WITHOUT 12 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or 13 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for 14 * more details. 15 * 16 * You should have received a copy of the GNU General Public License along with 17 * this program. If not, see <http://www.gnu.org/licenses/>. 18 */ 19 20 #include "qemu/osdep.h" 21 #include "qemu/log.h" 22 #include "qemu/timer.h" 23 #include "cpu.h" 24 #include "tcg/tcg-cpu.h" 25 #include "pmu.h" 26 #include "time_helper.h" 27 #include "exec/exec-all.h" 28 #include "exec/cputlb.h" 29 #include "exec/tb-flush.h" 30 #include "exec/icount.h" 31 #include "accel/tcg/getpc.h" 32 #include "qemu/guest-random.h" 33 #include "qapi/error.h" 34 #include <stdbool.h> 35 36 /* CSR function table public API */ 37 void riscv_get_csr_ops(int csrno, riscv_csr_operations *ops) 38 { 39 *ops = csr_ops[csrno & (CSR_TABLE_SIZE - 1)]; 40 } 41 42 void riscv_set_csr_ops(int csrno, riscv_csr_operations *ops) 43 { 44 csr_ops[csrno & (CSR_TABLE_SIZE - 1)] = *ops; 45 } 46 47 /* Predicates */ 48 #if !defined(CONFIG_USER_ONLY) 49 RISCVException smstateen_acc_ok(CPURISCVState *env, int index, uint64_t bit) 50 { 51 bool virt = env->virt_enabled; 52 53 if (env->priv == PRV_M || !riscv_cpu_cfg(env)->ext_smstateen) { 54 return RISCV_EXCP_NONE; 55 } 56 57 if (!(env->mstateen[index] & bit)) { 58 return RISCV_EXCP_ILLEGAL_INST; 59 } 60 61 if (virt) { 62 if (!(env->hstateen[index] & bit)) { 63 return RISCV_EXCP_VIRT_INSTRUCTION_FAULT; 64 } 65 66 if (env->priv == PRV_U && 
!(env->sstateen[index] & bit)) { 67 return RISCV_EXCP_VIRT_INSTRUCTION_FAULT; 68 } 69 } 70 71 if (env->priv == PRV_U && riscv_has_ext(env, RVS)) { 72 if (!(env->sstateen[index] & bit)) { 73 return RISCV_EXCP_ILLEGAL_INST; 74 } 75 } 76 77 return RISCV_EXCP_NONE; 78 } 79 #endif 80 81 static RISCVException fs(CPURISCVState *env, int csrno) 82 { 83 #if !defined(CONFIG_USER_ONLY) 84 if (!env->debugger && !riscv_cpu_fp_enabled(env) && 85 !riscv_cpu_cfg(env)->ext_zfinx) { 86 return RISCV_EXCP_ILLEGAL_INST; 87 } 88 89 if (!env->debugger && !riscv_cpu_fp_enabled(env)) { 90 return smstateen_acc_ok(env, 0, SMSTATEEN0_FCSR); 91 } 92 #endif 93 return RISCV_EXCP_NONE; 94 } 95 96 static RISCVException vs(CPURISCVState *env, int csrno) 97 { 98 if (riscv_cpu_cfg(env)->ext_zve32x) { 99 #if !defined(CONFIG_USER_ONLY) 100 if (!env->debugger && !riscv_cpu_vector_enabled(env)) { 101 return RISCV_EXCP_ILLEGAL_INST; 102 } 103 #endif 104 return RISCV_EXCP_NONE; 105 } 106 return RISCV_EXCP_ILLEGAL_INST; 107 } 108 109 static RISCVException ctr(CPURISCVState *env, int csrno) 110 { 111 #if !defined(CONFIG_USER_ONLY) 112 RISCVCPU *cpu = env_archcpu(env); 113 int ctr_index; 114 target_ulong ctr_mask; 115 int base_csrno = CSR_CYCLE; 116 bool rv32 = riscv_cpu_mxl(env) == MXL_RV32 ? 
true : false; 117 118 if (rv32 && csrno >= CSR_CYCLEH) { 119 /* Offset for RV32 hpmcounternh counters */ 120 base_csrno += 0x80; 121 } 122 ctr_index = csrno - base_csrno; 123 ctr_mask = BIT(ctr_index); 124 125 if ((csrno >= CSR_CYCLE && csrno <= CSR_INSTRET) || 126 (csrno >= CSR_CYCLEH && csrno <= CSR_INSTRETH)) { 127 if (!riscv_cpu_cfg(env)->ext_zicntr) { 128 return RISCV_EXCP_ILLEGAL_INST; 129 } 130 131 goto skip_ext_pmu_check; 132 } 133 134 if (!(cpu->pmu_avail_ctrs & ctr_mask)) { 135 /* No counter is enabled in PMU or the counter is out of range */ 136 return RISCV_EXCP_ILLEGAL_INST; 137 } 138 139 skip_ext_pmu_check: 140 141 if (env->debugger) { 142 return RISCV_EXCP_NONE; 143 } 144 145 if (env->priv < PRV_M && !get_field(env->mcounteren, ctr_mask)) { 146 return RISCV_EXCP_ILLEGAL_INST; 147 } 148 149 if (env->virt_enabled) { 150 if (!get_field(env->hcounteren, ctr_mask) || 151 (env->priv == PRV_U && !get_field(env->scounteren, ctr_mask))) { 152 return RISCV_EXCP_VIRT_INSTRUCTION_FAULT; 153 } 154 } 155 156 if (riscv_has_ext(env, RVS) && env->priv == PRV_U && 157 !get_field(env->scounteren, ctr_mask)) { 158 return RISCV_EXCP_ILLEGAL_INST; 159 } 160 161 #endif 162 return RISCV_EXCP_NONE; 163 } 164 165 static RISCVException ctr32(CPURISCVState *env, int csrno) 166 { 167 if (riscv_cpu_mxl(env) != MXL_RV32) { 168 return RISCV_EXCP_ILLEGAL_INST; 169 } 170 171 return ctr(env, csrno); 172 } 173 174 static RISCVException zcmt(CPURISCVState *env, int csrno) 175 { 176 if (!riscv_cpu_cfg(env)->ext_zcmt) { 177 return RISCV_EXCP_ILLEGAL_INST; 178 } 179 180 #if !defined(CONFIG_USER_ONLY) 181 RISCVException ret = smstateen_acc_ok(env, 0, SMSTATEEN0_JVT); 182 if (ret != RISCV_EXCP_NONE) { 183 return ret; 184 } 185 #endif 186 187 return RISCV_EXCP_NONE; 188 } 189 190 static RISCVException cfi_ss(CPURISCVState *env, int csrno) 191 { 192 if (!env_archcpu(env)->cfg.ext_zicfiss) { 193 return RISCV_EXCP_ILLEGAL_INST; 194 } 195 196 /* If ext implemented, M-mode always have access to 
SSP CSR */ 197 if (env->priv == PRV_M) { 198 return RISCV_EXCP_NONE; 199 } 200 201 /* if bcfi not active for current env, access to csr is illegal */ 202 if (!cpu_get_bcfien(env)) { 203 #if !defined(CONFIG_USER_ONLY) 204 if (env->debugger) { 205 return RISCV_EXCP_NONE; 206 } 207 #endif 208 return RISCV_EXCP_ILLEGAL_INST; 209 } 210 211 return RISCV_EXCP_NONE; 212 } 213 214 #if !defined(CONFIG_USER_ONLY) 215 static RISCVException mctr(CPURISCVState *env, int csrno) 216 { 217 RISCVCPU *cpu = env_archcpu(env); 218 uint32_t pmu_avail_ctrs = cpu->pmu_avail_ctrs; 219 int ctr_index; 220 int base_csrno = CSR_MHPMCOUNTER3; 221 222 if ((riscv_cpu_mxl(env) == MXL_RV32) && csrno >= CSR_MCYCLEH) { 223 /* Offset for RV32 mhpmcounternh counters */ 224 csrno -= 0x80; 225 } 226 227 g_assert(csrno >= CSR_MHPMCOUNTER3 && csrno <= CSR_MHPMCOUNTER31); 228 229 ctr_index = csrno - base_csrno; 230 if ((BIT(ctr_index) & pmu_avail_ctrs >> 3) == 0) { 231 /* The PMU is not enabled or counter is out of range */ 232 return RISCV_EXCP_ILLEGAL_INST; 233 } 234 235 return RISCV_EXCP_NONE; 236 } 237 238 static RISCVException mctr32(CPURISCVState *env, int csrno) 239 { 240 if (riscv_cpu_mxl(env) != MXL_RV32) { 241 return RISCV_EXCP_ILLEGAL_INST; 242 } 243 244 return mctr(env, csrno); 245 } 246 247 static RISCVException sscofpmf(CPURISCVState *env, int csrno) 248 { 249 if (!riscv_cpu_cfg(env)->ext_sscofpmf) { 250 return RISCV_EXCP_ILLEGAL_INST; 251 } 252 253 return RISCV_EXCP_NONE; 254 } 255 256 static RISCVException sscofpmf_32(CPURISCVState *env, int csrno) 257 { 258 if (riscv_cpu_mxl(env) != MXL_RV32) { 259 return RISCV_EXCP_ILLEGAL_INST; 260 } 261 262 return sscofpmf(env, csrno); 263 } 264 265 static RISCVException smcntrpmf(CPURISCVState *env, int csrno) 266 { 267 if (!riscv_cpu_cfg(env)->ext_smcntrpmf) { 268 return RISCV_EXCP_ILLEGAL_INST; 269 } 270 271 return RISCV_EXCP_NONE; 272 } 273 274 static RISCVException smcntrpmf_32(CPURISCVState *env, int csrno) 275 { 276 if (riscv_cpu_mxl(env) != 
MXL_RV32) { 277 return RISCV_EXCP_ILLEGAL_INST; 278 } 279 280 return smcntrpmf(env, csrno); 281 } 282 283 static RISCVException any(CPURISCVState *env, int csrno) 284 { 285 return RISCV_EXCP_NONE; 286 } 287 288 static RISCVException any32(CPURISCVState *env, int csrno) 289 { 290 if (riscv_cpu_mxl(env) != MXL_RV32) { 291 return RISCV_EXCP_ILLEGAL_INST; 292 } 293 294 return any(env, csrno); 295 296 } 297 298 static RISCVException aia_any(CPURISCVState *env, int csrno) 299 { 300 if (!riscv_cpu_cfg(env)->ext_smaia) { 301 return RISCV_EXCP_ILLEGAL_INST; 302 } 303 304 return any(env, csrno); 305 } 306 307 static RISCVException aia_any32(CPURISCVState *env, int csrno) 308 { 309 if (!riscv_cpu_cfg(env)->ext_smaia) { 310 return RISCV_EXCP_ILLEGAL_INST; 311 } 312 313 return any32(env, csrno); 314 } 315 316 static RISCVException csrind_any(CPURISCVState *env, int csrno) 317 { 318 if (!riscv_cpu_cfg(env)->ext_smcsrind) { 319 return RISCV_EXCP_ILLEGAL_INST; 320 } 321 322 return RISCV_EXCP_NONE; 323 } 324 325 static RISCVException csrind_or_aia_any(CPURISCVState *env, int csrno) 326 { 327 if (!riscv_cpu_cfg(env)->ext_smaia && !riscv_cpu_cfg(env)->ext_smcsrind) { 328 return RISCV_EXCP_ILLEGAL_INST; 329 } 330 331 return any(env, csrno); 332 } 333 334 static RISCVException smode(CPURISCVState *env, int csrno) 335 { 336 if (riscv_has_ext(env, RVS)) { 337 return RISCV_EXCP_NONE; 338 } 339 340 return RISCV_EXCP_ILLEGAL_INST; 341 } 342 343 static RISCVException smode32(CPURISCVState *env, int csrno) 344 { 345 if (riscv_cpu_mxl(env) != MXL_RV32) { 346 return RISCV_EXCP_ILLEGAL_INST; 347 } 348 349 return smode(env, csrno); 350 } 351 352 static RISCVException aia_smode(CPURISCVState *env, int csrno) 353 { 354 int ret; 355 356 if (!riscv_cpu_cfg(env)->ext_ssaia) { 357 return RISCV_EXCP_ILLEGAL_INST; 358 } 359 360 if (csrno == CSR_STOPEI) { 361 ret = smstateen_acc_ok(env, 0, SMSTATEEN0_IMSIC); 362 } else { 363 ret = smstateen_acc_ok(env, 0, SMSTATEEN0_AIA); 364 } 365 366 if (ret != 
RISCV_EXCP_NONE) { 367 return ret; 368 } 369 370 return smode(env, csrno); 371 } 372 373 static RISCVException aia_smode32(CPURISCVState *env, int csrno) 374 { 375 int ret; 376 377 if (!riscv_cpu_cfg(env)->ext_ssaia) { 378 return RISCV_EXCP_ILLEGAL_INST; 379 } 380 381 ret = smstateen_acc_ok(env, 0, SMSTATEEN0_AIA); 382 if (ret != RISCV_EXCP_NONE) { 383 return ret; 384 } 385 386 return smode32(env, csrno); 387 } 388 389 static RISCVException scountinhibit_pred(CPURISCVState *env, int csrno) 390 { 391 RISCVCPU *cpu = env_archcpu(env); 392 393 if (!cpu->cfg.ext_ssccfg || !cpu->cfg.ext_smcdeleg) { 394 return RISCV_EXCP_ILLEGAL_INST; 395 } 396 397 if (env->virt_enabled) { 398 return RISCV_EXCP_VIRT_INSTRUCTION_FAULT; 399 } 400 401 return smode(env, csrno); 402 } 403 404 static bool csrind_extensions_present(CPURISCVState *env) 405 { 406 return riscv_cpu_cfg(env)->ext_smcsrind || riscv_cpu_cfg(env)->ext_sscsrind; 407 } 408 409 static bool aia_extensions_present(CPURISCVState *env) 410 { 411 return riscv_cpu_cfg(env)->ext_smaia || riscv_cpu_cfg(env)->ext_ssaia; 412 } 413 414 static bool csrind_or_aia_extensions_present(CPURISCVState *env) 415 { 416 return csrind_extensions_present(env) || aia_extensions_present(env); 417 } 418 419 static RISCVException csrind_smode(CPURISCVState *env, int csrno) 420 { 421 if (!csrind_extensions_present(env)) { 422 return RISCV_EXCP_ILLEGAL_INST; 423 } 424 425 return smode(env, csrno); 426 } 427 428 static RISCVException csrind_or_aia_smode(CPURISCVState *env, int csrno) 429 { 430 if (!csrind_or_aia_extensions_present(env)) { 431 return RISCV_EXCP_ILLEGAL_INST; 432 } 433 434 return smode(env, csrno); 435 } 436 437 static RISCVException hmode(CPURISCVState *env, int csrno) 438 { 439 if (riscv_has_ext(env, RVH)) { 440 return RISCV_EXCP_NONE; 441 } 442 443 return RISCV_EXCP_ILLEGAL_INST; 444 } 445 446 static RISCVException hmode32(CPURISCVState *env, int csrno) 447 { 448 if (riscv_cpu_mxl(env) != MXL_RV32) { 449 return 
RISCV_EXCP_ILLEGAL_INST; 450 } 451 452 return hmode(env, csrno); 453 454 } 455 456 static RISCVException csrind_hmode(CPURISCVState *env, int csrno) 457 { 458 if (!csrind_extensions_present(env)) { 459 return RISCV_EXCP_ILLEGAL_INST; 460 } 461 462 return hmode(env, csrno); 463 } 464 465 static RISCVException csrind_or_aia_hmode(CPURISCVState *env, int csrno) 466 { 467 if (!csrind_or_aia_extensions_present(env)) { 468 return RISCV_EXCP_ILLEGAL_INST; 469 } 470 471 return hmode(env, csrno); 472 } 473 474 static RISCVException umode(CPURISCVState *env, int csrno) 475 { 476 if (riscv_has_ext(env, RVU)) { 477 return RISCV_EXCP_NONE; 478 } 479 480 return RISCV_EXCP_ILLEGAL_INST; 481 } 482 483 static RISCVException umode32(CPURISCVState *env, int csrno) 484 { 485 if (riscv_cpu_mxl(env) != MXL_RV32) { 486 return RISCV_EXCP_ILLEGAL_INST; 487 } 488 489 return umode(env, csrno); 490 } 491 492 static RISCVException mstateen(CPURISCVState *env, int csrno) 493 { 494 if (!riscv_cpu_cfg(env)->ext_smstateen) { 495 return RISCV_EXCP_ILLEGAL_INST; 496 } 497 498 return any(env, csrno); 499 } 500 501 static RISCVException hstateen_pred(CPURISCVState *env, int csrno, int base) 502 { 503 if (!riscv_cpu_cfg(env)->ext_smstateen) { 504 return RISCV_EXCP_ILLEGAL_INST; 505 } 506 507 RISCVException ret = hmode(env, csrno); 508 if (ret != RISCV_EXCP_NONE) { 509 return ret; 510 } 511 512 if (env->debugger) { 513 return RISCV_EXCP_NONE; 514 } 515 516 if (env->priv < PRV_M) { 517 if (!(env->mstateen[csrno - base] & SMSTATEEN_STATEEN)) { 518 return RISCV_EXCP_ILLEGAL_INST; 519 } 520 } 521 522 return RISCV_EXCP_NONE; 523 } 524 525 static RISCVException hstateen(CPURISCVState *env, int csrno) 526 { 527 return hstateen_pred(env, csrno, CSR_HSTATEEN0); 528 } 529 530 static RISCVException hstateenh(CPURISCVState *env, int csrno) 531 { 532 return hstateen_pred(env, csrno, CSR_HSTATEEN0H); 533 } 534 535 static RISCVException sstateen(CPURISCVState *env, int csrno) 536 { 537 bool virt = env->virt_enabled; 
538 int index = csrno - CSR_SSTATEEN0; 539 540 if (!riscv_cpu_cfg(env)->ext_smstateen) { 541 return RISCV_EXCP_ILLEGAL_INST; 542 } 543 544 RISCVException ret = smode(env, csrno); 545 if (ret != RISCV_EXCP_NONE) { 546 return ret; 547 } 548 549 if (env->debugger) { 550 return RISCV_EXCP_NONE; 551 } 552 553 if (env->priv < PRV_M) { 554 if (!(env->mstateen[index] & SMSTATEEN_STATEEN)) { 555 return RISCV_EXCP_ILLEGAL_INST; 556 } 557 558 if (virt) { 559 if (!(env->hstateen[index] & SMSTATEEN_STATEEN)) { 560 return RISCV_EXCP_VIRT_INSTRUCTION_FAULT; 561 } 562 } 563 } 564 565 return RISCV_EXCP_NONE; 566 } 567 568 static RISCVException sstc(CPURISCVState *env, int csrno) 569 { 570 bool hmode_check = false; 571 572 if (!riscv_cpu_cfg(env)->ext_sstc || !env->rdtime_fn) { 573 return RISCV_EXCP_ILLEGAL_INST; 574 } 575 576 if ((csrno == CSR_VSTIMECMP) || (csrno == CSR_VSTIMECMPH)) { 577 hmode_check = true; 578 } 579 580 RISCVException ret = hmode_check ? hmode(env, csrno) : smode(env, csrno); 581 if (ret != RISCV_EXCP_NONE) { 582 return ret; 583 } 584 585 if (env->debugger) { 586 return RISCV_EXCP_NONE; 587 } 588 589 if (env->priv == PRV_M) { 590 return RISCV_EXCP_NONE; 591 } 592 593 /* 594 * No need of separate function for rv32 as menvcfg stores both menvcfg 595 * menvcfgh for RV32. 
596 */ 597 if (!(get_field(env->mcounteren, COUNTEREN_TM) && 598 get_field(env->menvcfg, MENVCFG_STCE))) { 599 return RISCV_EXCP_ILLEGAL_INST; 600 } 601 602 if (env->virt_enabled) { 603 if (!(get_field(env->hcounteren, COUNTEREN_TM) && 604 get_field(env->henvcfg, HENVCFG_STCE))) { 605 return RISCV_EXCP_VIRT_INSTRUCTION_FAULT; 606 } 607 } 608 609 return RISCV_EXCP_NONE; 610 } 611 612 static RISCVException sstc_32(CPURISCVState *env, int csrno) 613 { 614 if (riscv_cpu_mxl(env) != MXL_RV32) { 615 return RISCV_EXCP_ILLEGAL_INST; 616 } 617 618 return sstc(env, csrno); 619 } 620 621 static RISCVException satp(CPURISCVState *env, int csrno) 622 { 623 if (env->priv == PRV_S && !env->virt_enabled && 624 get_field(env->mstatus, MSTATUS_TVM)) { 625 return RISCV_EXCP_ILLEGAL_INST; 626 } 627 if (env->priv == PRV_S && env->virt_enabled && 628 get_field(env->hstatus, HSTATUS_VTVM)) { 629 return RISCV_EXCP_VIRT_INSTRUCTION_FAULT; 630 } 631 632 return smode(env, csrno); 633 } 634 635 static RISCVException hgatp(CPURISCVState *env, int csrno) 636 { 637 if (env->priv == PRV_S && !env->virt_enabled && 638 get_field(env->mstatus, MSTATUS_TVM)) { 639 return RISCV_EXCP_ILLEGAL_INST; 640 } 641 642 return hmode(env, csrno); 643 } 644 645 /* 646 * M-mode: 647 * Without ext_smctr raise illegal inst excep. 648 * Otherwise everything is accessible to m-mode. 649 * 650 * S-mode: 651 * Without ext_ssctr or mstateen.ctr raise illegal inst excep. 652 * Otherwise everything other than mctrctl is accessible. 653 * 654 * VS-mode: 655 * Without ext_ssctr or mstateen.ctr raise illegal inst excep. 656 * Without hstateen.ctr raise virtual illegal inst excep. 657 * Otherwise allow sctrctl (vsctrctl), sctrstatus, 0x200-0x2ff entry range. 658 * Always raise illegal instruction exception for sctrdepth. 
659 */ 660 static RISCVException ctr_mmode(CPURISCVState *env, int csrno) 661 { 662 /* Check if smctr-ext is present */ 663 if (riscv_cpu_cfg(env)->ext_smctr) { 664 return RISCV_EXCP_NONE; 665 } 666 667 return RISCV_EXCP_ILLEGAL_INST; 668 } 669 670 static RISCVException ctr_smode(CPURISCVState *env, int csrno) 671 { 672 const RISCVCPUConfig *cfg = riscv_cpu_cfg(env); 673 674 if (!cfg->ext_smctr && !cfg->ext_ssctr) { 675 return RISCV_EXCP_ILLEGAL_INST; 676 } 677 678 RISCVException ret = smstateen_acc_ok(env, 0, SMSTATEEN0_CTR); 679 if (ret == RISCV_EXCP_NONE && csrno == CSR_SCTRDEPTH && 680 env->virt_enabled) { 681 return RISCV_EXCP_VIRT_INSTRUCTION_FAULT; 682 } 683 684 return ret; 685 } 686 687 static RISCVException aia_hmode(CPURISCVState *env, int csrno) 688 { 689 int ret; 690 691 if (!riscv_cpu_cfg(env)->ext_ssaia) { 692 return RISCV_EXCP_ILLEGAL_INST; 693 } 694 695 if (csrno == CSR_VSTOPEI) { 696 ret = smstateen_acc_ok(env, 0, SMSTATEEN0_IMSIC); 697 } else { 698 ret = smstateen_acc_ok(env, 0, SMSTATEEN0_AIA); 699 } 700 701 if (ret != RISCV_EXCP_NONE) { 702 return ret; 703 } 704 705 return hmode(env, csrno); 706 } 707 708 static RISCVException aia_hmode32(CPURISCVState *env, int csrno) 709 { 710 int ret; 711 712 if (!riscv_cpu_cfg(env)->ext_ssaia) { 713 return RISCV_EXCP_ILLEGAL_INST; 714 } 715 716 ret = smstateen_acc_ok(env, 0, SMSTATEEN0_AIA); 717 if (ret != RISCV_EXCP_NONE) { 718 return ret; 719 } 720 721 if (!riscv_cpu_cfg(env)->ext_ssaia) { 722 return RISCV_EXCP_ILLEGAL_INST; 723 } 724 725 return hmode32(env, csrno); 726 } 727 728 static RISCVException dbltrp_hmode(CPURISCVState *env, int csrno) 729 { 730 if (riscv_cpu_cfg(env)->ext_ssdbltrp) { 731 return RISCV_EXCP_NONE; 732 } 733 734 return hmode(env, csrno); 735 } 736 737 static RISCVException pmp(CPURISCVState *env, int csrno) 738 { 739 if (riscv_cpu_cfg(env)->pmp) { 740 if (csrno <= CSR_PMPCFG3) { 741 uint32_t reg_index = csrno - CSR_PMPCFG0; 742 743 /* TODO: RV128 restriction check */ 744 if 
((reg_index & 1) && (riscv_cpu_mxl(env) == MXL_RV64)) { 745 return RISCV_EXCP_ILLEGAL_INST; 746 } 747 } 748 749 return RISCV_EXCP_NONE; 750 } 751 752 return RISCV_EXCP_ILLEGAL_INST; 753 } 754 755 static RISCVException have_mseccfg(CPURISCVState *env, int csrno) 756 { 757 if (riscv_cpu_cfg(env)->ext_smepmp) { 758 return RISCV_EXCP_NONE; 759 } 760 if (riscv_cpu_cfg(env)->ext_zkr) { 761 return RISCV_EXCP_NONE; 762 } 763 if (riscv_cpu_cfg(env)->ext_smmpm) { 764 return RISCV_EXCP_NONE; 765 } 766 767 return RISCV_EXCP_ILLEGAL_INST; 768 } 769 770 static RISCVException debug(CPURISCVState *env, int csrno) 771 { 772 if (riscv_cpu_cfg(env)->debug) { 773 return RISCV_EXCP_NONE; 774 } 775 776 return RISCV_EXCP_ILLEGAL_INST; 777 } 778 779 static RISCVException rnmi(CPURISCVState *env, int csrno) 780 { 781 RISCVCPU *cpu = env_archcpu(env); 782 783 if (cpu->cfg.ext_smrnmi) { 784 return RISCV_EXCP_NONE; 785 } 786 787 return RISCV_EXCP_ILLEGAL_INST; 788 } 789 #endif 790 791 static RISCVException seed(CPURISCVState *env, int csrno) 792 { 793 if (!riscv_cpu_cfg(env)->ext_zkr) { 794 return RISCV_EXCP_ILLEGAL_INST; 795 } 796 797 #if !defined(CONFIG_USER_ONLY) 798 if (env->debugger) { 799 return RISCV_EXCP_NONE; 800 } 801 802 /* 803 * With a CSR read-write instruction: 804 * 1) The seed CSR is always available in machine mode as normal. 805 * 2) Attempted access to seed from virtual modes VS and VU always raises 806 * an exception(virtual instruction exception only if mseccfg.sseed=1). 807 * 3) Without the corresponding access control bit set to 1, any attempted 808 * access to seed from U, S or HS modes will raise an illegal instruction 809 * exception. 
810 */ 811 if (env->priv == PRV_M) { 812 return RISCV_EXCP_NONE; 813 } else if (env->virt_enabled) { 814 if (env->mseccfg & MSECCFG_SSEED) { 815 return RISCV_EXCP_VIRT_INSTRUCTION_FAULT; 816 } else { 817 return RISCV_EXCP_ILLEGAL_INST; 818 } 819 } else { 820 if (env->priv == PRV_S && (env->mseccfg & MSECCFG_SSEED)) { 821 return RISCV_EXCP_NONE; 822 } else if (env->priv == PRV_U && (env->mseccfg & MSECCFG_USEED)) { 823 return RISCV_EXCP_NONE; 824 } else { 825 return RISCV_EXCP_ILLEGAL_INST; 826 } 827 } 828 #else 829 return RISCV_EXCP_NONE; 830 #endif 831 } 832 833 /* zicfiss CSR_SSP read and write */ 834 static int read_ssp(CPURISCVState *env, int csrno, target_ulong *val) 835 { 836 *val = env->ssp; 837 return RISCV_EXCP_NONE; 838 } 839 840 static int write_ssp(CPURISCVState *env, int csrno, target_ulong val) 841 { 842 env->ssp = val; 843 return RISCV_EXCP_NONE; 844 } 845 846 /* User Floating-Point CSRs */ 847 static RISCVException read_fflags(CPURISCVState *env, int csrno, 848 target_ulong *val) 849 { 850 *val = riscv_cpu_get_fflags(env); 851 return RISCV_EXCP_NONE; 852 } 853 854 static RISCVException write_fflags(CPURISCVState *env, int csrno, 855 target_ulong val) 856 { 857 #if !defined(CONFIG_USER_ONLY) 858 if (riscv_has_ext(env, RVF)) { 859 env->mstatus |= MSTATUS_FS; 860 } 861 #endif 862 riscv_cpu_set_fflags(env, val & (FSR_AEXC >> FSR_AEXC_SHIFT)); 863 return RISCV_EXCP_NONE; 864 } 865 866 static RISCVException read_frm(CPURISCVState *env, int csrno, 867 target_ulong *val) 868 { 869 *val = env->frm; 870 return RISCV_EXCP_NONE; 871 } 872 873 static RISCVException write_frm(CPURISCVState *env, int csrno, 874 target_ulong val) 875 { 876 #if !defined(CONFIG_USER_ONLY) 877 if (riscv_has_ext(env, RVF)) { 878 env->mstatus |= MSTATUS_FS; 879 } 880 #endif 881 env->frm = val & (FSR_RD >> FSR_RD_SHIFT); 882 return RISCV_EXCP_NONE; 883 } 884 885 static RISCVException read_fcsr(CPURISCVState *env, int csrno, 886 target_ulong *val) 887 { 888 *val = 
(riscv_cpu_get_fflags(env) << FSR_AEXC_SHIFT) 889 | (env->frm << FSR_RD_SHIFT); 890 return RISCV_EXCP_NONE; 891 } 892 893 static RISCVException write_fcsr(CPURISCVState *env, int csrno, 894 target_ulong val) 895 { 896 #if !defined(CONFIG_USER_ONLY) 897 if (riscv_has_ext(env, RVF)) { 898 env->mstatus |= MSTATUS_FS; 899 } 900 #endif 901 env->frm = (val & FSR_RD) >> FSR_RD_SHIFT; 902 riscv_cpu_set_fflags(env, (val & FSR_AEXC) >> FSR_AEXC_SHIFT); 903 return RISCV_EXCP_NONE; 904 } 905 906 static RISCVException read_vtype(CPURISCVState *env, int csrno, 907 target_ulong *val) 908 { 909 uint64_t vill; 910 switch (env->xl) { 911 case MXL_RV32: 912 vill = (uint32_t)env->vill << 31; 913 break; 914 case MXL_RV64: 915 vill = (uint64_t)env->vill << 63; 916 break; 917 default: 918 g_assert_not_reached(); 919 } 920 *val = (target_ulong)vill | env->vtype; 921 return RISCV_EXCP_NONE; 922 } 923 924 static RISCVException read_vl(CPURISCVState *env, int csrno, 925 target_ulong *val) 926 { 927 *val = env->vl; 928 return RISCV_EXCP_NONE; 929 } 930 931 static RISCVException read_vlenb(CPURISCVState *env, int csrno, 932 target_ulong *val) 933 { 934 *val = riscv_cpu_cfg(env)->vlenb; 935 return RISCV_EXCP_NONE; 936 } 937 938 static RISCVException read_vxrm(CPURISCVState *env, int csrno, 939 target_ulong *val) 940 { 941 *val = env->vxrm; 942 return RISCV_EXCP_NONE; 943 } 944 945 static RISCVException write_vxrm(CPURISCVState *env, int csrno, 946 target_ulong val) 947 { 948 #if !defined(CONFIG_USER_ONLY) 949 env->mstatus |= MSTATUS_VS; 950 #endif 951 env->vxrm = val; 952 return RISCV_EXCP_NONE; 953 } 954 955 static RISCVException read_vxsat(CPURISCVState *env, int csrno, 956 target_ulong *val) 957 { 958 *val = env->vxsat & BIT(0); 959 return RISCV_EXCP_NONE; 960 } 961 962 static RISCVException write_vxsat(CPURISCVState *env, int csrno, 963 target_ulong val) 964 { 965 #if !defined(CONFIG_USER_ONLY) 966 env->mstatus |= MSTATUS_VS; 967 #endif 968 env->vxsat = val & BIT(0); 969 return 
RISCV_EXCP_NONE; 970 } 971 972 static RISCVException read_vstart(CPURISCVState *env, int csrno, 973 target_ulong *val) 974 { 975 *val = env->vstart; 976 return RISCV_EXCP_NONE; 977 } 978 979 static RISCVException write_vstart(CPURISCVState *env, int csrno, 980 target_ulong val) 981 { 982 #if !defined(CONFIG_USER_ONLY) 983 env->mstatus |= MSTATUS_VS; 984 #endif 985 /* 986 * The vstart CSR is defined to have only enough writable bits 987 * to hold the largest element index, i.e. lg2(VLEN) bits. 988 */ 989 env->vstart = val & ~(~0ULL << ctzl(riscv_cpu_cfg(env)->vlenb << 3)); 990 return RISCV_EXCP_NONE; 991 } 992 993 static RISCVException read_vcsr(CPURISCVState *env, int csrno, 994 target_ulong *val) 995 { 996 *val = (env->vxrm << VCSR_VXRM_SHIFT) | (env->vxsat << VCSR_VXSAT_SHIFT); 997 return RISCV_EXCP_NONE; 998 } 999 1000 static RISCVException write_vcsr(CPURISCVState *env, int csrno, 1001 target_ulong val) 1002 { 1003 #if !defined(CONFIG_USER_ONLY) 1004 env->mstatus |= MSTATUS_VS; 1005 #endif 1006 env->vxrm = (val & VCSR_VXRM) >> VCSR_VXRM_SHIFT; 1007 env->vxsat = (val & VCSR_VXSAT) >> VCSR_VXSAT_SHIFT; 1008 return RISCV_EXCP_NONE; 1009 } 1010 1011 #if defined(CONFIG_USER_ONLY) 1012 /* User Timers and Counters */ 1013 static target_ulong get_ticks(bool shift) 1014 { 1015 int64_t val = cpu_get_host_ticks(); 1016 target_ulong result = shift ? 
val >> 32 : val; 1017 1018 return result; 1019 } 1020 1021 static RISCVException read_time(CPURISCVState *env, int csrno, 1022 target_ulong *val) 1023 { 1024 *val = cpu_get_host_ticks(); 1025 return RISCV_EXCP_NONE; 1026 } 1027 1028 static RISCVException read_timeh(CPURISCVState *env, int csrno, 1029 target_ulong *val) 1030 { 1031 *val = cpu_get_host_ticks() >> 32; 1032 return RISCV_EXCP_NONE; 1033 } 1034 1035 static RISCVException read_hpmcounter(CPURISCVState *env, int csrno, 1036 target_ulong *val) 1037 { 1038 *val = get_ticks(false); 1039 return RISCV_EXCP_NONE; 1040 } 1041 1042 static RISCVException read_hpmcounterh(CPURISCVState *env, int csrno, 1043 target_ulong *val) 1044 { 1045 *val = get_ticks(true); 1046 return RISCV_EXCP_NONE; 1047 } 1048 1049 #else /* CONFIG_USER_ONLY */ 1050 1051 static RISCVException read_mcyclecfg(CPURISCVState *env, int csrno, 1052 target_ulong *val) 1053 { 1054 *val = env->mcyclecfg; 1055 return RISCV_EXCP_NONE; 1056 } 1057 1058 static RISCVException write_mcyclecfg(CPURISCVState *env, int csrno, 1059 target_ulong val) 1060 { 1061 uint64_t inh_avail_mask; 1062 1063 if (riscv_cpu_mxl(env) == MXL_RV32) { 1064 env->mcyclecfg = val; 1065 } else { 1066 /* Set xINH fields if priv mode supported */ 1067 inh_avail_mask = ~MHPMEVENT_FILTER_MASK | MCYCLECFG_BIT_MINH; 1068 inh_avail_mask |= riscv_has_ext(env, RVU) ? MCYCLECFG_BIT_UINH : 0; 1069 inh_avail_mask |= riscv_has_ext(env, RVS) ? MCYCLECFG_BIT_SINH : 0; 1070 inh_avail_mask |= (riscv_has_ext(env, RVH) && 1071 riscv_has_ext(env, RVU)) ? MCYCLECFG_BIT_VUINH : 0; 1072 inh_avail_mask |= (riscv_has_ext(env, RVH) && 1073 riscv_has_ext(env, RVS)) ? 
MCYCLECFG_BIT_VSINH : 0; 1074 env->mcyclecfg = val & inh_avail_mask; 1075 } 1076 1077 return RISCV_EXCP_NONE; 1078 } 1079 1080 static RISCVException read_mcyclecfgh(CPURISCVState *env, int csrno, 1081 target_ulong *val) 1082 { 1083 *val = env->mcyclecfgh; 1084 return RISCV_EXCP_NONE; 1085 } 1086 1087 static RISCVException write_mcyclecfgh(CPURISCVState *env, int csrno, 1088 target_ulong val) 1089 { 1090 target_ulong inh_avail_mask = (target_ulong)(~MHPMEVENTH_FILTER_MASK | 1091 MCYCLECFGH_BIT_MINH); 1092 1093 /* Set xINH fields if priv mode supported */ 1094 inh_avail_mask |= riscv_has_ext(env, RVU) ? MCYCLECFGH_BIT_UINH : 0; 1095 inh_avail_mask |= riscv_has_ext(env, RVS) ? MCYCLECFGH_BIT_SINH : 0; 1096 inh_avail_mask |= (riscv_has_ext(env, RVH) && 1097 riscv_has_ext(env, RVU)) ? MCYCLECFGH_BIT_VUINH : 0; 1098 inh_avail_mask |= (riscv_has_ext(env, RVH) && 1099 riscv_has_ext(env, RVS)) ? MCYCLECFGH_BIT_VSINH : 0; 1100 1101 env->mcyclecfgh = val & inh_avail_mask; 1102 return RISCV_EXCP_NONE; 1103 } 1104 1105 static RISCVException read_minstretcfg(CPURISCVState *env, int csrno, 1106 target_ulong *val) 1107 { 1108 *val = env->minstretcfg; 1109 return RISCV_EXCP_NONE; 1110 } 1111 1112 static RISCVException write_minstretcfg(CPURISCVState *env, int csrno, 1113 target_ulong val) 1114 { 1115 uint64_t inh_avail_mask; 1116 1117 if (riscv_cpu_mxl(env) == MXL_RV32) { 1118 env->minstretcfg = val; 1119 } else { 1120 inh_avail_mask = ~MHPMEVENT_FILTER_MASK | MINSTRETCFG_BIT_MINH; 1121 inh_avail_mask |= riscv_has_ext(env, RVU) ? MINSTRETCFG_BIT_UINH : 0; 1122 inh_avail_mask |= riscv_has_ext(env, RVS) ? MINSTRETCFG_BIT_SINH : 0; 1123 inh_avail_mask |= (riscv_has_ext(env, RVH) && 1124 riscv_has_ext(env, RVU)) ? MINSTRETCFG_BIT_VUINH : 0; 1125 inh_avail_mask |= (riscv_has_ext(env, RVH) && 1126 riscv_has_ext(env, RVS)) ? 
MINSTRETCFG_BIT_VSINH : 0; 1127 env->minstretcfg = val & inh_avail_mask; 1128 } 1129 return RISCV_EXCP_NONE; 1130 } 1131 1132 static RISCVException read_minstretcfgh(CPURISCVState *env, int csrno, 1133 target_ulong *val) 1134 { 1135 *val = env->minstretcfgh; 1136 return RISCV_EXCP_NONE; 1137 } 1138 1139 static RISCVException write_minstretcfgh(CPURISCVState *env, int csrno, 1140 target_ulong val) 1141 { 1142 target_ulong inh_avail_mask = (target_ulong)(~MHPMEVENTH_FILTER_MASK | 1143 MINSTRETCFGH_BIT_MINH); 1144 1145 inh_avail_mask |= riscv_has_ext(env, RVU) ? MINSTRETCFGH_BIT_UINH : 0; 1146 inh_avail_mask |= riscv_has_ext(env, RVS) ? MINSTRETCFGH_BIT_SINH : 0; 1147 inh_avail_mask |= (riscv_has_ext(env, RVH) && 1148 riscv_has_ext(env, RVU)) ? MINSTRETCFGH_BIT_VUINH : 0; 1149 inh_avail_mask |= (riscv_has_ext(env, RVH) && 1150 riscv_has_ext(env, RVS)) ? MINSTRETCFGH_BIT_VSINH : 0; 1151 1152 env->minstretcfgh = val & inh_avail_mask; 1153 return RISCV_EXCP_NONE; 1154 } 1155 1156 static RISCVException read_mhpmevent(CPURISCVState *env, int csrno, 1157 target_ulong *val) 1158 { 1159 int evt_index = csrno - CSR_MCOUNTINHIBIT; 1160 1161 *val = env->mhpmevent_val[evt_index]; 1162 1163 return RISCV_EXCP_NONE; 1164 } 1165 1166 static RISCVException write_mhpmevent(CPURISCVState *env, int csrno, 1167 target_ulong val) 1168 { 1169 int evt_index = csrno - CSR_MCOUNTINHIBIT; 1170 uint64_t mhpmevt_val = val; 1171 uint64_t inh_avail_mask; 1172 1173 if (riscv_cpu_mxl(env) == MXL_RV32) { 1174 env->mhpmevent_val[evt_index] = val; 1175 mhpmevt_val = mhpmevt_val | 1176 ((uint64_t)env->mhpmeventh_val[evt_index] << 32); 1177 } else { 1178 inh_avail_mask = ~MHPMEVENT_FILTER_MASK | MHPMEVENT_BIT_MINH; 1179 inh_avail_mask |= riscv_has_ext(env, RVU) ? MHPMEVENT_BIT_UINH : 0; 1180 inh_avail_mask |= riscv_has_ext(env, RVS) ? MHPMEVENT_BIT_SINH : 0; 1181 inh_avail_mask |= (riscv_has_ext(env, RVH) && 1182 riscv_has_ext(env, RVU)) ? 
/* Tail of write_mhpmevent() — the function opens before this chunk. */
                                                      MHPMEVENT_BIT_VUINH : 0;
        inh_avail_mask |= (riscv_has_ext(env, RVH) &&
                           riscv_has_ext(env, RVS)) ? MHPMEVENT_BIT_VSINH : 0;
        mhpmevt_val = val & inh_avail_mask;
        env->mhpmevent_val[evt_index] = mhpmevt_val;
    }

    riscv_pmu_update_event_map(env, mhpmevt_val, evt_index);

    return RISCV_EXCP_NONE;
}

/* Read the high half of an mhpmevent CSR (mhpmevent3h..31h, RV32). */
static RISCVException read_mhpmeventh(CPURISCVState *env, int csrno,
                                      target_ulong *val)
{
    int evt_index = csrno - CSR_MHPMEVENT3H + 3;

    *val = env->mhpmeventh_val[evt_index];

    return RISCV_EXCP_NONE;
}

/*
 * Write the high half of an mhpmevent CSR. Inhibit bits for privilege
 * modes the hart does not implement are masked off before the combined
 * 64-bit event value is handed to the PMU event map.
 */
static RISCVException write_mhpmeventh(CPURISCVState *env, int csrno,
                                       target_ulong val)
{
    int evt_index = csrno - CSR_MHPMEVENT3H + 3;
    uint64_t mhpmevth_val;
    uint64_t mhpmevt_val = env->mhpmevent_val[evt_index];
    /* Filter bits are writable; MINH is always kept writable here. */
    target_ulong inh_avail_mask = (target_ulong)(~MHPMEVENTH_FILTER_MASK |
                                                  MHPMEVENTH_BIT_MINH);

    inh_avail_mask |= riscv_has_ext(env, RVU) ? MHPMEVENTH_BIT_UINH : 0;
    inh_avail_mask |= riscv_has_ext(env, RVS) ? MHPMEVENTH_BIT_SINH : 0;
    inh_avail_mask |= (riscv_has_ext(env, RVH) &&
                       riscv_has_ext(env, RVU)) ? MHPMEVENTH_BIT_VUINH : 0;
    inh_avail_mask |= (riscv_has_ext(env, RVH) &&
                       riscv_has_ext(env, RVS)) ? MHPMEVENTH_BIT_VSINH : 0;

    mhpmevth_val = val & inh_avail_mask;
    /* Combine with the (stale low half of the) stored event value. */
    mhpmevt_val = mhpmevt_val | (mhpmevth_val << 32);
    env->mhpmeventh_val[evt_index] = mhpmevth_val;

    riscv_pmu_update_event_map(env, mhpmevt_val, evt_index);

    return RISCV_EXCP_NONE;
}

/*
 * Current value of a fixed counter (cycle at idx 0, instret at idx 2, or an
 * hpm counter otherwise), honouring the per-privilege inhibit bits of the
 * corresponding cfg/event register. With no filtering configured the raw
 * tick/icount source is returned directly.
 */
static target_ulong riscv_pmu_ctr_get_fixed_counters_val(CPURISCVState *env,
                                                         int counter_idx,
                                                         bool upper_half)
{
    int inst = riscv_pmu_ctr_monitor_instructions(env, counter_idx);
    uint64_t *counter_arr_virt = env->pmu_fixed_ctrs[inst].counter_virt;
    uint64_t *counter_arr = env->pmu_fixed_ctrs[inst].counter;
    target_ulong result = 0;
    uint64_t curr_val = 0;
    uint64_t cfg_val = 0;

    if (counter_idx == 0) {
        cfg_val = upper_half ? ((uint64_t)env->mcyclecfgh << 32) :
                  env->mcyclecfg;
    } else if (counter_idx == 2) {
        cfg_val = upper_half ? ((uint64_t)env->minstretcfgh << 32) :
                  env->minstretcfg;
    } else {
        cfg_val = upper_half ?
                  ((uint64_t)env->mhpmeventh_val[counter_idx] << 32) :
                  env->mhpmevent_val[counter_idx];
        cfg_val &= MHPMEVENT_FILTER_MASK;
    }

    if (!cfg_val) {
        /* No filtering: use the global tick source directly. */
        if (icount_enabled()) {
            curr_val = inst ? icount_get_raw() : icount_get();
        } else {
            curr_val = cpu_get_host_ticks();
        }

        goto done;
    }

    /* Update counter before reading. */
    riscv_pmu_update_fixed_ctrs(env, env->priv, env->virt_enabled);

    /* Sum only the per-mode buckets whose inhibit bit is clear. */
    if (!(cfg_val & MCYCLECFG_BIT_MINH)) {
        curr_val += counter_arr[PRV_M];
    }

    if (!(cfg_val & MCYCLECFG_BIT_SINH)) {
        curr_val += counter_arr[PRV_S];
    }

    if (!(cfg_val & MCYCLECFG_BIT_UINH)) {
        curr_val += counter_arr[PRV_U];
    }

    if (!(cfg_val & MCYCLECFG_BIT_VSINH)) {
        curr_val += counter_arr_virt[PRV_S];
    }

    if (!(cfg_val & MCYCLECFG_BIT_VUINH)) {
        curr_val += counter_arr_virt[PRV_U];
    }

done:
    if (riscv_cpu_mxl(env) == MXL_RV32) {
        result = upper_half ? curr_val >> 32 : curr_val;
    } else {
        result = curr_val;
    }

    return result;
}

/*
 * Write the low half of a programmable counter. If the counter is active
 * (not inhibited and monitoring cycles/instructions) the baseline snapshot
 * is refreshed and, for hpm counters (idx > 2), the overflow timer is
 * re-armed with the full 64-bit value.
 */
static RISCVException riscv_pmu_write_ctr(CPURISCVState *env, target_ulong val,
                                          uint32_t ctr_idx)
{
    PMUCTRState *counter = &env->pmu_ctrs[ctr_idx];
    uint64_t mhpmctr_val = val;

    counter->mhpmcounter_val = val;
    if (!get_field(env->mcountinhibit, BIT(ctr_idx)) &&
        (riscv_pmu_ctr_monitor_cycles(env, ctr_idx) ||
         riscv_pmu_ctr_monitor_instructions(env, ctr_idx))) {
        counter->mhpmcounter_prev = riscv_pmu_ctr_get_fixed_counters_val(env,
                                                               ctr_idx, false);
        if (ctr_idx > 2) {
            if (riscv_cpu_mxl(env) == MXL_RV32) {
                mhpmctr_val = mhpmctr_val |
                              ((uint64_t)counter->mhpmcounterh_val << 32);
            }
            riscv_pmu_setup_timer(env, mhpmctr_val, ctr_idx);
        }
    } else {
        /* Other counters can keep incrementing from the given value */
        counter->mhpmcounter_prev = val;
    }

    return RISCV_EXCP_NONE;
}

/* High-half counterpart of riscv_pmu_write_ctr() (continues next chunk). */
static RISCVException riscv_pmu_write_ctrh(CPURISCVState *env, target_ulong val,
                                           uint32_t ctr_idx)
{
    PMUCTRState *counter = &env->pmu_ctrs[ctr_idx];
    uint64_t mhpmctr_val = counter->mhpmcounter_val;
    uint64_t mhpmctrh_val = val;

    counter->mhpmcounterh_val = val;
    mhpmctr_val = mhpmctr_val | (mhpmctrh_val << 32);
    /* Tail of riscv_pmu_write_ctrh() — opened in the previous chunk. */
    if (!get_field(env->mcountinhibit, BIT(ctr_idx)) &&
        (riscv_pmu_ctr_monitor_cycles(env, ctr_idx) ||
         riscv_pmu_ctr_monitor_instructions(env, ctr_idx))) {
        counter->mhpmcounterh_prev = riscv_pmu_ctr_get_fixed_counters_val(env,
                                                                ctr_idx, true);
        if (ctr_idx > 2) {
            riscv_pmu_setup_timer(env, mhpmctr_val, ctr_idx);
        }
    } else {
        counter->mhpmcounterh_prev = val;
    }

    return RISCV_EXCP_NONE;
}

/* CSR write hook for mcycle/minstret/mhpmcounter3..31 (low half). */
static int write_mhpmcounter(CPURISCVState *env, int csrno, target_ulong val)
{
    int ctr_idx = csrno - CSR_MCYCLE;

    return riscv_pmu_write_ctr(env, val, ctr_idx);
}

/* CSR write hook for mcycleh/minstreth/mhpmcounter3h..31h (high half). */
static int write_mhpmcounterh(CPURISCVState *env, int csrno, target_ulong val)
{
    int ctr_idx = csrno - CSR_MCYCLEH;

    return riscv_pmu_write_ctrh(env, val, ctr_idx);
}

/*
 * Read one half of a programmable counter. Inhibited counters return the
 * stored value unchanged; active cycle/instret-monitoring counters return
 * stored value plus the delta accumulated since the last baseline snapshot.
 */
RISCVException riscv_pmu_read_ctr(CPURISCVState *env, target_ulong *val,
                                  bool upper_half, uint32_t ctr_idx)
{
    PMUCTRState *counter = &env->pmu_ctrs[ctr_idx];
    target_ulong ctr_prev = upper_half ? counter->mhpmcounterh_prev :
                                         counter->mhpmcounter_prev;
    target_ulong ctr_val = upper_half ? counter->mhpmcounterh_val :
                                        counter->mhpmcounter_val;

    if (get_field(env->mcountinhibit, BIT(ctr_idx))) {
        /*
         * Counter should not increment if inhibit bit is set. Just return the
         * current counter value.
         */
        *val = ctr_val;
        return RISCV_EXCP_NONE;
    }

    /*
     * The kernel computes the perf delta by subtracting the current value from
     * the value it initialized previously (ctr_val).
     */
    if (riscv_pmu_ctr_monitor_cycles(env, ctr_idx) ||
        riscv_pmu_ctr_monitor_instructions(env, ctr_idx)) {
        *val = riscv_pmu_ctr_get_fixed_counters_val(env, ctr_idx, upper_half) -
               ctr_prev + ctr_val;
    } else {
        *val = ctr_val;
    }

    return RISCV_EXCP_NONE;
}

/* Low-half read hook shared by the M-mode and unprivileged counter CSRs. */
static RISCVException read_hpmcounter(CPURISCVState *env, int csrno,
                                      target_ulong *val)
{
    uint16_t ctr_index;

    if (csrno >= CSR_MCYCLE && csrno <= CSR_MHPMCOUNTER31) {
        ctr_index = csrno - CSR_MCYCLE;
    } else if (csrno >= CSR_CYCLE && csrno <= CSR_HPMCOUNTER31) {
        ctr_index = csrno - CSR_CYCLE;
    } else {
        return RISCV_EXCP_ILLEGAL_INST;
    }

    return riscv_pmu_read_ctr(env, val, false, ctr_index);
}

/* High-half read hook shared by the M-mode and unprivileged counter CSRs. */
static RISCVException read_hpmcounterh(CPURISCVState *env, int csrno,
                                       target_ulong *val)
{
    uint16_t ctr_index;

    if (csrno >= CSR_MCYCLEH && csrno <= CSR_MHPMCOUNTER31H) {
        ctr_index = csrno - CSR_MCYCLEH;
    } else if (csrno >= CSR_CYCLEH && csrno <= CSR_HPMCOUNTER31H) {
        ctr_index = csrno - CSR_CYCLEH;
    } else {
        return RISCV_EXCP_ILLEGAL_INST;
    }

    return riscv_pmu_read_ctr(env, val, true, ctr_index);
}

/*
 * Counter-delegation rmw for a counter's low half. Only whole-register
 * reads (wr_mask == 0) or writes (wr_mask == -1) are supported.
 */
static int rmw_cd_mhpmcounter(CPURISCVState *env, int ctr_idx,
                              target_ulong *val, target_ulong new_val,
                              target_ulong wr_mask)
{
    if (wr_mask != 0 && wr_mask != -1) {
        return -EINVAL;
    }

    if (!wr_mask && val) {
        riscv_pmu_read_ctr(env, val, false, ctr_idx);
    } else if (wr_mask) {
        riscv_pmu_write_ctr(env, new_val, ctr_idx);
    } else {
        return -EINVAL;
    }

    return 0;
}

/* High-half counterpart of rmw_cd_mhpmcounter() (continues next chunk). */
static int rmw_cd_mhpmcounterh(CPURISCVState *env, int ctr_idx,
                               target_ulong *val, target_ulong new_val,
                               target_ulong wr_mask)
{
    if (wr_mask != 0 && wr_mask != -1) {
        return -EINVAL;
    }

    if (!wr_mask && val) {
        riscv_pmu_read_ctr(env, val,
true, ctr_idx); 1455 } else if (wr_mask) { 1456 riscv_pmu_write_ctrh(env, new_val, ctr_idx); 1457 } else { 1458 return -EINVAL; 1459 } 1460 1461 return 0; 1462 } 1463 1464 static int rmw_cd_mhpmevent(CPURISCVState *env, int evt_index, 1465 target_ulong *val, target_ulong new_val, 1466 target_ulong wr_mask) 1467 { 1468 uint64_t mhpmevt_val = new_val; 1469 1470 if (wr_mask != 0 && wr_mask != -1) { 1471 return -EINVAL; 1472 } 1473 1474 if (!wr_mask && val) { 1475 *val = env->mhpmevent_val[evt_index]; 1476 if (riscv_cpu_cfg(env)->ext_sscofpmf) { 1477 *val &= ~MHPMEVENT_BIT_MINH; 1478 } 1479 } else if (wr_mask) { 1480 wr_mask &= ~MHPMEVENT_BIT_MINH; 1481 mhpmevt_val = (new_val & wr_mask) | 1482 (env->mhpmevent_val[evt_index] & ~wr_mask); 1483 if (riscv_cpu_mxl(env) == MXL_RV32) { 1484 mhpmevt_val = mhpmevt_val | 1485 ((uint64_t)env->mhpmeventh_val[evt_index] << 32); 1486 } 1487 env->mhpmevent_val[evt_index] = mhpmevt_val; 1488 riscv_pmu_update_event_map(env, mhpmevt_val, evt_index); 1489 } else { 1490 return -EINVAL; 1491 } 1492 1493 return 0; 1494 } 1495 1496 static int rmw_cd_mhpmeventh(CPURISCVState *env, int evt_index, 1497 target_ulong *val, target_ulong new_val, 1498 target_ulong wr_mask) 1499 { 1500 uint64_t mhpmevth_val; 1501 uint64_t mhpmevt_val = env->mhpmevent_val[evt_index]; 1502 1503 if (wr_mask != 0 && wr_mask != -1) { 1504 return -EINVAL; 1505 } 1506 1507 if (!wr_mask && val) { 1508 *val = env->mhpmeventh_val[evt_index]; 1509 if (riscv_cpu_cfg(env)->ext_sscofpmf) { 1510 *val &= ~MHPMEVENTH_BIT_MINH; 1511 } 1512 } else if (wr_mask) { 1513 wr_mask &= ~MHPMEVENTH_BIT_MINH; 1514 env->mhpmeventh_val[evt_index] = 1515 (new_val & wr_mask) | (env->mhpmeventh_val[evt_index] & ~wr_mask); 1516 mhpmevth_val = env->mhpmeventh_val[evt_index]; 1517 mhpmevt_val = mhpmevt_val | (mhpmevth_val << 32); 1518 riscv_pmu_update_event_map(env, mhpmevt_val, evt_index); 1519 } else { 1520 return -EINVAL; 1521 } 1522 1523 return 0; 1524 } 1525 1526 static int 
rmw_cd_ctr_cfg(CPURISCVState *env, int cfg_index, target_ulong *val, 1527 target_ulong new_val, target_ulong wr_mask) 1528 { 1529 switch (cfg_index) { 1530 case 0: /* CYCLECFG */ 1531 if (wr_mask) { 1532 wr_mask &= ~MCYCLECFG_BIT_MINH; 1533 env->mcyclecfg = (new_val & wr_mask) | (env->mcyclecfg & ~wr_mask); 1534 } else { 1535 *val = env->mcyclecfg &= ~MHPMEVENTH_BIT_MINH; 1536 } 1537 break; 1538 case 2: /* INSTRETCFG */ 1539 if (wr_mask) { 1540 wr_mask &= ~MINSTRETCFG_BIT_MINH; 1541 env->minstretcfg = (new_val & wr_mask) | 1542 (env->minstretcfg & ~wr_mask); 1543 } else { 1544 *val = env->minstretcfg &= ~MHPMEVENTH_BIT_MINH; 1545 } 1546 break; 1547 default: 1548 return -EINVAL; 1549 } 1550 return 0; 1551 } 1552 1553 static int rmw_cd_ctr_cfgh(CPURISCVState *env, int cfg_index, target_ulong *val, 1554 target_ulong new_val, target_ulong wr_mask) 1555 { 1556 1557 if (riscv_cpu_mxl(env) != MXL_RV32) { 1558 return RISCV_EXCP_ILLEGAL_INST; 1559 } 1560 1561 switch (cfg_index) { 1562 case 0: /* CYCLECFGH */ 1563 if (wr_mask) { 1564 wr_mask &= ~MCYCLECFGH_BIT_MINH; 1565 env->mcyclecfgh = (new_val & wr_mask) | 1566 (env->mcyclecfgh & ~wr_mask); 1567 } else { 1568 *val = env->mcyclecfgh; 1569 } 1570 break; 1571 case 2: /* INSTRETCFGH */ 1572 if (wr_mask) { 1573 wr_mask &= ~MINSTRETCFGH_BIT_MINH; 1574 env->minstretcfgh = (new_val & wr_mask) | 1575 (env->minstretcfgh & ~wr_mask); 1576 } else { 1577 *val = env->minstretcfgh; 1578 } 1579 break; 1580 default: 1581 return -EINVAL; 1582 } 1583 return 0; 1584 } 1585 1586 1587 static RISCVException read_scountovf(CPURISCVState *env, int csrno, 1588 target_ulong *val) 1589 { 1590 int mhpmevt_start = CSR_MHPMEVENT3 - CSR_MCOUNTINHIBIT; 1591 int i; 1592 *val = 0; 1593 target_ulong *mhpm_evt_val; 1594 uint64_t of_bit_mask; 1595 1596 /* Virtualize scountovf for counter delegation */ 1597 if (riscv_cpu_cfg(env)->ext_sscofpmf && 1598 riscv_cpu_cfg(env)->ext_ssccfg && 1599 get_field(env->menvcfg, MENVCFG_CDE) && 1600 env->virt_enabled) { 1601 
return RISCV_EXCP_VIRT_INSTRUCTION_FAULT; 1602 } 1603 1604 if (riscv_cpu_mxl(env) == MXL_RV32) { 1605 mhpm_evt_val = env->mhpmeventh_val; 1606 of_bit_mask = MHPMEVENTH_BIT_OF; 1607 } else { 1608 mhpm_evt_val = env->mhpmevent_val; 1609 of_bit_mask = MHPMEVENT_BIT_OF; 1610 } 1611 1612 for (i = mhpmevt_start; i < RV_MAX_MHPMEVENTS; i++) { 1613 if ((get_field(env->mcounteren, BIT(i))) && 1614 (mhpm_evt_val[i] & of_bit_mask)) { 1615 *val |= BIT(i); 1616 } 1617 } 1618 1619 return RISCV_EXCP_NONE; 1620 } 1621 1622 static RISCVException read_time(CPURISCVState *env, int csrno, 1623 target_ulong *val) 1624 { 1625 uint64_t delta = env->virt_enabled ? env->htimedelta : 0; 1626 1627 if (!env->rdtime_fn) { 1628 return RISCV_EXCP_ILLEGAL_INST; 1629 } 1630 1631 *val = env->rdtime_fn(env->rdtime_fn_arg) + delta; 1632 return RISCV_EXCP_NONE; 1633 } 1634 1635 static RISCVException read_timeh(CPURISCVState *env, int csrno, 1636 target_ulong *val) 1637 { 1638 uint64_t delta = env->virt_enabled ? env->htimedelta : 0; 1639 1640 if (!env->rdtime_fn) { 1641 return RISCV_EXCP_ILLEGAL_INST; 1642 } 1643 1644 *val = (env->rdtime_fn(env->rdtime_fn_arg) + delta) >> 32; 1645 return RISCV_EXCP_NONE; 1646 } 1647 1648 static RISCVException read_vstimecmp(CPURISCVState *env, int csrno, 1649 target_ulong *val) 1650 { 1651 *val = env->vstimecmp; 1652 1653 return RISCV_EXCP_NONE; 1654 } 1655 1656 static RISCVException read_vstimecmph(CPURISCVState *env, int csrno, 1657 target_ulong *val) 1658 { 1659 *val = env->vstimecmp >> 32; 1660 1661 return RISCV_EXCP_NONE; 1662 } 1663 1664 static RISCVException write_vstimecmp(CPURISCVState *env, int csrno, 1665 target_ulong val) 1666 { 1667 if (riscv_cpu_mxl(env) == MXL_RV32) { 1668 env->vstimecmp = deposit64(env->vstimecmp, 0, 32, (uint64_t)val); 1669 } else { 1670 env->vstimecmp = val; 1671 } 1672 1673 riscv_timer_write_timecmp(env, env->vstimer, env->vstimecmp, 1674 env->htimedelta, MIP_VSTIP); 1675 1676 return RISCV_EXCP_NONE; 1677 } 1678 1679 static 
/* High-half write of vstimecmp (RV32); re-arms the VS timer. */
RISCVException write_vstimecmph(CPURISCVState *env, int csrno,
                               target_ulong val)
{
    env->vstimecmp = deposit64(env->vstimecmp, 32, 32, (uint64_t)val);
    riscv_timer_write_timecmp(env, env->vstimer, env->vstimecmp,
                              env->htimedelta, MIP_VSTIP);

    return RISCV_EXCP_NONE;
}

/* stimecmp read: a guest transparently accesses vstimecmp instead. */
static RISCVException read_stimecmp(CPURISCVState *env, int csrno,
                                    target_ulong *val)
{
    if (env->virt_enabled) {
        *val = env->vstimecmp;
    } else {
        *val = env->stimecmp;
    }

    return RISCV_EXCP_NONE;
}

/* High-half variant of read_stimecmp (RV32). */
static RISCVException read_stimecmph(CPURISCVState *env, int csrno,
                                     target_ulong *val)
{
    if (env->virt_enabled) {
        *val = env->vstimecmp >> 32;
    } else {
        *val = env->stimecmp >> 32;
    }

    return RISCV_EXCP_NONE;
}

/*
 * Write stimecmp. A guest write is redirected to vstimecmp, unless
 * hvictl.VTI forbids it; a host write re-arms the S-level timer.
 */
static RISCVException write_stimecmp(CPURISCVState *env, int csrno,
                                     target_ulong val)
{
    if (env->virt_enabled) {
        if (env->hvictl & HVICTL_VTI) {
            return RISCV_EXCP_VIRT_INSTRUCTION_FAULT;
        }
        return write_vstimecmp(env, csrno, val);
    }

    if (riscv_cpu_mxl(env) == MXL_RV32) {
        env->stimecmp = deposit64(env->stimecmp, 0, 32, (uint64_t)val);
    } else {
        env->stimecmp = val;
    }

    riscv_timer_write_timecmp(env, env->stimer, env->stimecmp, 0, MIP_STIP);

    return RISCV_EXCP_NONE;
}

/* High-half variant of write_stimecmp (RV32). */
static RISCVException write_stimecmph(CPURISCVState *env, int csrno,
                                      target_ulong val)
{
    if (env->virt_enabled) {
        if (env->hvictl & HVICTL_VTI) {
            return RISCV_EXCP_VIRT_INSTRUCTION_FAULT;
        }
        return write_vstimecmph(env, csrno, val);
    }

    env->stimecmp = deposit64(env->stimecmp, 32, 32, (uint64_t)val);
    riscv_timer_write_timecmp(env, env->stimer, env->stimecmp, 0, MIP_STIP);

    return RISCV_EXCP_NONE;
}

#define VSTOPI_NUM_SRCS 5

/*
 * All core local interrupts except the fixed ones 0:12. This macro is for
 * virtual interrupts logic so please don't change this to avoid messing up
 * the whole support, For reference see AIA spec: `5.3 Interrupt filtering and
 * virtual interrupts for supervisor level` and `6.3.2 Virtual interrupts for
 * VS level`.
 */
#define LOCAL_INTERRUPTS (~0x1FFFULL)

static const uint64_t delegable_ints =
    S_MODE_INTERRUPTS | VS_MODE_INTERRUPTS | MIP_LCOFIP;
static const uint64_t vs_delegable_ints =
    (VS_MODE_INTERRUPTS | LOCAL_INTERRUPTS) & ~MIP_LCOFIP;
static const uint64_t all_ints = M_MODE_INTERRUPTS | S_MODE_INTERRUPTS |
                                 HS_MODE_INTERRUPTS | LOCAL_INTERRUPTS;
#define DELEGABLE_EXCPS ((1ULL << (RISCV_EXCP_INST_ADDR_MIS)) | \
                         (1ULL << (RISCV_EXCP_INST_ACCESS_FAULT)) | \
                         (1ULL << (RISCV_EXCP_ILLEGAL_INST)) | \
                         (1ULL << (RISCV_EXCP_BREAKPOINT)) | \
                         (1ULL << (RISCV_EXCP_LOAD_ADDR_MIS)) | \
                         (1ULL << (RISCV_EXCP_LOAD_ACCESS_FAULT)) | \
                         (1ULL << (RISCV_EXCP_STORE_AMO_ADDR_MIS)) | \
                         (1ULL << (RISCV_EXCP_STORE_AMO_ACCESS_FAULT)) | \
                         (1ULL << (RISCV_EXCP_U_ECALL)) | \
                         (1ULL << (RISCV_EXCP_S_ECALL)) | \
                         (1ULL << (RISCV_EXCP_VS_ECALL)) | \
                         (1ULL << (RISCV_EXCP_M_ECALL)) | \
                         (1ULL << (RISCV_EXCP_INST_PAGE_FAULT)) | \
                         (1ULL << (RISCV_EXCP_LOAD_PAGE_FAULT)) | \
                         (1ULL << (RISCV_EXCP_STORE_PAGE_FAULT)) | \
                         (1ULL << (RISCV_EXCP_SW_CHECK)) | \
                         (1ULL << (RISCV_EXCP_INST_GUEST_PAGE_FAULT)) | \
                         (1ULL << (RISCV_EXCP_LOAD_GUEST_ACCESS_FAULT)) | \
                         (1ULL << (RISCV_EXCP_VIRT_INSTRUCTION_FAULT)) | \
                         (1ULL << (RISCV_EXCP_STORE_GUEST_AMO_ACCESS_FAULT)))
static const target_ulong vs_delegable_excps = DELEGABLE_EXCPS &
    ~((1ULL << (RISCV_EXCP_S_ECALL)) |
      (1ULL << (RISCV_EXCP_VS_ECALL)) |
      (1ULL << (RISCV_EXCP_M_ECALL)) |
      (1ULL << (RISCV_EXCP_INST_GUEST_PAGE_FAULT)) |
      (1ULL << (RISCV_EXCP_LOAD_GUEST_ACCESS_FAULT)) |
      (1ULL << (RISCV_EXCP_VIRT_INSTRUCTION_FAULT)) |
      (1ULL << (RISCV_EXCP_STORE_GUEST_AMO_ACCESS_FAULT)));
static const target_ulong sstatus_v1_10_mask = SSTATUS_SIE | SSTATUS_SPIE |
    SSTATUS_UIE | SSTATUS_UPIE | SSTATUS_SPP | SSTATUS_FS | SSTATUS_XS |
    SSTATUS_SUM | SSTATUS_MXR | SSTATUS_VS;

/*
 * Spec allows for bits 13:63 to be either read-only or writable.
 * So far we have interrupt LCOFIP in that region which is writable.
 *
 * Also, spec allows to inject virtual interrupts in this region even
 * without any hardware interrupts for that interrupt number.
 *
 * For now interrupt in 13:63 region are all kept writable. 13 being
 * LCOFIP and 14:63 being virtual only. Change this in future if we
 * introduce more interrupts that are not writable.
 */

/* Bit STIP can be an alias of mip.STIP that's why it's writable in mvip. */
static const uint64_t mvip_writable_mask = MIP_SSIP | MIP_STIP | MIP_SEIP |
                                           LOCAL_INTERRUPTS;
static const uint64_t mvien_writable_mask = MIP_SSIP | MIP_SEIP |
                                            LOCAL_INTERRUPTS;

static const uint64_t sip_writable_mask = SIP_SSIP | LOCAL_INTERRUPTS;
static const uint64_t hip_writable_mask = MIP_VSSIP;
static const uint64_t hvip_writable_mask = MIP_VSSIP | MIP_VSTIP |
                                           MIP_VSEIP | LOCAL_INTERRUPTS;
static const uint64_t hvien_writable_mask = LOCAL_INTERRUPTS;

static const uint64_t vsip_writable_mask = MIP_VSSIP | LOCAL_INTERRUPTS;

/* Legal satp modes per MXL. */
const bool valid_vm_1_10_32[16] = {
    [VM_1_10_MBARE] = true,
    [VM_1_10_SV32] = true
};

const bool valid_vm_1_10_64[16] = {
    [VM_1_10_MBARE] = true,
    [VM_1_10_SV39] = true,
    [VM_1_10_SV48] = true,
    [VM_1_10_SV57] = true
};

/* Machine Information Registers */
static RISCVException read_zero(CPURISCVState *env, int csrno,
                                target_ulong *val)
{
    *val = 0;
    return RISCV_EXCP_NONE;
}

/* Write hook that silently discards the value (continues next chunk). */
static RISCVException write_ignore(CPURISCVState *env, int
                                   csrno,
                                   target_ulong val)
{
    return RISCV_EXCP_NONE;
}

/* Read mvendorid from the static CPU configuration. */
static RISCVException read_mvendorid(CPURISCVState *env, int csrno,
                                     target_ulong *val)
{
    *val = riscv_cpu_cfg(env)->mvendorid;
    return RISCV_EXCP_NONE;
}

/* Read marchid from the static CPU configuration. */
static RISCVException read_marchid(CPURISCVState *env, int csrno,
                                   target_ulong *val)
{
    *val = riscv_cpu_cfg(env)->marchid;
    return RISCV_EXCP_NONE;
}

/* Read mimpid from the static CPU configuration. */
static RISCVException read_mimpid(CPURISCVState *env, int csrno,
                                  target_ulong *val)
{
    *val = riscv_cpu_cfg(env)->mimpid;
    return RISCV_EXCP_NONE;
}

/* Read this hart's id. */
static RISCVException read_mhartid(CPURISCVState *env, int csrno,
                                   target_ulong *val)
{
    *val = env->mhartid;
    return RISCV_EXCP_NONE;
}

/* Machine Trap Setup */

/* We do not store SD explicitly, only compute it on demand. */
static uint64_t add_status_sd(RISCVMXL xl, uint64_t status)
{
    if ((status & MSTATUS_FS) == MSTATUS_FS ||
        (status & MSTATUS_VS) == MSTATUS_VS ||
        (status & MSTATUS_XS) == MSTATUS_XS) {
        switch (xl) {
        case MXL_RV32:
            return status | MSTATUS32_SD;
        case MXL_RV64:
            return status | MSTATUS64_SD;
        case MXL_RV128:
            return MSTATUSH128_SD;
        default:
            g_assert_not_reached();
        }
    }
    return status;
}

/* Read mstatus with the derived SD bit folded in. */
static RISCVException read_mstatus(CPURISCVState *env, int csrno,
                                   target_ulong *val)
{
    *val = add_status_sd(riscv_cpu_mxl(env), env->mstatus);
    return RISCV_EXCP_NONE;
}

/* True if the satp mode 'vm' is implemented by this CPU. */
static bool validate_vm(CPURISCVState *env, target_ulong vm)
{
    uint64_t mode_supported = riscv_cpu_cfg(env)->satp_mode.map;
    return get_field(mode_supported, (1 << vm));
}

/*
 * WARL-legalize a satp/vsatp/hgatp write: keep the old value unless the
 * requested mode is supported and something actually changed.
 */
static target_ulong legalize_xatp(CPURISCVState *env, target_ulong old_xatp,
                                  target_ulong val)
{
    target_ulong mask;
    bool vm;
    if (riscv_cpu_mxl(env) == MXL_RV32) {
        vm = validate_vm(env, get_field(val, SATP32_MODE));
        mask = (val ^ old_xatp) & (SATP32_MODE | SATP32_ASID | SATP32_PPN);
    } else {
        vm = validate_vm(env, get_field(val, SATP64_MODE));
        mask = (val ^ old_xatp) & (SATP64_MODE | SATP64_ASID | SATP64_PPN);
    }

    if (vm && mask) {
        /*
         * The ISA defines SATP.MODE=Bare as "no translation", but we still
         * pass these through QEMU's TLB emulation as it improves
         * performance.  Flushing the TLB on SATP writes with paging
         * enabled avoids leaking those invalid cached mappings.
         */
        tlb_flush(env_cpu(env));
        return val;
    }
    return old_xatp;
}

/* WARL-legalize mstatus.MPP: keep the old value for unimplemented modes. */
static target_ulong legalize_mpp(CPURISCVState *env, target_ulong old_mpp,
                                 target_ulong val)
{
    bool valid = false;
    target_ulong new_mpp = get_field(val, MSTATUS_MPP);

    switch (new_mpp) {
    case PRV_M:
        valid = true;
        break;
    case PRV_S:
        valid = riscv_has_ext(env, RVS);
        break;
    case PRV_U:
        valid = riscv_has_ext(env, RVU);
        break;
    }

    /* Remain field unchanged if new_mpp value is invalid */
    if (!valid) {
        val = set_field(val, MSTATUS_MPP, old_mpp);
    }

    return val;
}

/*
 * Write mstatus. Builds a writable-bit mask from the implemented
 * extensions, legalizes WARL fields, and flushes the TLB when a
 * translation-affecting bit (MXR) toggles.
 */
static RISCVException write_mstatus(CPURISCVState *env, int csrno,
                                    target_ulong val)
{
    uint64_t mstatus = env->mstatus;
    uint64_t mask = 0;
    RISCVMXL xl = riscv_cpu_mxl(env);

    /*
     * MPP field have been made WARL since priv version 1.11. However,
     * legalization for it will not break any software running on 1.10.
     */
    val = legalize_mpp(env, get_field(mstatus, MSTATUS_MPP), val);

    /* flush tlb on mstatus fields that affect VM */
    if ((val ^ mstatus) & MSTATUS_MXR) {
        tlb_flush(env_cpu(env));
    }
    mask = MSTATUS_SIE | MSTATUS_SPIE | MSTATUS_MIE | MSTATUS_MPIE |
        MSTATUS_SPP | MSTATUS_MPRV | MSTATUS_SUM |
        MSTATUS_MPP | MSTATUS_MXR | MSTATUS_TVM | MSTATUS_TSR |
        MSTATUS_TW;

    if (riscv_has_ext(env, RVF)) {
        mask |= MSTATUS_FS;
    }
    if (riscv_has_ext(env, RVV)) {
        mask |= MSTATUS_VS;
    }

    /* Setting SDT (double-trap) also forces SIE off. */
    if (riscv_env_smode_dbltrp_enabled(env, env->virt_enabled)) {
        mask |= MSTATUS_SDT;
        if ((val & MSTATUS_SDT) != 0) {
            val &= ~MSTATUS_SIE;
        }
    }

    /* Likewise MDT forces MIE off. */
    if (riscv_cpu_cfg(env)->ext_smdbltrp) {
        mask |= MSTATUS_MDT;
        if ((val & MSTATUS_MDT) != 0) {
            val &= ~MSTATUS_MIE;
        }
    }

    if (xl != MXL_RV32 || env->debugger) {
        if (riscv_has_ext(env, RVH)) {
            mask |= MSTATUS_MPV | MSTATUS_GVA;
        }
        if ((val & MSTATUS64_UXL) != 0) {
            mask |= MSTATUS64_UXL;
        }
    }

    /* If cfi lp extension is available, then apply cfi lp mask */
    if (env_archcpu(env)->cfg.ext_zicfilp) {
        mask |= (MSTATUS_MPELP | MSTATUS_SPELP);
    }

    mstatus = (mstatus & ~mask) | (val & mask);

    env->mstatus = mstatus;

    /*
     * Except in debug mode, UXL/SXL can only be modified by higher
     * privilege mode. So xl will not be changed in normal mode.
     */
    if (env->debugger) {
        env->xl = cpu_recompute_xl(env);
    }

    return RISCV_EXCP_NONE;
}

/* Read the high half of mstatus (RV32). */
static RISCVException read_mstatush(CPURISCVState *env, int csrno,
                                    target_ulong *val)
{
    *val = env->mstatus >> 32;
    return RISCV_EXCP_NONE;
}

/* Write mstatush (continues in the next chunk). */
static RISCVException write_mstatush(CPURISCVState *env, int csrno,
                                     target_ulong val)
{
    uint64_t valh = (uint64_t)val << 32;
    uint64_t mask = riscv_has_ext(env, RVH) ?
                    MSTATUS_MPV | MSTATUS_GVA : 0;

    if (riscv_cpu_cfg(env)->ext_smdbltrp) {
        mask |= MSTATUS_MDT;
        /*
         * Setting MDT must clear MIE. Adding MIE to the mask achieves that
         * here: valh's low 32 bits are always zero (val << 32), so the
         * masked update writes 0 into MIE.
         */
        if ((valh & MSTATUS_MDT) != 0) {
            mask |= MSTATUS_MIE;
        }
    }
    env->mstatus = (env->mstatus & ~mask) | (valh & mask);

    return RISCV_EXCP_NONE;
}

/* 128-bit mstatus read with the derived SD bit in the high part. */
static RISCVException read_mstatus_i128(CPURISCVState *env, int csrno,
                                        Int128 *val)
{
    *val = int128_make128(env->mstatus, add_status_sd(MXL_RV128,
                                                      env->mstatus));
    return RISCV_EXCP_NONE;
}

/* 128-bit misa read: extensions in the low part, MXL in the top bits. */
static RISCVException read_misa_i128(CPURISCVState *env, int csrno,
                                     Int128 *val)
{
    *val = int128_make128(env->misa_ext, (uint64_t)MXL_RV128 << 62);
    return RISCV_EXCP_NONE;
}

/* Read misa: MXL field in the top two bits plus the extension mask. */
static RISCVException read_misa(CPURISCVState *env, int csrno,
                                target_ulong *val)
{
    target_ulong misa;

    switch (env->misa_mxl) {
    case MXL_RV32:
        misa = (target_ulong)MXL_RV32 << 30;
        break;
#ifdef TARGET_RISCV64
    case MXL_RV64:
        misa = (target_ulong)MXL_RV64 << 62;
        break;
#endif
    default:
        g_assert_not_reached();
    }

    *val = misa | env->misa_ext;
    return RISCV_EXCP_NONE;
}

/*
 * Write misa (only when misa_w is configured). The new extension set is
 * validated; on failure the previous value is restored and the write is
 * silently dropped. A successful change flushes the translation cache.
 */
static RISCVException write_misa(CPURISCVState *env, int csrno,
                                 target_ulong val)
{
    RISCVCPU *cpu = env_archcpu(env);
    uint32_t orig_misa_ext = env->misa_ext;
    Error *local_err = NULL;

    if (!riscv_cpu_cfg(env)->misa_w) {
        /* drop write to misa */
        return RISCV_EXCP_NONE;
    }

    /* Mask extensions that are not supported by this hart */
    val &= env->misa_ext_mask;

    /*
     * Suppress 'C' if next instruction is not aligned
     * TODO: this should check next_pc
     * NOTE(review): (GETPC() & ~3) is nonzero for nearly any host return
     * address — confirm this condition is intended rather than (GETPC() & 3).
     */
    if ((val & RVC) && (GETPC() & ~3) != 0) {
        val &= ~RVC;
    }

    /* Disable RVG if any of its dependencies are disabled */
    if (!(val & RVI && val & RVM && val & RVA &&
          val & RVF && val & RVD)) {
        val &= ~RVG;
    }

    /* If nothing changed, do nothing. */
    if (val == env->misa_ext) {
        return RISCV_EXCP_NONE;
    }

    env->misa_ext = val;
    riscv_cpu_validate_set_extensions(cpu, &local_err);
    if (local_err != NULL) {
        /* Rollback on validation error */
        qemu_log_mask(LOG_GUEST_ERROR, "Unable to write MISA ext value "
                      "0x%x, keeping existing MISA ext 0x%x\n",
                      env->misa_ext, orig_misa_ext);

        env->misa_ext = orig_misa_ext;

        return RISCV_EXCP_NONE;
    }

    if (!(env->misa_ext & RVF)) {
        env->mstatus &= ~MSTATUS_FS;
    }

    /* flush translation cache */
    tb_flush(env_cpu(env));
    env->xl = riscv_cpu_mxl(env);
    return RISCV_EXCP_NONE;
}

/* Read medeleg. */
static RISCVException read_medeleg(CPURISCVState *env, int csrno,
                                   target_ulong *val)
{
    *val = env->medeleg;
    return RISCV_EXCP_NONE;
}

/* Write medeleg; only DELEGABLE_EXCPS bits are writable. */
static RISCVException write_medeleg(CPURISCVState *env, int csrno,
                                    target_ulong val)
{
    env->medeleg = (env->medeleg & ~DELEGABLE_EXCPS) | (val & DELEGABLE_EXCPS);
    return RISCV_EXCP_NONE;
}

/*
 * 64-bit rmw of mideleg. HS-level interrupts are forced delegated
 * whenever the H extension is present.
 */
static RISCVException rmw_mideleg64(CPURISCVState *env, int csrno,
                                    uint64_t *ret_val,
                                    uint64_t new_val, uint64_t wr_mask)
{
    uint64_t mask = wr_mask & delegable_ints;

    if (ret_val) {
        *ret_val = env->mideleg;
    }

    env->mideleg = (env->mideleg & ~mask) | (new_val & mask);

    if (riscv_has_ext(env, RVH)) {
        env->mideleg |= HS_MODE_INTERRUPTS;
    }

    return RISCV_EXCP_NONE;
}

/* target_ulong-width wrapper over rmw_mideleg64 (low half). */
static RISCVException rmw_mideleg(CPURISCVState *env, int csrno,
                                  target_ulong *ret_val,
                                  target_ulong new_val, target_ulong wr_mask)
{
    uint64_t rval;
    RISCVException ret;

    ret = rmw_mideleg64(env, csrno, &rval, new_val, wr_mask);
    if (ret_val) {
        *ret_val = rval;
    }

    return ret;
}

/* High-half wrapper over rmw_mideleg64 (continues in next chunk). */
static RISCVException rmw_midelegh(CPURISCVState *env, int csrno,
                                   target_ulong *ret_val,
                                   target_ulong new_val,
                                   target_ulong wr_mask)
{
    uint64_t rval;
    RISCVException ret;

    ret = rmw_mideleg64(env, csrno, &rval,
        ((uint64_t)new_val) << 32, ((uint64_t)wr_mask) << 32);
    if (ret_val) {
        *ret_val = rval >> 32;
    }

    return ret;
}

/*
 * 64-bit rmw of mie. HS-level interrupt enables are cleared when the H
 * extension is absent.
 */
static RISCVException rmw_mie64(CPURISCVState *env, int csrno,
                                uint64_t *ret_val,
                                uint64_t new_val, uint64_t wr_mask)
{
    uint64_t mask = wr_mask & all_ints;

    if (ret_val) {
        *ret_val = env->mie;
    }

    env->mie = (env->mie & ~mask) | (new_val & mask);

    if (!riscv_has_ext(env, RVH)) {
        env->mie &= ~((uint64_t)HS_MODE_INTERRUPTS);
    }

    return RISCV_EXCP_NONE;
}

/* target_ulong-width wrapper over rmw_mie64 (low half). */
static RISCVException rmw_mie(CPURISCVState *env, int csrno,
                              target_ulong *ret_val,
                              target_ulong new_val, target_ulong wr_mask)
{
    uint64_t rval;
    RISCVException ret;

    ret = rmw_mie64(env, csrno, &rval, new_val, wr_mask);
    if (ret_val) {
        *ret_val = rval;
    }

    return ret;
}

/* High-half wrapper over rmw_mie64 (RV32 mieh). */
static RISCVException rmw_mieh(CPURISCVState *env, int csrno,
                               target_ulong *ret_val,
                               target_ulong new_val, target_ulong wr_mask)
{
    uint64_t rval;
    RISCVException ret;

    ret = rmw_mie64(env, csrno, &rval,
        ((uint64_t)new_val) << 32, ((uint64_t)wr_mask) << 32);
    if (ret_val) {
        *ret_val = rval >> 32;
    }

    return ret;
}

/* 64-bit rmw of mvien; writes limited to mvien_writable_mask. */
static RISCVException rmw_mvien64(CPURISCVState *env, int csrno,
                                  uint64_t *ret_val,
                                  uint64_t new_val, uint64_t wr_mask)
{
    uint64_t mask = wr_mask & mvien_writable_mask;

    if (ret_val) {
        *ret_val = env->mvien;
    }

    env->mvien = (env->mvien & ~mask) | (new_val & mask);

    return RISCV_EXCP_NONE;
}

/* target_ulong-width wrapper over rmw_mvien64 (low half). */
static RISCVException rmw_mvien(CPURISCVState *env, int csrno,
                                target_ulong *ret_val,
                                target_ulong new_val, target_ulong wr_mask)
{
    uint64_t rval;
    RISCVException ret;

    ret = rmw_mvien64(env, csrno, &rval, new_val, wr_mask);
    if (ret_val) {
        *ret_val = rval;
    }

    return ret;
}

/* High-half wrapper over rmw_mvien64 (RV32 mvienh). */
static RISCVException rmw_mvienh(CPURISCVState *env, int csrno,
                                 target_ulong *ret_val,
                                 target_ulong new_val, target_ulong wr_mask)
{
    uint64_t rval;
    RISCVException ret;

    ret = rmw_mvien64(env, csrno, &rval,
        ((uint64_t)new_val) << 32, ((uint64_t)wr_mask) << 32);
    if (ret_val) {
        *ret_val = rval >> 32;
    }

    return ret;
}

/*
 * Read mtopi: highest-priority pending M-level interrupt (IID + IPRIO),
 * or zero when none is pending.
 */
static RISCVException read_mtopi(CPURISCVState *env, int csrno,
                                 target_ulong *val)
{
    int irq;
    uint8_t iprio;

    irq = riscv_cpu_mirq_pending(env);
    if (irq <= 0 || irq > 63) {
        *val = 0;
    } else {
        iprio = env->miprio[irq];
        if (!iprio) {
            if (riscv_cpu_default_priority(irq) > IPRIO_DEFAULT_M) {
                iprio = IPRIO_MMAXIPRIO;
            }
        }
        *val = (irq & TOPI_IID_MASK) << TOPI_IID_SHIFT;
        *val |= iprio;
    }

    return RISCV_EXCP_NONE;
}

/* Map S-level AIA CSR numbers to their VS-level twins when V=1. */
static int aia_xlate_vs_csrno(CPURISCVState *env, int csrno)
{
    if (!env->virt_enabled) {
        return csrno;
    }

    switch (csrno) {
    case CSR_SISELECT:
        return CSR_VSISELECT;
    case CSR_SIREG:
        return CSR_VSIREG;
    case CSR_STOPEI:
        return CSR_VSTOPEI;
    default:
        return csrno;
    };
}

/* Like aia_xlate_vs_csrno() but also covers sireg2..sireg6 (csrind). */
static int csrind_xlate_vs_csrno(CPURISCVState *env, int csrno)
{
    if (!env->virt_enabled) {
        return csrno;
    }

    switch (csrno) {
    case CSR_SISELECT:
        return CSR_VSISELECT;
    case CSR_SIREG:
    case CSR_SIREG2:
    case CSR_SIREG3:
    case CSR_SIREG4:
    case CSR_SIREG5:
    case CSR_SIREG6:
        return CSR_VSIREG + (csrno - CSR_SIREG);
    default:
        return csrno;
    };
}

/*
 * rmw of miselect/siselect/vsiselect. The writable index range depends on
 * whether the csrind extensions or plain AIA are implemented.
 */
static RISCVException rmw_xiselect(CPURISCVState *env, int csrno,
                                   target_ulong *val, target_ulong new_val,
                                   target_ulong wr_mask)
{
    target_ulong *iselect;
    int ret;

    ret = smstateen_acc_ok(env, 0, SMSTATEEN0_SVSLCT);
    if (ret != RISCV_EXCP_NONE) {
        return ret;
    }

    /* Translate CSR number for VS-mode */
    csrno = csrind_xlate_vs_csrno(env, csrno);

    /* Find the iselect CSR based on CSR number */
    switch (csrno) {
    case CSR_MISELECT:
        iselect = &env->miselect;
        break;
    case CSR_SISELECT:
        iselect = &env->siselect;
        break;
    case CSR_VSISELECT:
        iselect = &env->vsiselect;
        break;
    default:
        return RISCV_EXCP_ILLEGAL_INST;
    };

    if (val) {
        *val = *iselect;
    }

    if (riscv_cpu_cfg(env)->ext_smcsrind || riscv_cpu_cfg(env)->ext_sscsrind) {
        wr_mask &= ISELECT_MASK_SXCSRIND;
    } else {
        wr_mask &= ISELECT_MASK_AIA;
    }

    if (wr_mask) {
        *iselect = (*iselect & ~wr_mask) | (new_val & wr_mask);
    }

    return RISCV_EXCP_NONE;
}

/* True if isel addresses the AIA iprio array or IMSIC register space. */
static bool xiselect_aia_range(target_ulong isel)
{
    return (ISELECT_IPRIO0 <= isel && isel <= ISELECT_IPRIO15) ||
           (ISELECT_IMSIC_FIRST <= isel && isel <= ISELECT_IMSIC_LAST);
}

/* True if isel addresses the counter-delegation register space. */
static bool xiselect_cd_range(target_ulong isel)
{
    return (ISELECT_CD_FIRST <= isel && isel <= ISELECT_CD_LAST);
}

/* True if isel addresses a CTR entry (continues in next chunk). */
static bool xiselect_ctr_range(int csrno, target_ulong isel)
{
    /* MIREG-MIREG6 for the range 0x200-0x2ff are not used by CTR.
 */
    return CTR_ENTRIES_FIRST <= isel && isel <= CTR_ENTRIES_LAST &&
           csrno < CSR_MIREG;
}

/*
 * Read-modify-write one xlen-wide group of per-IRQ priority bytes.
 *
 * @iprio points at the per-hart priority byte array and @ext_irq_no is
 * the external interrupt number whose priority must stay read-only zero.
 * Returns 0 on success or -EINVAL for an invalid @iselect.
 */
static int rmw_iprio(target_ulong xlen,
                     target_ulong iselect, uint8_t *iprio,
                     target_ulong *val, target_ulong new_val,
                     target_ulong wr_mask, int ext_irq_no)
{
    int i, firq, nirqs;
    target_ulong old_val;

    if (iselect < ISELECT_IPRIO0 || ISELECT_IPRIO15 < iselect) {
        return -EINVAL;
    }
    /* On RV64 the odd-numbered iprio registers do not exist. */
    if (xlen != 32 && iselect & 0x1) {
        return -EINVAL;
    }

    /* Each register packs 4 (RV32) or 8 (RV64) one-byte priorities. */
    nirqs = 4 * (xlen / 32);
    firq = ((iselect - ISELECT_IPRIO0) / (xlen / 32)) * (nirqs);

    old_val = 0;
    for (i = 0; i < nirqs; i++) {
        old_val |= ((target_ulong)iprio[firq + i]) << (IPRIO_IRQ_BITS * i);
    }

    if (val) {
        *val = old_val;
    }

    if (wr_mask) {
        new_val = (old_val & ~wr_mask) | (new_val & wr_mask);
        for (i = 0; i < nirqs; i++) {
            /*
             * M-level and S-level external IRQ priority always read-only
             * zero. This means default priority order is always preferred
             * for M-level and S-level external IRQs.
             */
            if ((firq + i) == ext_irq_no) {
                continue;
            }
            iprio[firq + i] = (new_val >> (IPRIO_IRQ_BITS * i)) & 0xff;
        }
    }

    return 0;
}

/* Read-modify-write one CTR source-address entry, indexed by isel. */
static int rmw_ctrsource(CPURISCVState *env, int isel, target_ulong *val,
                         target_ulong new_val, target_ulong wr_mask)
{
    /*
     * CTR arrays are treated as circular buffers and TOS always points to next
     * empty slot, keeping TOS - 1 always pointing to latest entry. Given entry
     * 0 is always the latest one, traversal is a bit different here. See the
     * below example.
     *
     * Depth = 16.
2499 * 2500 * idx [0] [1] [2] [3] [4] [5] [6] [7] [8] [9] [A] [B] [C] [D] [E] [F] 2501 * TOS H 2502 * entry 6 5 4 3 2 1 0 F E D C B A 9 8 7 2503 */ 2504 const uint64_t entry = isel - CTR_ENTRIES_FIRST; 2505 const uint64_t depth = 16 << get_field(env->sctrdepth, SCTRDEPTH_MASK); 2506 uint64_t idx; 2507 2508 /* Entry greater than depth-1 is read-only zero */ 2509 if (entry >= depth) { 2510 if (val) { 2511 *val = 0; 2512 } 2513 return 0; 2514 } 2515 2516 idx = get_field(env->sctrstatus, SCTRSTATUS_WRPTR_MASK); 2517 idx = (idx - entry - 1) & (depth - 1); 2518 2519 if (val) { 2520 *val = env->ctr_src[idx]; 2521 } 2522 2523 env->ctr_src[idx] = (env->ctr_src[idx] & ~wr_mask) | (new_val & wr_mask); 2524 2525 return 0; 2526 } 2527 2528 static int rmw_ctrtarget(CPURISCVState *env, int isel, target_ulong *val, 2529 target_ulong new_val, target_ulong wr_mask) 2530 { 2531 /* 2532 * CTR arrays are treated as circular buffers and TOS always points to next 2533 * empty slot, keeping TOS - 1 always pointing to latest entry. Given entry 2534 * 0 is always the latest one, traversal is a bit different here. See the 2535 * below example. 2536 * 2537 * Depth = 16. 
2538 * 2539 * idx [0] [1] [2] [3] [4] [5] [6] [7] [8] [9] [A] [B] [C] [D] [E] [F] 2540 * head H 2541 * entry 6 5 4 3 2 1 0 F E D C B A 9 8 7 2542 */ 2543 const uint64_t entry = isel - CTR_ENTRIES_FIRST; 2544 const uint64_t depth = 16 << get_field(env->sctrdepth, SCTRDEPTH_MASK); 2545 uint64_t idx; 2546 2547 /* Entry greater than depth-1 is read-only zero */ 2548 if (entry >= depth) { 2549 if (val) { 2550 *val = 0; 2551 } 2552 return 0; 2553 } 2554 2555 idx = get_field(env->sctrstatus, SCTRSTATUS_WRPTR_MASK); 2556 idx = (idx - entry - 1) & (depth - 1); 2557 2558 if (val) { 2559 *val = env->ctr_dst[idx]; 2560 } 2561 2562 env->ctr_dst[idx] = (env->ctr_dst[idx] & ~wr_mask) | (new_val & wr_mask); 2563 2564 return 0; 2565 } 2566 2567 static int rmw_ctrdata(CPURISCVState *env, int isel, target_ulong *val, 2568 target_ulong new_val, target_ulong wr_mask) 2569 { 2570 /* 2571 * CTR arrays are treated as circular buffers and TOS always points to next 2572 * empty slot, keeping TOS - 1 always pointing to latest entry. Given entry 2573 * 0 is always the latest one, traversal is a bit different here. See the 2574 * below example. 2575 * 2576 * Depth = 16. 
2577 * 2578 * idx [0] [1] [2] [3] [4] [5] [6] [7] [8] [9] [A] [B] [C] [D] [E] [F] 2579 * head H 2580 * entry 6 5 4 3 2 1 0 F E D C B A 9 8 7 2581 */ 2582 const uint64_t entry = isel - CTR_ENTRIES_FIRST; 2583 const uint64_t mask = wr_mask & CTRDATA_MASK; 2584 const uint64_t depth = 16 << get_field(env->sctrdepth, SCTRDEPTH_MASK); 2585 uint64_t idx; 2586 2587 /* Entry greater than depth-1 is read-only zero */ 2588 if (entry >= depth) { 2589 if (val) { 2590 *val = 0; 2591 } 2592 return 0; 2593 } 2594 2595 idx = get_field(env->sctrstatus, SCTRSTATUS_WRPTR_MASK); 2596 idx = (idx - entry - 1) & (depth - 1); 2597 2598 if (val) { 2599 *val = env->ctr_data[idx]; 2600 } 2601 2602 env->ctr_data[idx] = (env->ctr_data[idx] & ~mask) | (new_val & mask); 2603 2604 return 0; 2605 } 2606 2607 static RISCVException rmw_xireg_aia(CPURISCVState *env, int csrno, 2608 target_ulong isel, target_ulong *val, 2609 target_ulong new_val, target_ulong wr_mask) 2610 { 2611 bool virt = false, isel_reserved = false; 2612 int ret = -EINVAL; 2613 uint8_t *iprio; 2614 target_ulong priv, vgein; 2615 2616 /* VS-mode CSR number passed in has already been translated */ 2617 switch (csrno) { 2618 case CSR_MIREG: 2619 if (!riscv_cpu_cfg(env)->ext_smaia) { 2620 goto done; 2621 } 2622 iprio = env->miprio; 2623 priv = PRV_M; 2624 break; 2625 case CSR_SIREG: 2626 if (!riscv_cpu_cfg(env)->ext_ssaia || 2627 (env->priv == PRV_S && env->mvien & MIP_SEIP && 2628 env->siselect >= ISELECT_IMSIC_EIDELIVERY && 2629 env->siselect <= ISELECT_IMSIC_EIE63)) { 2630 goto done; 2631 } 2632 iprio = env->siprio; 2633 priv = PRV_S; 2634 break; 2635 case CSR_VSIREG: 2636 if (!riscv_cpu_cfg(env)->ext_ssaia) { 2637 goto done; 2638 } 2639 iprio = env->hviprio; 2640 priv = PRV_S; 2641 virt = true; 2642 break; 2643 default: 2644 goto done; 2645 }; 2646 2647 /* Find the selected guest interrupt file */ 2648 vgein = (virt) ? 
get_field(env->hstatus, HSTATUS_VGEIN) : 0; 2649 2650 if (ISELECT_IPRIO0 <= isel && isel <= ISELECT_IPRIO15) { 2651 /* Local interrupt priority registers not available for VS-mode */ 2652 if (!virt) { 2653 ret = rmw_iprio(riscv_cpu_mxl_bits(env), 2654 isel, iprio, val, new_val, wr_mask, 2655 (priv == PRV_M) ? IRQ_M_EXT : IRQ_S_EXT); 2656 } 2657 } else if (ISELECT_IMSIC_FIRST <= isel && isel <= ISELECT_IMSIC_LAST) { 2658 /* IMSIC registers only available when machine implements it. */ 2659 if (env->aia_ireg_rmw_fn[priv]) { 2660 /* Selected guest interrupt file should not be zero */ 2661 if (virt && (!vgein || env->geilen < vgein)) { 2662 goto done; 2663 } 2664 /* Call machine specific IMSIC register emulation */ 2665 ret = env->aia_ireg_rmw_fn[priv](env->aia_ireg_rmw_fn_arg[priv], 2666 AIA_MAKE_IREG(isel, priv, virt, vgein, 2667 riscv_cpu_mxl_bits(env)), 2668 val, new_val, wr_mask); 2669 } 2670 } else { 2671 isel_reserved = true; 2672 } 2673 2674 done: 2675 /* 2676 * If AIA is not enabled, illegal instruction exception is always 2677 * returned regardless of whether we are in VS-mode or not 2678 */ 2679 if (ret) { 2680 return (env->virt_enabled && virt && !isel_reserved) ? 
RISCV_EXCP_VIRT_INSTRUCTION_FAULT : RISCV_EXCP_ILLEGAL_INST;
    }

    return RISCV_EXCP_NONE;
}

/*
 * Indirect access to the counter-delegation (Smcdeleg/Ssccfg) register
 * space via sireg*.  Returns 0 on success, -EINVAL or
 * RISCV_EXCP_ILLEGAL_INST on failure; the caller converts non-zero
 * results into the appropriate exception.
 */
static int rmw_xireg_cd(CPURISCVState *env, int csrno,
                        target_ulong isel, target_ulong *val,
                        target_ulong new_val, target_ulong wr_mask)
{
    int ret = -EINVAL;
    int ctr_index = isel - ISELECT_CD_FIRST;
    int isel_hpm_start = ISELECT_CD_FIRST + 3;

    if (!riscv_cpu_cfg(env)->ext_smcdeleg || !riscv_cpu_cfg(env)->ext_ssccfg) {
        ret = RISCV_EXCP_ILLEGAL_INST;
        goto done;
    }

    /* Invalid siselect value for reserved */
    if (ctr_index == 1) {
        goto done;
    }

    /* sireg4 and sireg5 provide access to RV32-only CSRs */
    if (((csrno == CSR_SIREG5) || (csrno == CSR_SIREG4)) &&
        (riscv_cpu_mxl(env) != MXL_RV32)) {
        ret = RISCV_EXCP_ILLEGAL_INST;
        goto done;
    }

    /* Check Sscofpmf dependency */
    if (!riscv_cpu_cfg(env)->ext_sscofpmf && csrno == CSR_SIREG5 &&
        (isel_hpm_start <= isel && isel <= ISELECT_CD_LAST)) {
        goto done;
    }

    /* Check Smcntrpmf dependency */
    if (!riscv_cpu_cfg(env)->ext_smcntrpmf &&
        (csrno == CSR_SIREG2 || csrno == CSR_SIREG5) &&
        (ISELECT_CD_FIRST <= isel && isel < isel_hpm_start)) {
        goto done;
    }

    /* The counter must be delegated (menvcfg.CDE) and enabled in mcounteren */
    if (!get_field(env->mcounteren, BIT(ctr_index)) ||
        !get_field(env->menvcfg, MENVCFG_CDE)) {
        goto done;
    }

    switch (csrno) {
    case CSR_SIREG:
        ret = rmw_cd_mhpmcounter(env, ctr_index, val, new_val, wr_mask);
        break;
    case CSR_SIREG4:
        ret = rmw_cd_mhpmcounterh(env, ctr_index, val, new_val, wr_mask);
        break;
    case CSR_SIREG2:
        /* Indices 0..2 are cycle/time/instret config, others are hpmevent */
        if (ctr_index <= 2) {
            ret = rmw_cd_ctr_cfg(env, ctr_index, val, new_val, wr_mask);
        } else {
            ret = rmw_cd_mhpmevent(env, ctr_index, val, new_val, wr_mask);
        }
        break;
    case CSR_SIREG5:
        if (ctr_index <= 2) {
            ret = rmw_cd_ctr_cfgh(env, ctr_index, val, new_val, wr_mask);
        } else {
ret = rmw_cd_mhpmeventh(env, ctr_index, val, new_val, wr_mask);
        }
        break;
    default:
        goto done;
    }

done:
    return ret;
}

/*
 * Indirect access to the CTR (Control Transfer Records) entry arrays.
 * sireg/sireg2/sireg3 map to the source, target and data arrays
 * respectively; the remaining xireg aliases read as zero.
 */
static int rmw_xireg_ctr(CPURISCVState *env, int csrno,
                         target_ulong isel, target_ulong *val,
                         target_ulong new_val, target_ulong wr_mask)
{
    if (!riscv_cpu_cfg(env)->ext_smctr && !riscv_cpu_cfg(env)->ext_ssctr) {
        return -EINVAL;
    }

    if (csrno == CSR_SIREG || csrno == CSR_VSIREG) {
        return rmw_ctrsource(env, isel, val, new_val, wr_mask);
    } else if (csrno == CSR_SIREG2 || csrno == CSR_VSIREG2) {
        return rmw_ctrtarget(env, isel, val, new_val, wr_mask);
    } else if (csrno == CSR_SIREG3 || csrno == CSR_VSIREG3) {
        return rmw_ctrdata(env, isel, val, new_val, wr_mask);
    } else if (val) {
        *val = 0;
    }

    return 0;
}

/*
 * rmw_xireg_csrind: Perform indirect access to xireg and xireg2-xireg6
 *
 * Perform indirect access to xireg and xireg2-xireg6.
 * This is a generic interface for all xireg CSRs. Apart from AIA, all other
 * extension using csrind should be implemented here.
 */
static int rmw_xireg_csrind(CPURISCVState *env, int csrno,
                            target_ulong isel, target_ulong *val,
                            target_ulong new_val, target_ulong wr_mask)
{
    bool virt = csrno == CSR_VSIREG ? true : false;
    int ret = -EINVAL;

    if (xiselect_cd_range(isel)) {
        ret = rmw_xireg_cd(env, csrno, isel, val, new_val, wr_mask);
    } else if (xiselect_ctr_range(csrno, isel)) {
        ret = rmw_xireg_ctr(env, csrno, isel, val, new_val, wr_mask);
    } else {
        /*
         * As per the specification, access to unimplemented region is
         * undefined but recommendation is to raise illegal instruction
         * exception.
         */
        return RISCV_EXCP_ILLEGAL_INST;
    }

    if (ret) {
        return (env->virt_enabled && virt) ?
2808 RISCV_EXCP_VIRT_INSTRUCTION_FAULT : RISCV_EXCP_ILLEGAL_INST; 2809 } 2810 2811 return RISCV_EXCP_NONE; 2812 } 2813 2814 static int rmw_xiregi(CPURISCVState *env, int csrno, target_ulong *val, 2815 target_ulong new_val, target_ulong wr_mask) 2816 { 2817 int ret = -EINVAL; 2818 target_ulong isel; 2819 2820 ret = smstateen_acc_ok(env, 0, SMSTATEEN0_SVSLCT); 2821 if (ret != RISCV_EXCP_NONE) { 2822 return ret; 2823 } 2824 2825 /* Translate CSR number for VS-mode */ 2826 csrno = csrind_xlate_vs_csrno(env, csrno); 2827 2828 if (CSR_MIREG <= csrno && csrno <= CSR_MIREG6 && 2829 csrno != CSR_MIREG4 - 1) { 2830 isel = env->miselect; 2831 } else if (CSR_SIREG <= csrno && csrno <= CSR_SIREG6 && 2832 csrno != CSR_SIREG4 - 1) { 2833 isel = env->siselect; 2834 } else if (CSR_VSIREG <= csrno && csrno <= CSR_VSIREG6 && 2835 csrno != CSR_VSIREG4 - 1) { 2836 isel = env->vsiselect; 2837 } else { 2838 return RISCV_EXCP_ILLEGAL_INST; 2839 } 2840 2841 return rmw_xireg_csrind(env, csrno, isel, val, new_val, wr_mask); 2842 } 2843 2844 static RISCVException rmw_xireg(CPURISCVState *env, int csrno, 2845 target_ulong *val, target_ulong new_val, 2846 target_ulong wr_mask) 2847 { 2848 int ret = -EINVAL; 2849 target_ulong isel; 2850 2851 ret = smstateen_acc_ok(env, 0, SMSTATEEN0_SVSLCT); 2852 if (ret != RISCV_EXCP_NONE) { 2853 return ret; 2854 } 2855 2856 /* Translate CSR number for VS-mode */ 2857 csrno = csrind_xlate_vs_csrno(env, csrno); 2858 2859 /* Decode register details from CSR number */ 2860 switch (csrno) { 2861 case CSR_MIREG: 2862 isel = env->miselect; 2863 break; 2864 case CSR_SIREG: 2865 isel = env->siselect; 2866 break; 2867 case CSR_VSIREG: 2868 isel = env->vsiselect; 2869 break; 2870 default: 2871 goto done; 2872 }; 2873 2874 /* 2875 * Use the xiselect range to determine actual op on xireg. 
2876 * 2877 * Since we only checked the existence of AIA or Indirect Access in the 2878 * predicate, we should check the existence of the exact extension when 2879 * we get to a specific range and return illegal instruction exception even 2880 * in VS-mode. 2881 */ 2882 if (xiselect_aia_range(isel)) { 2883 return rmw_xireg_aia(env, csrno, isel, val, new_val, wr_mask); 2884 } else if (riscv_cpu_cfg(env)->ext_smcsrind || 2885 riscv_cpu_cfg(env)->ext_sscsrind) { 2886 return rmw_xireg_csrind(env, csrno, isel, val, new_val, wr_mask); 2887 } 2888 2889 done: 2890 return RISCV_EXCP_ILLEGAL_INST; 2891 } 2892 2893 static RISCVException rmw_xtopei(CPURISCVState *env, int csrno, 2894 target_ulong *val, target_ulong new_val, 2895 target_ulong wr_mask) 2896 { 2897 bool virt; 2898 int ret = -EINVAL; 2899 target_ulong priv, vgein; 2900 2901 /* Translate CSR number for VS-mode */ 2902 csrno = aia_xlate_vs_csrno(env, csrno); 2903 2904 /* Decode register details from CSR number */ 2905 virt = false; 2906 switch (csrno) { 2907 case CSR_MTOPEI: 2908 priv = PRV_M; 2909 break; 2910 case CSR_STOPEI: 2911 if (env->mvien & MIP_SEIP && env->priv == PRV_S) { 2912 goto done; 2913 } 2914 priv = PRV_S; 2915 break; 2916 case CSR_VSTOPEI: 2917 priv = PRV_S; 2918 virt = true; 2919 break; 2920 default: 2921 goto done; 2922 }; 2923 2924 /* IMSIC CSRs only available when machine implements IMSIC. */ 2925 if (!env->aia_ireg_rmw_fn[priv]) { 2926 goto done; 2927 } 2928 2929 /* Find the selected guest interrupt file */ 2930 vgein = (virt) ? 
get_field(env->hstatus, HSTATUS_VGEIN) : 0; 2931 2932 /* Selected guest interrupt file should be valid */ 2933 if (virt && (!vgein || env->geilen < vgein)) { 2934 goto done; 2935 } 2936 2937 /* Call machine specific IMSIC register emulation for TOPEI */ 2938 ret = env->aia_ireg_rmw_fn[priv](env->aia_ireg_rmw_fn_arg[priv], 2939 AIA_MAKE_IREG(ISELECT_IMSIC_TOPEI, priv, virt, vgein, 2940 riscv_cpu_mxl_bits(env)), 2941 val, new_val, wr_mask); 2942 2943 done: 2944 if (ret) { 2945 return (env->virt_enabled && virt) ? 2946 RISCV_EXCP_VIRT_INSTRUCTION_FAULT : RISCV_EXCP_ILLEGAL_INST; 2947 } 2948 return RISCV_EXCP_NONE; 2949 } 2950 2951 static RISCVException read_mtvec(CPURISCVState *env, int csrno, 2952 target_ulong *val) 2953 { 2954 *val = env->mtvec; 2955 return RISCV_EXCP_NONE; 2956 } 2957 2958 static RISCVException write_mtvec(CPURISCVState *env, int csrno, 2959 target_ulong val) 2960 { 2961 /* bits [1:0] encode mode; 0 = direct, 1 = vectored, 2 >= reserved */ 2962 if ((val & 3) < 2) { 2963 env->mtvec = val; 2964 } else { 2965 qemu_log_mask(LOG_UNIMP, "CSR_MTVEC: reserved mode not supported\n"); 2966 } 2967 return RISCV_EXCP_NONE; 2968 } 2969 2970 static RISCVException read_mcountinhibit(CPURISCVState *env, int csrno, 2971 target_ulong *val) 2972 { 2973 *val = env->mcountinhibit; 2974 return RISCV_EXCP_NONE; 2975 } 2976 2977 static RISCVException write_mcountinhibit(CPURISCVState *env, int csrno, 2978 target_ulong val) 2979 { 2980 int cidx; 2981 PMUCTRState *counter; 2982 RISCVCPU *cpu = env_archcpu(env); 2983 uint32_t present_ctrs = cpu->pmu_avail_ctrs | COUNTEREN_CY | COUNTEREN_IR; 2984 target_ulong updated_ctrs = (env->mcountinhibit ^ val) & present_ctrs; 2985 uint64_t mhpmctr_val, prev_count, curr_count; 2986 2987 /* WARL register - disable unavailable counters; TM bit is always 0 */ 2988 env->mcountinhibit = val & present_ctrs; 2989 2990 /* Check if any other counter is also monitoring cycles/instructions */ 2991 for (cidx = 0; cidx < RV_MAX_MHPMCOUNTERS; cidx++) 
{ 2992 if (!(updated_ctrs & BIT(cidx)) || 2993 (!riscv_pmu_ctr_monitor_cycles(env, cidx) && 2994 !riscv_pmu_ctr_monitor_instructions(env, cidx))) { 2995 continue; 2996 } 2997 2998 counter = &env->pmu_ctrs[cidx]; 2999 3000 if (!get_field(env->mcountinhibit, BIT(cidx))) { 3001 counter->mhpmcounter_prev = 3002 riscv_pmu_ctr_get_fixed_counters_val(env, cidx, false); 3003 if (riscv_cpu_mxl(env) == MXL_RV32) { 3004 counter->mhpmcounterh_prev = 3005 riscv_pmu_ctr_get_fixed_counters_val(env, cidx, true); 3006 } 3007 3008 if (cidx > 2) { 3009 mhpmctr_val = counter->mhpmcounter_val; 3010 if (riscv_cpu_mxl(env) == MXL_RV32) { 3011 mhpmctr_val = mhpmctr_val | 3012 ((uint64_t)counter->mhpmcounterh_val << 32); 3013 } 3014 riscv_pmu_setup_timer(env, mhpmctr_val, cidx); 3015 } 3016 } else { 3017 curr_count = riscv_pmu_ctr_get_fixed_counters_val(env, cidx, false); 3018 3019 mhpmctr_val = counter->mhpmcounter_val; 3020 prev_count = counter->mhpmcounter_prev; 3021 if (riscv_cpu_mxl(env) == MXL_RV32) { 3022 uint64_t tmp = 3023 riscv_pmu_ctr_get_fixed_counters_val(env, cidx, true); 3024 3025 curr_count = curr_count | (tmp << 32); 3026 mhpmctr_val = mhpmctr_val | 3027 ((uint64_t)counter->mhpmcounterh_val << 32); 3028 prev_count = prev_count | 3029 ((uint64_t)counter->mhpmcounterh_prev << 32); 3030 } 3031 3032 /* Adjust the counter for later reads. 
 */
            mhpmctr_val = curr_count - prev_count + mhpmctr_val;
            counter->mhpmcounter_val = mhpmctr_val;
            if (riscv_cpu_mxl(env) == MXL_RV32) {
                counter->mhpmcounterh_val = mhpmctr_val >> 32;
            }
        }
    }

    return RISCV_EXCP_NONE;
}

static RISCVException read_scountinhibit(CPURISCVState *env, int csrno,
                                         target_ulong *val)
{
    /* S-mode can only access the bits delegated by M-mode */
    *val = env->mcountinhibit & env->mcounteren;
    return RISCV_EXCP_NONE;
}

static RISCVException write_scountinhibit(CPURISCVState *env, int csrno,
                                          target_ulong val)
{
    /* Only counters delegated via mcounteren are writable from S-mode. */
    write_mcountinhibit(env, csrno, val & env->mcounteren);
    return RISCV_EXCP_NONE;
}

static RISCVException read_mcounteren(CPURISCVState *env, int csrno,
                                      target_ulong *val)
{
    *val = env->mcounteren;
    return RISCV_EXCP_NONE;
}

static RISCVException write_mcounteren(CPURISCVState *env, int csrno,
                                       target_ulong val)
{
    RISCVCPU *cpu = env_archcpu(env);

    /* WARL register - disable unavailable counters */
    env->mcounteren = val & (cpu->pmu_avail_ctrs | COUNTEREN_CY | COUNTEREN_TM |
                             COUNTEREN_IR);
    return RISCV_EXCP_NONE;
}

/* Machine Trap Handling */
static RISCVException read_mscratch_i128(CPURISCVState *env, int csrno,
                                         Int128 *val)
{
    /* RV128: mscratch is backed by two 64-bit fields. */
    *val = int128_make128(env->mscratch, env->mscratchh);
    return RISCV_EXCP_NONE;
}

static RISCVException write_mscratch_i128(CPURISCVState *env, int csrno,
                                          Int128 val)
{
    env->mscratch = int128_getlo(val);
    env->mscratchh = int128_gethi(val);
    return RISCV_EXCP_NONE;
}

static RISCVException read_mscratch(CPURISCVState *env, int csrno,
                                    target_ulong *val)
{
    *val = env->mscratch;
    return RISCV_EXCP_NONE;
}

static RISCVException write_mscratch(CPURISCVState *env, int csrno,
                                     target_ulong val)
{
3103 env->mscratch = val; 3104 return RISCV_EXCP_NONE; 3105 } 3106 3107 static RISCVException read_mepc(CPURISCVState *env, int csrno, 3108 target_ulong *val) 3109 { 3110 *val = env->mepc; 3111 return RISCV_EXCP_NONE; 3112 } 3113 3114 static RISCVException write_mepc(CPURISCVState *env, int csrno, 3115 target_ulong val) 3116 { 3117 env->mepc = val; 3118 return RISCV_EXCP_NONE; 3119 } 3120 3121 static RISCVException read_mcause(CPURISCVState *env, int csrno, 3122 target_ulong *val) 3123 { 3124 *val = env->mcause; 3125 return RISCV_EXCP_NONE; 3126 } 3127 3128 static RISCVException write_mcause(CPURISCVState *env, int csrno, 3129 target_ulong val) 3130 { 3131 env->mcause = val; 3132 return RISCV_EXCP_NONE; 3133 } 3134 3135 static RISCVException read_mtval(CPURISCVState *env, int csrno, 3136 target_ulong *val) 3137 { 3138 *val = env->mtval; 3139 return RISCV_EXCP_NONE; 3140 } 3141 3142 static RISCVException write_mtval(CPURISCVState *env, int csrno, 3143 target_ulong val) 3144 { 3145 env->mtval = val; 3146 return RISCV_EXCP_NONE; 3147 } 3148 3149 /* Execution environment configuration setup */ 3150 static RISCVException read_menvcfg(CPURISCVState *env, int csrno, 3151 target_ulong *val) 3152 { 3153 *val = env->menvcfg; 3154 return RISCV_EXCP_NONE; 3155 } 3156 3157 static RISCVException write_henvcfg(CPURISCVState *env, int csrno, 3158 target_ulong val); 3159 static RISCVException write_menvcfg(CPURISCVState *env, int csrno, 3160 target_ulong val) 3161 { 3162 const RISCVCPUConfig *cfg = riscv_cpu_cfg(env); 3163 uint64_t mask = MENVCFG_FIOM | MENVCFG_CBIE | MENVCFG_CBCFE | 3164 MENVCFG_CBZE | MENVCFG_CDE; 3165 3166 if (riscv_cpu_mxl(env) == MXL_RV64) { 3167 mask |= (cfg->ext_svpbmt ? MENVCFG_PBMTE : 0) | 3168 (cfg->ext_sstc ? MENVCFG_STCE : 0) | 3169 (cfg->ext_smcdeleg ? MENVCFG_CDE : 0) | 3170 (cfg->ext_svadu ? MENVCFG_ADUE : 0) | 3171 (cfg->ext_ssdbltrp ? 
MENVCFG_DTE : 0); 3172 3173 if (env_archcpu(env)->cfg.ext_zicfilp) { 3174 mask |= MENVCFG_LPE; 3175 } 3176 3177 if (env_archcpu(env)->cfg.ext_zicfiss) { 3178 mask |= MENVCFG_SSE; 3179 } 3180 3181 /* Update PMM field only if the value is valid according to Zjpm v1.0 */ 3182 if (env_archcpu(env)->cfg.ext_smnpm && 3183 get_field(val, MENVCFG_PMM) != PMM_FIELD_RESERVED) { 3184 mask |= MENVCFG_PMM; 3185 } 3186 3187 if ((val & MENVCFG_DTE) == 0) { 3188 env->mstatus &= ~MSTATUS_SDT; 3189 } 3190 } 3191 env->menvcfg = (env->menvcfg & ~mask) | (val & mask); 3192 write_henvcfg(env, CSR_HENVCFG, env->henvcfg); 3193 3194 return RISCV_EXCP_NONE; 3195 } 3196 3197 static RISCVException read_menvcfgh(CPURISCVState *env, int csrno, 3198 target_ulong *val) 3199 { 3200 *val = env->menvcfg >> 32; 3201 return RISCV_EXCP_NONE; 3202 } 3203 3204 static RISCVException write_henvcfgh(CPURISCVState *env, int csrno, 3205 target_ulong val); 3206 static RISCVException write_menvcfgh(CPURISCVState *env, int csrno, 3207 target_ulong val) 3208 { 3209 const RISCVCPUConfig *cfg = riscv_cpu_cfg(env); 3210 uint64_t mask = (cfg->ext_svpbmt ? MENVCFG_PBMTE : 0) | 3211 (cfg->ext_sstc ? MENVCFG_STCE : 0) | 3212 (cfg->ext_svadu ? MENVCFG_ADUE : 0) | 3213 (cfg->ext_smcdeleg ? MENVCFG_CDE : 0) | 3214 (cfg->ext_ssdbltrp ? 
MENVCFG_DTE : 0); 3215 uint64_t valh = (uint64_t)val << 32; 3216 3217 if ((valh & MENVCFG_DTE) == 0) { 3218 env->mstatus &= ~MSTATUS_SDT; 3219 } 3220 3221 env->menvcfg = (env->menvcfg & ~mask) | (valh & mask); 3222 write_henvcfgh(env, CSR_HENVCFGH, env->henvcfg >> 32); 3223 3224 return RISCV_EXCP_NONE; 3225 } 3226 3227 static RISCVException read_senvcfg(CPURISCVState *env, int csrno, 3228 target_ulong *val) 3229 { 3230 RISCVException ret; 3231 3232 ret = smstateen_acc_ok(env, 0, SMSTATEEN0_HSENVCFG); 3233 if (ret != RISCV_EXCP_NONE) { 3234 return ret; 3235 } 3236 3237 *val = env->senvcfg; 3238 return RISCV_EXCP_NONE; 3239 } 3240 3241 static RISCVException write_senvcfg(CPURISCVState *env, int csrno, 3242 target_ulong val) 3243 { 3244 uint64_t mask = SENVCFG_FIOM | SENVCFG_CBIE | SENVCFG_CBCFE | SENVCFG_CBZE; 3245 RISCVException ret; 3246 /* Update PMM field only if the value is valid according to Zjpm v1.0 */ 3247 if (env_archcpu(env)->cfg.ext_ssnpm && 3248 riscv_cpu_mxl(env) == MXL_RV64 && 3249 get_field(val, SENVCFG_PMM) != PMM_FIELD_RESERVED) { 3250 mask |= SENVCFG_PMM; 3251 } 3252 3253 ret = smstateen_acc_ok(env, 0, SMSTATEEN0_HSENVCFG); 3254 if (ret != RISCV_EXCP_NONE) { 3255 return ret; 3256 } 3257 3258 if (env_archcpu(env)->cfg.ext_zicfilp) { 3259 mask |= SENVCFG_LPE; 3260 } 3261 3262 /* Higher mode SSE must be ON for next-less mode SSE to be ON */ 3263 if (env_archcpu(env)->cfg.ext_zicfiss && 3264 get_field(env->menvcfg, MENVCFG_SSE) && 3265 (env->virt_enabled ? 
get_field(env->henvcfg, HENVCFG_SSE) : true)) { 3266 mask |= SENVCFG_SSE; 3267 } 3268 3269 if (env_archcpu(env)->cfg.ext_svukte) { 3270 mask |= SENVCFG_UKTE; 3271 } 3272 3273 env->senvcfg = (env->senvcfg & ~mask) | (val & mask); 3274 return RISCV_EXCP_NONE; 3275 } 3276 3277 static RISCVException read_henvcfg(CPURISCVState *env, int csrno, 3278 target_ulong *val) 3279 { 3280 RISCVException ret; 3281 3282 ret = smstateen_acc_ok(env, 0, SMSTATEEN0_HSENVCFG); 3283 if (ret != RISCV_EXCP_NONE) { 3284 return ret; 3285 } 3286 3287 /* 3288 * henvcfg.pbmte is read_only 0 when menvcfg.pbmte = 0 3289 * henvcfg.stce is read_only 0 when menvcfg.stce = 0 3290 * henvcfg.adue is read_only 0 when menvcfg.adue = 0 3291 * henvcfg.dte is read_only 0 when menvcfg.dte = 0 3292 */ 3293 *val = env->henvcfg & (~(HENVCFG_PBMTE | HENVCFG_STCE | HENVCFG_ADUE | 3294 HENVCFG_DTE) | env->menvcfg); 3295 return RISCV_EXCP_NONE; 3296 } 3297 3298 static RISCVException write_henvcfg(CPURISCVState *env, int csrno, 3299 target_ulong val) 3300 { 3301 uint64_t mask = HENVCFG_FIOM | HENVCFG_CBIE | HENVCFG_CBCFE | HENVCFG_CBZE; 3302 RISCVException ret; 3303 3304 ret = smstateen_acc_ok(env, 0, SMSTATEEN0_HSENVCFG); 3305 if (ret != RISCV_EXCP_NONE) { 3306 return ret; 3307 } 3308 3309 if (riscv_cpu_mxl(env) == MXL_RV64) { 3310 mask |= env->menvcfg & (HENVCFG_PBMTE | HENVCFG_STCE | HENVCFG_ADUE | 3311 HENVCFG_DTE); 3312 3313 if (env_archcpu(env)->cfg.ext_zicfilp) { 3314 mask |= HENVCFG_LPE; 3315 } 3316 3317 /* H can light up SSE for VS only if HS had it from menvcfg */ 3318 if (env_archcpu(env)->cfg.ext_zicfiss && 3319 get_field(env->menvcfg, MENVCFG_SSE)) { 3320 mask |= HENVCFG_SSE; 3321 } 3322 3323 /* Update PMM field only if the value is valid according to Zjpm v1.0 */ 3324 if (env_archcpu(env)->cfg.ext_ssnpm && 3325 get_field(val, HENVCFG_PMM) != PMM_FIELD_RESERVED) { 3326 mask |= HENVCFG_PMM; 3327 } 3328 } 3329 3330 env->henvcfg = val & mask; 3331 if ((env->henvcfg & HENVCFG_DTE) == 0) { 3332 env->vsstatus 
&= ~MSTATUS_SDT; 3333 } 3334 3335 return RISCV_EXCP_NONE; 3336 } 3337 3338 static RISCVException read_henvcfgh(CPURISCVState *env, int csrno, 3339 target_ulong *val) 3340 { 3341 RISCVException ret; 3342 3343 ret = smstateen_acc_ok(env, 0, SMSTATEEN0_HSENVCFG); 3344 if (ret != RISCV_EXCP_NONE) { 3345 return ret; 3346 } 3347 3348 *val = (env->henvcfg & (~(HENVCFG_PBMTE | HENVCFG_STCE | HENVCFG_ADUE | 3349 HENVCFG_DTE) | env->menvcfg)) >> 32; 3350 return RISCV_EXCP_NONE; 3351 } 3352 3353 static RISCVException write_henvcfgh(CPURISCVState *env, int csrno, 3354 target_ulong val) 3355 { 3356 uint64_t mask = env->menvcfg & (HENVCFG_PBMTE | HENVCFG_STCE | 3357 HENVCFG_ADUE | HENVCFG_DTE); 3358 uint64_t valh = (uint64_t)val << 32; 3359 RISCVException ret; 3360 3361 ret = smstateen_acc_ok(env, 0, SMSTATEEN0_HSENVCFG); 3362 if (ret != RISCV_EXCP_NONE) { 3363 return ret; 3364 } 3365 env->henvcfg = (env->henvcfg & 0xFFFFFFFF) | (valh & mask); 3366 if ((env->henvcfg & HENVCFG_DTE) == 0) { 3367 env->vsstatus &= ~MSTATUS_SDT; 3368 } 3369 return RISCV_EXCP_NONE; 3370 } 3371 3372 static RISCVException read_mstateen(CPURISCVState *env, int csrno, 3373 target_ulong *val) 3374 { 3375 *val = env->mstateen[csrno - CSR_MSTATEEN0]; 3376 3377 return RISCV_EXCP_NONE; 3378 } 3379 3380 static RISCVException write_mstateen(CPURISCVState *env, int csrno, 3381 uint64_t wr_mask, target_ulong new_val) 3382 { 3383 uint64_t *reg; 3384 3385 reg = &env->mstateen[csrno - CSR_MSTATEEN0]; 3386 *reg = (*reg & ~wr_mask) | (new_val & wr_mask); 3387 3388 return RISCV_EXCP_NONE; 3389 } 3390 3391 static RISCVException write_mstateen0(CPURISCVState *env, int csrno, 3392 target_ulong new_val) 3393 { 3394 uint64_t wr_mask = SMSTATEEN_STATEEN | SMSTATEEN0_HSENVCFG; 3395 if (!riscv_has_ext(env, RVF)) { 3396 wr_mask |= SMSTATEEN0_FCSR; 3397 } 3398 3399 if (env->priv_ver >= PRIV_VERSION_1_13_0) { 3400 wr_mask |= SMSTATEEN0_P1P13; 3401 } 3402 3403 if (riscv_cpu_cfg(env)->ext_smaia || riscv_cpu_cfg(env)->ext_smcsrind) { 
wr_mask |= SMSTATEEN0_SVSLCT;
    }

    /*
     * As per the AIA specification, SMSTATEEN0_IMSIC is valid only if IMSIC is
     * implemented. However, that information is with MachineState and we can't
     * figure that out in csr.c. Just enable if Smaia is available.
     */
    if (riscv_cpu_cfg(env)->ext_smaia) {
        wr_mask |= (SMSTATEEN0_AIA | SMSTATEEN0_IMSIC);
    }

    if (riscv_cpu_cfg(env)->ext_ssctr) {
        wr_mask |= SMSTATEEN0_CTR;
    }

    return write_mstateen(env, csrno, wr_mask, new_val);
}

/* mstateen1..3 implement only the top-level SE0 (STATEEN) bit. */
static RISCVException write_mstateen_1_3(CPURISCVState *env, int csrno,
                                         target_ulong new_val)
{
    return write_mstateen(env, csrno, SMSTATEEN_STATEEN, new_val);
}

/* RV32 view of the high half of a 64-bit mstateen register. */
static RISCVException read_mstateenh(CPURISCVState *env, int csrno,
                                     target_ulong *val)
{
    *val = env->mstateen[csrno - CSR_MSTATEEN0H] >> 32;

    return RISCV_EXCP_NONE;
}

/* Merge new_val into bits 63:32 of the selected mstateen register. */
static RISCVException write_mstateenh(CPURISCVState *env, int csrno,
                                      uint64_t wr_mask, target_ulong new_val)
{
    uint64_t *reg, val;

    reg = &env->mstateen[csrno - CSR_MSTATEEN0H];
    val = (uint64_t)new_val << 32;
    val |= *reg & 0xFFFFFFFF;
    *reg = (*reg & ~wr_mask) | (val & wr_mask);

    return RISCV_EXCP_NONE;
}

static RISCVException write_mstateen0h(CPURISCVState *env, int csrno,
                                       target_ulong new_val)
{
    uint64_t wr_mask = SMSTATEEN_STATEEN | SMSTATEEN0_HSENVCFG;

    if (env->priv_ver >= PRIV_VERSION_1_13_0) {
        wr_mask |= SMSTATEEN0_P1P13;
    }

    if (riscv_cpu_cfg(env)->ext_ssctr) {
        wr_mask |= SMSTATEEN0_CTR;
    }

    return write_mstateenh(env, csrno, wr_mask, new_val);
}

static RISCVException write_mstateenh_1_3(CPURISCVState *env, int csrno,
                                          target_ulong new_val)
{
    return write_mstateenh(env, csrno, SMSTATEEN_STATEEN, new_val);
}

static RISCVException read_hstateen(CPURISCVState
*env, int csrno, 3473 target_ulong *val) 3474 { 3475 int index = csrno - CSR_HSTATEEN0; 3476 3477 *val = env->hstateen[index] & env->mstateen[index]; 3478 3479 return RISCV_EXCP_NONE; 3480 } 3481 3482 static RISCVException write_hstateen(CPURISCVState *env, int csrno, 3483 uint64_t mask, target_ulong new_val) 3484 { 3485 int index = csrno - CSR_HSTATEEN0; 3486 uint64_t *reg, wr_mask; 3487 3488 reg = &env->hstateen[index]; 3489 wr_mask = env->mstateen[index] & mask; 3490 *reg = (*reg & ~wr_mask) | (new_val & wr_mask); 3491 3492 return RISCV_EXCP_NONE; 3493 } 3494 3495 static RISCVException write_hstateen0(CPURISCVState *env, int csrno, 3496 target_ulong new_val) 3497 { 3498 uint64_t wr_mask = SMSTATEEN_STATEEN | SMSTATEEN0_HSENVCFG; 3499 3500 if (!riscv_has_ext(env, RVF)) { 3501 wr_mask |= SMSTATEEN0_FCSR; 3502 } 3503 3504 if (riscv_cpu_cfg(env)->ext_ssaia || riscv_cpu_cfg(env)->ext_sscsrind) { 3505 wr_mask |= SMSTATEEN0_SVSLCT; 3506 } 3507 3508 /* 3509 * As per the AIA specification, SMSTATEEN0_IMSIC is valid only if IMSIC is 3510 * implemented. However, that information is with MachineState and we can't 3511 * figure that out in csr.c. Just enable if Ssaia is available. 
3512 */ 3513 if (riscv_cpu_cfg(env)->ext_ssaia) { 3514 wr_mask |= (SMSTATEEN0_AIA | SMSTATEEN0_IMSIC); 3515 } 3516 3517 if (riscv_cpu_cfg(env)->ext_ssctr) { 3518 wr_mask |= SMSTATEEN0_CTR; 3519 } 3520 3521 return write_hstateen(env, csrno, wr_mask, new_val); 3522 } 3523 3524 static RISCVException write_hstateen_1_3(CPURISCVState *env, int csrno, 3525 target_ulong new_val) 3526 { 3527 return write_hstateen(env, csrno, SMSTATEEN_STATEEN, new_val); 3528 } 3529 3530 static RISCVException read_hstateenh(CPURISCVState *env, int csrno, 3531 target_ulong *val) 3532 { 3533 int index = csrno - CSR_HSTATEEN0H; 3534 3535 *val = (env->hstateen[index] >> 32) & (env->mstateen[index] >> 32); 3536 3537 return RISCV_EXCP_NONE; 3538 } 3539 3540 static RISCVException write_hstateenh(CPURISCVState *env, int csrno, 3541 uint64_t mask, target_ulong new_val) 3542 { 3543 int index = csrno - CSR_HSTATEEN0H; 3544 uint64_t *reg, wr_mask, val; 3545 3546 reg = &env->hstateen[index]; 3547 val = (uint64_t)new_val << 32; 3548 val |= *reg & 0xFFFFFFFF; 3549 wr_mask = env->mstateen[index] & mask; 3550 *reg = (*reg & ~wr_mask) | (val & wr_mask); 3551 3552 return RISCV_EXCP_NONE; 3553 } 3554 3555 static RISCVException write_hstateen0h(CPURISCVState *env, int csrno, 3556 target_ulong new_val) 3557 { 3558 uint64_t wr_mask = SMSTATEEN_STATEEN | SMSTATEEN0_HSENVCFG; 3559 3560 if (riscv_cpu_cfg(env)->ext_ssctr) { 3561 wr_mask |= SMSTATEEN0_CTR; 3562 } 3563 3564 return write_hstateenh(env, csrno, wr_mask, new_val); 3565 } 3566 3567 static RISCVException write_hstateenh_1_3(CPURISCVState *env, int csrno, 3568 target_ulong new_val) 3569 { 3570 return write_hstateenh(env, csrno, SMSTATEEN_STATEEN, new_val); 3571 } 3572 3573 static RISCVException read_sstateen(CPURISCVState *env, int csrno, 3574 target_ulong *val) 3575 { 3576 bool virt = env->virt_enabled; 3577 int index = csrno - CSR_SSTATEEN0; 3578 3579 *val = env->sstateen[index] & env->mstateen[index]; 3580 if (virt) { 3581 *val &= env->hstateen[index]; 
3582 } 3583 3584 return RISCV_EXCP_NONE; 3585 } 3586 3587 static RISCVException write_sstateen(CPURISCVState *env, int csrno, 3588 uint64_t mask, target_ulong new_val) 3589 { 3590 bool virt = env->virt_enabled; 3591 int index = csrno - CSR_SSTATEEN0; 3592 uint64_t wr_mask; 3593 uint64_t *reg; 3594 3595 wr_mask = env->mstateen[index] & mask; 3596 if (virt) { 3597 wr_mask &= env->hstateen[index]; 3598 } 3599 3600 reg = &env->sstateen[index]; 3601 *reg = (*reg & ~wr_mask) | (new_val & wr_mask); 3602 3603 return RISCV_EXCP_NONE; 3604 } 3605 3606 static RISCVException write_sstateen0(CPURISCVState *env, int csrno, 3607 target_ulong new_val) 3608 { 3609 uint64_t wr_mask = SMSTATEEN_STATEEN | SMSTATEEN0_HSENVCFG; 3610 3611 if (!riscv_has_ext(env, RVF)) { 3612 wr_mask |= SMSTATEEN0_FCSR; 3613 } 3614 3615 return write_sstateen(env, csrno, wr_mask, new_val); 3616 } 3617 3618 static RISCVException write_sstateen_1_3(CPURISCVState *env, int csrno, 3619 target_ulong new_val) 3620 { 3621 return write_sstateen(env, csrno, SMSTATEEN_STATEEN, new_val); 3622 } 3623 3624 static RISCVException rmw_mip64(CPURISCVState *env, int csrno, 3625 uint64_t *ret_val, 3626 uint64_t new_val, uint64_t wr_mask) 3627 { 3628 uint64_t old_mip, mask = wr_mask & delegable_ints; 3629 uint32_t gin; 3630 3631 if (mask & MIP_SEIP) { 3632 env->software_seip = new_val & MIP_SEIP; 3633 new_val |= env->external_seip * MIP_SEIP; 3634 } 3635 3636 if (riscv_cpu_cfg(env)->ext_sstc && (env->priv == PRV_M) && 3637 get_field(env->menvcfg, MENVCFG_STCE)) { 3638 /* sstc extension forbids STIP & VSTIP to be writeable in mip */ 3639 mask = mask & ~(MIP_STIP | MIP_VSTIP); 3640 } 3641 3642 if (mask) { 3643 old_mip = riscv_cpu_update_mip(env, mask, (new_val & mask)); 3644 } else { 3645 old_mip = env->mip; 3646 } 3647 3648 if (csrno != CSR_HVIP) { 3649 gin = get_field(env->hstatus, HSTATUS_VGEIN); 3650 old_mip |= (env->hgeip & ((target_ulong)1 << gin)) ? MIP_VSEIP : 0; 3651 old_mip |= env->vstime_irq ? 
MIP_VSTIP : 0; 3652 } 3653 3654 if (ret_val) { 3655 *ret_val = old_mip; 3656 } 3657 3658 return RISCV_EXCP_NONE; 3659 } 3660 3661 static RISCVException rmw_mip(CPURISCVState *env, int csrno, 3662 target_ulong *ret_val, 3663 target_ulong new_val, target_ulong wr_mask) 3664 { 3665 uint64_t rval; 3666 RISCVException ret; 3667 3668 ret = rmw_mip64(env, csrno, &rval, new_val, wr_mask); 3669 if (ret_val) { 3670 *ret_val = rval; 3671 } 3672 3673 return ret; 3674 } 3675 3676 static RISCVException rmw_miph(CPURISCVState *env, int csrno, 3677 target_ulong *ret_val, 3678 target_ulong new_val, target_ulong wr_mask) 3679 { 3680 uint64_t rval; 3681 RISCVException ret; 3682 3683 ret = rmw_mip64(env, csrno, &rval, 3684 ((uint64_t)new_val) << 32, ((uint64_t)wr_mask) << 32); 3685 if (ret_val) { 3686 *ret_val = rval >> 32; 3687 } 3688 3689 return ret; 3690 } 3691 3692 /* 3693 * The function is written for two use-cases: 3694 * 1- To access mvip csr as is for m-mode access. 3695 * 2- To access sip as a combination of mip and mvip for s-mode. 3696 * 3697 * Both report bits 1, 5, 9 and 13:63 but with the exception of 3698 * STIP being read-only zero in case of mvip when sstc extension 3699 * is present. 3700 * Also, sip needs to be read-only zero when both mideleg[i] and 3701 * mvien[i] are zero but mvip needs to be an alias of mip. 3702 */ 3703 static RISCVException rmw_mvip64(CPURISCVState *env, int csrno, 3704 uint64_t *ret_val, 3705 uint64_t new_val, uint64_t wr_mask) 3706 { 3707 RISCVCPU *cpu = env_archcpu(env); 3708 target_ulong ret_mip = 0; 3709 RISCVException ret; 3710 uint64_t old_mvip; 3711 3712 /* 3713 * mideleg[i] mvien[i] 3714 * 0 0 No delegation. mvip[i] is alias of mip[i]. 3715 * 0 1 mvip[i] becomes source of interrupt, mip bypassed. 3716 * 1 X mip[i] is source of interrupt and mvip[i] aliases 3717 * mip[i]. 
3718 * 3719 * So alias condition would be for bits: 3720 * ((S_MODE_INTERRUPTS | LOCAL_INTERRUPTS) & (mideleg | ~mvien)) | 3721 * (!sstc & MIP_STIP) 3722 * 3723 * Non-alias condition will be for bits: 3724 * (S_MODE_INTERRUPTS | LOCAL_INTERRUPTS) & (~mideleg & mvien) 3725 * 3726 * alias_mask denotes the bits that come from mip nalias_mask denotes bits 3727 * that come from hvip. 3728 */ 3729 uint64_t alias_mask = ((S_MODE_INTERRUPTS | LOCAL_INTERRUPTS) & 3730 (env->mideleg | ~env->mvien)) | MIP_STIP; 3731 uint64_t nalias_mask = (S_MODE_INTERRUPTS | LOCAL_INTERRUPTS) & 3732 (~env->mideleg & env->mvien); 3733 uint64_t wr_mask_mvip; 3734 uint64_t wr_mask_mip; 3735 3736 /* 3737 * mideleg[i] mvien[i] 3738 * 0 0 sip[i] read-only zero. 3739 * 0 1 sip[i] alias of mvip[i]. 3740 * 1 X sip[i] alias of mip[i]. 3741 * 3742 * Both alias and non-alias mask remain same for sip except for bits 3743 * which are zero in both mideleg and mvien. 3744 */ 3745 if (csrno == CSR_SIP) { 3746 /* Remove bits that are zero in both mideleg and mvien. */ 3747 alias_mask &= (env->mideleg | env->mvien); 3748 nalias_mask &= (env->mideleg | env->mvien); 3749 } 3750 3751 /* 3752 * If sstc is present, mvip.STIP is not an alias of mip.STIP so clear 3753 * that our in mip returned value. 3754 */ 3755 if (cpu->cfg.ext_sstc && (env->priv == PRV_M) && 3756 get_field(env->menvcfg, MENVCFG_STCE)) { 3757 alias_mask &= ~MIP_STIP; 3758 } 3759 3760 wr_mask_mip = wr_mask & alias_mask & mvip_writable_mask; 3761 wr_mask_mvip = wr_mask & nalias_mask & mvip_writable_mask; 3762 3763 /* 3764 * For bits set in alias_mask, mvip needs to be alias of mip, so forward 3765 * this to rmw_mip. 3766 */ 3767 ret = rmw_mip(env, CSR_MIP, &ret_mip, new_val, wr_mask_mip); 3768 if (ret != RISCV_EXCP_NONE) { 3769 return ret; 3770 } 3771 3772 old_mvip = env->mvip; 3773 3774 /* 3775 * Write to mvip. Update only non-alias bits. Alias bits were updated 3776 * in mip in rmw_mip above. 
3777 */ 3778 if (wr_mask_mvip) { 3779 env->mvip = (env->mvip & ~wr_mask_mvip) | (new_val & wr_mask_mvip); 3780 3781 /* 3782 * Given mvip is separate source from mip, we need to trigger interrupt 3783 * from here separately. Normally this happen from riscv_cpu_update_mip. 3784 */ 3785 riscv_cpu_interrupt(env); 3786 } 3787 3788 if (ret_val) { 3789 ret_mip &= alias_mask; 3790 old_mvip &= nalias_mask; 3791 3792 *ret_val = old_mvip | ret_mip; 3793 } 3794 3795 return RISCV_EXCP_NONE; 3796 } 3797 3798 static RISCVException rmw_mvip(CPURISCVState *env, int csrno, 3799 target_ulong *ret_val, 3800 target_ulong new_val, target_ulong wr_mask) 3801 { 3802 uint64_t rval; 3803 RISCVException ret; 3804 3805 ret = rmw_mvip64(env, csrno, &rval, new_val, wr_mask); 3806 if (ret_val) { 3807 *ret_val = rval; 3808 } 3809 3810 return ret; 3811 } 3812 3813 static RISCVException rmw_mviph(CPURISCVState *env, int csrno, 3814 target_ulong *ret_val, 3815 target_ulong new_val, target_ulong wr_mask) 3816 { 3817 uint64_t rval; 3818 RISCVException ret; 3819 3820 ret = rmw_mvip64(env, csrno, &rval, 3821 ((uint64_t)new_val) << 32, ((uint64_t)wr_mask) << 32); 3822 if (ret_val) { 3823 *ret_val = rval >> 32; 3824 } 3825 3826 return ret; 3827 } 3828 3829 /* Supervisor Trap Setup */ 3830 static RISCVException read_sstatus_i128(CPURISCVState *env, int csrno, 3831 Int128 *val) 3832 { 3833 uint64_t mask = sstatus_v1_10_mask; 3834 uint64_t sstatus = env->mstatus & mask; 3835 if (env->xl != MXL_RV32 || env->debugger) { 3836 mask |= SSTATUS64_UXL; 3837 } 3838 if (riscv_cpu_cfg(env)->ext_ssdbltrp) { 3839 mask |= SSTATUS_SDT; 3840 } 3841 3842 if (env_archcpu(env)->cfg.ext_zicfilp) { 3843 mask |= SSTATUS_SPELP; 3844 } 3845 3846 *val = int128_make128(sstatus, add_status_sd(MXL_RV128, sstatus)); 3847 return RISCV_EXCP_NONE; 3848 } 3849 3850 static RISCVException read_sstatus(CPURISCVState *env, int csrno, 3851 target_ulong *val) 3852 { 3853 target_ulong mask = (sstatus_v1_10_mask); 3854 if (env->xl != MXL_RV32 || 
env->debugger) { 3855 mask |= SSTATUS64_UXL; 3856 } 3857 3858 if (env_archcpu(env)->cfg.ext_zicfilp) { 3859 mask |= SSTATUS_SPELP; 3860 } 3861 if (riscv_cpu_cfg(env)->ext_ssdbltrp) { 3862 mask |= SSTATUS_SDT; 3863 } 3864 /* TODO: Use SXL not MXL. */ 3865 *val = add_status_sd(riscv_cpu_mxl(env), env->mstatus & mask); 3866 return RISCV_EXCP_NONE; 3867 } 3868 3869 static RISCVException write_sstatus(CPURISCVState *env, int csrno, 3870 target_ulong val) 3871 { 3872 target_ulong mask = (sstatus_v1_10_mask); 3873 3874 if (env->xl != MXL_RV32 || env->debugger) { 3875 if ((val & SSTATUS64_UXL) != 0) { 3876 mask |= SSTATUS64_UXL; 3877 } 3878 } 3879 3880 if (env_archcpu(env)->cfg.ext_zicfilp) { 3881 mask |= SSTATUS_SPELP; 3882 } 3883 if (riscv_cpu_cfg(env)->ext_ssdbltrp) { 3884 mask |= SSTATUS_SDT; 3885 } 3886 target_ulong newval = (env->mstatus & ~mask) | (val & mask); 3887 return write_mstatus(env, CSR_MSTATUS, newval); 3888 } 3889 3890 static RISCVException rmw_vsie64(CPURISCVState *env, int csrno, 3891 uint64_t *ret_val, 3892 uint64_t new_val, uint64_t wr_mask) 3893 { 3894 uint64_t alias_mask = (LOCAL_INTERRUPTS | VS_MODE_INTERRUPTS) & 3895 env->hideleg; 3896 uint64_t nalias_mask = LOCAL_INTERRUPTS & (~env->hideleg & env->hvien); 3897 uint64_t rval, rval_vs, vsbits; 3898 uint64_t wr_mask_vsie; 3899 uint64_t wr_mask_mie; 3900 RISCVException ret; 3901 3902 /* Bring VS-level bits to correct position */ 3903 vsbits = new_val & (VS_MODE_INTERRUPTS >> 1); 3904 new_val &= ~(VS_MODE_INTERRUPTS >> 1); 3905 new_val |= vsbits << 1; 3906 3907 vsbits = wr_mask & (VS_MODE_INTERRUPTS >> 1); 3908 wr_mask &= ~(VS_MODE_INTERRUPTS >> 1); 3909 wr_mask |= vsbits << 1; 3910 3911 wr_mask_mie = wr_mask & alias_mask; 3912 wr_mask_vsie = wr_mask & nalias_mask; 3913 3914 ret = rmw_mie64(env, csrno, &rval, new_val, wr_mask_mie); 3915 3916 rval_vs = env->vsie & nalias_mask; 3917 env->vsie = (env->vsie & ~wr_mask_vsie) | (new_val & wr_mask_vsie); 3918 3919 if (ret_val) { 3920 rval &= alias_mask; 3921 
vsbits = rval & VS_MODE_INTERRUPTS; 3922 rval &= ~VS_MODE_INTERRUPTS; 3923 *ret_val = rval | (vsbits >> 1) | rval_vs; 3924 } 3925 3926 return ret; 3927 } 3928 3929 static RISCVException rmw_vsie(CPURISCVState *env, int csrno, 3930 target_ulong *ret_val, 3931 target_ulong new_val, target_ulong wr_mask) 3932 { 3933 uint64_t rval; 3934 RISCVException ret; 3935 3936 ret = rmw_vsie64(env, csrno, &rval, new_val, wr_mask); 3937 if (ret_val) { 3938 *ret_val = rval; 3939 } 3940 3941 return ret; 3942 } 3943 3944 static RISCVException rmw_vsieh(CPURISCVState *env, int csrno, 3945 target_ulong *ret_val, 3946 target_ulong new_val, target_ulong wr_mask) 3947 { 3948 uint64_t rval; 3949 RISCVException ret; 3950 3951 ret = rmw_vsie64(env, csrno, &rval, 3952 ((uint64_t)new_val) << 32, ((uint64_t)wr_mask) << 32); 3953 if (ret_val) { 3954 *ret_val = rval >> 32; 3955 } 3956 3957 return ret; 3958 } 3959 3960 static RISCVException rmw_sie64(CPURISCVState *env, int csrno, 3961 uint64_t *ret_val, 3962 uint64_t new_val, uint64_t wr_mask) 3963 { 3964 uint64_t nalias_mask = (S_MODE_INTERRUPTS | LOCAL_INTERRUPTS) & 3965 (~env->mideleg & env->mvien); 3966 uint64_t alias_mask = (S_MODE_INTERRUPTS | LOCAL_INTERRUPTS) & env->mideleg; 3967 uint64_t sie_mask = wr_mask & nalias_mask; 3968 RISCVException ret; 3969 3970 /* 3971 * mideleg[i] mvien[i] 3972 * 0 0 sie[i] read-only zero. 3973 * 0 1 sie[i] is a separate writable bit. 3974 * 1 X sie[i] alias of mie[i]. 3975 * 3976 * Both alias and non-alias mask remain same for sip except for bits 3977 * which are zero in both mideleg and mvien. 
3978 */ 3979 if (env->virt_enabled) { 3980 if (env->hvictl & HVICTL_VTI) { 3981 return RISCV_EXCP_VIRT_INSTRUCTION_FAULT; 3982 } 3983 ret = rmw_vsie64(env, CSR_VSIE, ret_val, new_val, wr_mask); 3984 if (ret_val) { 3985 *ret_val &= alias_mask; 3986 } 3987 } else { 3988 ret = rmw_mie64(env, csrno, ret_val, new_val, wr_mask & alias_mask); 3989 if (ret_val) { 3990 *ret_val &= alias_mask; 3991 *ret_val |= env->sie & nalias_mask; 3992 } 3993 3994 env->sie = (env->sie & ~sie_mask) | (new_val & sie_mask); 3995 } 3996 3997 return ret; 3998 } 3999 4000 static RISCVException rmw_sie(CPURISCVState *env, int csrno, 4001 target_ulong *ret_val, 4002 target_ulong new_val, target_ulong wr_mask) 4003 { 4004 uint64_t rval; 4005 RISCVException ret; 4006 4007 ret = rmw_sie64(env, csrno, &rval, new_val, wr_mask); 4008 if (ret == RISCV_EXCP_NONE && ret_val) { 4009 *ret_val = rval; 4010 } 4011 4012 return ret; 4013 } 4014 4015 static RISCVException rmw_sieh(CPURISCVState *env, int csrno, 4016 target_ulong *ret_val, 4017 target_ulong new_val, target_ulong wr_mask) 4018 { 4019 uint64_t rval; 4020 RISCVException ret; 4021 4022 ret = rmw_sie64(env, csrno, &rval, 4023 ((uint64_t)new_val) << 32, ((uint64_t)wr_mask) << 32); 4024 if (ret_val) { 4025 *ret_val = rval >> 32; 4026 } 4027 4028 return ret; 4029 } 4030 4031 static RISCVException read_stvec(CPURISCVState *env, int csrno, 4032 target_ulong *val) 4033 { 4034 *val = env->stvec; 4035 return RISCV_EXCP_NONE; 4036 } 4037 4038 static RISCVException write_stvec(CPURISCVState *env, int csrno, 4039 target_ulong val) 4040 { 4041 /* bits [1:0] encode mode; 0 = direct, 1 = vectored, 2 >= reserved */ 4042 if ((val & 3) < 2) { 4043 env->stvec = val; 4044 } else { 4045 qemu_log_mask(LOG_UNIMP, "CSR_STVEC: reserved mode not supported\n"); 4046 } 4047 return RISCV_EXCP_NONE; 4048 } 4049 4050 static RISCVException read_scounteren(CPURISCVState *env, int csrno, 4051 target_ulong *val) 4052 { 4053 *val = env->scounteren; 4054 return RISCV_EXCP_NONE; 4055 } 

/*
 * scounteren is WARL: only counter-enable bits that are backed by an
 * implemented PMU counter (plus the fixed CY/TM/IR bits) can be set;
 * all other bits are forced to zero.
 */
static RISCVException write_scounteren(CPURISCVState *env, int csrno,
                                       target_ulong val)
{
    RISCVCPU *cpu = env_archcpu(env);

    /* WARL register - disable unavailable counters */
    env->scounteren = val & (cpu->pmu_avail_ctrs | COUNTEREN_CY | COUNTEREN_TM |
                             COUNTEREN_IR);
    return RISCV_EXCP_NONE;
}

/* Supervisor Trap Handling */

/* RV128: sscratch is stored as two 64-bit halves (sscratch/sscratchh). */
static RISCVException read_sscratch_i128(CPURISCVState *env, int csrno,
                                         Int128 *val)
{
    *val = int128_make128(env->sscratch, env->sscratchh);
    return RISCV_EXCP_NONE;
}

static RISCVException write_sscratch_i128(CPURISCVState *env, int csrno,
                                          Int128 val)
{
    env->sscratch = int128_getlo(val);
    env->sscratchh = int128_gethi(val);
    return RISCV_EXCP_NONE;
}

/* Plain load/store accessors for the supervisor trap-handling CSRs. */
static RISCVException read_sscratch(CPURISCVState *env, int csrno,
                                    target_ulong *val)
{
    *val = env->sscratch;
    return RISCV_EXCP_NONE;
}

static RISCVException write_sscratch(CPURISCVState *env, int csrno,
                                     target_ulong val)
{
    env->sscratch = val;
    return RISCV_EXCP_NONE;
}

static RISCVException read_sepc(CPURISCVState *env, int csrno,
                                target_ulong *val)
{
    *val = env->sepc;
    return RISCV_EXCP_NONE;
}

static RISCVException write_sepc(CPURISCVState *env, int csrno,
                                 target_ulong val)
{
    env->sepc = val;
    return RISCV_EXCP_NONE;
}

static RISCVException read_scause(CPURISCVState *env, int csrno,
                                  target_ulong *val)
{
    *val = env->scause;
    return RISCV_EXCP_NONE;
}

static RISCVException write_scause(CPURISCVState *env, int csrno,
                                   target_ulong val)
{
    env->scause = val;
    return RISCV_EXCP_NONE;
}

static RISCVException read_stval(CPURISCVState *env, int csrno,
                                 target_ulong *val)
{
    *val = env->stval;
    return RISCV_EXCP_NONE;
}

static
RISCVException write_stval(CPURISCVState *env, int csrno,
                                 target_ulong val)
{
    env->stval = val;
    return RISCV_EXCP_NONE;
}

static RISCVException rmw_hvip64(CPURISCVState *env, int csrno,
                                 uint64_t *ret_val,
                                 uint64_t new_val, uint64_t wr_mask);

/*
 * vsip is a shifted view of hvip/mip: the VS-level bits (VS_MODE_INTERRUPTS)
 * sit one position higher in hvip/mip than in the guest-visible vsip, so the
 * guest sees them as its own S-level bits.  Bits virtualized via hvien (but
 * not delegated via hideleg) pass through unshifted.
 */
static RISCVException rmw_vsip64(CPURISCVState *env, int csrno,
                                 uint64_t *ret_val,
                                 uint64_t new_val, uint64_t wr_mask)
{
    RISCVException ret;
    uint64_t rval, mask = env->hideleg & VS_MODE_INTERRUPTS;
    uint64_t vsbits;

    /* Add virtualized bits into vsip mask. */
    mask |= env->hvien & ~env->hideleg;

    /* Bring VS-level bits to correct position */
    vsbits = new_val & (VS_MODE_INTERRUPTS >> 1);
    new_val &= ~(VS_MODE_INTERRUPTS >> 1);
    new_val |= vsbits << 1;
    vsbits = wr_mask & (VS_MODE_INTERRUPTS >> 1);
    wr_mask &= ~(VS_MODE_INTERRUPTS >> 1);
    wr_mask |= vsbits << 1;

    ret = rmw_hvip64(env, csrno, &rval, new_val,
                     wr_mask & mask & vsip_writable_mask);
    if (ret_val) {
        rval &= mask;
        /* Shift the VS-level bits back down for the guest-visible view. */
        vsbits = rval & VS_MODE_INTERRUPTS;
        rval &= ~VS_MODE_INTERRUPTS;
        *ret_val = rval | (vsbits >> 1);
    }

    return ret;
}

/* XLEN-wide wrapper around rmw_vsip64 (low 32 bits on RV32). */
static RISCVException rmw_vsip(CPURISCVState *env, int csrno,
                               target_ulong *ret_val,
                               target_ulong new_val, target_ulong wr_mask)
{
    uint64_t rval;
    RISCVException ret;

    ret = rmw_vsip64(env, csrno, &rval, new_val, wr_mask);
    if (ret_val) {
        *ret_val = rval;
    }

    return ret;
}

/* RV32 high-half access (vsiph) to the 64-bit vsip state. */
static RISCVException rmw_vsiph(CPURISCVState *env, int csrno,
                                target_ulong *ret_val,
                                target_ulong new_val, target_ulong wr_mask)
{
    uint64_t rval;
    RISCVException ret;

    ret = rmw_vsip64(env, csrno, &rval,
                     ((uint64_t)new_val) << 32, ((uint64_t)wr_mask) << 32);
    if (ret_val) {
        *ret_val = rval >> 32;
    }

    return ret;
}

static RISCVException
rmw_sip64(CPURISCVState *env, int csrno,
          uint64_t *ret_val,
          uint64_t new_val, uint64_t wr_mask)
{
    RISCVException ret;
    /*
     * Only bits delegated (mideleg) or virtualized (mvien) to S-mode are
     * writable through sip, further limited by sip_writable_mask.
     */
    uint64_t mask = (env->mideleg | env->mvien) & sip_writable_mask;

    if (env->virt_enabled) {
        if (env->hvictl & HVICTL_VTI) {
            /* hvictl.VTI: direct sip accesses from VS-mode must trap. */
            return RISCV_EXCP_VIRT_INSTRUCTION_FAULT;
        }
        /* In VS-mode, sip really accesses vsip. */
        ret = rmw_vsip64(env, CSR_VSIP, ret_val, new_val, wr_mask);
    } else {
        /* HS/S-mode: sip is a filtered view of mip/mvip (see rmw_mvip64). */
        ret = rmw_mvip64(env, csrno, ret_val, new_val, wr_mask & mask);
    }

    if (ret_val) {
        /* Bits neither delegated nor virtualized to S-mode read as zero. */
        *ret_val &= (env->mideleg | env->mvien) &
                    (S_MODE_INTERRUPTS | LOCAL_INTERRUPTS);
    }

    return ret;
}

/* XLEN-wide wrapper around rmw_sip64 (low 32 bits on RV32). */
static RISCVException rmw_sip(CPURISCVState *env, int csrno,
                              target_ulong *ret_val,
                              target_ulong new_val, target_ulong wr_mask)
{
    uint64_t rval;
    RISCVException ret;

    ret = rmw_sip64(env, csrno, &rval, new_val, wr_mask);
    if (ret_val) {
        *ret_val = rval;
    }

    return ret;
}

/* RV32 high-half access (siph) to the 64-bit sip state. */
static RISCVException rmw_siph(CPURISCVState *env, int csrno,
                               target_ulong *ret_val,
                               target_ulong new_val, target_ulong wr_mask)
{
    uint64_t rval;
    RISCVException ret;

    ret = rmw_sip64(env, csrno, &rval,
                    ((uint64_t)new_val) << 32, ((uint64_t)wr_mask) << 32);
    if (ret_val) {
        *ret_val = rval >> 32;
    }

    return ret;
}

/* Supervisor Protection and Translation */

/* satp reads as zero when no MMU is configured. */
static RISCVException read_satp(CPURISCVState *env, int csrno,
                                target_ulong *val)
{
    if (!riscv_cpu_cfg(env)->mmu) {
        *val = 0;
        return RISCV_EXCP_NONE;
    }
    *val = env->satp;
    return RISCV_EXCP_NONE;
}

static RISCVException write_satp(CPURISCVState *env, int csrno,
                                 target_ulong val)
{
    /* Writes are silently ignored when no MMU is configured. */
    if (!riscv_cpu_cfg(env)->mmu) {
        return RISCV_EXCP_NONE;
    }

    /* Legalize the new value against the current one before committing. */
    env->satp = legalize_xatp(env, env->satp, val);
    return RISCV_EXCP_NONE;
}

static RISCVException
rmw_sctrdepth(CPURISCVState *env, int csrno,
              target_ulong *ret_val,
              target_ulong new_val, target_ulong wr_mask)
{
    uint64_t mask = wr_mask & SCTRDEPTH_MASK;

    if (ret_val) {
        *ret_val = env->sctrdepth;
    }

    env->sctrdepth = (env->sctrdepth & ~mask) | (new_val & mask);

    /* Correct depth. */
    if (mask) {
        uint64_t depth = get_field(env->sctrdepth, SCTRDEPTH_MASK);

        if (depth > SCTRDEPTH_MAX) {
            depth = SCTRDEPTH_MAX;
            env->sctrdepth = set_field(env->sctrdepth, SCTRDEPTH_MASK, depth);
        }

        /*
         * Update sctrstatus.WRPTR with a legal value: the CTR buffer holds
         * 16 << depth entries, so keep every non-WRPTR bit and truncate
         * WRPTR into [0, (16 << depth) - 1].
         */
        depth = 16ULL << depth;
        env->sctrstatus =
            env->sctrstatus & (~SCTRSTATUS_WRPTR_MASK | (depth - 1));
    }

    return RISCV_EXCP_NONE;
}

static RISCVException rmw_sctrstatus(CPURISCVState *env, int csrno,
                                     target_ulong *ret_val,
                                     target_ulong new_val, target_ulong wr_mask)
{
    /* Current CTR buffer size as configured by sctrdepth. */
    uint32_t depth = 16 << get_field(env->sctrdepth, SCTRDEPTH_MASK);
    uint32_t mask = wr_mask & SCTRSTATUS_MASK;

    if (ret_val) {
        *ret_val = env->sctrstatus;
    }

    env->sctrstatus = (env->sctrstatus & ~mask) | (new_val & mask);

    /* Update sctrstatus.WRPTR with a legal value */
    env->sctrstatus = env->sctrstatus & (~SCTRSTATUS_WRPTR_MASK | (depth - 1));

    return RISCV_EXCP_NONE;
}

/*
 * Shared handler for mctrctl/sctrctl/vsctrctl: select the backing register
 * and the writable-bit mask from the CSR number and virtualization mode.
 */
static RISCVException rmw_xctrctl(CPURISCVState *env, int csrno,
                                  target_ulong *ret_val,
                                  target_ulong new_val, target_ulong wr_mask)
{
    uint64_t csr_mask, mask = wr_mask;
    uint64_t *ctl_ptr = &env->mctrctl;

    if (csrno == CSR_MCTRCTL) {
        csr_mask = MCTRCTL_MASK;
    } else if (csrno == CSR_SCTRCTL && !env->virt_enabled) {
        csr_mask = SCTRCTL_MASK;
    } else {
        /*
         * This is for csrno == CSR_SCTRCTL and env->virt_enabled == true
         * or csrno == CSR_VSCTRCTL.
         */
        csr_mask = VSCTRCTL_MASK;
        ctl_ptr = &env->vsctrctl;
    }

    mask &= csr_mask;

    if (ret_val) {
        *ret_val = *ctl_ptr & csr_mask;
    }

    *ctl_ptr = (*ctl_ptr & ~mask) | (new_val & mask);

    return RISCV_EXCP_NONE;
}

/*
 * Compute vstopi (top virtual-supervisor interrupt): collect candidate
 * (id, priority) pairs into siid[]/siprio[] and report the best one.
 * Candidates come from:
 *  - the external interrupt selected by hstatus.VGEIN (refined by the
 *    IMSIC's TOPEI when one is wired up), or hvictl when VGEIN == 0,
 *  - the hvictl-injected interrupt when hvictl.VTI is set,
 *  - the pending VS-level interrupt otherwise.
 */
static RISCVException read_vstopi(CPURISCVState *env, int csrno,
                                  target_ulong *val)
{
    int irq, ret;
    target_ulong topei;
    uint64_t vseip, vsgein;
    uint32_t iid, iprio, hviid, hviprio, gein;
    uint32_t s, scount = 0, siid[VSTOPI_NUM_SRCS], siprio[VSTOPI_NUM_SRCS];

    gein = get_field(env->hstatus, HSTATUS_VGEIN);
    hviid = get_field(env->hvictl, HVICTL_IID);
    hviprio = get_field(env->hvictl, HVICTL_IPRIO);

    if (gein) {
        vsgein = (env->hgeip & (1ULL << gein)) ? MIP_VSEIP : 0;
        vseip = env->mie & (env->mip | vsgein) & MIP_VSEIP;
        if (gein <= env->geilen && vseip) {
            siid[scount] = IRQ_S_EXT;
            siprio[scount] = IPRIO_MMAXIPRIO + 1;
            if (env->aia_ireg_rmw_fn[PRV_S]) {
                /*
                 * Call machine specific IMSIC register emulation for
                 * reading TOPEI.
                 */
                ret = env->aia_ireg_rmw_fn[PRV_S](
                        env->aia_ireg_rmw_fn_arg[PRV_S],
                        AIA_MAKE_IREG(ISELECT_IMSIC_TOPEI, PRV_S, true, gein,
                                      riscv_cpu_mxl_bits(env)),
                        &topei, 0, 0);
                if (!ret && topei) {
                    siprio[scount] = topei & IMSIC_TOPEI_IPRIO_MASK;
                }
            }
            scount++;
        }
    } else {
        if (hviid == IRQ_S_EXT && hviprio) {
            siid[scount] = IRQ_S_EXT;
            siprio[scount] = hviprio;
            scount++;
        }
    }

    if (env->hvictl & HVICTL_VTI) {
        if (hviid != IRQ_S_EXT) {
            siid[scount] = hviid;
            siprio[scount] = hviprio;
            scount++;
        }
    } else {
        irq = riscv_cpu_vsirq_pending(env);
        if (irq != IRQ_S_EXT && 0 < irq && irq <= 63) {
            siid[scount] = irq;
            siprio[scount] = env->hviprio[irq];
            scount++;
        }
    }

    /* Pick the candidate with the numerically smallest priority value. */
    iid = 0;
    iprio = UINT_MAX;
    for (s = 0; s < scount; s++) {
        if (siprio[s] < iprio) {
            iid = siid[s];
            iprio = siprio[s];
        }
    }

    if (iid) {
        if (env->hvictl & HVICTL_IPRIOM) {
            /* Clamp priority; 0 falls back to the default priority order. */
            if (iprio > IPRIO_MMAXIPRIO) {
                iprio = IPRIO_MMAXIPRIO;
            }
            if (!iprio) {
                if (riscv_cpu_default_priority(iid) > IPRIO_DEFAULT_S) {
                    iprio = IPRIO_MMAXIPRIO;
                }
            }
        } else {
            /* hvictl.IPRIOM clear: priority is always reported as 1. */
            iprio = 1;
        }
    } else {
        iprio = 0;
    }

    *val = (iid & TOPI_IID_MASK) << TOPI_IID_SHIFT;
    *val |= iprio;

    return RISCV_EXCP_NONE;
}

/* stopi: top supervisor interrupt; redirects to vstopi when virtualized. */
static RISCVException read_stopi(CPURISCVState *env, int csrno,
                                 target_ulong *val)
{
    int irq;
    uint8_t iprio;

    if (env->virt_enabled) {
        return read_vstopi(env, CSR_VSTOPI, val);
    }

    irq = riscv_cpu_sirq_pending(env);
    if (irq <= 0 || irq > 63) {
        *val = 0;
    } else {
        iprio = env->siprio[irq];
        if (!iprio) {
            if (riscv_cpu_default_priority(irq) > IPRIO_DEFAULT_S) {
                iprio = IPRIO_MMAXIPRIO;
            }
        }
        *val = (irq & TOPI_IID_MASK) << TOPI_IID_SHIFT;
        *val |= iprio;
    }

    return RISCV_EXCP_NONE;
}

/* Hypervisor Extensions */

static RISCVException read_hstatus(CPURISCVState *env, int csrno,
                                   target_ulong *val)
{
    *val = env->hstatus;
    if (riscv_cpu_mxl(env) != MXL_RV32) {
        /* We only support 64-bit VSXL */
        *val = set_field(*val, HSTATUS_VSXL, 2);
    }
    /* We only support little endian */
    *val = set_field(*val, HSTATUS_VSBE, 0);
    return RISCV_EXCP_NONE;
}

static RISCVException write_hstatus(CPURISCVState *env, int csrno,
                                    target_ulong val)
{
    uint64_t mask = (target_ulong)-1;
    /* HUKTE is only writable when the Svukte extension is enabled. */
    if (!env_archcpu(env)->cfg.ext_svukte) {
        mask &= ~HSTATUS_HUKTE;
    }
    /* Update PMM field only if the value is valid according to Zjpm v1.0 */
    if (!env_archcpu(env)->cfg.ext_ssnpm ||
        riscv_cpu_mxl(env) != MXL_RV64 ||
        get_field(val, HSTATUS_HUPMM) == PMM_FIELD_RESERVED) {
        mask &= ~HSTATUS_HUPMM;
    }
    env->hstatus = (env->hstatus & ~mask) | (val & mask);

    /* Warn about guest configurations QEMU does not implement. */
    if (riscv_cpu_mxl(env) != MXL_RV32 && get_field(val, HSTATUS_VSXL) != 2) {
        qemu_log_mask(LOG_UNIMP,
                      "QEMU does not support mixed HSXLEN options.");
    }
    if (get_field(val, HSTATUS_VSBE) != 0) {
        qemu_log_mask(LOG_UNIMP, "QEMU does not support big endian guests.");
    }
    return RISCV_EXCP_NONE;
}

static RISCVException read_hedeleg(CPURISCVState *env, int csrno,
                                   target_ulong *val)
{
    *val = env->hedeleg;
    return RISCV_EXCP_NONE;
}

static RISCVException write_hedeleg(CPURISCVState *env, int csrno,
                                    target_ulong val)
{
    /* Only exceptions that may be delegated to VS-mode are writable. */
    env->hedeleg = val & vs_delegable_excps;
    return RISCV_EXCP_NONE;
}

/* hedelegh (upper half of hedeleg on RV32): gated by mstateen0.P1P13. */
static RISCVException read_hedelegh(CPURISCVState *env, int csrno,
                                    target_ulong *val)
{
    RISCVException ret;
    ret = smstateen_acc_ok(env, 0, SMSTATEEN0_P1P13);
    if (ret != RISCV_EXCP_NONE) {
        return ret;
    }

    /* Reserved, now read zero
     */
    *val = 0;
    return RISCV_EXCP_NONE;
}

static RISCVException write_hedelegh(CPURISCVState *env, int csrno,
                                     target_ulong val)
{
    RISCVException ret;
    ret = smstateen_acc_ok(env, 0, SMSTATEEN0_P1P13);
    if (ret != RISCV_EXCP_NONE) {
        return ret;
    }

    /* Reserved, now write ignore */
    return RISCV_EXCP_NONE;
}

/*
 * hvien: selects which hvip bits act as separate VS-level interrupt
 * sources (see rmw_hvip64 for how it combines with hideleg).
 */
static RISCVException rmw_hvien64(CPURISCVState *env, int csrno,
                                  uint64_t *ret_val,
                                  uint64_t new_val, uint64_t wr_mask)
{
    uint64_t mask = wr_mask & hvien_writable_mask;

    if (ret_val) {
        *ret_val = env->hvien;
    }

    env->hvien = (env->hvien & ~mask) | (new_val & mask);

    return RISCV_EXCP_NONE;
}

/* XLEN-wide wrapper around rmw_hvien64 (low 32 bits on RV32). */
static RISCVException rmw_hvien(CPURISCVState *env, int csrno,
                                target_ulong *ret_val,
                                target_ulong new_val, target_ulong wr_mask)
{
    uint64_t rval;
    RISCVException ret;

    ret = rmw_hvien64(env, csrno, &rval, new_val, wr_mask);
    if (ret_val) {
        *ret_val = rval;
    }

    return ret;
}

/* RV32 high-half access (hvienh) to the 64-bit hvien state. */
static RISCVException rmw_hvienh(CPURISCVState *env, int csrno,
                                 target_ulong *ret_val,
                                 target_ulong new_val, target_ulong wr_mask)
{
    uint64_t rval;
    RISCVException ret;

    ret = rmw_hvien64(env, csrno, &rval,
                      ((uint64_t)new_val) << 32, ((uint64_t)wr_mask) << 32);
    if (ret_val) {
        *ret_val = rval >> 32;
    }

    return ret;
}

/* hideleg: only interrupts in vs_delegable_ints may be delegated to VS. */
static RISCVException rmw_hideleg64(CPURISCVState *env, int csrno,
                                    uint64_t *ret_val,
                                    uint64_t new_val, uint64_t wr_mask)
{
    uint64_t mask = wr_mask & vs_delegable_ints;

    if (ret_val) {
        *ret_val = env->hideleg & vs_delegable_ints;
    }

    env->hideleg = (env->hideleg & ~mask) | (new_val & mask);
    return RISCV_EXCP_NONE;
}

static RISCVException rmw_hideleg(CPURISCVState *env, int csrno,
                                  target_ulong *ret_val,
                                  target_ulong new_val,
target_ulong wr_mask) 4624 { 4625 uint64_t rval; 4626 RISCVException ret; 4627 4628 ret = rmw_hideleg64(env, csrno, &rval, new_val, wr_mask); 4629 if (ret_val) { 4630 *ret_val = rval; 4631 } 4632 4633 return ret; 4634 } 4635 4636 static RISCVException rmw_hidelegh(CPURISCVState *env, int csrno, 4637 target_ulong *ret_val, 4638 target_ulong new_val, target_ulong wr_mask) 4639 { 4640 uint64_t rval; 4641 RISCVException ret; 4642 4643 ret = rmw_hideleg64(env, csrno, &rval, 4644 ((uint64_t)new_val) << 32, ((uint64_t)wr_mask) << 32); 4645 if (ret_val) { 4646 *ret_val = rval >> 32; 4647 } 4648 4649 return ret; 4650 } 4651 4652 /* 4653 * The function is written for two use-cases: 4654 * 1- To access hvip csr as is for HS-mode access. 4655 * 2- To access vsip as a combination of hvip, and mip for vs-mode. 4656 * 4657 * Both report bits 2, 6, 10 and 13:63. 4658 * vsip needs to be read-only zero when both hideleg[i] and 4659 * hvien[i] are zero. 4660 */ 4661 static RISCVException rmw_hvip64(CPURISCVState *env, int csrno, 4662 uint64_t *ret_val, 4663 uint64_t new_val, uint64_t wr_mask) 4664 { 4665 RISCVException ret; 4666 uint64_t old_hvip; 4667 uint64_t ret_mip; 4668 4669 /* 4670 * For bits 10, 6 and 2, vsip[i] is an alias of hip[i]. These bits are 4671 * present in hip, hvip and mip. Where mip[i] is alias of hip[i] and hvip[i] 4672 * is OR'ed in hip[i] to inject virtual interrupts from hypervisor. These 4673 * bits are actually being maintained in mip so we read them from there. 4674 * This way we have a single source of truth and allows for easier 4675 * implementation. 4676 * 4677 * For bits 13:63 we have: 4678 * 4679 * hideleg[i] hvien[i] 4680 * 0 0 No delegation. vsip[i] readonly zero. 4681 * 0 1 vsip[i] is alias of hvip[i], sip bypassed. 4682 * 1 X vsip[i] is alias of sip[i], hvip bypassed. 4683 * 4684 * alias_mask denotes the bits that come from sip (mip here given we 4685 * maintain all bits there). nalias_mask denotes bits that come from 4686 * hvip. 
4687 */ 4688 uint64_t alias_mask = (env->hideleg | ~env->hvien) | VS_MODE_INTERRUPTS; 4689 uint64_t nalias_mask = (~env->hideleg & env->hvien); 4690 uint64_t wr_mask_hvip; 4691 uint64_t wr_mask_mip; 4692 4693 /* 4694 * Both alias and non-alias mask remain same for vsip except: 4695 * 1- For VS* bits if they are zero in hideleg. 4696 * 2- For 13:63 bits if they are zero in both hideleg and hvien. 4697 */ 4698 if (csrno == CSR_VSIP) { 4699 /* zero-out VS* bits that are not delegated to VS mode. */ 4700 alias_mask &= (env->hideleg | ~VS_MODE_INTERRUPTS); 4701 4702 /* 4703 * zero-out 13:63 bits that are zero in both hideleg and hvien. 4704 * nalias_mask mask can not contain any VS* bits so only second 4705 * condition applies on it. 4706 */ 4707 nalias_mask &= (env->hideleg | env->hvien); 4708 alias_mask &= (env->hideleg | env->hvien); 4709 } 4710 4711 wr_mask_hvip = wr_mask & nalias_mask & hvip_writable_mask; 4712 wr_mask_mip = wr_mask & alias_mask & hvip_writable_mask; 4713 4714 /* Aliased bits, bits 10, 6, 2 need to come from mip. */ 4715 ret = rmw_mip64(env, csrno, &ret_mip, new_val, wr_mask_mip); 4716 if (ret != RISCV_EXCP_NONE) { 4717 return ret; 4718 } 4719 4720 old_hvip = env->hvip; 4721 4722 if (wr_mask_hvip) { 4723 env->hvip = (env->hvip & ~wr_mask_hvip) | (new_val & wr_mask_hvip); 4724 4725 /* 4726 * Given hvip is separate source from mip, we need to trigger interrupt 4727 * from here separately. Normally this happen from riscv_cpu_update_mip. 4728 */ 4729 riscv_cpu_interrupt(env); 4730 } 4731 4732 if (ret_val) { 4733 /* Only take VS* bits from mip. */ 4734 ret_mip &= alias_mask; 4735 4736 /* Take in non-delegated 13:63 bits from hvip. 
*/ 4737 old_hvip &= nalias_mask; 4738 4739 *ret_val = ret_mip | old_hvip; 4740 } 4741 4742 return ret; 4743 } 4744 4745 static RISCVException rmw_hvip(CPURISCVState *env, int csrno, 4746 target_ulong *ret_val, 4747 target_ulong new_val, target_ulong wr_mask) 4748 { 4749 uint64_t rval; 4750 RISCVException ret; 4751 4752 ret = rmw_hvip64(env, csrno, &rval, new_val, wr_mask); 4753 if (ret_val) { 4754 *ret_val = rval; 4755 } 4756 4757 return ret; 4758 } 4759 4760 static RISCVException rmw_hviph(CPURISCVState *env, int csrno, 4761 target_ulong *ret_val, 4762 target_ulong new_val, target_ulong wr_mask) 4763 { 4764 uint64_t rval; 4765 RISCVException ret; 4766 4767 ret = rmw_hvip64(env, csrno, &rval, 4768 ((uint64_t)new_val) << 32, ((uint64_t)wr_mask) << 32); 4769 if (ret_val) { 4770 *ret_val = rval >> 32; 4771 } 4772 4773 return ret; 4774 } 4775 4776 static RISCVException rmw_hip(CPURISCVState *env, int csrno, 4777 target_ulong *ret_value, 4778 target_ulong new_value, target_ulong write_mask) 4779 { 4780 int ret = rmw_mip(env, csrno, ret_value, new_value, 4781 write_mask & hip_writable_mask); 4782 4783 if (ret_value) { 4784 *ret_value &= HS_MODE_INTERRUPTS; 4785 } 4786 return ret; 4787 } 4788 4789 static RISCVException rmw_hie(CPURISCVState *env, int csrno, 4790 target_ulong *ret_val, 4791 target_ulong new_val, target_ulong wr_mask) 4792 { 4793 uint64_t rval; 4794 RISCVException ret; 4795 4796 ret = rmw_mie64(env, csrno, &rval, new_val, wr_mask & HS_MODE_INTERRUPTS); 4797 if (ret_val) { 4798 *ret_val = rval & HS_MODE_INTERRUPTS; 4799 } 4800 4801 return ret; 4802 } 4803 4804 static RISCVException read_hcounteren(CPURISCVState *env, int csrno, 4805 target_ulong *val) 4806 { 4807 *val = env->hcounteren; 4808 return RISCV_EXCP_NONE; 4809 } 4810 4811 static RISCVException write_hcounteren(CPURISCVState *env, int csrno, 4812 target_ulong val) 4813 { 4814 RISCVCPU *cpu = env_archcpu(env); 4815 4816 /* WARL register - disable unavailable counters */ 4817 env->hcounteren = val & 
                      (cpu->pmu_avail_ctrs | COUNTEREN_CY | COUNTEREN_TM |
                       COUNTEREN_IR);
    return RISCV_EXCP_NONE;
}

/* Read hgeie; tolerates a NULL output pointer. */
static RISCVException read_hgeie(CPURISCVState *env, int csrno,
                                 target_ulong *val)
{
    if (val) {
        *val = env->hgeie;
    }
    return RISCV_EXCP_NONE;
}

static RISCVException write_hgeie(CPURISCVState *env, int csrno,
                                  target_ulong val)
{
    /* Only GEILEN:1 bits implemented and BIT0 is never implemented */
    val &= ((((target_ulong)1) << env->geilen) - 1) << 1;
    env->hgeie = val;
    /* Update mip.SGEIP bit */
    riscv_cpu_update_mip(env, MIP_SGEIP,
                         BOOL_TO_MASK(!!(env->hgeie & env->hgeip)));
    return RISCV_EXCP_NONE;
}

/* Read the hypervisor trap value register (htval). */
static RISCVException read_htval(CPURISCVState *env, int csrno,
                                 target_ulong *val)
{
    *val = env->htval;
    return RISCV_EXCP_NONE;
}

static RISCVException write_htval(CPURISCVState *env, int csrno,
                                  target_ulong val)
{
    env->htval = val;
    return RISCV_EXCP_NONE;
}

/* Read the hypervisor trap instruction register (htinst). */
static RISCVException read_htinst(CPURISCVState *env, int csrno,
                                  target_ulong *val)
{
    *val = env->htinst;
    return RISCV_EXCP_NONE;
}

/* Guest writes to htinst are silently discarded. */
static RISCVException write_htinst(CPURISCVState *env, int csrno,
                                   target_ulong val)
{
    return RISCV_EXCP_NONE;
}

/* Read hgeip; tolerates a NULL output pointer. */
static RISCVException read_hgeip(CPURISCVState *env, int csrno,
                                 target_ulong *val)
{
    if (val) {
        *val = env->hgeip;
    }
    return RISCV_EXCP_NONE;
}

/* Read the hypervisor guest address translation register (hgatp). */
static RISCVException read_hgatp(CPURISCVState *env, int csrno,
                                 target_ulong *val)
{
    *val = env->hgatp;
    return RISCV_EXCP_NONE;
}

/* Write hgatp after legalizing the new value against the current one. */
static RISCVException write_hgatp(CPURISCVState *env, int csrno,
                                  target_ulong val)
{
    env->hgatp = legalize_xatp(env, env->hgatp, val);
    return RISCV_EXCP_NONE;
}

/* Read htimedelta; only valid when a platform rdtime callback exists. */
static RISCVException read_htimedelta(CPURISCVState *env, int csrno,
                                      target_ulong
                                                   *val)
{
    if (!env->rdtime_fn) {
        return RISCV_EXCP_ILLEGAL_INST;
    }

    *val = env->htimedelta;
    return RISCV_EXCP_NONE;
}

static RISCVException write_htimedelta(CPURISCVState *env, int csrno,
                                       target_ulong val)
{
    if (!env->rdtime_fn) {
        return RISCV_EXCP_ILLEGAL_INST;
    }

    /* On RV32 only the low half is written; the high half is htimedeltah. */
    if (riscv_cpu_mxl(env) == MXL_RV32) {
        env->htimedelta = deposit64(env->htimedelta, 0, 32, (uint64_t)val);
    } else {
        env->htimedelta = val;
    }

    /* Changing htimedelta shifts guest time, so reprogram the VS timer. */
    if (riscv_cpu_cfg(env)->ext_sstc && env->rdtime_fn) {
        riscv_timer_write_timecmp(env, env->vstimer, env->vstimecmp,
                                  env->htimedelta, MIP_VSTIP);
    }

    return RISCV_EXCP_NONE;
}

/* RV32 high half of htimedelta. */
static RISCVException read_htimedeltah(CPURISCVState *env, int csrno,
                                       target_ulong *val)
{
    if (!env->rdtime_fn) {
        return RISCV_EXCP_ILLEGAL_INST;
    }

    *val = env->htimedelta >> 32;
    return RISCV_EXCP_NONE;
}

static RISCVException write_htimedeltah(CPURISCVState *env, int csrno,
                                        target_ulong val)
{
    if (!env->rdtime_fn) {
        return RISCV_EXCP_ILLEGAL_INST;
    }

    env->htimedelta = deposit64(env->htimedelta, 32, 32, (uint64_t)val);

    if (riscv_cpu_cfg(env)->ext_sstc && env->rdtime_fn) {
        riscv_timer_write_timecmp(env, env->vstimer, env->vstimecmp,
                                  env->htimedelta, MIP_VSTIP);
    }

    return RISCV_EXCP_NONE;
}

/* Read hvictl (hypervisor virtual interrupt control). */
static RISCVException read_hvictl(CPURISCVState *env, int csrno,
                                  target_ulong *val)
{
    *val = env->hvictl;
    return RISCV_EXCP_NONE;
}

/* Write hvictl, keeping only the architecturally valid bits. */
static RISCVException write_hvictl(CPURISCVState *env, int csrno,
                                   target_ulong val)
{
    env->hvictl = val & HVICTL_VALID_MASK;
    return RISCV_EXCP_NONE;
}

/*
 * Common read helper for the hviprio* CSRs: gather the priority bytes of
 * the irqs mapped to one register into *val, one byte per irq.
 */
static RISCVException read_hvipriox(CPURISCVState *env, int first_index,
                                    uint8_t *iprio, target_ulong *val)
{
    int i, irq, rdzero, num_irqs = 4 * (riscv_cpu_mxl_bits(env)
                                        / 32);

    /* First index has to be a multiple of number of irqs per register */
    if (first_index % num_irqs) {
        return (env->virt_enabled) ?
               RISCV_EXCP_VIRT_INSTRUCTION_FAULT : RISCV_EXCP_ILLEGAL_INST;
    }

    /* Fill-up return value */
    *val = 0;
    for (i = 0; i < num_irqs; i++) {
        if (riscv_cpu_hviprio_index2irq(first_index + i, &irq, &rdzero)) {
            continue;
        }
        if (rdzero) {
            continue;
        }
        *val |= ((target_ulong)iprio[irq]) << (i * 8);
    }

    return RISCV_EXCP_NONE;
}

/*
 * Common write helper for the hviprio* CSRs: scatter the bytes of val
 * into the per-irq priority array; rdzero slots are forced to zero.
 */
static RISCVException write_hvipriox(CPURISCVState *env, int first_index,
                                     uint8_t *iprio, target_ulong val)
{
    int i, irq, rdzero, num_irqs = 4 * (riscv_cpu_mxl_bits(env) / 32);

    /* First index has to be a multiple of number of irqs per register */
    if (first_index % num_irqs) {
        return (env->virt_enabled) ?
               RISCV_EXCP_VIRT_INSTRUCTION_FAULT : RISCV_EXCP_ILLEGAL_INST;
    }

    /* Fill-up priority array */
    for (i = 0; i < num_irqs; i++) {
        if (riscv_cpu_hviprio_index2irq(first_index + i, &irq, &rdzero)) {
            continue;
        }
        if (rdzero) {
            iprio[irq] = 0;
        } else {
            iprio[irq] = (val >> (i * 8)) & 0xff;
        }
    }

    return RISCV_EXCP_NONE;
}

/* hviprio1/hviprio1h: thin wrappers over the helpers above. */
static RISCVException read_hviprio1(CPURISCVState *env, int csrno,
                                    target_ulong *val)
{
    return read_hvipriox(env, 0, env->hviprio, val);
}

static RISCVException write_hviprio1(CPURISCVState *env, int csrno,
                                     target_ulong val)
{
    return write_hvipriox(env, 0, env->hviprio, val);
}

static RISCVException read_hviprio1h(CPURISCVState *env, int csrno,
                                     target_ulong *val)
{
    return read_hvipriox(env, 4, env->hviprio, val);
}

static RISCVException write_hviprio1h(CPURISCVState *env, int csrno,
                                      target_ulong val)
{
    return write_hvipriox(env, 4, env->hviprio, val);
}

static
RISCVException read_hviprio2(CPURISCVState *env, int csrno,
                             target_ulong *val)
{
    return read_hvipriox(env, 8, env->hviprio, val);
}

static RISCVException write_hviprio2(CPURISCVState *env, int csrno,
                                     target_ulong val)
{
    return write_hvipriox(env, 8, env->hviprio, val);
}

static RISCVException read_hviprio2h(CPURISCVState *env, int csrno,
                                     target_ulong *val)
{
    return read_hvipriox(env, 12, env->hviprio, val);
}

static RISCVException write_hviprio2h(CPURISCVState *env, int csrno,
                                      target_ulong val)
{
    return write_hvipriox(env, 12, env->hviprio, val);
}

/* Virtual CSR Registers */
static RISCVException read_vsstatus(CPURISCVState *env, int csrno,
                                    target_ulong *val)
{
    *val = env->vsstatus;
    return RISCV_EXCP_NONE;
}

static RISCVException write_vsstatus(CPURISCVState *env, int csrno,
                                     target_ulong val)
{
    uint64_t mask = (target_ulong)-1;
    /* Preserve the current UXL field when the write leaves it zero. */
    if ((val & VSSTATUS64_UXL) == 0) {
        mask &= ~VSSTATUS64_UXL;
    }
    /*
     * With henvcfg.DTE set, writing SDT=1 clears SIE; without DTE the
     * SDT bit is forced to zero.
     */
    if ((env->henvcfg & HENVCFG_DTE)) {
        if ((val & SSTATUS_SDT) != 0) {
            val &= ~SSTATUS_SIE;
        }
    } else {
        val &= ~SSTATUS_SDT;
    }
    env->vsstatus = (env->vsstatus & ~mask) | (uint64_t)val;
    return RISCV_EXCP_NONE;
}

static RISCVException read_vstvec(CPURISCVState *env, int csrno,
                                  target_ulong *val)
{
    *val = env->vstvec;
    return RISCV_EXCP_NONE;
}

static RISCVException write_vstvec(CPURISCVState *env, int csrno,
                                   target_ulong val)
{
    /* bits [1:0] encode mode; 0 = direct, 1 = vectored, 2 >= reserved */
    if ((val & 3) < 2) {
        env->vstvec = val;
    } else {
        qemu_log_mask(LOG_UNIMP, "CSR_VSTVEC: reserved mode not supported\n");
    }
    return RISCV_EXCP_NONE;
}

static RISCVException read_vsscratch(CPURISCVState *env, int csrno,
                                     target_ulong *val)
{
    *val
         = env->vsscratch;
    return RISCV_EXCP_NONE;
}

static RISCVException write_vsscratch(CPURISCVState *env, int csrno,
                                      target_ulong val)
{
    env->vsscratch = val;
    return RISCV_EXCP_NONE;
}

/* vsepc/vscause/vstval: plain backing-field accessors. */
static RISCVException read_vsepc(CPURISCVState *env, int csrno,
                                 target_ulong *val)
{
    *val = env->vsepc;
    return RISCV_EXCP_NONE;
}

static RISCVException write_vsepc(CPURISCVState *env, int csrno,
                                  target_ulong val)
{
    env->vsepc = val;
    return RISCV_EXCP_NONE;
}

static RISCVException read_vscause(CPURISCVState *env, int csrno,
                                   target_ulong *val)
{
    *val = env->vscause;
    return RISCV_EXCP_NONE;
}

static RISCVException write_vscause(CPURISCVState *env, int csrno,
                                    target_ulong val)
{
    env->vscause = val;
    return RISCV_EXCP_NONE;
}

static RISCVException read_vstval(CPURISCVState *env, int csrno,
                                  target_ulong *val)
{
    *val = env->vstval;
    return RISCV_EXCP_NONE;
}

static RISCVException write_vstval(CPURISCVState *env, int csrno,
                                   target_ulong val)
{
    env->vstval = val;
    return RISCV_EXCP_NONE;
}

static RISCVException read_vsatp(CPURISCVState *env, int csrno,
                                 target_ulong *val)
{
    *val = env->vsatp;
    return RISCV_EXCP_NONE;
}

/* Write vsatp after legalizing the new value against the current one. */
static RISCVException write_vsatp(CPURISCVState *env, int csrno,
                                  target_ulong val)
{
    env->vsatp = legalize_xatp(env, env->vsatp, val);
    return RISCV_EXCP_NONE;
}

static RISCVException read_mtval2(CPURISCVState *env, int csrno,
                                  target_ulong *val)
{
    *val = env->mtval2;
    return RISCV_EXCP_NONE;
}

static RISCVException write_mtval2(CPURISCVState *env, int csrno,
                                   target_ulong val)
{
    env->mtval2 = val;
    return RISCV_EXCP_NONE;
}

static RISCVException read_mtinst(CPURISCVState *env, int csrno,
                                  target_ulong *val)
{
    *val = env->mtinst;
    return RISCV_EXCP_NONE;
}

static RISCVException write_mtinst(CPURISCVState *env, int csrno,
                                   target_ulong val)
{
    env->mtinst = val;
    return RISCV_EXCP_NONE;
}

/* Physical Memory Protection */
static RISCVException read_mseccfg(CPURISCVState *env, int csrno,
                                   target_ulong *val)
{
    *val = mseccfg_csr_read(env);
    return RISCV_EXCP_NONE;
}

static RISCVException write_mseccfg(CPURISCVState *env, int csrno,
                                    target_ulong val)
{
    mseccfg_csr_write(env, val);
    return RISCV_EXCP_NONE;
}

/* pmpcfg0..N: the register index is derived from the CSR number. */
static RISCVException read_pmpcfg(CPURISCVState *env, int csrno,
                                  target_ulong *val)
{
    uint32_t reg_index = csrno - CSR_PMPCFG0;

    *val = pmpcfg_csr_read(env, reg_index);
    return RISCV_EXCP_NONE;
}

static RISCVException write_pmpcfg(CPURISCVState *env, int csrno,
                                   target_ulong val)
{
    uint32_t reg_index = csrno - CSR_PMPCFG0;

    pmpcfg_csr_write(env, reg_index, val);
    return RISCV_EXCP_NONE;
}

/* pmpaddr0..N: likewise indexed by CSR number. */
static RISCVException read_pmpaddr(CPURISCVState *env, int csrno,
                                   target_ulong *val)
{
    *val = pmpaddr_csr_read(env, csrno - CSR_PMPADDR0);
    return RISCV_EXCP_NONE;
}

static RISCVException write_pmpaddr(CPURISCVState *env, int csrno,
                                    target_ulong val)
{
    pmpaddr_csr_write(env, csrno - CSR_PMPADDR0, val);
    return RISCV_EXCP_NONE;
}

/* Debug trigger select. */
static RISCVException read_tselect(CPURISCVState *env, int csrno,
                                   target_ulong *val)
{
    *val = tselect_csr_read(env);
    return RISCV_EXCP_NONE;
}

static RISCVException write_tselect(CPURISCVState *env, int csrno,
                                    target_ulong val)
{
    tselect_csr_write(env, val);
    return RISCV_EXCP_NONE;
}

static RISCVException read_tdata(CPURISCVState *env, int csrno,
                                 target_ulong *val)
{
    /* return 0
       in tdata1 to end the trigger enumeration */
    if (env->trigger_cur >= RV_MAX_TRIGGERS && csrno == CSR_TDATA1) {
        *val = 0;
        return RISCV_EXCP_NONE;
    }

    if (!tdata_available(env, csrno - CSR_TDATA1)) {
        return RISCV_EXCP_ILLEGAL_INST;
    }

    *val = tdata_csr_read(env, csrno - CSR_TDATA1);
    return RISCV_EXCP_NONE;
}

static RISCVException write_tdata(CPURISCVState *env, int csrno,
                                  target_ulong val)
{
    if (!tdata_available(env, csrno - CSR_TDATA1)) {
        return RISCV_EXCP_ILLEGAL_INST;
    }

    tdata_csr_write(env, csrno - CSR_TDATA1, val);
    return RISCV_EXCP_NONE;
}

static RISCVException read_tinfo(CPURISCVState *env, int csrno,
                                 target_ulong *val)
{
    *val = tinfo_csr_read(env);
    return RISCV_EXCP_NONE;
}

static RISCVException read_mcontext(CPURISCVState *env, int csrno,
                                    target_ulong *val)
{
    *val = env->mcontext;
    return RISCV_EXCP_NONE;
}

/* Write mcontext, masked to the width mandated for MXLEN with/without RVH. */
static RISCVException write_mcontext(CPURISCVState *env, int csrno,
                                     target_ulong val)
{
    bool rv32 = riscv_cpu_mxl(env) == MXL_RV32 ? true : false;
    int32_t mask;

    if (riscv_has_ext(env, RVH)) {
        /* Spec suggest 7-bit for RV32 and 14-bit for RV64 w/ H extension */
        mask = rv32 ? MCONTEXT32_HCONTEXT : MCONTEXT64_HCONTEXT;
    } else {
        /* Spec suggest 6-bit for RV32 and 13-bit for RV64 w/o H extension */
        mask = rv32 ?
                      MCONTEXT32 : MCONTEXT64;
    }

    env->mcontext = val & mask;
    return RISCV_EXCP_NONE;
}

/* RNMI CSRs: mnscratch/mnepc/mncause are plain backing-field accessors. */
static RISCVException read_mnscratch(CPURISCVState *env, int csrno,
                                     target_ulong *val)
{
    *val = env->mnscratch;
    return RISCV_EXCP_NONE;
}

static int write_mnscratch(CPURISCVState *env, int csrno, target_ulong val)
{
    env->mnscratch = val;
    return RISCV_EXCP_NONE;
}

static int read_mnepc(CPURISCVState *env, int csrno, target_ulong *val)
{
    *val = env->mnepc;
    return RISCV_EXCP_NONE;
}

static int write_mnepc(CPURISCVState *env, int csrno, target_ulong val)
{
    env->mnepc = val;
    return RISCV_EXCP_NONE;
}

static int read_mncause(CPURISCVState *env, int csrno, target_ulong *val)
{
    *val = env->mncause;
    return RISCV_EXCP_NONE;
}

static int write_mncause(CPURISCVState *env, int csrno, target_ulong val)
{
    env->mncause = val;
    return RISCV_EXCP_NONE;
}

static int read_mnstatus(CPURISCVState *env, int csrno, target_ulong *val)
{
    *val = env->mnstatus;
    return RISCV_EXCP_NONE;
}

static int write_mnstatus(CPURISCVState *env, int csrno, target_ulong val)
{
    target_ulong mask = (MNSTATUS_NMIE | MNSTATUS_MNPP);

    if (riscv_has_ext(env, RVH)) {
        /* Flush tlb on mnstatus fields that affect VM. */
        if ((val ^ env->mnstatus) & MNSTATUS_MNPV) {
            tlb_flush(env_cpu(env));
        }

        mask |= MNSTATUS_MNPV;
    }

    /* mnstatus.mnie can only be cleared by hardware.
     */
    env->mnstatus = (env->mnstatus & MNSTATUS_NMIE) | (val & mask);
    return RISCV_EXCP_NONE;
}

#endif

/* Crypto Extension */
/*
 * Implement one access to the seed CSR: fetch 16 bits of guest-visible
 * entropy and report ES16 on success, or DEAD if the host entropy
 * source failed.
 */
target_ulong riscv_new_csr_seed(target_ulong new_value,
                                target_ulong write_mask)
{
    uint16_t random_v;
    Error *random_e = NULL;
    int random_r;
    target_ulong rval;

    random_r = qemu_guest_getrandom(&random_v, 2, &random_e);
    if (unlikely(random_r < 0)) {
        /*
         * Failed, for unknown reasons in the crypto subsystem.
         * The best we can do is log the reason and return a
         * failure indication to the guest. There is no reason
         * we know to expect the failure to be transitory, so
         * indicate DEAD to avoid having the guest spin on WAIT.
         */
        qemu_log_mask(LOG_UNIMP, "%s: Crypto failure: %s",
                      __func__, error_get_pretty(random_e));
        error_free(random_e);
        rval = SEED_OPST_DEAD;
    } else {
        rval = random_v | SEED_OPST_ES16;
    }

    return rval;
}

/* CSR op handler for seed: every access returns a fresh seed value. */
static RISCVException rmw_seed(CPURISCVState *env, int csrno,
                               target_ulong *ret_value,
                               target_ulong new_value,
                               target_ulong write_mask)
{
    target_ulong rval;

    rval = riscv_new_csr_seed(new_value, write_mask);

    if (ret_value) {
        *ret_value = rval;
    }

    return RISCV_EXCP_NONE;
}

/*
 * riscv_csrrw - read and/or update control and status register
 *
 * csrr   <->  riscv_csrrw(env, csrno, ret_value, 0, 0);
 * csrrw  <->  riscv_csrrw(env, csrno, ret_value, value, -1);
 * csrrs  <->  riscv_csrrw(env, csrno, ret_value, -1, value);
 * csrrc  <->  riscv_csrrw(env, csrno, ret_value, 0, value);
 */

static inline RISCVException riscv_csrrw_check(CPURISCVState *env,
                                               int csrno,
                                               bool write)
{
    /* check privileges and return RISCV_EXCP_ILLEGAL_INST if check fails */
    bool read_only = get_field(csrno, 0xC00) == 3;
    int csr_min_priv =
                       csr_ops[csrno].min_priv_ver;

    /* ensure the CSR extension is enabled */
    if (!riscv_cpu_cfg(env)->ext_zicsr) {
        return RISCV_EXCP_ILLEGAL_INST;
    }

    /* ensure CSR is implemented by checking predicate */
    if (!csr_ops[csrno].predicate) {
        return RISCV_EXCP_ILLEGAL_INST;
    }

    /* privileged spec version check */
    if (env->priv_ver < csr_min_priv) {
        return RISCV_EXCP_ILLEGAL_INST;
    }

    /* read / write check */
    if (write && read_only) {
        return RISCV_EXCP_ILLEGAL_INST;
    }

    /*
     * The predicate() not only does existence check but also does some
     * access control check which triggers for example virtual instruction
     * exception in some cases. When writing read-only CSRs in those cases
     * illegal instruction exception should be triggered instead of virtual
     * instruction exception. Hence this comes after the read / write check.
     */
    RISCVException ret = csr_ops[csrno].predicate(env, csrno);
    if (ret != RISCV_EXCP_NONE) {
        return ret;
    }

#if !defined(CONFIG_USER_ONLY)
    int csr_priv, effective_priv = env->priv;

    if (riscv_has_ext(env, RVH) && env->priv == PRV_S &&
        !env->virt_enabled) {
        /*
         * We are in HS mode. Add 1 to the effective privilege level to
         * allow us to access the Hypervisor CSRs.
         */
        effective_priv++;
    }

    csr_priv = get_field(csrno, 0x300);
    if (!env->debugger && (effective_priv < csr_priv)) {
        if (csr_priv == (PRV_S + 1) && env->virt_enabled) {
            return RISCV_EXCP_VIRT_INSTRUCTION_FAULT;
        }
        return RISCV_EXCP_ILLEGAL_INST;
    }
#endif
    return RISCV_EXCP_NONE;
}

/*
 * Perform one XLEN-wide CSR read-modify-write using the per-CSR
 * read/write/op callbacks from csr_ops.
 */
static RISCVException riscv_csrrw_do64(CPURISCVState *env, int csrno,
                                       target_ulong *ret_value,
                                       target_ulong new_value,
                                       target_ulong write_mask)
{
    RISCVException ret;
    target_ulong old_value = 0;

    /* execute combined read/write operation if it exists */
    if (csr_ops[csrno].op) {
        return csr_ops[csrno].op(env, csrno, ret_value, new_value, write_mask);
    }

    /*
     * ret_value == NULL means that rd=x0 and we're coming from helper_csrw()
     * and we can't throw side effects caused by CSR reads.
     */
    if (ret_value) {
        /* if no accessor exists then return failure */
        if (!csr_ops[csrno].read) {
            return RISCV_EXCP_ILLEGAL_INST;
        }
        /* read old value */
        ret = csr_ops[csrno].read(env, csrno, &old_value);
        if (ret != RISCV_EXCP_NONE) {
            return ret;
        }
    }

    /* write value if writable and write mask set, otherwise drop writes */
    if (write_mask) {
        new_value = (old_value & ~write_mask) | (new_value & write_mask);
        if (csr_ops[csrno].write) {
            ret = csr_ops[csrno].write(env, csrno, new_value);
            if (ret != RISCV_EXCP_NONE) {
                return ret;
            }
        }
    }

    /* return old value */
    if (ret_value) {
        *ret_value = old_value;
    }

    return RISCV_EXCP_NONE;
}

/* CSR read (csrr): check access, then perform a read without writes. */
RISCVException riscv_csrr(CPURISCVState *env, int csrno,
                          target_ulong *ret_value)
{
    RISCVException ret = riscv_csrrw_check(env, csrno, false);
    if (ret != RISCV_EXCP_NONE) {
        return ret;
    }

    return riscv_csrrw_do64(env, csrno, ret_value, 0, 0);
}

/* CSR read/write: check access (as a write), then do the RMW. */
RISCVException riscv_csrrw(CPURISCVState *env, int csrno,
                           target_ulong *ret_value,
                           target_ulong new_value, target_ulong write_mask)
{
    RISCVException ret = riscv_csrrw_check(env, csrno, true);
    if (ret != RISCV_EXCP_NONE) {
        return ret;
    }

    return riscv_csrrw_do64(env, csrno, ret_value, new_value, write_mask);
}

/* 128-bit counterpart of riscv_csrrw_do64 for CSRs with read128/write128. */
static RISCVException riscv_csrrw_do128(CPURISCVState *env, int csrno,
                                        Int128 *ret_value,
                                        Int128 new_value,
                                        Int128 write_mask)
{
    RISCVException ret;
    Int128 old_value;

    /* read old value */
    ret = csr_ops[csrno].read128(env, csrno, &old_value);
    if (ret != RISCV_EXCP_NONE) {
        return ret;
    }

    /* write value if writable and write mask set, otherwise drop writes */
    if (int128_nz(write_mask)) {
        new_value = int128_or(int128_and(old_value, int128_not(write_mask)),
                              int128_and(new_value, write_mask));
        if (csr_ops[csrno].write128) {
            ret = csr_ops[csrno].write128(env, csrno, new_value);
            if (ret != RISCV_EXCP_NONE) {
                return ret;
            }
        } else if (csr_ops[csrno].write) {
            /* avoids having to write wrappers for all registers */
            ret = csr_ops[csrno].write(env, csrno, int128_getlo(new_value));
            if (ret != RISCV_EXCP_NONE) {
                return ret;
            }
        }
    }

    /* return old value */
    if (ret_value) {
        *ret_value = old_value;
    }

    return RISCV_EXCP_NONE;
}

/* 128-bit CSR read; falls back to the 64-bit path when no read128 exists. */
RISCVException riscv_csrr_i128(CPURISCVState *env, int csrno,
                               Int128 *ret_value)
{
    RISCVException ret;

    ret = riscv_csrrw_check(env, csrno, false);
    if (ret != RISCV_EXCP_NONE) {
        return ret;
    }

    if (csr_ops[csrno].read128) {
        return riscv_csrrw_do128(env, csrno, ret_value,
                                 int128_zero(), int128_zero());
    }

    /*
     * Fall back to 64-bit version for now, if the 128-bit alternative isn't
     * at all defined.
     * Note, some CSRs don't need to extend to MXLEN (64 upper bits non
     * significant), for those, this fallback is correctly handling the
     * accesses
     */
    target_ulong old_value;
    ret = riscv_csrrw_do64(env, csrno, &old_value,
                           (target_ulong)0,
                           (target_ulong)0);
    if (ret == RISCV_EXCP_NONE && ret_value) {
        *ret_value = int128_make64(old_value);
    }
    return ret;
}

/* 128-bit CSR read/write; same fallback policy as riscv_csrr_i128. */
RISCVException riscv_csrrw_i128(CPURISCVState *env, int csrno,
                               Int128 *ret_value,
                               Int128 new_value, Int128 write_mask)
{
    RISCVException ret;

    ret = riscv_csrrw_check(env, csrno, true);
    if (ret != RISCV_EXCP_NONE) {
        return ret;
    }

    if (csr_ops[csrno].read128) {
        return riscv_csrrw_do128(env, csrno, ret_value, new_value, write_mask);
    }

    /*
     * Fall back to 64-bit version for now, if the 128-bit alternative isn't
     * at all defined.
     * Note, some CSRs don't need to extend to MXLEN (64 upper bits non
     * significant), for those, this fallback is correctly handling the
     * accesses
     */
    target_ulong old_value;
    ret = riscv_csrrw_do64(env, csrno, &old_value,
                           int128_getlo(new_value),
                           int128_getlo(write_mask));
    if (ret == RISCV_EXCP_NONE && ret_value) {
        *ret_value = int128_make64(old_value);
    }
    return ret;
}

/*
 * Debugger support. If not in user mode, set env->debugger before the
 * riscv_csrrw call and clear it after the call.
5689 */ 5690 RISCVException riscv_csrrw_debug(CPURISCVState *env, int csrno, 5691 target_ulong *ret_value, 5692 target_ulong new_value, 5693 target_ulong write_mask) 5694 { 5695 RISCVException ret; 5696 #if !defined(CONFIG_USER_ONLY) 5697 env->debugger = true; 5698 #endif 5699 if (!write_mask) { 5700 ret = riscv_csrr(env, csrno, ret_value); 5701 } else { 5702 ret = riscv_csrrw(env, csrno, ret_value, new_value, write_mask); 5703 } 5704 #if !defined(CONFIG_USER_ONLY) 5705 env->debugger = false; 5706 #endif 5707 return ret; 5708 } 5709 5710 static RISCVException read_jvt(CPURISCVState *env, int csrno, 5711 target_ulong *val) 5712 { 5713 *val = env->jvt; 5714 return RISCV_EXCP_NONE; 5715 } 5716 5717 static RISCVException write_jvt(CPURISCVState *env, int csrno, 5718 target_ulong val) 5719 { 5720 env->jvt = val; 5721 return RISCV_EXCP_NONE; 5722 } 5723 5724 /* 5725 * Control and Status Register function table 5726 * riscv_csr_operations::predicate() must be provided for an implemented CSR 5727 */ 5728 riscv_csr_operations csr_ops[CSR_TABLE_SIZE] = { 5729 /* User Floating-Point CSRs */ 5730 [CSR_FFLAGS] = { "fflags", fs, read_fflags, write_fflags }, 5731 [CSR_FRM] = { "frm", fs, read_frm, write_frm }, 5732 [CSR_FCSR] = { "fcsr", fs, read_fcsr, write_fcsr }, 5733 /* Vector CSRs */ 5734 [CSR_VSTART] = { "vstart", vs, read_vstart, write_vstart }, 5735 [CSR_VXSAT] = { "vxsat", vs, read_vxsat, write_vxsat }, 5736 [CSR_VXRM] = { "vxrm", vs, read_vxrm, write_vxrm }, 5737 [CSR_VCSR] = { "vcsr", vs, read_vcsr, write_vcsr }, 5738 [CSR_VL] = { "vl", vs, read_vl }, 5739 [CSR_VTYPE] = { "vtype", vs, read_vtype }, 5740 [CSR_VLENB] = { "vlenb", vs, read_vlenb }, 5741 /* User Timers and Counters */ 5742 [CSR_CYCLE] = { "cycle", ctr, read_hpmcounter }, 5743 [CSR_INSTRET] = { "instret", ctr, read_hpmcounter }, 5744 [CSR_CYCLEH] = { "cycleh", ctr32, read_hpmcounterh }, 5745 [CSR_INSTRETH] = { "instreth", ctr32, read_hpmcounterh }, 5746 5747 /* 5748 * In privileged mode, the monitor will 
have to emulate TIME CSRs only if 5749 * rdtime callback is not provided by machine/platform emulation. 5750 */ 5751 [CSR_TIME] = { "time", ctr, read_time }, 5752 [CSR_TIMEH] = { "timeh", ctr32, read_timeh }, 5753 5754 /* Crypto Extension */ 5755 [CSR_SEED] = { "seed", seed, NULL, NULL, rmw_seed }, 5756 5757 /* Zcmt Extension */ 5758 [CSR_JVT] = {"jvt", zcmt, read_jvt, write_jvt}, 5759 5760 /* zicfiss Extension, shadow stack register */ 5761 [CSR_SSP] = { "ssp", cfi_ss, read_ssp, write_ssp }, 5762 5763 #if !defined(CONFIG_USER_ONLY) 5764 /* Machine Timers and Counters */ 5765 [CSR_MCYCLE] = { "mcycle", any, read_hpmcounter, 5766 write_mhpmcounter }, 5767 [CSR_MINSTRET] = { "minstret", any, read_hpmcounter, 5768 write_mhpmcounter }, 5769 [CSR_MCYCLEH] = { "mcycleh", any32, read_hpmcounterh, 5770 write_mhpmcounterh }, 5771 [CSR_MINSTRETH] = { "minstreth", any32, read_hpmcounterh, 5772 write_mhpmcounterh }, 5773 5774 /* Machine Information Registers */ 5775 [CSR_MVENDORID] = { "mvendorid", any, read_mvendorid }, 5776 [CSR_MARCHID] = { "marchid", any, read_marchid }, 5777 [CSR_MIMPID] = { "mimpid", any, read_mimpid }, 5778 [CSR_MHARTID] = { "mhartid", any, read_mhartid }, 5779 5780 [CSR_MCONFIGPTR] = { "mconfigptr", any, read_zero, 5781 .min_priv_ver = PRIV_VERSION_1_12_0 }, 5782 /* Machine Trap Setup */ 5783 [CSR_MSTATUS] = { "mstatus", any, read_mstatus, write_mstatus, 5784 NULL, read_mstatus_i128 }, 5785 [CSR_MISA] = { "misa", any, read_misa, write_misa, 5786 NULL, read_misa_i128 }, 5787 [CSR_MIDELEG] = { "mideleg", any, NULL, NULL, rmw_mideleg }, 5788 [CSR_MEDELEG] = { "medeleg", any, read_medeleg, write_medeleg }, 5789 [CSR_MIE] = { "mie", any, NULL, NULL, rmw_mie }, 5790 [CSR_MTVEC] = { "mtvec", any, read_mtvec, write_mtvec }, 5791 [CSR_MCOUNTEREN] = { "mcounteren", umode, read_mcounteren, 5792 write_mcounteren }, 5793 5794 [CSR_MSTATUSH] = { "mstatush", any32, read_mstatush, 5795 write_mstatush }, 5796 [CSR_MEDELEGH] = { "medelegh", any32, read_zero, 
write_ignore, 5797 .min_priv_ver = PRIV_VERSION_1_13_0 }, 5798 [CSR_HEDELEGH] = { "hedelegh", hmode32, read_hedelegh, write_hedelegh, 5799 .min_priv_ver = PRIV_VERSION_1_13_0 }, 5800 5801 /* Machine Trap Handling */ 5802 [CSR_MSCRATCH] = { "mscratch", any, read_mscratch, write_mscratch, 5803 NULL, read_mscratch_i128, write_mscratch_i128 }, 5804 [CSR_MEPC] = { "mepc", any, read_mepc, write_mepc }, 5805 [CSR_MCAUSE] = { "mcause", any, read_mcause, write_mcause }, 5806 [CSR_MTVAL] = { "mtval", any, read_mtval, write_mtval }, 5807 [CSR_MIP] = { "mip", any, NULL, NULL, rmw_mip }, 5808 5809 /* Machine-Level Window to Indirectly Accessed Registers (AIA) */ 5810 [CSR_MISELECT] = { "miselect", csrind_or_aia_any, NULL, NULL, 5811 rmw_xiselect }, 5812 [CSR_MIREG] = { "mireg", csrind_or_aia_any, NULL, NULL, 5813 rmw_xireg }, 5814 5815 /* Machine Indirect Register Alias */ 5816 [CSR_MIREG2] = { "mireg2", csrind_any, NULL, NULL, rmw_xiregi, 5817 .min_priv_ver = PRIV_VERSION_1_12_0 }, 5818 [CSR_MIREG3] = { "mireg3", csrind_any, NULL, NULL, rmw_xiregi, 5819 .min_priv_ver = PRIV_VERSION_1_12_0 }, 5820 [CSR_MIREG4] = { "mireg4", csrind_any, NULL, NULL, rmw_xiregi, 5821 .min_priv_ver = PRIV_VERSION_1_12_0 }, 5822 [CSR_MIREG5] = { "mireg5", csrind_any, NULL, NULL, rmw_xiregi, 5823 .min_priv_ver = PRIV_VERSION_1_12_0 }, 5824 [CSR_MIREG6] = { "mireg6", csrind_any, NULL, NULL, rmw_xiregi, 5825 .min_priv_ver = PRIV_VERSION_1_12_0 }, 5826 5827 /* Machine-Level Interrupts (AIA) */ 5828 [CSR_MTOPEI] = { "mtopei", aia_any, NULL, NULL, rmw_xtopei }, 5829 [CSR_MTOPI] = { "mtopi", aia_any, read_mtopi }, 5830 5831 /* Virtual Interrupts for Supervisor Level (AIA) */ 5832 [CSR_MVIEN] = { "mvien", aia_any, NULL, NULL, rmw_mvien }, 5833 [CSR_MVIP] = { "mvip", aia_any, NULL, NULL, rmw_mvip }, 5834 5835 /* Machine-Level High-Half CSRs (AIA) */ 5836 [CSR_MIDELEGH] = { "midelegh", aia_any32, NULL, NULL, rmw_midelegh }, 5837 [CSR_MIEH] = { "mieh", aia_any32, NULL, NULL, rmw_mieh }, 5838 [CSR_MVIENH] = { 
"mvienh", aia_any32, NULL, NULL, rmw_mvienh }, 5839 [CSR_MVIPH] = { "mviph", aia_any32, NULL, NULL, rmw_mviph }, 5840 [CSR_MIPH] = { "miph", aia_any32, NULL, NULL, rmw_miph }, 5841 5842 /* Execution environment configuration */ 5843 [CSR_MENVCFG] = { "menvcfg", umode, read_menvcfg, write_menvcfg, 5844 .min_priv_ver = PRIV_VERSION_1_12_0 }, 5845 [CSR_MENVCFGH] = { "menvcfgh", umode32, read_menvcfgh, write_menvcfgh, 5846 .min_priv_ver = PRIV_VERSION_1_12_0 }, 5847 [CSR_SENVCFG] = { "senvcfg", smode, read_senvcfg, write_senvcfg, 5848 .min_priv_ver = PRIV_VERSION_1_12_0 }, 5849 [CSR_HENVCFG] = { "henvcfg", hmode, read_henvcfg, write_henvcfg, 5850 .min_priv_ver = PRIV_VERSION_1_12_0 }, 5851 [CSR_HENVCFGH] = { "henvcfgh", hmode32, read_henvcfgh, write_henvcfgh, 5852 .min_priv_ver = PRIV_VERSION_1_12_0 }, 5853 5854 /* Smstateen extension CSRs */ 5855 [CSR_MSTATEEN0] = { "mstateen0", mstateen, read_mstateen, write_mstateen0, 5856 .min_priv_ver = PRIV_VERSION_1_12_0 }, 5857 [CSR_MSTATEEN0H] = { "mstateen0h", mstateen, read_mstateenh, 5858 write_mstateen0h, 5859 .min_priv_ver = PRIV_VERSION_1_12_0 }, 5860 [CSR_MSTATEEN1] = { "mstateen1", mstateen, read_mstateen, 5861 write_mstateen_1_3, 5862 .min_priv_ver = PRIV_VERSION_1_12_0 }, 5863 [CSR_MSTATEEN1H] = { "mstateen1h", mstateen, read_mstateenh, 5864 write_mstateenh_1_3, 5865 .min_priv_ver = PRIV_VERSION_1_12_0 }, 5866 [CSR_MSTATEEN2] = { "mstateen2", mstateen, read_mstateen, 5867 write_mstateen_1_3, 5868 .min_priv_ver = PRIV_VERSION_1_12_0 }, 5869 [CSR_MSTATEEN2H] = { "mstateen2h", mstateen, read_mstateenh, 5870 write_mstateenh_1_3, 5871 .min_priv_ver = PRIV_VERSION_1_12_0 }, 5872 [CSR_MSTATEEN3] = { "mstateen3", mstateen, read_mstateen, 5873 write_mstateen_1_3, 5874 .min_priv_ver = PRIV_VERSION_1_12_0 }, 5875 [CSR_MSTATEEN3H] = { "mstateen3h", mstateen, read_mstateenh, 5876 write_mstateenh_1_3, 5877 .min_priv_ver = PRIV_VERSION_1_12_0 }, 5878 [CSR_HSTATEEN0] = { "hstateen0", hstateen, read_hstateen, write_hstateen0, 5879 
.min_priv_ver = PRIV_VERSION_1_12_0 }, 5880 [CSR_HSTATEEN0H] = { "hstateen0h", hstateenh, read_hstateenh, 5881 write_hstateen0h, 5882 .min_priv_ver = PRIV_VERSION_1_12_0 }, 5883 [CSR_HSTATEEN1] = { "hstateen1", hstateen, read_hstateen, 5884 write_hstateen_1_3, 5885 .min_priv_ver = PRIV_VERSION_1_12_0 }, 5886 [CSR_HSTATEEN1H] = { "hstateen1h", hstateenh, read_hstateenh, 5887 write_hstateenh_1_3, 5888 .min_priv_ver = PRIV_VERSION_1_12_0 }, 5889 [CSR_HSTATEEN2] = { "hstateen2", hstateen, read_hstateen, 5890 write_hstateen_1_3, 5891 .min_priv_ver = PRIV_VERSION_1_12_0 }, 5892 [CSR_HSTATEEN2H] = { "hstateen2h", hstateenh, read_hstateenh, 5893 write_hstateenh_1_3, 5894 .min_priv_ver = PRIV_VERSION_1_12_0 }, 5895 [CSR_HSTATEEN3] = { "hstateen3", hstateen, read_hstateen, 5896 write_hstateen_1_3, 5897 .min_priv_ver = PRIV_VERSION_1_12_0 }, 5898 [CSR_HSTATEEN3H] = { "hstateen3h", hstateenh, read_hstateenh, 5899 write_hstateenh_1_3, 5900 .min_priv_ver = PRIV_VERSION_1_12_0 }, 5901 [CSR_SSTATEEN0] = { "sstateen0", sstateen, read_sstateen, write_sstateen0, 5902 .min_priv_ver = PRIV_VERSION_1_12_0 }, 5903 [CSR_SSTATEEN1] = { "sstateen1", sstateen, read_sstateen, 5904 write_sstateen_1_3, 5905 .min_priv_ver = PRIV_VERSION_1_12_0 }, 5906 [CSR_SSTATEEN2] = { "sstateen2", sstateen, read_sstateen, 5907 write_sstateen_1_3, 5908 .min_priv_ver = PRIV_VERSION_1_12_0 }, 5909 [CSR_SSTATEEN3] = { "sstateen3", sstateen, read_sstateen, 5910 write_sstateen_1_3, 5911 .min_priv_ver = PRIV_VERSION_1_12_0 }, 5912 5913 /* RNMI */ 5914 [CSR_MNSCRATCH] = { "mnscratch", rnmi, read_mnscratch, write_mnscratch, 5915 .min_priv_ver = PRIV_VERSION_1_12_0 }, 5916 [CSR_MNEPC] = { "mnepc", rnmi, read_mnepc, write_mnepc, 5917 .min_priv_ver = PRIV_VERSION_1_12_0 }, 5918 [CSR_MNCAUSE] = { "mncause", rnmi, read_mncause, write_mncause, 5919 .min_priv_ver = PRIV_VERSION_1_12_0 }, 5920 [CSR_MNSTATUS] = { "mnstatus", rnmi, read_mnstatus, write_mnstatus, 5921 .min_priv_ver = PRIV_VERSION_1_12_0 }, 5922 5923 /* 
Supervisor Counter Delegation */ 5924 [CSR_SCOUNTINHIBIT] = {"scountinhibit", scountinhibit_pred, 5925 read_scountinhibit, write_scountinhibit, 5926 .min_priv_ver = PRIV_VERSION_1_12_0 }, 5927 5928 /* Supervisor Trap Setup */ 5929 [CSR_SSTATUS] = { "sstatus", smode, read_sstatus, write_sstatus, 5930 NULL, read_sstatus_i128 }, 5931 [CSR_SIE] = { "sie", smode, NULL, NULL, rmw_sie }, 5932 [CSR_STVEC] = { "stvec", smode, read_stvec, write_stvec }, 5933 [CSR_SCOUNTEREN] = { "scounteren", smode, read_scounteren, 5934 write_scounteren }, 5935 5936 /* Supervisor Trap Handling */ 5937 [CSR_SSCRATCH] = { "sscratch", smode, read_sscratch, write_sscratch, 5938 NULL, read_sscratch_i128, write_sscratch_i128 }, 5939 [CSR_SEPC] = { "sepc", smode, read_sepc, write_sepc }, 5940 [CSR_SCAUSE] = { "scause", smode, read_scause, write_scause }, 5941 [CSR_STVAL] = { "stval", smode, read_stval, write_stval }, 5942 [CSR_SIP] = { "sip", smode, NULL, NULL, rmw_sip }, 5943 [CSR_STIMECMP] = { "stimecmp", sstc, read_stimecmp, write_stimecmp, 5944 .min_priv_ver = PRIV_VERSION_1_12_0 }, 5945 [CSR_STIMECMPH] = { "stimecmph", sstc_32, read_stimecmph, write_stimecmph, 5946 .min_priv_ver = PRIV_VERSION_1_12_0 }, 5947 [CSR_VSTIMECMP] = { "vstimecmp", sstc, read_vstimecmp, 5948 write_vstimecmp, 5949 .min_priv_ver = PRIV_VERSION_1_12_0 }, 5950 [CSR_VSTIMECMPH] = { "vstimecmph", sstc_32, read_vstimecmph, 5951 write_vstimecmph, 5952 .min_priv_ver = PRIV_VERSION_1_12_0 }, 5953 5954 /* Supervisor Protection and Translation */ 5955 [CSR_SATP] = { "satp", satp, read_satp, write_satp }, 5956 5957 /* Supervisor-Level Window to Indirectly Accessed Registers (AIA) */ 5958 [CSR_SISELECT] = { "siselect", csrind_or_aia_smode, NULL, NULL, 5959 rmw_xiselect }, 5960 [CSR_SIREG] = { "sireg", csrind_or_aia_smode, NULL, NULL, 5961 rmw_xireg }, 5962 5963 /* Supervisor Indirect Register Alias */ 5964 [CSR_SIREG2] = { "sireg2", csrind_smode, NULL, NULL, rmw_xiregi, 5965 .min_priv_ver = PRIV_VERSION_1_12_0 }, 5966 [CSR_SIREG3] 
= { "sireg3", csrind_smode, NULL, NULL, rmw_xiregi, 5967 .min_priv_ver = PRIV_VERSION_1_12_0 }, 5968 [CSR_SIREG4] = { "sireg4", csrind_smode, NULL, NULL, rmw_xiregi, 5969 .min_priv_ver = PRIV_VERSION_1_12_0 }, 5970 [CSR_SIREG5] = { "sireg5", csrind_smode, NULL, NULL, rmw_xiregi, 5971 .min_priv_ver = PRIV_VERSION_1_12_0 }, 5972 [CSR_SIREG6] = { "sireg6", csrind_smode, NULL, NULL, rmw_xiregi, 5973 .min_priv_ver = PRIV_VERSION_1_12_0 }, 5974 5975 /* Supervisor-Level Interrupts (AIA) */ 5976 [CSR_STOPEI] = { "stopei", aia_smode, NULL, NULL, rmw_xtopei }, 5977 [CSR_STOPI] = { "stopi", aia_smode, read_stopi }, 5978 5979 /* Supervisor-Level High-Half CSRs (AIA) */ 5980 [CSR_SIEH] = { "sieh", aia_smode32, NULL, NULL, rmw_sieh }, 5981 [CSR_SIPH] = { "siph", aia_smode32, NULL, NULL, rmw_siph }, 5982 5983 [CSR_HSTATUS] = { "hstatus", hmode, read_hstatus, write_hstatus, 5984 .min_priv_ver = PRIV_VERSION_1_12_0 }, 5985 [CSR_HEDELEG] = { "hedeleg", hmode, read_hedeleg, write_hedeleg, 5986 .min_priv_ver = PRIV_VERSION_1_12_0 }, 5987 [CSR_HIDELEG] = { "hideleg", hmode, NULL, NULL, rmw_hideleg, 5988 .min_priv_ver = PRIV_VERSION_1_12_0 }, 5989 [CSR_HVIP] = { "hvip", hmode, NULL, NULL, rmw_hvip, 5990 .min_priv_ver = PRIV_VERSION_1_12_0 }, 5991 [CSR_HIP] = { "hip", hmode, NULL, NULL, rmw_hip, 5992 .min_priv_ver = PRIV_VERSION_1_12_0 }, 5993 [CSR_HIE] = { "hie", hmode, NULL, NULL, rmw_hie, 5994 .min_priv_ver = PRIV_VERSION_1_12_0 }, 5995 [CSR_HCOUNTEREN] = { "hcounteren", hmode, read_hcounteren, 5996 write_hcounteren, 5997 .min_priv_ver = PRIV_VERSION_1_12_0 }, 5998 [CSR_HGEIE] = { "hgeie", hmode, read_hgeie, write_hgeie, 5999 .min_priv_ver = PRIV_VERSION_1_12_0 }, 6000 [CSR_HTVAL] = { "htval", hmode, read_htval, write_htval, 6001 .min_priv_ver = PRIV_VERSION_1_12_0 }, 6002 [CSR_HTINST] = { "htinst", hmode, read_htinst, write_htinst, 6003 .min_priv_ver = PRIV_VERSION_1_12_0 }, 6004 [CSR_HGEIP] = { "hgeip", hmode, read_hgeip, 6005 .min_priv_ver = PRIV_VERSION_1_12_0 }, 6006 [CSR_HGATP] 
= { "hgatp", hgatp, read_hgatp, write_hgatp, 6007 .min_priv_ver = PRIV_VERSION_1_12_0 }, 6008 [CSR_HTIMEDELTA] = { "htimedelta", hmode, read_htimedelta, 6009 write_htimedelta, 6010 .min_priv_ver = PRIV_VERSION_1_12_0 }, 6011 [CSR_HTIMEDELTAH] = { "htimedeltah", hmode32, read_htimedeltah, 6012 write_htimedeltah, 6013 .min_priv_ver = PRIV_VERSION_1_12_0 }, 6014 6015 [CSR_VSSTATUS] = { "vsstatus", hmode, read_vsstatus, 6016 write_vsstatus, 6017 .min_priv_ver = PRIV_VERSION_1_12_0 }, 6018 [CSR_VSIP] = { "vsip", hmode, NULL, NULL, rmw_vsip, 6019 .min_priv_ver = PRIV_VERSION_1_12_0 }, 6020 [CSR_VSIE] = { "vsie", hmode, NULL, NULL, rmw_vsie , 6021 .min_priv_ver = PRIV_VERSION_1_12_0 }, 6022 [CSR_VSTVEC] = { "vstvec", hmode, read_vstvec, write_vstvec, 6023 .min_priv_ver = PRIV_VERSION_1_12_0 }, 6024 [CSR_VSSCRATCH] = { "vsscratch", hmode, read_vsscratch, 6025 write_vsscratch, 6026 .min_priv_ver = PRIV_VERSION_1_12_0 }, 6027 [CSR_VSEPC] = { "vsepc", hmode, read_vsepc, write_vsepc, 6028 .min_priv_ver = PRIV_VERSION_1_12_0 }, 6029 [CSR_VSCAUSE] = { "vscause", hmode, read_vscause, write_vscause, 6030 .min_priv_ver = PRIV_VERSION_1_12_0 }, 6031 [CSR_VSTVAL] = { "vstval", hmode, read_vstval, write_vstval, 6032 .min_priv_ver = PRIV_VERSION_1_12_0 }, 6033 [CSR_VSATP] = { "vsatp", hmode, read_vsatp, write_vsatp, 6034 .min_priv_ver = PRIV_VERSION_1_12_0 }, 6035 6036 [CSR_MTVAL2] = { "mtval2", dbltrp_hmode, read_mtval2, write_mtval2, 6037 .min_priv_ver = PRIV_VERSION_1_12_0 }, 6038 [CSR_MTINST] = { "mtinst", hmode, read_mtinst, write_mtinst, 6039 .min_priv_ver = PRIV_VERSION_1_12_0 }, 6040 6041 /* Virtual Interrupts and Interrupt Priorities (H-extension with AIA) */ 6042 [CSR_HVIEN] = { "hvien", aia_hmode, NULL, NULL, rmw_hvien }, 6043 [CSR_HVICTL] = { "hvictl", aia_hmode, read_hvictl, 6044 write_hvictl }, 6045 [CSR_HVIPRIO1] = { "hviprio1", aia_hmode, read_hviprio1, 6046 write_hviprio1 }, 6047 [CSR_HVIPRIO2] = { "hviprio2", aia_hmode, read_hviprio2, 6048 write_hviprio2 }, 6049 /* 
6050 * VS-Level Window to Indirectly Accessed Registers (H-extension with AIA) 6051 */ 6052 [CSR_VSISELECT] = { "vsiselect", csrind_or_aia_hmode, NULL, NULL, 6053 rmw_xiselect }, 6054 [CSR_VSIREG] = { "vsireg", csrind_or_aia_hmode, NULL, NULL, 6055 rmw_xireg }, 6056 6057 /* Virtual Supervisor Indirect Alias */ 6058 [CSR_VSIREG2] = { "vsireg2", csrind_hmode, NULL, NULL, rmw_xiregi, 6059 .min_priv_ver = PRIV_VERSION_1_12_0 }, 6060 [CSR_VSIREG3] = { "vsireg3", csrind_hmode, NULL, NULL, rmw_xiregi, 6061 .min_priv_ver = PRIV_VERSION_1_12_0 }, 6062 [CSR_VSIREG4] = { "vsireg4", csrind_hmode, NULL, NULL, rmw_xiregi, 6063 .min_priv_ver = PRIV_VERSION_1_12_0 }, 6064 [CSR_VSIREG5] = { "vsireg5", csrind_hmode, NULL, NULL, rmw_xiregi, 6065 .min_priv_ver = PRIV_VERSION_1_12_0 }, 6066 [CSR_VSIREG6] = { "vsireg6", csrind_hmode, NULL, NULL, rmw_xiregi, 6067 .min_priv_ver = PRIV_VERSION_1_12_0 }, 6068 6069 /* VS-Level Interrupts (H-extension with AIA) */ 6070 [CSR_VSTOPEI] = { "vstopei", aia_hmode, NULL, NULL, rmw_xtopei }, 6071 [CSR_VSTOPI] = { "vstopi", aia_hmode, read_vstopi }, 6072 6073 /* Hypervisor and VS-Level High-Half CSRs (H-extension with AIA) */ 6074 [CSR_HIDELEGH] = { "hidelegh", aia_hmode32, NULL, NULL, 6075 rmw_hidelegh }, 6076 [CSR_HVIENH] = { "hvienh", aia_hmode32, NULL, NULL, rmw_hvienh }, 6077 [CSR_HVIPH] = { "hviph", aia_hmode32, NULL, NULL, rmw_hviph }, 6078 [CSR_HVIPRIO1H] = { "hviprio1h", aia_hmode32, read_hviprio1h, 6079 write_hviprio1h }, 6080 [CSR_HVIPRIO2H] = { "hviprio2h", aia_hmode32, read_hviprio2h, 6081 write_hviprio2h }, 6082 [CSR_VSIEH] = { "vsieh", aia_hmode32, NULL, NULL, rmw_vsieh }, 6083 [CSR_VSIPH] = { "vsiph", aia_hmode32, NULL, NULL, rmw_vsiph }, 6084 6085 /* Physical Memory Protection */ 6086 [CSR_MSECCFG] = { "mseccfg", have_mseccfg, read_mseccfg, write_mseccfg, 6087 .min_priv_ver = PRIV_VERSION_1_11_0 }, 6088 [CSR_PMPCFG0] = { "pmpcfg0", pmp, read_pmpcfg, write_pmpcfg }, 6089 [CSR_PMPCFG1] = { "pmpcfg1", pmp, read_pmpcfg, write_pmpcfg }, 
6090 [CSR_PMPCFG2] = { "pmpcfg2", pmp, read_pmpcfg, write_pmpcfg }, 6091 [CSR_PMPCFG3] = { "pmpcfg3", pmp, read_pmpcfg, write_pmpcfg }, 6092 [CSR_PMPADDR0] = { "pmpaddr0", pmp, read_pmpaddr, write_pmpaddr }, 6093 [CSR_PMPADDR1] = { "pmpaddr1", pmp, read_pmpaddr, write_pmpaddr }, 6094 [CSR_PMPADDR2] = { "pmpaddr2", pmp, read_pmpaddr, write_pmpaddr }, 6095 [CSR_PMPADDR3] = { "pmpaddr3", pmp, read_pmpaddr, write_pmpaddr }, 6096 [CSR_PMPADDR4] = { "pmpaddr4", pmp, read_pmpaddr, write_pmpaddr }, 6097 [CSR_PMPADDR5] = { "pmpaddr5", pmp, read_pmpaddr, write_pmpaddr }, 6098 [CSR_PMPADDR6] = { "pmpaddr6", pmp, read_pmpaddr, write_pmpaddr }, 6099 [CSR_PMPADDR7] = { "pmpaddr7", pmp, read_pmpaddr, write_pmpaddr }, 6100 [CSR_PMPADDR8] = { "pmpaddr8", pmp, read_pmpaddr, write_pmpaddr }, 6101 [CSR_PMPADDR9] = { "pmpaddr9", pmp, read_pmpaddr, write_pmpaddr }, 6102 [CSR_PMPADDR10] = { "pmpaddr10", pmp, read_pmpaddr, write_pmpaddr }, 6103 [CSR_PMPADDR11] = { "pmpaddr11", pmp, read_pmpaddr, write_pmpaddr }, 6104 [CSR_PMPADDR12] = { "pmpaddr12", pmp, read_pmpaddr, write_pmpaddr }, 6105 [CSR_PMPADDR13] = { "pmpaddr13", pmp, read_pmpaddr, write_pmpaddr }, 6106 [CSR_PMPADDR14] = { "pmpaddr14", pmp, read_pmpaddr, write_pmpaddr }, 6107 [CSR_PMPADDR15] = { "pmpaddr15", pmp, read_pmpaddr, write_pmpaddr }, 6108 6109 /* Debug CSRs */ 6110 [CSR_TSELECT] = { "tselect", debug, read_tselect, write_tselect }, 6111 [CSR_TDATA1] = { "tdata1", debug, read_tdata, write_tdata }, 6112 [CSR_TDATA2] = { "tdata2", debug, read_tdata, write_tdata }, 6113 [CSR_TDATA3] = { "tdata3", debug, read_tdata, write_tdata }, 6114 [CSR_TINFO] = { "tinfo", debug, read_tinfo, write_ignore }, 6115 [CSR_MCONTEXT] = { "mcontext", debug, read_mcontext, write_mcontext }, 6116 6117 [CSR_MCTRCTL] = { "mctrctl", ctr_mmode, NULL, NULL, rmw_xctrctl }, 6118 [CSR_SCTRCTL] = { "sctrctl", ctr_smode, NULL, NULL, rmw_xctrctl }, 6119 [CSR_VSCTRCTL] = { "vsctrctl", ctr_smode, NULL, NULL, rmw_xctrctl }, 6120 [CSR_SCTRDEPTH] = { "sctrdepth", 
ctr_smode, NULL, NULL, rmw_sctrdepth }, 6121 [CSR_SCTRSTATUS] = { "sctrstatus", ctr_smode, NULL, NULL, rmw_sctrstatus }, 6122 6123 /* Performance Counters */ 6124 [CSR_HPMCOUNTER3] = { "hpmcounter3", ctr, read_hpmcounter }, 6125 [CSR_HPMCOUNTER4] = { "hpmcounter4", ctr, read_hpmcounter }, 6126 [CSR_HPMCOUNTER5] = { "hpmcounter5", ctr, read_hpmcounter }, 6127 [CSR_HPMCOUNTER6] = { "hpmcounter6", ctr, read_hpmcounter }, 6128 [CSR_HPMCOUNTER7] = { "hpmcounter7", ctr, read_hpmcounter }, 6129 [CSR_HPMCOUNTER8] = { "hpmcounter8", ctr, read_hpmcounter }, 6130 [CSR_HPMCOUNTER9] = { "hpmcounter9", ctr, read_hpmcounter }, 6131 [CSR_HPMCOUNTER10] = { "hpmcounter10", ctr, read_hpmcounter }, 6132 [CSR_HPMCOUNTER11] = { "hpmcounter11", ctr, read_hpmcounter }, 6133 [CSR_HPMCOUNTER12] = { "hpmcounter12", ctr, read_hpmcounter }, 6134 [CSR_HPMCOUNTER13] = { "hpmcounter13", ctr, read_hpmcounter }, 6135 [CSR_HPMCOUNTER14] = { "hpmcounter14", ctr, read_hpmcounter }, 6136 [CSR_HPMCOUNTER15] = { "hpmcounter15", ctr, read_hpmcounter }, 6137 [CSR_HPMCOUNTER16] = { "hpmcounter16", ctr, read_hpmcounter }, 6138 [CSR_HPMCOUNTER17] = { "hpmcounter17", ctr, read_hpmcounter }, 6139 [CSR_HPMCOUNTER18] = { "hpmcounter18", ctr, read_hpmcounter }, 6140 [CSR_HPMCOUNTER19] = { "hpmcounter19", ctr, read_hpmcounter }, 6141 [CSR_HPMCOUNTER20] = { "hpmcounter20", ctr, read_hpmcounter }, 6142 [CSR_HPMCOUNTER21] = { "hpmcounter21", ctr, read_hpmcounter }, 6143 [CSR_HPMCOUNTER22] = { "hpmcounter22", ctr, read_hpmcounter }, 6144 [CSR_HPMCOUNTER23] = { "hpmcounter23", ctr, read_hpmcounter }, 6145 [CSR_HPMCOUNTER24] = { "hpmcounter24", ctr, read_hpmcounter }, 6146 [CSR_HPMCOUNTER25] = { "hpmcounter25", ctr, read_hpmcounter }, 6147 [CSR_HPMCOUNTER26] = { "hpmcounter26", ctr, read_hpmcounter }, 6148 [CSR_HPMCOUNTER27] = { "hpmcounter27", ctr, read_hpmcounter }, 6149 [CSR_HPMCOUNTER28] = { "hpmcounter28", ctr, read_hpmcounter }, 6150 [CSR_HPMCOUNTER29] = { "hpmcounter29", ctr, read_hpmcounter }, 6151 
[CSR_HPMCOUNTER30] = { "hpmcounter30", ctr, read_hpmcounter }, 6152 [CSR_HPMCOUNTER31] = { "hpmcounter31", ctr, read_hpmcounter }, 6153 6154 [CSR_MHPMCOUNTER3] = { "mhpmcounter3", mctr, read_hpmcounter, 6155 write_mhpmcounter }, 6156 [CSR_MHPMCOUNTER4] = { "mhpmcounter4", mctr, read_hpmcounter, 6157 write_mhpmcounter }, 6158 [CSR_MHPMCOUNTER5] = { "mhpmcounter5", mctr, read_hpmcounter, 6159 write_mhpmcounter }, 6160 [CSR_MHPMCOUNTER6] = { "mhpmcounter6", mctr, read_hpmcounter, 6161 write_mhpmcounter }, 6162 [CSR_MHPMCOUNTER7] = { "mhpmcounter7", mctr, read_hpmcounter, 6163 write_mhpmcounter }, 6164 [CSR_MHPMCOUNTER8] = { "mhpmcounter8", mctr, read_hpmcounter, 6165 write_mhpmcounter }, 6166 [CSR_MHPMCOUNTER9] = { "mhpmcounter9", mctr, read_hpmcounter, 6167 write_mhpmcounter }, 6168 [CSR_MHPMCOUNTER10] = { "mhpmcounter10", mctr, read_hpmcounter, 6169 write_mhpmcounter }, 6170 [CSR_MHPMCOUNTER11] = { "mhpmcounter11", mctr, read_hpmcounter, 6171 write_mhpmcounter }, 6172 [CSR_MHPMCOUNTER12] = { "mhpmcounter12", mctr, read_hpmcounter, 6173 write_mhpmcounter }, 6174 [CSR_MHPMCOUNTER13] = { "mhpmcounter13", mctr, read_hpmcounter, 6175 write_mhpmcounter }, 6176 [CSR_MHPMCOUNTER14] = { "mhpmcounter14", mctr, read_hpmcounter, 6177 write_mhpmcounter }, 6178 [CSR_MHPMCOUNTER15] = { "mhpmcounter15", mctr, read_hpmcounter, 6179 write_mhpmcounter }, 6180 [CSR_MHPMCOUNTER16] = { "mhpmcounter16", mctr, read_hpmcounter, 6181 write_mhpmcounter }, 6182 [CSR_MHPMCOUNTER17] = { "mhpmcounter17", mctr, read_hpmcounter, 6183 write_mhpmcounter }, 6184 [CSR_MHPMCOUNTER18] = { "mhpmcounter18", mctr, read_hpmcounter, 6185 write_mhpmcounter }, 6186 [CSR_MHPMCOUNTER19] = { "mhpmcounter19", mctr, read_hpmcounter, 6187 write_mhpmcounter }, 6188 [CSR_MHPMCOUNTER20] = { "mhpmcounter20", mctr, read_hpmcounter, 6189 write_mhpmcounter }, 6190 [CSR_MHPMCOUNTER21] = { "mhpmcounter21", mctr, read_hpmcounter, 6191 write_mhpmcounter }, 6192 [CSR_MHPMCOUNTER22] = { "mhpmcounter22", mctr, read_hpmcounter, 
6193 write_mhpmcounter }, 6194 [CSR_MHPMCOUNTER23] = { "mhpmcounter23", mctr, read_hpmcounter, 6195 write_mhpmcounter }, 6196 [CSR_MHPMCOUNTER24] = { "mhpmcounter24", mctr, read_hpmcounter, 6197 write_mhpmcounter }, 6198 [CSR_MHPMCOUNTER25] = { "mhpmcounter25", mctr, read_hpmcounter, 6199 write_mhpmcounter }, 6200 [CSR_MHPMCOUNTER26] = { "mhpmcounter26", mctr, read_hpmcounter, 6201 write_mhpmcounter }, 6202 [CSR_MHPMCOUNTER27] = { "mhpmcounter27", mctr, read_hpmcounter, 6203 write_mhpmcounter }, 6204 [CSR_MHPMCOUNTER28] = { "mhpmcounter28", mctr, read_hpmcounter, 6205 write_mhpmcounter }, 6206 [CSR_MHPMCOUNTER29] = { "mhpmcounter29", mctr, read_hpmcounter, 6207 write_mhpmcounter }, 6208 [CSR_MHPMCOUNTER30] = { "mhpmcounter30", mctr, read_hpmcounter, 6209 write_mhpmcounter }, 6210 [CSR_MHPMCOUNTER31] = { "mhpmcounter31", mctr, read_hpmcounter, 6211 write_mhpmcounter }, 6212 6213 [CSR_MCOUNTINHIBIT] = { "mcountinhibit", any, read_mcountinhibit, 6214 write_mcountinhibit, 6215 .min_priv_ver = PRIV_VERSION_1_11_0 }, 6216 6217 [CSR_MCYCLECFG] = { "mcyclecfg", smcntrpmf, read_mcyclecfg, 6218 write_mcyclecfg, 6219 .min_priv_ver = PRIV_VERSION_1_12_0 }, 6220 [CSR_MINSTRETCFG] = { "minstretcfg", smcntrpmf, read_minstretcfg, 6221 write_minstretcfg, 6222 .min_priv_ver = PRIV_VERSION_1_12_0 }, 6223 6224 [CSR_MHPMEVENT3] = { "mhpmevent3", any, read_mhpmevent, 6225 write_mhpmevent }, 6226 [CSR_MHPMEVENT4] = { "mhpmevent4", any, read_mhpmevent, 6227 write_mhpmevent }, 6228 [CSR_MHPMEVENT5] = { "mhpmevent5", any, read_mhpmevent, 6229 write_mhpmevent }, 6230 [CSR_MHPMEVENT6] = { "mhpmevent6", any, read_mhpmevent, 6231 write_mhpmevent }, 6232 [CSR_MHPMEVENT7] = { "mhpmevent7", any, read_mhpmevent, 6233 write_mhpmevent }, 6234 [CSR_MHPMEVENT8] = { "mhpmevent8", any, read_mhpmevent, 6235 write_mhpmevent }, 6236 [CSR_MHPMEVENT9] = { "mhpmevent9", any, read_mhpmevent, 6237 write_mhpmevent }, 6238 [CSR_MHPMEVENT10] = { "mhpmevent10", any, read_mhpmevent, 6239 write_mhpmevent }, 6240 
[CSR_MHPMEVENT11] = { "mhpmevent11", any, read_mhpmevent, 6241 write_mhpmevent }, 6242 [CSR_MHPMEVENT12] = { "mhpmevent12", any, read_mhpmevent, 6243 write_mhpmevent }, 6244 [CSR_MHPMEVENT13] = { "mhpmevent13", any, read_mhpmevent, 6245 write_mhpmevent }, 6246 [CSR_MHPMEVENT14] = { "mhpmevent14", any, read_mhpmevent, 6247 write_mhpmevent }, 6248 [CSR_MHPMEVENT15] = { "mhpmevent15", any, read_mhpmevent, 6249 write_mhpmevent }, 6250 [CSR_MHPMEVENT16] = { "mhpmevent16", any, read_mhpmevent, 6251 write_mhpmevent }, 6252 [CSR_MHPMEVENT17] = { "mhpmevent17", any, read_mhpmevent, 6253 write_mhpmevent }, 6254 [CSR_MHPMEVENT18] = { "mhpmevent18", any, read_mhpmevent, 6255 write_mhpmevent }, 6256 [CSR_MHPMEVENT19] = { "mhpmevent19", any, read_mhpmevent, 6257 write_mhpmevent }, 6258 [CSR_MHPMEVENT20] = { "mhpmevent20", any, read_mhpmevent, 6259 write_mhpmevent }, 6260 [CSR_MHPMEVENT21] = { "mhpmevent21", any, read_mhpmevent, 6261 write_mhpmevent }, 6262 [CSR_MHPMEVENT22] = { "mhpmevent22", any, read_mhpmevent, 6263 write_mhpmevent }, 6264 [CSR_MHPMEVENT23] = { "mhpmevent23", any, read_mhpmevent, 6265 write_mhpmevent }, 6266 [CSR_MHPMEVENT24] = { "mhpmevent24", any, read_mhpmevent, 6267 write_mhpmevent }, 6268 [CSR_MHPMEVENT25] = { "mhpmevent25", any, read_mhpmevent, 6269 write_mhpmevent }, 6270 [CSR_MHPMEVENT26] = { "mhpmevent26", any, read_mhpmevent, 6271 write_mhpmevent }, 6272 [CSR_MHPMEVENT27] = { "mhpmevent27", any, read_mhpmevent, 6273 write_mhpmevent }, 6274 [CSR_MHPMEVENT28] = { "mhpmevent28", any, read_mhpmevent, 6275 write_mhpmevent }, 6276 [CSR_MHPMEVENT29] = { "mhpmevent29", any, read_mhpmevent, 6277 write_mhpmevent }, 6278 [CSR_MHPMEVENT30] = { "mhpmevent30", any, read_mhpmevent, 6279 write_mhpmevent }, 6280 [CSR_MHPMEVENT31] = { "mhpmevent31", any, read_mhpmevent, 6281 write_mhpmevent }, 6282 6283 [CSR_MCYCLECFGH] = { "mcyclecfgh", smcntrpmf_32, read_mcyclecfgh, 6284 write_mcyclecfgh, 6285 .min_priv_ver = PRIV_VERSION_1_12_0 }, 6286 [CSR_MINSTRETCFGH] = { 
"minstretcfgh", smcntrpmf_32, read_minstretcfgh, 6287 write_minstretcfgh, 6288 .min_priv_ver = PRIV_VERSION_1_12_0 }, 6289 6290 [CSR_MHPMEVENT3H] = { "mhpmevent3h", sscofpmf_32, read_mhpmeventh, 6291 write_mhpmeventh, 6292 .min_priv_ver = PRIV_VERSION_1_12_0 }, 6293 [CSR_MHPMEVENT4H] = { "mhpmevent4h", sscofpmf_32, read_mhpmeventh, 6294 write_mhpmeventh, 6295 .min_priv_ver = PRIV_VERSION_1_12_0 }, 6296 [CSR_MHPMEVENT5H] = { "mhpmevent5h", sscofpmf_32, read_mhpmeventh, 6297 write_mhpmeventh, 6298 .min_priv_ver = PRIV_VERSION_1_12_0 }, 6299 [CSR_MHPMEVENT6H] = { "mhpmevent6h", sscofpmf_32, read_mhpmeventh, 6300 write_mhpmeventh, 6301 .min_priv_ver = PRIV_VERSION_1_12_0 }, 6302 [CSR_MHPMEVENT7H] = { "mhpmevent7h", sscofpmf_32, read_mhpmeventh, 6303 write_mhpmeventh, 6304 .min_priv_ver = PRIV_VERSION_1_12_0 }, 6305 [CSR_MHPMEVENT8H] = { "mhpmevent8h", sscofpmf_32, read_mhpmeventh, 6306 write_mhpmeventh, 6307 .min_priv_ver = PRIV_VERSION_1_12_0 }, 6308 [CSR_MHPMEVENT9H] = { "mhpmevent9h", sscofpmf_32, read_mhpmeventh, 6309 write_mhpmeventh, 6310 .min_priv_ver = PRIV_VERSION_1_12_0 }, 6311 [CSR_MHPMEVENT10H] = { "mhpmevent10h", sscofpmf_32, read_mhpmeventh, 6312 write_mhpmeventh, 6313 .min_priv_ver = PRIV_VERSION_1_12_0 }, 6314 [CSR_MHPMEVENT11H] = { "mhpmevent11h", sscofpmf_32, read_mhpmeventh, 6315 write_mhpmeventh, 6316 .min_priv_ver = PRIV_VERSION_1_12_0 }, 6317 [CSR_MHPMEVENT12H] = { "mhpmevent12h", sscofpmf_32, read_mhpmeventh, 6318 write_mhpmeventh, 6319 .min_priv_ver = PRIV_VERSION_1_12_0 }, 6320 [CSR_MHPMEVENT13H] = { "mhpmevent13h", sscofpmf_32, read_mhpmeventh, 6321 write_mhpmeventh, 6322 .min_priv_ver = PRIV_VERSION_1_12_0 }, 6323 [CSR_MHPMEVENT14H] = { "mhpmevent14h", sscofpmf_32, read_mhpmeventh, 6324 write_mhpmeventh, 6325 .min_priv_ver = PRIV_VERSION_1_12_0 }, 6326 [CSR_MHPMEVENT15H] = { "mhpmevent15h", sscofpmf_32, read_mhpmeventh, 6327 write_mhpmeventh, 6328 .min_priv_ver = PRIV_VERSION_1_12_0 }, 6329 [CSR_MHPMEVENT16H] = { "mhpmevent16h", sscofpmf_32, 
read_mhpmeventh, 6330 write_mhpmeventh, 6331 .min_priv_ver = PRIV_VERSION_1_12_0 }, 6332 [CSR_MHPMEVENT17H] = { "mhpmevent17h", sscofpmf_32, read_mhpmeventh, 6333 write_mhpmeventh, 6334 .min_priv_ver = PRIV_VERSION_1_12_0 }, 6335 [CSR_MHPMEVENT18H] = { "mhpmevent18h", sscofpmf_32, read_mhpmeventh, 6336 write_mhpmeventh, 6337 .min_priv_ver = PRIV_VERSION_1_12_0 }, 6338 [CSR_MHPMEVENT19H] = { "mhpmevent19h", sscofpmf_32, read_mhpmeventh, 6339 write_mhpmeventh, 6340 .min_priv_ver = PRIV_VERSION_1_12_0 }, 6341 [CSR_MHPMEVENT20H] = { "mhpmevent20h", sscofpmf_32, read_mhpmeventh, 6342 write_mhpmeventh, 6343 .min_priv_ver = PRIV_VERSION_1_12_0 }, 6344 [CSR_MHPMEVENT21H] = { "mhpmevent21h", sscofpmf_32, read_mhpmeventh, 6345 write_mhpmeventh, 6346 .min_priv_ver = PRIV_VERSION_1_12_0 }, 6347 [CSR_MHPMEVENT22H] = { "mhpmevent22h", sscofpmf_32, read_mhpmeventh, 6348 write_mhpmeventh, 6349 .min_priv_ver = PRIV_VERSION_1_12_0 }, 6350 [CSR_MHPMEVENT23H] = { "mhpmevent23h", sscofpmf_32, read_mhpmeventh, 6351 write_mhpmeventh, 6352 .min_priv_ver = PRIV_VERSION_1_12_0 }, 6353 [CSR_MHPMEVENT24H] = { "mhpmevent24h", sscofpmf_32, read_mhpmeventh, 6354 write_mhpmeventh, 6355 .min_priv_ver = PRIV_VERSION_1_12_0 }, 6356 [CSR_MHPMEVENT25H] = { "mhpmevent25h", sscofpmf_32, read_mhpmeventh, 6357 write_mhpmeventh, 6358 .min_priv_ver = PRIV_VERSION_1_12_0 }, 6359 [CSR_MHPMEVENT26H] = { "mhpmevent26h", sscofpmf_32, read_mhpmeventh, 6360 write_mhpmeventh, 6361 .min_priv_ver = PRIV_VERSION_1_12_0 }, 6362 [CSR_MHPMEVENT27H] = { "mhpmevent27h", sscofpmf_32, read_mhpmeventh, 6363 write_mhpmeventh, 6364 .min_priv_ver = PRIV_VERSION_1_12_0 }, 6365 [CSR_MHPMEVENT28H] = { "mhpmevent28h", sscofpmf_32, read_mhpmeventh, 6366 write_mhpmeventh, 6367 .min_priv_ver = PRIV_VERSION_1_12_0 }, 6368 [CSR_MHPMEVENT29H] = { "mhpmevent29h", sscofpmf_32, read_mhpmeventh, 6369 write_mhpmeventh, 6370 .min_priv_ver = PRIV_VERSION_1_12_0 }, 6371 [CSR_MHPMEVENT30H] = { "mhpmevent30h", sscofpmf_32, read_mhpmeventh, 6372 
write_mhpmeventh, 6373 .min_priv_ver = PRIV_VERSION_1_12_0 }, 6374 [CSR_MHPMEVENT31H] = { "mhpmevent31h", sscofpmf_32, read_mhpmeventh, 6375 write_mhpmeventh, 6376 .min_priv_ver = PRIV_VERSION_1_12_0 }, 6377 6378 [CSR_HPMCOUNTER3H] = { "hpmcounter3h", ctr32, read_hpmcounterh }, 6379 [CSR_HPMCOUNTER4H] = { "hpmcounter4h", ctr32, read_hpmcounterh }, 6380 [CSR_HPMCOUNTER5H] = { "hpmcounter5h", ctr32, read_hpmcounterh }, 6381 [CSR_HPMCOUNTER6H] = { "hpmcounter6h", ctr32, read_hpmcounterh }, 6382 [CSR_HPMCOUNTER7H] = { "hpmcounter7h", ctr32, read_hpmcounterh }, 6383 [CSR_HPMCOUNTER8H] = { "hpmcounter8h", ctr32, read_hpmcounterh }, 6384 [CSR_HPMCOUNTER9H] = { "hpmcounter9h", ctr32, read_hpmcounterh }, 6385 [CSR_HPMCOUNTER10H] = { "hpmcounter10h", ctr32, read_hpmcounterh }, 6386 [CSR_HPMCOUNTER11H] = { "hpmcounter11h", ctr32, read_hpmcounterh }, 6387 [CSR_HPMCOUNTER12H] = { "hpmcounter12h", ctr32, read_hpmcounterh }, 6388 [CSR_HPMCOUNTER13H] = { "hpmcounter13h", ctr32, read_hpmcounterh }, 6389 [CSR_HPMCOUNTER14H] = { "hpmcounter14h", ctr32, read_hpmcounterh }, 6390 [CSR_HPMCOUNTER15H] = { "hpmcounter15h", ctr32, read_hpmcounterh }, 6391 [CSR_HPMCOUNTER16H] = { "hpmcounter16h", ctr32, read_hpmcounterh }, 6392 [CSR_HPMCOUNTER17H] = { "hpmcounter17h", ctr32, read_hpmcounterh }, 6393 [CSR_HPMCOUNTER18H] = { "hpmcounter18h", ctr32, read_hpmcounterh }, 6394 [CSR_HPMCOUNTER19H] = { "hpmcounter19h", ctr32, read_hpmcounterh }, 6395 [CSR_HPMCOUNTER20H] = { "hpmcounter20h", ctr32, read_hpmcounterh }, 6396 [CSR_HPMCOUNTER21H] = { "hpmcounter21h", ctr32, read_hpmcounterh }, 6397 [CSR_HPMCOUNTER22H] = { "hpmcounter22h", ctr32, read_hpmcounterh }, 6398 [CSR_HPMCOUNTER23H] = { "hpmcounter23h", ctr32, read_hpmcounterh }, 6399 [CSR_HPMCOUNTER24H] = { "hpmcounter24h", ctr32, read_hpmcounterh }, 6400 [CSR_HPMCOUNTER25H] = { "hpmcounter25h", ctr32, read_hpmcounterh }, 6401 [CSR_HPMCOUNTER26H] = { "hpmcounter26h", ctr32, read_hpmcounterh }, 6402 [CSR_HPMCOUNTER27H] = { "hpmcounter27h", ctr32, 
read_hpmcounterh }, 6403 [CSR_HPMCOUNTER28H] = { "hpmcounter28h", ctr32, read_hpmcounterh }, 6404 [CSR_HPMCOUNTER29H] = { "hpmcounter29h", ctr32, read_hpmcounterh }, 6405 [CSR_HPMCOUNTER30H] = { "hpmcounter30h", ctr32, read_hpmcounterh }, 6406 [CSR_HPMCOUNTER31H] = { "hpmcounter31h", ctr32, read_hpmcounterh }, 6407 6408 [CSR_MHPMCOUNTER3H] = { "mhpmcounter3h", mctr32, read_hpmcounterh, 6409 write_mhpmcounterh }, 6410 [CSR_MHPMCOUNTER4H] = { "mhpmcounter4h", mctr32, read_hpmcounterh, 6411 write_mhpmcounterh }, 6412 [CSR_MHPMCOUNTER5H] = { "mhpmcounter5h", mctr32, read_hpmcounterh, 6413 write_mhpmcounterh }, 6414 [CSR_MHPMCOUNTER6H] = { "mhpmcounter6h", mctr32, read_hpmcounterh, 6415 write_mhpmcounterh }, 6416 [CSR_MHPMCOUNTER7H] = { "mhpmcounter7h", mctr32, read_hpmcounterh, 6417 write_mhpmcounterh }, 6418 [CSR_MHPMCOUNTER8H] = { "mhpmcounter8h", mctr32, read_hpmcounterh, 6419 write_mhpmcounterh }, 6420 [CSR_MHPMCOUNTER9H] = { "mhpmcounter9h", mctr32, read_hpmcounterh, 6421 write_mhpmcounterh }, 6422 [CSR_MHPMCOUNTER10H] = { "mhpmcounter10h", mctr32, read_hpmcounterh, 6423 write_mhpmcounterh }, 6424 [CSR_MHPMCOUNTER11H] = { "mhpmcounter11h", mctr32, read_hpmcounterh, 6425 write_mhpmcounterh }, 6426 [CSR_MHPMCOUNTER12H] = { "mhpmcounter12h", mctr32, read_hpmcounterh, 6427 write_mhpmcounterh }, 6428 [CSR_MHPMCOUNTER13H] = { "mhpmcounter13h", mctr32, read_hpmcounterh, 6429 write_mhpmcounterh }, 6430 [CSR_MHPMCOUNTER14H] = { "mhpmcounter14h", mctr32, read_hpmcounterh, 6431 write_mhpmcounterh }, 6432 [CSR_MHPMCOUNTER15H] = { "mhpmcounter15h", mctr32, read_hpmcounterh, 6433 write_mhpmcounterh }, 6434 [CSR_MHPMCOUNTER16H] = { "mhpmcounter16h", mctr32, read_hpmcounterh, 6435 write_mhpmcounterh }, 6436 [CSR_MHPMCOUNTER17H] = { "mhpmcounter17h", mctr32, read_hpmcounterh, 6437 write_mhpmcounterh }, 6438 [CSR_MHPMCOUNTER18H] = { "mhpmcounter18h", mctr32, read_hpmcounterh, 6439 write_mhpmcounterh }, 6440 [CSR_MHPMCOUNTER19H] = { "mhpmcounter19h", mctr32, read_hpmcounterh, 6441 
write_mhpmcounterh }, 6442 [CSR_MHPMCOUNTER20H] = { "mhpmcounter20h", mctr32, read_hpmcounterh, 6443 write_mhpmcounterh }, 6444 [CSR_MHPMCOUNTER21H] = { "mhpmcounter21h", mctr32, read_hpmcounterh, 6445 write_mhpmcounterh }, 6446 [CSR_MHPMCOUNTER22H] = { "mhpmcounter22h", mctr32, read_hpmcounterh, 6447 write_mhpmcounterh }, 6448 [CSR_MHPMCOUNTER23H] = { "mhpmcounter23h", mctr32, read_hpmcounterh, 6449 write_mhpmcounterh }, 6450 [CSR_MHPMCOUNTER24H] = { "mhpmcounter24h", mctr32, read_hpmcounterh, 6451 write_mhpmcounterh }, 6452 [CSR_MHPMCOUNTER25H] = { "mhpmcounter25h", mctr32, read_hpmcounterh, 6453 write_mhpmcounterh }, 6454 [CSR_MHPMCOUNTER26H] = { "mhpmcounter26h", mctr32, read_hpmcounterh, 6455 write_mhpmcounterh }, 6456 [CSR_MHPMCOUNTER27H] = { "mhpmcounter27h", mctr32, read_hpmcounterh, 6457 write_mhpmcounterh }, 6458 [CSR_MHPMCOUNTER28H] = { "mhpmcounter28h", mctr32, read_hpmcounterh, 6459 write_mhpmcounterh }, 6460 [CSR_MHPMCOUNTER29H] = { "mhpmcounter29h", mctr32, read_hpmcounterh, 6461 write_mhpmcounterh }, 6462 [CSR_MHPMCOUNTER30H] = { "mhpmcounter30h", mctr32, read_hpmcounterh, 6463 write_mhpmcounterh }, 6464 [CSR_MHPMCOUNTER31H] = { "mhpmcounter31h", mctr32, read_hpmcounterh, 6465 write_mhpmcounterh }, 6466 [CSR_SCOUNTOVF] = { "scountovf", sscofpmf, read_scountovf, 6467 .min_priv_ver = PRIV_VERSION_1_12_0 }, 6468 6469 #endif /* !CONFIG_USER_ONLY */ 6470 }; 6471