/*
 * RISC-V Control and Status Registers.
 *
 * Copyright (c) 2016-2017 Sagar Karandikar, sagark@eecs.berkeley.edu
 * Copyright (c) 2017-2018 SiFive, Inc.
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms and conditions of the GNU General Public License,
 * version 2 or later, as published by the Free Software Foundation.
 *
 * This program is distributed in the hope it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
 * more details.
 *
 * You should have received a copy of the GNU General Public License along with
 * this program. If not, see <http://www.gnu.org/licenses/>.
 */

#include "qemu/osdep.h"
#include "qemu/log.h"
#include "qemu/timer.h"
#include "cpu.h"
#include "pmu.h"
#include "time_helper.h"
#include "qemu/main-loop.h"
#include "exec/exec-all.h"
#include "sysemu/cpu-timers.h"
#include "qemu/guest-random.h"
#include "qapi/error.h"

/* CSR function table public API */
void riscv_get_csr_ops(int csrno, riscv_csr_operations *ops)
{
    *ops = csr_ops[csrno & (CSR_TABLE_SIZE - 1)];
}

void riscv_set_csr_ops(int csrno, riscv_csr_operations *ops)
{
    csr_ops[csrno & (CSR_TABLE_SIZE - 1)] = *ops;
}
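/*
 * Illustrative note (not used by this file): code outside this file can
 * replace how a particular CSR is handled by swapping its csr_ops entry
 * through the two helpers above.  A sketch, assuming riscv_csr_operations
 * exposes a read hook compatible with the handlers in this file and that
 * my_mscratch_read is a caller-provided function:
 *
 *     riscv_csr_operations ops;
 *
 *     riscv_get_csr_ops(CSR_MSCRATCH, &ops);
 *     ops.read = my_mscratch_read;
 *     riscv_set_csr_ops(CSR_MSCRATCH, &ops);
 *
 * Both helpers mask csrno with CSR_TABLE_SIZE - 1, so an out-of-range CSR
 * number aliases into the table rather than indexing out of bounds.
 */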
/* Predicates */
#if !defined(CONFIG_USER_ONLY)
static RISCVException smstateen_acc_ok(CPURISCVState *env, int index,
                                       uint64_t bit)
{
    bool virt = riscv_cpu_virt_enabled(env);
    RISCVCPU *cpu = env_archcpu(env);

    if (env->priv == PRV_M || !cpu->cfg.ext_smstateen) {
        return RISCV_EXCP_NONE;
    }

    if (!(env->mstateen[index] & bit)) {
        return RISCV_EXCP_ILLEGAL_INST;
    }

    if (virt) {
        if (!(env->hstateen[index] & bit)) {
            return RISCV_EXCP_VIRT_INSTRUCTION_FAULT;
        }

        if (env->priv == PRV_U && !(env->sstateen[index] & bit)) {
            return RISCV_EXCP_VIRT_INSTRUCTION_FAULT;
        }
    }

    if (env->priv == PRV_U && riscv_has_ext(env, RVS)) {
        if (!(env->sstateen[index] & bit)) {
            return RISCV_EXCP_ILLEGAL_INST;
        }
    }

    return RISCV_EXCP_NONE;
}
#endif

static RISCVException fs(CPURISCVState *env, int csrno)
{
#if !defined(CONFIG_USER_ONLY)
    if (!env->debugger && !riscv_cpu_fp_enabled(env) &&
        !RISCV_CPU(env_cpu(env))->cfg.ext_zfinx) {
        return RISCV_EXCP_ILLEGAL_INST;
    }
#endif
    return RISCV_EXCP_NONE;
}

static RISCVException vs(CPURISCVState *env, int csrno)
{
    RISCVCPU *cpu = env_archcpu(env);

    if (env->misa_ext & RVV ||
        cpu->cfg.ext_zve32f || cpu->cfg.ext_zve64f) {
#if !defined(CONFIG_USER_ONLY)
        if (!env->debugger && !riscv_cpu_vector_enabled(env)) {
            return RISCV_EXCP_ILLEGAL_INST;
        }
#endif
        return RISCV_EXCP_NONE;
    }
    return RISCV_EXCP_ILLEGAL_INST;
}

static RISCVException ctr(CPURISCVState *env, int csrno)
{
#if !defined(CONFIG_USER_ONLY)
    RISCVCPU *cpu = env_archcpu(env);
    int ctr_index;
    target_ulong ctr_mask;
    int base_csrno = CSR_CYCLE;
    bool rv32 = riscv_cpu_mxl(env) == MXL_RV32 ? true : false;

    if (rv32 && csrno >= CSR_CYCLEH) {
        /* Offset for RV32 hpmcounternh counters */
        base_csrno += 0x80;
    }
    ctr_index = csrno - base_csrno;
    ctr_mask = BIT(ctr_index);

    if ((csrno >= CSR_CYCLE && csrno <= CSR_INSTRET) ||
        (csrno >= CSR_CYCLEH && csrno <= CSR_INSTRETH)) {
        goto skip_ext_pmu_check;
    }

    if (!(cpu->pmu_avail_ctrs & ctr_mask)) {
        /* No counter is enabled in PMU or the counter is out of range */
        return RISCV_EXCP_ILLEGAL_INST;
    }

skip_ext_pmu_check:

    if (env->priv < PRV_M && !get_field(env->mcounteren, ctr_mask)) {
        return RISCV_EXCP_ILLEGAL_INST;
    }

    if (riscv_cpu_virt_enabled(env)) {
        if (!get_field(env->hcounteren, ctr_mask) ||
            (env->priv == PRV_U && !get_field(env->scounteren, ctr_mask))) {
            return RISCV_EXCP_VIRT_INSTRUCTION_FAULT;
        }
    }

    if (riscv_has_ext(env, RVS) && env->priv == PRV_U &&
        !get_field(env->scounteren, ctr_mask)) {
        return RISCV_EXCP_ILLEGAL_INST;
    }

#endif
    return RISCV_EXCP_NONE;
}

static RISCVException ctr32(CPURISCVState *env, int csrno)
{
    if (riscv_cpu_mxl(env) != MXL_RV32) {
        return RISCV_EXCP_ILLEGAL_INST;
    }

    return ctr(env, csrno);
}

#if !defined(CONFIG_USER_ONLY)
static RISCVException mctr(CPURISCVState *env, int csrno)
{
    RISCVCPU *cpu = env_archcpu(env);
    int ctr_index;
    int base_csrno = CSR_MHPMCOUNTER3;

    if ((riscv_cpu_mxl(env) == MXL_RV32) && csrno >= CSR_MCYCLEH) {
        /* Offset for RV32 mhpmcounternh counters */
        base_csrno += 0x80;
    }
    ctr_index = csrno - base_csrno;
    if (!cpu->cfg.pmu_num || ctr_index >= cpu->cfg.pmu_num) {
        /* The PMU is not enabled or the counter is out of range */
        return RISCV_EXCP_ILLEGAL_INST;
    }

    return RISCV_EXCP_NONE;
}

static RISCVException mctr32(CPURISCVState *env, int csrno)
{
    if (riscv_cpu_mxl(env) != MXL_RV32) {
        return RISCV_EXCP_ILLEGAL_INST;
    }

    return mctr(env, csrno);
}

static RISCVException sscofpmf(CPURISCVState *env, int csrno)
{
    RISCVCPU *cpu = env_archcpu(env);

    if (!cpu->cfg.ext_sscofpmf) {
        return RISCV_EXCP_ILLEGAL_INST;
    }

    return RISCV_EXCP_NONE;
}

static RISCVException any(CPURISCVState *env, int csrno)
{
    return RISCV_EXCP_NONE;
}

static RISCVException any32(CPURISCVState *env, int csrno)
{
    if (riscv_cpu_mxl(env) != MXL_RV32) {
        return RISCV_EXCP_ILLEGAL_INST;
    }

    return any(env, csrno);
}

static int aia_any(CPURISCVState *env, int csrno)
{
    RISCVCPU *cpu = env_archcpu(env);

    if (!cpu->cfg.ext_smaia) {
        return RISCV_EXCP_ILLEGAL_INST;
    }

    return any(env, csrno);
}

static int aia_any32(CPURISCVState *env, int csrno)
{
    RISCVCPU *cpu = env_archcpu(env);

    if (!cpu->cfg.ext_smaia) {
        return RISCV_EXCP_ILLEGAL_INST;
    }

    return any32(env, csrno);
}

static RISCVException smode(CPURISCVState *env, int csrno)
{
    if (riscv_has_ext(env, RVS)) {
        return RISCV_EXCP_NONE;
    }

    return RISCV_EXCP_ILLEGAL_INST;
}

static int smode32(CPURISCVState *env, int csrno)
{
    if (riscv_cpu_mxl(env) != MXL_RV32) {
        return RISCV_EXCP_ILLEGAL_INST;
    }

    return smode(env, csrno);
}

static int aia_smode(CPURISCVState *env, int csrno)
{
    RISCVCPU *cpu = env_archcpu(env);

    if (!cpu->cfg.ext_ssaia) {
        return
RISCV_EXCP_ILLEGAL_INST; 264 } 265 266 return smode(env, csrno); 267 } 268 269 static int aia_smode32(CPURISCVState *env, int csrno) 270 { 271 RISCVCPU *cpu = env_archcpu(env); 272 273 if (!cpu->cfg.ext_ssaia) { 274 return RISCV_EXCP_ILLEGAL_INST; 275 } 276 277 return smode32(env, csrno); 278 } 279 280 static RISCVException hmode(CPURISCVState *env, int csrno) 281 { 282 if (riscv_has_ext(env, RVH)) { 283 return RISCV_EXCP_NONE; 284 } 285 286 return RISCV_EXCP_ILLEGAL_INST; 287 } 288 289 static RISCVException hmode32(CPURISCVState *env, int csrno) 290 { 291 if (riscv_cpu_mxl(env) != MXL_RV32) { 292 return RISCV_EXCP_ILLEGAL_INST; 293 } 294 295 return hmode(env, csrno); 296 297 } 298 299 static RISCVException umode(CPURISCVState *env, int csrno) 300 { 301 if (riscv_has_ext(env, RVU)) { 302 return RISCV_EXCP_NONE; 303 } 304 305 return RISCV_EXCP_ILLEGAL_INST; 306 } 307 308 static RISCVException umode32(CPURISCVState *env, int csrno) 309 { 310 if (riscv_cpu_mxl(env) != MXL_RV32) { 311 return RISCV_EXCP_ILLEGAL_INST; 312 } 313 314 return umode(env, csrno); 315 } 316 317 static RISCVException mstateen(CPURISCVState *env, int csrno) 318 { 319 RISCVCPU *cpu = env_archcpu(env); 320 321 if (!cpu->cfg.ext_smstateen) { 322 return RISCV_EXCP_ILLEGAL_INST; 323 } 324 325 return any(env, csrno); 326 } 327 328 static RISCVException hstateen_pred(CPURISCVState *env, int csrno, int base) 329 { 330 RISCVCPU *cpu = env_archcpu(env); 331 332 if (!cpu->cfg.ext_smstateen) { 333 return RISCV_EXCP_ILLEGAL_INST; 334 } 335 336 if (env->priv < PRV_M) { 337 if (!(env->mstateen[csrno - base] & SMSTATEEN_STATEEN)) { 338 return RISCV_EXCP_ILLEGAL_INST; 339 } 340 } 341 342 return hmode(env, csrno); 343 } 344 345 static RISCVException hstateen(CPURISCVState *env, int csrno) 346 { 347 return hstateen_pred(env, csrno, CSR_HSTATEEN0); 348 } 349 350 static RISCVException hstateenh(CPURISCVState *env, int csrno) 351 { 352 return hstateen_pred(env, csrno, CSR_HSTATEEN0H); 353 } 354 355 static RISCVException sstateen(CPURISCVState *env, int csrno) 356 { 357 bool virt = riscv_cpu_virt_enabled(env); 358 int index = csrno - CSR_SSTATEEN0; 359 RISCVCPU *cpu = env_archcpu(env); 360 361 if (!cpu->cfg.ext_smstateen) { 362 return RISCV_EXCP_ILLEGAL_INST; 363 } 364 365 if (env->priv < PRV_M) { 366 if (!(env->mstateen[index] & SMSTATEEN_STATEEN)) { 367 return RISCV_EXCP_ILLEGAL_INST; 368 } 369 370 if (virt) { 371 if (!(env->hstateen[index] & SMSTATEEN_STATEEN)) { 372 return RISCV_EXCP_VIRT_INSTRUCTION_FAULT; 373 } 374 } 375 } 376 377 return smode(env, csrno); 378 } 379 380 /* Checks if PointerMasking registers could be accessed */ 381 static RISCVException pointer_masking(CPURISCVState *env, int csrno) 382 { 383 /* Check if j-ext is present */ 384 if (riscv_has_ext(env, RVJ)) { 385 return RISCV_EXCP_NONE; 386 } 387 return RISCV_EXCP_ILLEGAL_INST; 388 } 389 390 static int aia_hmode(CPURISCVState *env, int csrno) 391 { 392 RISCVCPU *cpu = env_archcpu(env); 393 394 if (!cpu->cfg.ext_ssaia) { 395 return RISCV_EXCP_ILLEGAL_INST; 396 } 397 398 return hmode(env, csrno); 399 } 400 401 static int aia_hmode32(CPURISCVState *env, int csrno) 402 { 403 RISCVCPU *cpu = env_archcpu(env); 404 405 if (!cpu->cfg.ext_ssaia) { 406 return RISCV_EXCP_ILLEGAL_INST; 407 } 408 409 return hmode32(env, csrno); 410 } 411 412 static RISCVException pmp(CPURISCVState *env, int csrno) 413 { 414 if (riscv_cpu_cfg(env)->pmp) { 415 return RISCV_EXCP_NONE; 416 } 417 418 return RISCV_EXCP_ILLEGAL_INST; 419 } 420 421 static RISCVException epmp(CPURISCVState *env, int csrno) 422 
{ 423 if (env->priv == PRV_M && riscv_cpu_cfg(env)->epmp) { 424 return RISCV_EXCP_NONE; 425 } 426 427 return RISCV_EXCP_ILLEGAL_INST; 428 } 429 430 static RISCVException debug(CPURISCVState *env, int csrno) 431 { 432 if (riscv_cpu_cfg(env)->debug) { 433 return RISCV_EXCP_NONE; 434 } 435 436 return RISCV_EXCP_ILLEGAL_INST; 437 } 438 #endif 439 440 static RISCVException seed(CPURISCVState *env, int csrno) 441 { 442 RISCVCPU *cpu = env_archcpu(env); 443 444 if (!cpu->cfg.ext_zkr) { 445 return RISCV_EXCP_ILLEGAL_INST; 446 } 447 448 #if !defined(CONFIG_USER_ONLY) 449 /* 450 * With a CSR read-write instruction: 451 * 1) The seed CSR is always available in machine mode as normal. 452 * 2) Attempted access to seed from virtual modes VS and VU always raises 453 * an exception(virtual instruction exception only if mseccfg.sseed=1). 454 * 3) Without the corresponding access control bit set to 1, any attempted 455 * access to seed from U, S or HS modes will raise an illegal instruction 456 * exception. 457 */ 458 if (env->priv == PRV_M) { 459 return RISCV_EXCP_NONE; 460 } else if (riscv_cpu_virt_enabled(env)) { 461 if (env->mseccfg & MSECCFG_SSEED) { 462 return RISCV_EXCP_VIRT_INSTRUCTION_FAULT; 463 } else { 464 return RISCV_EXCP_ILLEGAL_INST; 465 } 466 } else { 467 if (env->priv == PRV_S && (env->mseccfg & MSECCFG_SSEED)) { 468 return RISCV_EXCP_NONE; 469 } else if (env->priv == PRV_U && (env->mseccfg & MSECCFG_USEED)) { 470 return RISCV_EXCP_NONE; 471 } else { 472 return RISCV_EXCP_ILLEGAL_INST; 473 } 474 } 475 #else 476 return RISCV_EXCP_NONE; 477 #endif 478 } 479 480 /* User Floating-Point CSRs */ 481 static RISCVException read_fflags(CPURISCVState *env, int csrno, 482 target_ulong *val) 483 { 484 *val = riscv_cpu_get_fflags(env); 485 return RISCV_EXCP_NONE; 486 } 487 488 static RISCVException write_fflags(CPURISCVState *env, int csrno, 489 target_ulong val) 490 { 491 #if !defined(CONFIG_USER_ONLY) 492 if (riscv_has_ext(env, RVF)) { 493 env->mstatus |= MSTATUS_FS; 494 } 495 #endif 496 riscv_cpu_set_fflags(env, val & (FSR_AEXC >> FSR_AEXC_SHIFT)); 497 return RISCV_EXCP_NONE; 498 } 499 500 static RISCVException read_frm(CPURISCVState *env, int csrno, 501 target_ulong *val) 502 { 503 *val = env->frm; 504 return RISCV_EXCP_NONE; 505 } 506 507 static RISCVException write_frm(CPURISCVState *env, int csrno, 508 target_ulong val) 509 { 510 #if !defined(CONFIG_USER_ONLY) 511 if (riscv_has_ext(env, RVF)) { 512 env->mstatus |= MSTATUS_FS; 513 } 514 #endif 515 env->frm = val & (FSR_RD >> FSR_RD_SHIFT); 516 return RISCV_EXCP_NONE; 517 } 518 519 static RISCVException read_fcsr(CPURISCVState *env, int csrno, 520 target_ulong *val) 521 { 522 *val = (riscv_cpu_get_fflags(env) << FSR_AEXC_SHIFT) 523 | (env->frm << FSR_RD_SHIFT); 524 return RISCV_EXCP_NONE; 525 } 526 527 static RISCVException write_fcsr(CPURISCVState *env, int csrno, 528 target_ulong val) 529 { 530 #if !defined(CONFIG_USER_ONLY) 531 if (riscv_has_ext(env, RVF)) { 532 env->mstatus |= MSTATUS_FS; 533 } 534 #endif 535 env->frm = (val & FSR_RD) >> FSR_RD_SHIFT; 536 riscv_cpu_set_fflags(env, (val & FSR_AEXC) >> FSR_AEXC_SHIFT); 537 return RISCV_EXCP_NONE; 538 } 539 540 static RISCVException read_vtype(CPURISCVState *env, int csrno, 541 target_ulong *val) 542 { 543 uint64_t vill; 544 switch (env->xl) { 545 case MXL_RV32: 546 vill = (uint32_t)env->vill << 31; 547 break; 548 case MXL_RV64: 549 vill = (uint64_t)env->vill << 63; 550 break; 551 default: 552 g_assert_not_reached(); 553 } 554 *val = (target_ulong)vill | env->vtype; 555 return RISCV_EXCP_NONE; 
}

static RISCVException read_vl(CPURISCVState *env, int csrno,
                              target_ulong *val)
{
    *val = env->vl;
    return RISCV_EXCP_NONE;
}

static int read_vlenb(CPURISCVState *env, int csrno, target_ulong *val)
{
    *val = env_archcpu(env)->cfg.vlen >> 3;
    return RISCV_EXCP_NONE;
}

static RISCVException read_vxrm(CPURISCVState *env, int csrno,
                                target_ulong *val)
{
    *val = env->vxrm;
    return RISCV_EXCP_NONE;
}

static RISCVException write_vxrm(CPURISCVState *env, int csrno,
                                 target_ulong val)
{
#if !defined(CONFIG_USER_ONLY)
    env->mstatus |= MSTATUS_VS;
#endif
    env->vxrm = val;
    return RISCV_EXCP_NONE;
}

static RISCVException read_vxsat(CPURISCVState *env, int csrno,
                                 target_ulong *val)
{
    *val = env->vxsat;
    return RISCV_EXCP_NONE;
}

static RISCVException write_vxsat(CPURISCVState *env, int csrno,
                                  target_ulong val)
{
#if !defined(CONFIG_USER_ONLY)
    env->mstatus |= MSTATUS_VS;
#endif
    env->vxsat = val;
    return RISCV_EXCP_NONE;
}

static RISCVException read_vstart(CPURISCVState *env, int csrno,
                                  target_ulong *val)
{
    *val = env->vstart;
    return RISCV_EXCP_NONE;
}

static RISCVException write_vstart(CPURISCVState *env, int csrno,
                                   target_ulong val)
{
#if !defined(CONFIG_USER_ONLY)
    env->mstatus |= MSTATUS_VS;
#endif
    /*
     * The vstart CSR is defined to have only enough writable bits
     * to hold the largest element index, i.e. lg2(VLEN) bits.
     */
    env->vstart = val & ~(~0ULL << ctzl(env_archcpu(env)->cfg.vlen));
    return RISCV_EXCP_NONE;
}

static int read_vcsr(CPURISCVState *env, int csrno, target_ulong *val)
{
    *val = (env->vxrm << VCSR_VXRM_SHIFT) | (env->vxsat << VCSR_VXSAT_SHIFT);
    return RISCV_EXCP_NONE;
}

static int write_vcsr(CPURISCVState *env, int csrno, target_ulong val)
{
#if !defined(CONFIG_USER_ONLY)
    env->mstatus |= MSTATUS_VS;
#endif
    env->vxrm = (val & VCSR_VXRM) >> VCSR_VXRM_SHIFT;
    env->vxsat = (val & VCSR_VXSAT) >> VCSR_VXSAT_SHIFT;
    return RISCV_EXCP_NONE;
}

/* User Timers and Counters */
static target_ulong get_ticks(bool shift)
{
    int64_t val;
    target_ulong result;

#if !defined(CONFIG_USER_ONLY)
    if (icount_enabled()) {
        val = icount_get();
    } else {
        val = cpu_get_host_ticks();
    }
#else
    val = cpu_get_host_ticks();
#endif

    if (shift) {
        result = val >> 32;
    } else {
        result = val;
    }

    return result;
}

#if defined(CONFIG_USER_ONLY)
static RISCVException read_time(CPURISCVState *env, int csrno,
                                target_ulong *val)
{
    *val = cpu_get_host_ticks();
    return RISCV_EXCP_NONE;
}

static RISCVException read_timeh(CPURISCVState *env, int csrno,
                                 target_ulong *val)
{
    *val = cpu_get_host_ticks() >> 32;
    return RISCV_EXCP_NONE;
}

static int read_hpmcounter(CPURISCVState *env, int csrno, target_ulong *val)
{
    *val = get_ticks(false);
    return RISCV_EXCP_NONE;
}

static int read_hpmcounterh(CPURISCVState *env, int csrno, target_ulong *val)
{
    *val = get_ticks(true);
    return RISCV_EXCP_NONE;
}

#else /* CONFIG_USER_ONLY */
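/*
 * Machine-level PMU event and counter CSRs (system emulation only).
 *
 * mhpmevent_val[] and mhpmeventh_val[] are indexed relative to
 * CSR_MCOUNTINHIBIT, so mhpmevent3 lands at index 3; the counter handlers
 * below use the same convention relative to CSR_MCYCLE/CSR_MCYCLEH.  On
 * RV32 the low and high halves are combined into a single 64-bit value
 * before being handed to riscv_pmu_update_event_map() or
 * riscv_pmu_setup_timer().
 */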
static int read_mhpmevent(CPURISCVState *env, int csrno, target_ulong *val)
{
    int evt_index = csrno - CSR_MCOUNTINHIBIT;

    *val = env->mhpmevent_val[evt_index];

    return RISCV_EXCP_NONE;
}

static int write_mhpmevent(CPURISCVState *env, int csrno, target_ulong val)
{
    int evt_index = csrno - CSR_MCOUNTINHIBIT;
    uint64_t mhpmevt_val = val;

    env->mhpmevent_val[evt_index] = val;

    if (riscv_cpu_mxl(env) == MXL_RV32) {
        mhpmevt_val = mhpmevt_val |
                      ((uint64_t)env->mhpmeventh_val[evt_index] << 32);
    }
    riscv_pmu_update_event_map(env, mhpmevt_val, evt_index);

    return RISCV_EXCP_NONE;
}

static int read_mhpmeventh(CPURISCVState *env, int csrno, target_ulong *val)
{
    int evt_index = csrno - CSR_MHPMEVENT3H + 3;

    *val = env->mhpmeventh_val[evt_index];

    return RISCV_EXCP_NONE;
}

static int write_mhpmeventh(CPURISCVState *env, int csrno, target_ulong val)
{
    int evt_index = csrno - CSR_MHPMEVENT3H + 3;
    uint64_t mhpmevth_val = val;
    uint64_t mhpmevt_val = env->mhpmevent_val[evt_index];

    mhpmevt_val = mhpmevt_val | (mhpmevth_val << 32);
    env->mhpmeventh_val[evt_index] = val;

    riscv_pmu_update_event_map(env, mhpmevt_val, evt_index);

    return RISCV_EXCP_NONE;
}

static int write_mhpmcounter(CPURISCVState *env, int csrno, target_ulong val)
{
    int ctr_idx = csrno - CSR_MCYCLE;
    PMUCTRState *counter = &env->pmu_ctrs[ctr_idx];
    uint64_t mhpmctr_val = val;

    counter->mhpmcounter_val = val;
    if (riscv_pmu_ctr_monitor_cycles(env, ctr_idx) ||
        riscv_pmu_ctr_monitor_instructions(env, ctr_idx)) {
        counter->mhpmcounter_prev = get_ticks(false);
        if (ctr_idx > 2) {
            if (riscv_cpu_mxl(env) == MXL_RV32) {
                mhpmctr_val = mhpmctr_val |
                              ((uint64_t)counter->mhpmcounterh_val << 32);
            }
            riscv_pmu_setup_timer(env, mhpmctr_val, ctr_idx);
        }
    } else {
        /* Other counters can keep incrementing from the given value */
        counter->mhpmcounter_prev = val;
    }

    return RISCV_EXCP_NONE;
}

static int write_mhpmcounterh(CPURISCVState *env, int csrno, target_ulong val)
{
    int ctr_idx = csrno - CSR_MCYCLEH;
    PMUCTRState *counter = &env->pmu_ctrs[ctr_idx];
    uint64_t mhpmctr_val = counter->mhpmcounter_val;
    uint64_t mhpmctrh_val = val;

    counter->mhpmcounterh_val = val;
    mhpmctr_val = mhpmctr_val | (mhpmctrh_val << 32);
    if (riscv_pmu_ctr_monitor_cycles(env, ctr_idx) ||
        riscv_pmu_ctr_monitor_instructions(env, ctr_idx)) {
        counter->mhpmcounterh_prev = get_ticks(true);
        if (ctr_idx > 2) {
            riscv_pmu_setup_timer(env, mhpmctr_val, ctr_idx);
        }
    } else {
        counter->mhpmcounterh_prev = val;
    }

    return RISCV_EXCP_NONE;
}
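/*
 * Compute the current value of one half of a counter.  The stored
 * mhpmcounter(h)_val is what the guest last wrote and mhpmcounter(h)_prev is
 * the tick value sampled at that write, so for counters programmed to count
 * cycles or retired instructions the read value is
 * ticks_now - prev + written_val; all other counters simply return the
 * stored value.
 */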
static RISCVException riscv_pmu_read_ctr(CPURISCVState *env, target_ulong *val,
                                         bool upper_half, uint32_t ctr_idx)
{
    PMUCTRState counter = env->pmu_ctrs[ctr_idx];
    target_ulong ctr_prev = upper_half ? counter.mhpmcounterh_prev :
                                         counter.mhpmcounter_prev;
    target_ulong ctr_val = upper_half ? counter.mhpmcounterh_val :
                                        counter.mhpmcounter_val;

    if (get_field(env->mcountinhibit, BIT(ctr_idx))) {
        /*
         * The counter should not increment while the inhibit bit is set.
         * We can't really stop the icount counting, so just return the
         * counter value written by the supervisor to indicate that the
         * counter was not incremented.
         */
        if (!counter.started) {
            *val = ctr_val;
            return RISCV_EXCP_NONE;
        } else {
            /* Mark that the counter has been stopped */
            counter.started = false;
        }
    }

    /*
     * The kernel computes the perf delta by subtracting the current value from
     * the value it initialized previously (ctr_val).
     */
    if (riscv_pmu_ctr_monitor_cycles(env, ctr_idx) ||
        riscv_pmu_ctr_monitor_instructions(env, ctr_idx)) {
        *val = get_ticks(upper_half) - ctr_prev + ctr_val;
    } else {
        *val = ctr_val;
    }

    return RISCV_EXCP_NONE;
}

static int read_hpmcounter(CPURISCVState *env, int csrno, target_ulong *val)
{
    uint16_t ctr_index;

    if (csrno >= CSR_MCYCLE && csrno <= CSR_MHPMCOUNTER31) {
        ctr_index = csrno - CSR_MCYCLE;
    } else if (csrno >= CSR_CYCLE && csrno <= CSR_HPMCOUNTER31) {
        ctr_index = csrno - CSR_CYCLE;
    } else {
        return RISCV_EXCP_ILLEGAL_INST;
    }

    return riscv_pmu_read_ctr(env, val, false, ctr_index);
}

static int read_hpmcounterh(CPURISCVState *env, int csrno, target_ulong *val)
{
    uint16_t ctr_index;

    if (csrno >= CSR_MCYCLEH && csrno <= CSR_MHPMCOUNTER31H) {
        ctr_index = csrno - CSR_MCYCLEH;
    } else if (csrno >= CSR_CYCLEH && csrno <= CSR_HPMCOUNTER31H) {
        ctr_index = csrno - CSR_CYCLEH;
    } else {
        return RISCV_EXCP_ILLEGAL_INST;
    }

    return riscv_pmu_read_ctr(env, val, true, ctr_index);
}

static int read_scountovf(CPURISCVState *env, int csrno, target_ulong *val)
{
    int mhpmevt_start = CSR_MHPMEVENT3 - CSR_MCOUNTINHIBIT;
    int i;
    *val = 0;
    target_ulong *mhpm_evt_val;
    uint64_t of_bit_mask;

    if (riscv_cpu_mxl(env) == MXL_RV32) {
        mhpm_evt_val = env->mhpmeventh_val;
        of_bit_mask = MHPMEVENTH_BIT_OF;
    } else {
        mhpm_evt_val = env->mhpmevent_val;
        of_bit_mask = MHPMEVENT_BIT_OF;
    }

    for (i = mhpmevt_start; i < RV_MAX_MHPMEVENTS; i++) {
        if ((get_field(env->mcounteren, BIT(i))) &&
            (mhpm_evt_val[i] & of_bit_mask)) {
            *val |= BIT(i);
        }
    }

    return RISCV_EXCP_NONE;
}

static RISCVException read_time(CPURISCVState *env, int csrno,
                                target_ulong *val)
{
    uint64_t delta = riscv_cpu_virt_enabled(env) ? env->htimedelta : 0;

    if (!env->rdtime_fn) {
        return RISCV_EXCP_ILLEGAL_INST;
    }

    *val = env->rdtime_fn(env->rdtime_fn_arg) + delta;
    return RISCV_EXCP_NONE;
}

static RISCVException read_timeh(CPURISCVState *env, int csrno,
                                 target_ulong *val)
{
    uint64_t delta = riscv_cpu_virt_enabled(env) ? env->htimedelta : 0;

    if (!env->rdtime_fn) {
        return RISCV_EXCP_ILLEGAL_INST;
    }

    *val = (env->rdtime_fn(env->rdtime_fn_arg) + delta) >> 32;
    return RISCV_EXCP_NONE;
}

static RISCVException sstc(CPURISCVState *env, int csrno)
{
    RISCVCPU *cpu = env_archcpu(env);
    bool hmode_check = false;

    if (!cpu->cfg.ext_sstc || !env->rdtime_fn) {
        return RISCV_EXCP_ILLEGAL_INST;
    }

    if (env->priv == PRV_M) {
        return RISCV_EXCP_NONE;
    }

    /*
     * No need for a separate function for RV32, as menvcfg stores both
     * menvcfg and menvcfgh for RV32.
927 */ 928 if (!(get_field(env->mcounteren, COUNTEREN_TM) && 929 get_field(env->menvcfg, MENVCFG_STCE))) { 930 return RISCV_EXCP_ILLEGAL_INST; 931 } 932 933 if (riscv_cpu_virt_enabled(env)) { 934 if (!(get_field(env->hcounteren, COUNTEREN_TM) && 935 get_field(env->henvcfg, HENVCFG_STCE))) { 936 return RISCV_EXCP_VIRT_INSTRUCTION_FAULT; 937 } 938 } 939 940 if ((csrno == CSR_VSTIMECMP) || (csrno == CSR_VSTIMECMPH)) { 941 hmode_check = true; 942 } 943 944 return hmode_check ? hmode(env, csrno) : smode(env, csrno); 945 } 946 947 static RISCVException sstc_32(CPURISCVState *env, int csrno) 948 { 949 if (riscv_cpu_mxl(env) != MXL_RV32) { 950 return RISCV_EXCP_ILLEGAL_INST; 951 } 952 953 return sstc(env, csrno); 954 } 955 956 static RISCVException read_vstimecmp(CPURISCVState *env, int csrno, 957 target_ulong *val) 958 { 959 *val = env->vstimecmp; 960 961 return RISCV_EXCP_NONE; 962 } 963 964 static RISCVException read_vstimecmph(CPURISCVState *env, int csrno, 965 target_ulong *val) 966 { 967 *val = env->vstimecmp >> 32; 968 969 return RISCV_EXCP_NONE; 970 } 971 972 static RISCVException write_vstimecmp(CPURISCVState *env, int csrno, 973 target_ulong val) 974 { 975 RISCVCPU *cpu = env_archcpu(env); 976 977 if (riscv_cpu_mxl(env) == MXL_RV32) { 978 env->vstimecmp = deposit64(env->vstimecmp, 0, 32, (uint64_t)val); 979 } else { 980 env->vstimecmp = val; 981 } 982 983 riscv_timer_write_timecmp(cpu, env->vstimer, env->vstimecmp, 984 env->htimedelta, MIP_VSTIP); 985 986 return RISCV_EXCP_NONE; 987 } 988 989 static RISCVException write_vstimecmph(CPURISCVState *env, int csrno, 990 target_ulong val) 991 { 992 RISCVCPU *cpu = env_archcpu(env); 993 994 env->vstimecmp = deposit64(env->vstimecmp, 32, 32, (uint64_t)val); 995 riscv_timer_write_timecmp(cpu, env->vstimer, env->vstimecmp, 996 env->htimedelta, MIP_VSTIP); 997 998 return RISCV_EXCP_NONE; 999 } 1000 1001 static RISCVException read_stimecmp(CPURISCVState *env, int csrno, 1002 target_ulong *val) 1003 { 1004 if (riscv_cpu_virt_enabled(env)) { 1005 *val = env->vstimecmp; 1006 } else { 1007 *val = env->stimecmp; 1008 } 1009 1010 return RISCV_EXCP_NONE; 1011 } 1012 1013 static RISCVException read_stimecmph(CPURISCVState *env, int csrno, 1014 target_ulong *val) 1015 { 1016 if (riscv_cpu_virt_enabled(env)) { 1017 *val = env->vstimecmp >> 32; 1018 } else { 1019 *val = env->stimecmp >> 32; 1020 } 1021 1022 return RISCV_EXCP_NONE; 1023 } 1024 1025 static RISCVException write_stimecmp(CPURISCVState *env, int csrno, 1026 target_ulong val) 1027 { 1028 RISCVCPU *cpu = env_archcpu(env); 1029 1030 if (riscv_cpu_virt_enabled(env)) { 1031 if (env->hvictl & HVICTL_VTI) { 1032 return RISCV_EXCP_VIRT_INSTRUCTION_FAULT; 1033 } 1034 return write_vstimecmp(env, csrno, val); 1035 } 1036 1037 if (riscv_cpu_mxl(env) == MXL_RV32) { 1038 env->stimecmp = deposit64(env->stimecmp, 0, 32, (uint64_t)val); 1039 } else { 1040 env->stimecmp = val; 1041 } 1042 1043 riscv_timer_write_timecmp(cpu, env->stimer, env->stimecmp, 0, MIP_STIP); 1044 1045 return RISCV_EXCP_NONE; 1046 } 1047 1048 static RISCVException write_stimecmph(CPURISCVState *env, int csrno, 1049 target_ulong val) 1050 { 1051 RISCVCPU *cpu = env_archcpu(env); 1052 1053 if (riscv_cpu_virt_enabled(env)) { 1054 if (env->hvictl & HVICTL_VTI) { 1055 return RISCV_EXCP_VIRT_INSTRUCTION_FAULT; 1056 } 1057 return write_vstimecmph(env, csrno, val); 1058 } 1059 1060 env->stimecmp = deposit64(env->stimecmp, 32, 32, (uint64_t)val); 1061 riscv_timer_write_timecmp(cpu, env->stimer, env->stimecmp, 0, MIP_STIP); 1062 1063 return 
RISCV_EXCP_NONE; 1064 } 1065 1066 /* Machine constants */ 1067 1068 #define M_MODE_INTERRUPTS ((uint64_t)(MIP_MSIP | MIP_MTIP | MIP_MEIP)) 1069 #define S_MODE_INTERRUPTS ((uint64_t)(MIP_SSIP | MIP_STIP | MIP_SEIP | \ 1070 MIP_LCOFIP)) 1071 #define VS_MODE_INTERRUPTS ((uint64_t)(MIP_VSSIP | MIP_VSTIP | MIP_VSEIP)) 1072 #define HS_MODE_INTERRUPTS ((uint64_t)(MIP_SGEIP | VS_MODE_INTERRUPTS)) 1073 1074 #define VSTOPI_NUM_SRCS 5 1075 1076 static const uint64_t delegable_ints = S_MODE_INTERRUPTS | 1077 VS_MODE_INTERRUPTS; 1078 static const uint64_t vs_delegable_ints = VS_MODE_INTERRUPTS; 1079 static const uint64_t all_ints = M_MODE_INTERRUPTS | S_MODE_INTERRUPTS | 1080 HS_MODE_INTERRUPTS; 1081 #define DELEGABLE_EXCPS ((1ULL << (RISCV_EXCP_INST_ADDR_MIS)) | \ 1082 (1ULL << (RISCV_EXCP_INST_ACCESS_FAULT)) | \ 1083 (1ULL << (RISCV_EXCP_ILLEGAL_INST)) | \ 1084 (1ULL << (RISCV_EXCP_BREAKPOINT)) | \ 1085 (1ULL << (RISCV_EXCP_LOAD_ADDR_MIS)) | \ 1086 (1ULL << (RISCV_EXCP_LOAD_ACCESS_FAULT)) | \ 1087 (1ULL << (RISCV_EXCP_STORE_AMO_ADDR_MIS)) | \ 1088 (1ULL << (RISCV_EXCP_STORE_AMO_ACCESS_FAULT)) | \ 1089 (1ULL << (RISCV_EXCP_U_ECALL)) | \ 1090 (1ULL << (RISCV_EXCP_S_ECALL)) | \ 1091 (1ULL << (RISCV_EXCP_VS_ECALL)) | \ 1092 (1ULL << (RISCV_EXCP_M_ECALL)) | \ 1093 (1ULL << (RISCV_EXCP_INST_PAGE_FAULT)) | \ 1094 (1ULL << (RISCV_EXCP_LOAD_PAGE_FAULT)) | \ 1095 (1ULL << (RISCV_EXCP_STORE_PAGE_FAULT)) | \ 1096 (1ULL << (RISCV_EXCP_INST_GUEST_PAGE_FAULT)) | \ 1097 (1ULL << (RISCV_EXCP_LOAD_GUEST_ACCESS_FAULT)) | \ 1098 (1ULL << (RISCV_EXCP_VIRT_INSTRUCTION_FAULT)) | \ 1099 (1ULL << (RISCV_EXCP_STORE_GUEST_AMO_ACCESS_FAULT))) 1100 static const target_ulong vs_delegable_excps = DELEGABLE_EXCPS & 1101 ~((1ULL << (RISCV_EXCP_S_ECALL)) | 1102 (1ULL << (RISCV_EXCP_VS_ECALL)) | 1103 (1ULL << (RISCV_EXCP_M_ECALL)) | 1104 (1ULL << (RISCV_EXCP_INST_GUEST_PAGE_FAULT)) | 1105 (1ULL << (RISCV_EXCP_LOAD_GUEST_ACCESS_FAULT)) | 1106 (1ULL << (RISCV_EXCP_VIRT_INSTRUCTION_FAULT)) | 1107 (1ULL << (RISCV_EXCP_STORE_GUEST_AMO_ACCESS_FAULT))); 1108 static const target_ulong sstatus_v1_10_mask = SSTATUS_SIE | SSTATUS_SPIE | 1109 SSTATUS_UIE | SSTATUS_UPIE | SSTATUS_SPP | SSTATUS_FS | SSTATUS_XS | 1110 SSTATUS_SUM | SSTATUS_MXR | SSTATUS_VS; 1111 static const target_ulong sip_writable_mask = SIP_SSIP | MIP_USIP | MIP_UEIP | 1112 SIP_LCOFIP; 1113 static const target_ulong hip_writable_mask = MIP_VSSIP; 1114 static const target_ulong hvip_writable_mask = MIP_VSSIP | MIP_VSTIP | MIP_VSEIP; 1115 static const target_ulong vsip_writable_mask = MIP_VSSIP; 1116 1117 static const char valid_vm_1_10_32[16] = { 1118 [VM_1_10_MBARE] = 1, 1119 [VM_1_10_SV32] = 1 1120 }; 1121 1122 static const char valid_vm_1_10_64[16] = { 1123 [VM_1_10_MBARE] = 1, 1124 [VM_1_10_SV39] = 1, 1125 [VM_1_10_SV48] = 1, 1126 [VM_1_10_SV57] = 1 1127 }; 1128 1129 /* Machine Information Registers */ 1130 static RISCVException read_zero(CPURISCVState *env, int csrno, 1131 target_ulong *val) 1132 { 1133 *val = 0; 1134 return RISCV_EXCP_NONE; 1135 } 1136 1137 static RISCVException write_ignore(CPURISCVState *env, int csrno, 1138 target_ulong val) 1139 { 1140 return RISCV_EXCP_NONE; 1141 } 1142 1143 static RISCVException read_mvendorid(CPURISCVState *env, int csrno, 1144 target_ulong *val) 1145 { 1146 RISCVCPU *cpu = env_archcpu(env); 1147 1148 *val = cpu->cfg.mvendorid; 1149 return RISCV_EXCP_NONE; 1150 } 1151 1152 static RISCVException read_marchid(CPURISCVState *env, int csrno, 1153 target_ulong *val) 1154 { 1155 RISCVCPU *cpu = env_archcpu(env); 1156 1157 *val = 
cpu->cfg.marchid; 1158 return RISCV_EXCP_NONE; 1159 } 1160 1161 static RISCVException read_mimpid(CPURISCVState *env, int csrno, 1162 target_ulong *val) 1163 { 1164 RISCVCPU *cpu = env_archcpu(env); 1165 1166 *val = cpu->cfg.mimpid; 1167 return RISCV_EXCP_NONE; 1168 } 1169 1170 static RISCVException read_mhartid(CPURISCVState *env, int csrno, 1171 target_ulong *val) 1172 { 1173 *val = env->mhartid; 1174 return RISCV_EXCP_NONE; 1175 } 1176 1177 /* Machine Trap Setup */ 1178 1179 /* We do not store SD explicitly, only compute it on demand. */ 1180 static uint64_t add_status_sd(RISCVMXL xl, uint64_t status) 1181 { 1182 if ((status & MSTATUS_FS) == MSTATUS_FS || 1183 (status & MSTATUS_VS) == MSTATUS_VS || 1184 (status & MSTATUS_XS) == MSTATUS_XS) { 1185 switch (xl) { 1186 case MXL_RV32: 1187 return status | MSTATUS32_SD; 1188 case MXL_RV64: 1189 return status | MSTATUS64_SD; 1190 case MXL_RV128: 1191 return MSTATUSH128_SD; 1192 default: 1193 g_assert_not_reached(); 1194 } 1195 } 1196 return status; 1197 } 1198 1199 static RISCVException read_mstatus(CPURISCVState *env, int csrno, 1200 target_ulong *val) 1201 { 1202 *val = add_status_sd(riscv_cpu_mxl(env), env->mstatus); 1203 return RISCV_EXCP_NONE; 1204 } 1205 1206 static int validate_vm(CPURISCVState *env, target_ulong vm) 1207 { 1208 if (riscv_cpu_mxl(env) == MXL_RV32) { 1209 return valid_vm_1_10_32[vm & 0xf]; 1210 } else { 1211 return valid_vm_1_10_64[vm & 0xf]; 1212 } 1213 } 1214 1215 static RISCVException write_mstatus(CPURISCVState *env, int csrno, 1216 target_ulong val) 1217 { 1218 uint64_t mstatus = env->mstatus; 1219 uint64_t mask = 0; 1220 RISCVMXL xl = riscv_cpu_mxl(env); 1221 1222 /* flush tlb on mstatus fields that affect VM */ 1223 if ((val ^ mstatus) & (MSTATUS_MXR | MSTATUS_MPP | MSTATUS_MPV | 1224 MSTATUS_MPRV | MSTATUS_SUM)) { 1225 tlb_flush(env_cpu(env)); 1226 } 1227 mask = MSTATUS_SIE | MSTATUS_SPIE | MSTATUS_MIE | MSTATUS_MPIE | 1228 MSTATUS_SPP | MSTATUS_MPRV | MSTATUS_SUM | 1229 MSTATUS_MPP | MSTATUS_MXR | MSTATUS_TVM | MSTATUS_TSR | 1230 MSTATUS_TW | MSTATUS_VS; 1231 1232 if (riscv_has_ext(env, RVF)) { 1233 mask |= MSTATUS_FS; 1234 } 1235 1236 if (xl != MXL_RV32 || env->debugger) { 1237 /* 1238 * RV32: MPV and GVA are not in mstatus. The current plan is to 1239 * add them to mstatush. For now, we just don't support it. 
1240 */ 1241 mask |= MSTATUS_MPV | MSTATUS_GVA; 1242 if ((val & MSTATUS64_UXL) != 0) { 1243 mask |= MSTATUS64_UXL; 1244 } 1245 } 1246 1247 mstatus = (mstatus & ~mask) | (val & mask); 1248 1249 if (xl > MXL_RV32) { 1250 /* SXL field is for now read only */ 1251 mstatus = set_field(mstatus, MSTATUS64_SXL, xl); 1252 } 1253 env->mstatus = mstatus; 1254 env->xl = cpu_recompute_xl(env); 1255 1256 return RISCV_EXCP_NONE; 1257 } 1258 1259 static RISCVException read_mstatush(CPURISCVState *env, int csrno, 1260 target_ulong *val) 1261 { 1262 *val = env->mstatus >> 32; 1263 return RISCV_EXCP_NONE; 1264 } 1265 1266 static RISCVException write_mstatush(CPURISCVState *env, int csrno, 1267 target_ulong val) 1268 { 1269 uint64_t valh = (uint64_t)val << 32; 1270 uint64_t mask = MSTATUS_MPV | MSTATUS_GVA; 1271 1272 if ((valh ^ env->mstatus) & (MSTATUS_MPV)) { 1273 tlb_flush(env_cpu(env)); 1274 } 1275 1276 env->mstatus = (env->mstatus & ~mask) | (valh & mask); 1277 1278 return RISCV_EXCP_NONE; 1279 } 1280 1281 static RISCVException read_mstatus_i128(CPURISCVState *env, int csrno, 1282 Int128 *val) 1283 { 1284 *val = int128_make128(env->mstatus, add_status_sd(MXL_RV128, env->mstatus)); 1285 return RISCV_EXCP_NONE; 1286 } 1287 1288 static RISCVException read_misa_i128(CPURISCVState *env, int csrno, 1289 Int128 *val) 1290 { 1291 *val = int128_make128(env->misa_ext, (uint64_t)MXL_RV128 << 62); 1292 return RISCV_EXCP_NONE; 1293 } 1294 1295 static RISCVException read_misa(CPURISCVState *env, int csrno, 1296 target_ulong *val) 1297 { 1298 target_ulong misa; 1299 1300 switch (env->misa_mxl) { 1301 case MXL_RV32: 1302 misa = (target_ulong)MXL_RV32 << 30; 1303 break; 1304 #ifdef TARGET_RISCV64 1305 case MXL_RV64: 1306 misa = (target_ulong)MXL_RV64 << 62; 1307 break; 1308 #endif 1309 default: 1310 g_assert_not_reached(); 1311 } 1312 1313 *val = misa | env->misa_ext; 1314 return RISCV_EXCP_NONE; 1315 } 1316 1317 static RISCVException write_misa(CPURISCVState *env, int csrno, 1318 target_ulong val) 1319 { 1320 if (!riscv_cpu_cfg(env)->misa_w) { 1321 /* drop write to misa */ 1322 return RISCV_EXCP_NONE; 1323 } 1324 1325 /* 'I' or 'E' must be present */ 1326 if (!(val & (RVI | RVE))) { 1327 /* It is not, drop write to misa */ 1328 return RISCV_EXCP_NONE; 1329 } 1330 1331 /* 'E' excludes all other extensions */ 1332 if (val & RVE) { 1333 /* 1334 * when we support 'E' we can do "val = RVE;" however 1335 * for now we just drop writes if 'E' is present. 1336 */ 1337 return RISCV_EXCP_NONE; 1338 } 1339 1340 /* 1341 * misa.MXL writes are not supported by QEMU. 1342 * Drop writes to those bits. 1343 */ 1344 1345 /* Mask extensions that are not supported by this hart */ 1346 val &= env->misa_ext_mask; 1347 1348 /* 'D' depends on 'F', so clear 'D' if 'F' is not present */ 1349 if ((val & RVD) && !(val & RVF)) { 1350 val &= ~RVD; 1351 } 1352 1353 /* 1354 * Suppress 'C' if next instruction is not aligned 1355 * TODO: this should check next_pc 1356 */ 1357 if ((val & RVC) && (GETPC() & ~3) != 0) { 1358 val &= ~RVC; 1359 } 1360 1361 /* If nothing changed, do nothing. 
*/ 1362 if (val == env->misa_ext) { 1363 return RISCV_EXCP_NONE; 1364 } 1365 1366 if (!(val & RVF)) { 1367 env->mstatus &= ~MSTATUS_FS; 1368 } 1369 1370 /* flush translation cache */ 1371 tb_flush(env_cpu(env)); 1372 env->misa_ext = val; 1373 env->xl = riscv_cpu_mxl(env); 1374 return RISCV_EXCP_NONE; 1375 } 1376 1377 static RISCVException read_medeleg(CPURISCVState *env, int csrno, 1378 target_ulong *val) 1379 { 1380 *val = env->medeleg; 1381 return RISCV_EXCP_NONE; 1382 } 1383 1384 static RISCVException write_medeleg(CPURISCVState *env, int csrno, 1385 target_ulong val) 1386 { 1387 env->medeleg = (env->medeleg & ~DELEGABLE_EXCPS) | (val & DELEGABLE_EXCPS); 1388 return RISCV_EXCP_NONE; 1389 } 1390 1391 static RISCVException rmw_mideleg64(CPURISCVState *env, int csrno, 1392 uint64_t *ret_val, 1393 uint64_t new_val, uint64_t wr_mask) 1394 { 1395 uint64_t mask = wr_mask & delegable_ints; 1396 1397 if (ret_val) { 1398 *ret_val = env->mideleg; 1399 } 1400 1401 env->mideleg = (env->mideleg & ~mask) | (new_val & mask); 1402 1403 if (riscv_has_ext(env, RVH)) { 1404 env->mideleg |= HS_MODE_INTERRUPTS; 1405 } 1406 1407 return RISCV_EXCP_NONE; 1408 } 1409 1410 static RISCVException rmw_mideleg(CPURISCVState *env, int csrno, 1411 target_ulong *ret_val, 1412 target_ulong new_val, target_ulong wr_mask) 1413 { 1414 uint64_t rval; 1415 RISCVException ret; 1416 1417 ret = rmw_mideleg64(env, csrno, &rval, new_val, wr_mask); 1418 if (ret_val) { 1419 *ret_val = rval; 1420 } 1421 1422 return ret; 1423 } 1424 1425 static RISCVException rmw_midelegh(CPURISCVState *env, int csrno, 1426 target_ulong *ret_val, 1427 target_ulong new_val, 1428 target_ulong wr_mask) 1429 { 1430 uint64_t rval; 1431 RISCVException ret; 1432 1433 ret = rmw_mideleg64(env, csrno, &rval, 1434 ((uint64_t)new_val) << 32, ((uint64_t)wr_mask) << 32); 1435 if (ret_val) { 1436 *ret_val = rval >> 32; 1437 } 1438 1439 return ret; 1440 } 1441 1442 static RISCVException rmw_mie64(CPURISCVState *env, int csrno, 1443 uint64_t *ret_val, 1444 uint64_t new_val, uint64_t wr_mask) 1445 { 1446 uint64_t mask = wr_mask & all_ints; 1447 1448 if (ret_val) { 1449 *ret_val = env->mie; 1450 } 1451 1452 env->mie = (env->mie & ~mask) | (new_val & mask); 1453 1454 if (!riscv_has_ext(env, RVH)) { 1455 env->mie &= ~((uint64_t)MIP_SGEIP); 1456 } 1457 1458 return RISCV_EXCP_NONE; 1459 } 1460 1461 static RISCVException rmw_mie(CPURISCVState *env, int csrno, 1462 target_ulong *ret_val, 1463 target_ulong new_val, target_ulong wr_mask) 1464 { 1465 uint64_t rval; 1466 RISCVException ret; 1467 1468 ret = rmw_mie64(env, csrno, &rval, new_val, wr_mask); 1469 if (ret_val) { 1470 *ret_val = rval; 1471 } 1472 1473 return ret; 1474 } 1475 1476 static RISCVException rmw_mieh(CPURISCVState *env, int csrno, 1477 target_ulong *ret_val, 1478 target_ulong new_val, target_ulong wr_mask) 1479 { 1480 uint64_t rval; 1481 RISCVException ret; 1482 1483 ret = rmw_mie64(env, csrno, &rval, 1484 ((uint64_t)new_val) << 32, ((uint64_t)wr_mask) << 32); 1485 if (ret_val) { 1486 *ret_val = rval >> 32; 1487 } 1488 1489 return ret; 1490 } 1491 1492 static int read_mtopi(CPURISCVState *env, int csrno, target_ulong *val) 1493 { 1494 int irq; 1495 uint8_t iprio; 1496 1497 irq = riscv_cpu_mirq_pending(env); 1498 if (irq <= 0 || irq > 63) { 1499 *val = 0; 1500 } else { 1501 iprio = env->miprio[irq]; 1502 if (!iprio) { 1503 if (riscv_cpu_default_priority(irq) > IPRIO_DEFAULT_M) { 1504 iprio = IPRIO_MMAXIPRIO; 1505 } 1506 } 1507 *val = (irq & TOPI_IID_MASK) << TOPI_IID_SHIFT; 1508 *val |= iprio; 1509 } 1510 1511 
return RISCV_EXCP_NONE; 1512 } 1513 1514 static int aia_xlate_vs_csrno(CPURISCVState *env, int csrno) 1515 { 1516 if (!riscv_cpu_virt_enabled(env)) { 1517 return csrno; 1518 } 1519 1520 switch (csrno) { 1521 case CSR_SISELECT: 1522 return CSR_VSISELECT; 1523 case CSR_SIREG: 1524 return CSR_VSIREG; 1525 case CSR_STOPEI: 1526 return CSR_VSTOPEI; 1527 default: 1528 return csrno; 1529 }; 1530 } 1531 1532 static int rmw_xiselect(CPURISCVState *env, int csrno, target_ulong *val, 1533 target_ulong new_val, target_ulong wr_mask) 1534 { 1535 target_ulong *iselect; 1536 1537 /* Translate CSR number for VS-mode */ 1538 csrno = aia_xlate_vs_csrno(env, csrno); 1539 1540 /* Find the iselect CSR based on CSR number */ 1541 switch (csrno) { 1542 case CSR_MISELECT: 1543 iselect = &env->miselect; 1544 break; 1545 case CSR_SISELECT: 1546 iselect = &env->siselect; 1547 break; 1548 case CSR_VSISELECT: 1549 iselect = &env->vsiselect; 1550 break; 1551 default: 1552 return RISCV_EXCP_ILLEGAL_INST; 1553 }; 1554 1555 if (val) { 1556 *val = *iselect; 1557 } 1558 1559 wr_mask &= ISELECT_MASK; 1560 if (wr_mask) { 1561 *iselect = (*iselect & ~wr_mask) | (new_val & wr_mask); 1562 } 1563 1564 return RISCV_EXCP_NONE; 1565 } 1566 1567 static int rmw_iprio(target_ulong xlen, 1568 target_ulong iselect, uint8_t *iprio, 1569 target_ulong *val, target_ulong new_val, 1570 target_ulong wr_mask, int ext_irq_no) 1571 { 1572 int i, firq, nirqs; 1573 target_ulong old_val; 1574 1575 if (iselect < ISELECT_IPRIO0 || ISELECT_IPRIO15 < iselect) { 1576 return -EINVAL; 1577 } 1578 if (xlen != 32 && iselect & 0x1) { 1579 return -EINVAL; 1580 } 1581 1582 nirqs = 4 * (xlen / 32); 1583 firq = ((iselect - ISELECT_IPRIO0) / (xlen / 32)) * (nirqs); 1584 1585 old_val = 0; 1586 for (i = 0; i < nirqs; i++) { 1587 old_val |= ((target_ulong)iprio[firq + i]) << (IPRIO_IRQ_BITS * i); 1588 } 1589 1590 if (val) { 1591 *val = old_val; 1592 } 1593 1594 if (wr_mask) { 1595 new_val = (old_val & ~wr_mask) | (new_val & wr_mask); 1596 for (i = 0; i < nirqs; i++) { 1597 /* 1598 * M-level and S-level external IRQ priority always read-only 1599 * zero. This means default priority order is always preferred 1600 * for M-level and S-level external IRQs. 1601 */ 1602 if ((firq + i) == ext_irq_no) { 1603 continue; 1604 } 1605 iprio[firq + i] = (new_val >> (IPRIO_IRQ_BITS * i)) & 0xff; 1606 } 1607 } 1608 1609 return 0; 1610 } 1611 1612 static int rmw_xireg(CPURISCVState *env, int csrno, target_ulong *val, 1613 target_ulong new_val, target_ulong wr_mask) 1614 { 1615 bool virt; 1616 uint8_t *iprio; 1617 int ret = -EINVAL; 1618 target_ulong priv, isel, vgein; 1619 1620 /* Translate CSR number for VS-mode */ 1621 csrno = aia_xlate_vs_csrno(env, csrno); 1622 1623 /* Decode register details from CSR number */ 1624 virt = false; 1625 switch (csrno) { 1626 case CSR_MIREG: 1627 iprio = env->miprio; 1628 isel = env->miselect; 1629 priv = PRV_M; 1630 break; 1631 case CSR_SIREG: 1632 iprio = env->siprio; 1633 isel = env->siselect; 1634 priv = PRV_S; 1635 break; 1636 case CSR_VSIREG: 1637 iprio = env->hviprio; 1638 isel = env->vsiselect; 1639 priv = PRV_S; 1640 virt = true; 1641 break; 1642 default: 1643 goto done; 1644 }; 1645 1646 /* Find the selected guest interrupt file */ 1647 vgein = (virt) ? 
get_field(env->hstatus, HSTATUS_VGEIN) : 0; 1648 1649 if (ISELECT_IPRIO0 <= isel && isel <= ISELECT_IPRIO15) { 1650 /* Local interrupt priority registers not available for VS-mode */ 1651 if (!virt) { 1652 ret = rmw_iprio(riscv_cpu_mxl_bits(env), 1653 isel, iprio, val, new_val, wr_mask, 1654 (priv == PRV_M) ? IRQ_M_EXT : IRQ_S_EXT); 1655 } 1656 } else if (ISELECT_IMSIC_FIRST <= isel && isel <= ISELECT_IMSIC_LAST) { 1657 /* IMSIC registers only available when machine implements it. */ 1658 if (env->aia_ireg_rmw_fn[priv]) { 1659 /* Selected guest interrupt file should not be zero */ 1660 if (virt && (!vgein || env->geilen < vgein)) { 1661 goto done; 1662 } 1663 /* Call machine specific IMSIC register emulation */ 1664 ret = env->aia_ireg_rmw_fn[priv](env->aia_ireg_rmw_fn_arg[priv], 1665 AIA_MAKE_IREG(isel, priv, virt, vgein, 1666 riscv_cpu_mxl_bits(env)), 1667 val, new_val, wr_mask); 1668 } 1669 } 1670 1671 done: 1672 if (ret) { 1673 return (riscv_cpu_virt_enabled(env) && virt) ? 1674 RISCV_EXCP_VIRT_INSTRUCTION_FAULT : RISCV_EXCP_ILLEGAL_INST; 1675 } 1676 return RISCV_EXCP_NONE; 1677 } 1678 1679 static int rmw_xtopei(CPURISCVState *env, int csrno, target_ulong *val, 1680 target_ulong new_val, target_ulong wr_mask) 1681 { 1682 bool virt; 1683 int ret = -EINVAL; 1684 target_ulong priv, vgein; 1685 1686 /* Translate CSR number for VS-mode */ 1687 csrno = aia_xlate_vs_csrno(env, csrno); 1688 1689 /* Decode register details from CSR number */ 1690 virt = false; 1691 switch (csrno) { 1692 case CSR_MTOPEI: 1693 priv = PRV_M; 1694 break; 1695 case CSR_STOPEI: 1696 priv = PRV_S; 1697 break; 1698 case CSR_VSTOPEI: 1699 priv = PRV_S; 1700 virt = true; 1701 break; 1702 default: 1703 goto done; 1704 }; 1705 1706 /* IMSIC CSRs only available when machine implements IMSIC. */ 1707 if (!env->aia_ireg_rmw_fn[priv]) { 1708 goto done; 1709 } 1710 1711 /* Find the selected guest interrupt file */ 1712 vgein = (virt) ? get_field(env->hstatus, HSTATUS_VGEIN) : 0; 1713 1714 /* Selected guest interrupt file should be valid */ 1715 if (virt && (!vgein || env->geilen < vgein)) { 1716 goto done; 1717 } 1718 1719 /* Call machine specific IMSIC register emulation for TOPEI */ 1720 ret = env->aia_ireg_rmw_fn[priv](env->aia_ireg_rmw_fn_arg[priv], 1721 AIA_MAKE_IREG(ISELECT_IMSIC_TOPEI, priv, virt, vgein, 1722 riscv_cpu_mxl_bits(env)), 1723 val, new_val, wr_mask); 1724 1725 done: 1726 if (ret) { 1727 return (riscv_cpu_virt_enabled(env) && virt) ? 
1728 RISCV_EXCP_VIRT_INSTRUCTION_FAULT : RISCV_EXCP_ILLEGAL_INST; 1729 } 1730 return RISCV_EXCP_NONE; 1731 } 1732 1733 static RISCVException read_mtvec(CPURISCVState *env, int csrno, 1734 target_ulong *val) 1735 { 1736 *val = env->mtvec; 1737 return RISCV_EXCP_NONE; 1738 } 1739 1740 static RISCVException write_mtvec(CPURISCVState *env, int csrno, 1741 target_ulong val) 1742 { 1743 /* bits [1:0] encode mode; 0 = direct, 1 = vectored, 2 >= reserved */ 1744 if ((val & 3) < 2) { 1745 env->mtvec = val; 1746 } else { 1747 qemu_log_mask(LOG_UNIMP, "CSR_MTVEC: reserved mode not supported\n"); 1748 } 1749 return RISCV_EXCP_NONE; 1750 } 1751 1752 static RISCVException read_mcountinhibit(CPURISCVState *env, int csrno, 1753 target_ulong *val) 1754 { 1755 *val = env->mcountinhibit; 1756 return RISCV_EXCP_NONE; 1757 } 1758 1759 static RISCVException write_mcountinhibit(CPURISCVState *env, int csrno, 1760 target_ulong val) 1761 { 1762 int cidx; 1763 PMUCTRState *counter; 1764 1765 env->mcountinhibit = val; 1766 1767 /* Check if any other counter is also monitoring cycles/instructions */ 1768 for (cidx = 0; cidx < RV_MAX_MHPMCOUNTERS; cidx++) { 1769 if (!get_field(env->mcountinhibit, BIT(cidx))) { 1770 counter = &env->pmu_ctrs[cidx]; 1771 counter->started = true; 1772 } 1773 } 1774 1775 return RISCV_EXCP_NONE; 1776 } 1777 1778 static RISCVException read_mcounteren(CPURISCVState *env, int csrno, 1779 target_ulong *val) 1780 { 1781 *val = env->mcounteren; 1782 return RISCV_EXCP_NONE; 1783 } 1784 1785 static RISCVException write_mcounteren(CPURISCVState *env, int csrno, 1786 target_ulong val) 1787 { 1788 env->mcounteren = val; 1789 return RISCV_EXCP_NONE; 1790 } 1791 1792 /* Machine Trap Handling */ 1793 static RISCVException read_mscratch_i128(CPURISCVState *env, int csrno, 1794 Int128 *val) 1795 { 1796 *val = int128_make128(env->mscratch, env->mscratchh); 1797 return RISCV_EXCP_NONE; 1798 } 1799 1800 static RISCVException write_mscratch_i128(CPURISCVState *env, int csrno, 1801 Int128 val) 1802 { 1803 env->mscratch = int128_getlo(val); 1804 env->mscratchh = int128_gethi(val); 1805 return RISCV_EXCP_NONE; 1806 } 1807 1808 static RISCVException read_mscratch(CPURISCVState *env, int csrno, 1809 target_ulong *val) 1810 { 1811 *val = env->mscratch; 1812 return RISCV_EXCP_NONE; 1813 } 1814 1815 static RISCVException write_mscratch(CPURISCVState *env, int csrno, 1816 target_ulong val) 1817 { 1818 env->mscratch = val; 1819 return RISCV_EXCP_NONE; 1820 } 1821 1822 static RISCVException read_mepc(CPURISCVState *env, int csrno, 1823 target_ulong *val) 1824 { 1825 *val = env->mepc; 1826 return RISCV_EXCP_NONE; 1827 } 1828 1829 static RISCVException write_mepc(CPURISCVState *env, int csrno, 1830 target_ulong val) 1831 { 1832 env->mepc = val; 1833 return RISCV_EXCP_NONE; 1834 } 1835 1836 static RISCVException read_mcause(CPURISCVState *env, int csrno, 1837 target_ulong *val) 1838 { 1839 *val = env->mcause; 1840 return RISCV_EXCP_NONE; 1841 } 1842 1843 static RISCVException write_mcause(CPURISCVState *env, int csrno, 1844 target_ulong val) 1845 { 1846 env->mcause = val; 1847 return RISCV_EXCP_NONE; 1848 } 1849 1850 static RISCVException read_mtval(CPURISCVState *env, int csrno, 1851 target_ulong *val) 1852 { 1853 *val = env->mtval; 1854 return RISCV_EXCP_NONE; 1855 } 1856 1857 static RISCVException write_mtval(CPURISCVState *env, int csrno, 1858 target_ulong val) 1859 { 1860 env->mtval = val; 1861 return RISCV_EXCP_NONE; 1862 } 1863 1864 /* Execution environment configuration setup */ 1865 static RISCVException 
read_menvcfg(CPURISCVState *env, int csrno, 1866 target_ulong *val) 1867 { 1868 *val = env->menvcfg; 1869 return RISCV_EXCP_NONE; 1870 } 1871 1872 static RISCVException write_menvcfg(CPURISCVState *env, int csrno, 1873 target_ulong val) 1874 { 1875 uint64_t mask = MENVCFG_FIOM | MENVCFG_CBIE | MENVCFG_CBCFE | MENVCFG_CBZE; 1876 1877 if (riscv_cpu_mxl(env) == MXL_RV64) { 1878 mask |= MENVCFG_PBMTE | MENVCFG_STCE; 1879 } 1880 env->menvcfg = (env->menvcfg & ~mask) | (val & mask); 1881 1882 return RISCV_EXCP_NONE; 1883 } 1884 1885 static RISCVException read_menvcfgh(CPURISCVState *env, int csrno, 1886 target_ulong *val) 1887 { 1888 *val = env->menvcfg >> 32; 1889 return RISCV_EXCP_NONE; 1890 } 1891 1892 static RISCVException write_menvcfgh(CPURISCVState *env, int csrno, 1893 target_ulong val) 1894 { 1895 uint64_t mask = MENVCFG_PBMTE | MENVCFG_STCE; 1896 uint64_t valh = (uint64_t)val << 32; 1897 1898 env->menvcfg = (env->menvcfg & ~mask) | (valh & mask); 1899 1900 return RISCV_EXCP_NONE; 1901 } 1902 1903 static RISCVException read_senvcfg(CPURISCVState *env, int csrno, 1904 target_ulong *val) 1905 { 1906 RISCVException ret; 1907 1908 ret = smstateen_acc_ok(env, 0, SMSTATEEN0_HSENVCFG); 1909 if (ret != RISCV_EXCP_NONE) { 1910 return ret; 1911 } 1912 1913 *val = env->senvcfg; 1914 return RISCV_EXCP_NONE; 1915 } 1916 1917 static RISCVException write_senvcfg(CPURISCVState *env, int csrno, 1918 target_ulong val) 1919 { 1920 uint64_t mask = SENVCFG_FIOM | SENVCFG_CBIE | SENVCFG_CBCFE | SENVCFG_CBZE; 1921 RISCVException ret; 1922 1923 ret = smstateen_acc_ok(env, 0, SMSTATEEN0_HSENVCFG); 1924 if (ret != RISCV_EXCP_NONE) { 1925 return ret; 1926 } 1927 1928 env->senvcfg = (env->senvcfg & ~mask) | (val & mask); 1929 return RISCV_EXCP_NONE; 1930 } 1931 1932 static RISCVException read_henvcfg(CPURISCVState *env, int csrno, 1933 target_ulong *val) 1934 { 1935 RISCVException ret; 1936 1937 ret = smstateen_acc_ok(env, 0, SMSTATEEN0_HSENVCFG); 1938 if (ret != RISCV_EXCP_NONE) { 1939 return ret; 1940 } 1941 1942 *val = env->henvcfg; 1943 return RISCV_EXCP_NONE; 1944 } 1945 1946 static RISCVException write_henvcfg(CPURISCVState *env, int csrno, 1947 target_ulong val) 1948 { 1949 uint64_t mask = HENVCFG_FIOM | HENVCFG_CBIE | HENVCFG_CBCFE | HENVCFG_CBZE; 1950 RISCVException ret; 1951 1952 ret = smstateen_acc_ok(env, 0, SMSTATEEN0_HSENVCFG); 1953 if (ret != RISCV_EXCP_NONE) { 1954 return ret; 1955 } 1956 1957 if (riscv_cpu_mxl(env) == MXL_RV64) { 1958 mask |= HENVCFG_PBMTE | HENVCFG_STCE; 1959 } 1960 1961 env->henvcfg = (env->henvcfg & ~mask) | (val & mask); 1962 1963 return RISCV_EXCP_NONE; 1964 } 1965 1966 static RISCVException read_henvcfgh(CPURISCVState *env, int csrno, 1967 target_ulong *val) 1968 { 1969 RISCVException ret; 1970 1971 ret = smstateen_acc_ok(env, 0, SMSTATEEN0_HSENVCFG); 1972 if (ret != RISCV_EXCP_NONE) { 1973 return ret; 1974 } 1975 1976 *val = env->henvcfg >> 32; 1977 return RISCV_EXCP_NONE; 1978 } 1979 1980 static RISCVException write_henvcfgh(CPURISCVState *env, int csrno, 1981 target_ulong val) 1982 { 1983 uint64_t mask = HENVCFG_PBMTE | HENVCFG_STCE; 1984 uint64_t valh = (uint64_t)val << 32; 1985 RISCVException ret; 1986 1987 ret = smstateen_acc_ok(env, 0, SMSTATEEN0_HSENVCFG); 1988 if (ret != RISCV_EXCP_NONE) { 1989 return ret; 1990 } 1991 1992 env->henvcfg = (env->henvcfg & ~mask) | (valh & mask); 1993 return RISCV_EXCP_NONE; 1994 } 1995 1996 static RISCVException read_mstateen(CPURISCVState *env, int csrno, 1997 target_ulong *val) 1998 { 1999 *val = env->mstateen[csrno - 
CSR_MSTATEEN0]; 2000 2001 return RISCV_EXCP_NONE; 2002 } 2003 2004 static RISCVException write_mstateen(CPURISCVState *env, int csrno, 2005 uint64_t wr_mask, target_ulong new_val) 2006 { 2007 uint64_t *reg; 2008 2009 reg = &env->mstateen[csrno - CSR_MSTATEEN0]; 2010 *reg = (*reg & ~wr_mask) | (new_val & wr_mask); 2011 2012 return RISCV_EXCP_NONE; 2013 } 2014 2015 static RISCVException write_mstateen0(CPURISCVState *env, int csrno, 2016 target_ulong new_val) 2017 { 2018 uint64_t wr_mask = SMSTATEEN_STATEEN | SMSTATEEN0_HSENVCFG; 2019 2020 return write_mstateen(env, csrno, wr_mask, new_val); 2021 } 2022 2023 static RISCVException write_mstateen_1_3(CPURISCVState *env, int csrno, 2024 target_ulong new_val) 2025 { 2026 return write_mstateen(env, csrno, SMSTATEEN_STATEEN, new_val); 2027 } 2028 2029 static RISCVException read_mstateenh(CPURISCVState *env, int csrno, 2030 target_ulong *val) 2031 { 2032 *val = env->mstateen[csrno - CSR_MSTATEEN0H] >> 32; 2033 2034 return RISCV_EXCP_NONE; 2035 } 2036 2037 static RISCVException write_mstateenh(CPURISCVState *env, int csrno, 2038 uint64_t wr_mask, target_ulong new_val) 2039 { 2040 uint64_t *reg, val; 2041 2042 reg = &env->mstateen[csrno - CSR_MSTATEEN0H]; 2043 val = (uint64_t)new_val << 32; 2044 val |= *reg & 0xFFFFFFFF; 2045 *reg = (*reg & ~wr_mask) | (val & wr_mask); 2046 2047 return RISCV_EXCP_NONE; 2048 } 2049 2050 static RISCVException write_mstateen0h(CPURISCVState *env, int csrno, 2051 target_ulong new_val) 2052 { 2053 uint64_t wr_mask = SMSTATEEN_STATEEN | SMSTATEEN0_HSENVCFG; 2054 2055 return write_mstateenh(env, csrno, wr_mask, new_val); 2056 } 2057 2058 static RISCVException write_mstateenh_1_3(CPURISCVState *env, int csrno, 2059 target_ulong new_val) 2060 { 2061 return write_mstateenh(env, csrno, SMSTATEEN_STATEEN, new_val); 2062 } 2063 2064 static RISCVException read_hstateen(CPURISCVState *env, int csrno, 2065 target_ulong *val) 2066 { 2067 int index = csrno - CSR_HSTATEEN0; 2068 2069 *val = env->hstateen[index] & env->mstateen[index]; 2070 2071 return RISCV_EXCP_NONE; 2072 } 2073 2074 static RISCVException write_hstateen(CPURISCVState *env, int csrno, 2075 uint64_t mask, target_ulong new_val) 2076 { 2077 int index = csrno - CSR_HSTATEEN0; 2078 uint64_t *reg, wr_mask; 2079 2080 reg = &env->hstateen[index]; 2081 wr_mask = env->mstateen[index] & mask; 2082 *reg = (*reg & ~wr_mask) | (new_val & wr_mask); 2083 2084 return RISCV_EXCP_NONE; 2085 } 2086 2087 static RISCVException write_hstateen0(CPURISCVState *env, int csrno, 2088 target_ulong new_val) 2089 { 2090 uint64_t wr_mask = SMSTATEEN_STATEEN | SMSTATEEN0_HSENVCFG; 2091 2092 return write_hstateen(env, csrno, wr_mask, new_val); 2093 } 2094 2095 static RISCVException write_hstateen_1_3(CPURISCVState *env, int csrno, 2096 target_ulong new_val) 2097 { 2098 return write_hstateen(env, csrno, SMSTATEEN_STATEEN, new_val); 2099 } 2100 2101 static RISCVException read_hstateenh(CPURISCVState *env, int csrno, 2102 target_ulong *val) 2103 { 2104 int index = csrno - CSR_HSTATEEN0H; 2105 2106 *val = (env->hstateen[index] >> 32) & (env->mstateen[index] >> 32); 2107 2108 return RISCV_EXCP_NONE; 2109 } 2110 2111 static RISCVException write_hstateenh(CPURISCVState *env, int csrno, 2112 uint64_t mask, target_ulong new_val) 2113 { 2114 int index = csrno - CSR_HSTATEEN0H; 2115 uint64_t *reg, wr_mask, val; 2116 2117 reg = &env->hstateen[index]; 2118 val = (uint64_t)new_val << 32; 2119 val |= *reg & 0xFFFFFFFF; 2120 wr_mask = env->mstateen[index] & mask; 2121 *reg = (*reg & ~wr_mask) | (val & wr_mask); 2122 
2123 return RISCV_EXCP_NONE; 2124 } 2125 2126 static RISCVException write_hstateen0h(CPURISCVState *env, int csrno, 2127 target_ulong new_val) 2128 { 2129 uint64_t wr_mask = SMSTATEEN_STATEEN | SMSTATEEN0_HSENVCFG; 2130 2131 return write_hstateenh(env, csrno, wr_mask, new_val); 2132 } 2133 2134 static RISCVException write_hstateenh_1_3(CPURISCVState *env, int csrno, 2135 target_ulong new_val) 2136 { 2137 return write_hstateenh(env, csrno, SMSTATEEN_STATEEN, new_val); 2138 } 2139 2140 static RISCVException read_sstateen(CPURISCVState *env, int csrno, 2141 target_ulong *val) 2142 { 2143 bool virt = riscv_cpu_virt_enabled(env); 2144 int index = csrno - CSR_SSTATEEN0; 2145 2146 *val = env->sstateen[index] & env->mstateen[index]; 2147 if (virt) { 2148 *val &= env->hstateen[index]; 2149 } 2150 2151 return RISCV_EXCP_NONE; 2152 } 2153 2154 static RISCVException write_sstateen(CPURISCVState *env, int csrno, 2155 uint64_t mask, target_ulong new_val) 2156 { 2157 bool virt = riscv_cpu_virt_enabled(env); 2158 int index = csrno - CSR_SSTATEEN0; 2159 uint64_t wr_mask; 2160 uint64_t *reg; 2161 2162 wr_mask = env->mstateen[index] & mask; 2163 if (virt) { 2164 wr_mask &= env->hstateen[index]; 2165 } 2166 2167 reg = &env->sstateen[index]; 2168 *reg = (*reg & ~wr_mask) | (new_val & wr_mask); 2169 2170 return RISCV_EXCP_NONE; 2171 } 2172 2173 static RISCVException write_sstateen0(CPURISCVState *env, int csrno, 2174 target_ulong new_val) 2175 { 2176 uint64_t wr_mask = SMSTATEEN_STATEEN | SMSTATEEN0_HSENVCFG; 2177 2178 return write_sstateen(env, csrno, wr_mask, new_val); 2179 } 2180 2181 static RISCVException write_sstateen_1_3(CPURISCVState *env, int csrno, 2182 target_ulong new_val) 2183 { 2184 return write_sstateen(env, csrno, SMSTATEEN_STATEEN, new_val); 2185 } 2186 2187 static RISCVException rmw_mip64(CPURISCVState *env, int csrno, 2188 uint64_t *ret_val, 2189 uint64_t new_val, uint64_t wr_mask) 2190 { 2191 RISCVCPU *cpu = env_archcpu(env); 2192 uint64_t old_mip, mask = wr_mask & delegable_ints; 2193 uint32_t gin; 2194 2195 if (mask & MIP_SEIP) { 2196 env->software_seip = new_val & MIP_SEIP; 2197 new_val |= env->external_seip * MIP_SEIP; 2198 } 2199 2200 if (cpu->cfg.ext_sstc && (env->priv == PRV_M) && 2201 get_field(env->menvcfg, MENVCFG_STCE)) { 2202 /* sstc extension forbids STIP & VSTIP to be writeable in mip */ 2203 mask = mask & ~(MIP_STIP | MIP_VSTIP); 2204 } 2205 2206 if (mask) { 2207 old_mip = riscv_cpu_update_mip(cpu, mask, (new_val & mask)); 2208 } else { 2209 old_mip = env->mip; 2210 } 2211 2212 if (csrno != CSR_HVIP) { 2213 gin = get_field(env->hstatus, HSTATUS_VGEIN); 2214 old_mip |= (env->hgeip & ((target_ulong)1 << gin)) ? MIP_VSEIP : 0; 2215 old_mip |= env->vstime_irq ? 
MIP_VSTIP : 0; 2216 } 2217 2218 if (ret_val) { 2219 *ret_val = old_mip; 2220 } 2221 2222 return RISCV_EXCP_NONE; 2223 } 2224 2225 static RISCVException rmw_mip(CPURISCVState *env, int csrno, 2226 target_ulong *ret_val, 2227 target_ulong new_val, target_ulong wr_mask) 2228 { 2229 uint64_t rval; 2230 RISCVException ret; 2231 2232 ret = rmw_mip64(env, csrno, &rval, new_val, wr_mask); 2233 if (ret_val) { 2234 *ret_val = rval; 2235 } 2236 2237 return ret; 2238 } 2239 2240 static RISCVException rmw_miph(CPURISCVState *env, int csrno, 2241 target_ulong *ret_val, 2242 target_ulong new_val, target_ulong wr_mask) 2243 { 2244 uint64_t rval; 2245 RISCVException ret; 2246 2247 ret = rmw_mip64(env, csrno, &rval, 2248 ((uint64_t)new_val) << 32, ((uint64_t)wr_mask) << 32); 2249 if (ret_val) { 2250 *ret_val = rval >> 32; 2251 } 2252 2253 return ret; 2254 } 2255 2256 /* Supervisor Trap Setup */ 2257 static RISCVException read_sstatus_i128(CPURISCVState *env, int csrno, 2258 Int128 *val) 2259 { 2260 uint64_t mask = sstatus_v1_10_mask; 2261 uint64_t sstatus = env->mstatus & mask; 2262 if (env->xl != MXL_RV32 || env->debugger) { 2263 mask |= SSTATUS64_UXL; 2264 } 2265 2266 *val = int128_make128(sstatus, add_status_sd(MXL_RV128, sstatus)); 2267 return RISCV_EXCP_NONE; 2268 } 2269 2270 static RISCVException read_sstatus(CPURISCVState *env, int csrno, 2271 target_ulong *val) 2272 { 2273 target_ulong mask = (sstatus_v1_10_mask); 2274 if (env->xl != MXL_RV32 || env->debugger) { 2275 mask |= SSTATUS64_UXL; 2276 } 2277 /* TODO: Use SXL not MXL. */ 2278 *val = add_status_sd(riscv_cpu_mxl(env), env->mstatus & mask); 2279 return RISCV_EXCP_NONE; 2280 } 2281 2282 static RISCVException write_sstatus(CPURISCVState *env, int csrno, 2283 target_ulong val) 2284 { 2285 target_ulong mask = (sstatus_v1_10_mask); 2286 2287 if (env->xl != MXL_RV32 || env->debugger) { 2288 if ((val & SSTATUS64_UXL) != 0) { 2289 mask |= SSTATUS64_UXL; 2290 } 2291 } 2292 target_ulong newval = (env->mstatus & ~mask) | (val & mask); 2293 return write_mstatus(env, CSR_MSTATUS, newval); 2294 } 2295 2296 static RISCVException rmw_vsie64(CPURISCVState *env, int csrno, 2297 uint64_t *ret_val, 2298 uint64_t new_val, uint64_t wr_mask) 2299 { 2300 RISCVException ret; 2301 uint64_t rval, mask = env->hideleg & VS_MODE_INTERRUPTS; 2302 2303 /* Bring VS-level bits to correct position */ 2304 new_val = (new_val & (VS_MODE_INTERRUPTS >> 1)) << 1; 2305 wr_mask = (wr_mask & (VS_MODE_INTERRUPTS >> 1)) << 1; 2306 2307 ret = rmw_mie64(env, csrno, &rval, new_val, wr_mask & mask); 2308 if (ret_val) { 2309 *ret_val = (rval & mask) >> 1; 2310 } 2311 2312 return ret; 2313 } 2314 2315 static RISCVException rmw_vsie(CPURISCVState *env, int csrno, 2316 target_ulong *ret_val, 2317 target_ulong new_val, target_ulong wr_mask) 2318 { 2319 uint64_t rval; 2320 RISCVException ret; 2321 2322 ret = rmw_vsie64(env, csrno, &rval, new_val, wr_mask); 2323 if (ret_val) { 2324 *ret_val = rval; 2325 } 2326 2327 return ret; 2328 } 2329 2330 static RISCVException rmw_vsieh(CPURISCVState *env, int csrno, 2331 target_ulong *ret_val, 2332 target_ulong new_val, target_ulong wr_mask) 2333 { 2334 uint64_t rval; 2335 RISCVException ret; 2336 2337 ret = rmw_vsie64(env, csrno, &rval, 2338 ((uint64_t)new_val) << 32, ((uint64_t)wr_mask) << 32); 2339 if (ret_val) { 2340 *ret_val = rval >> 32; 2341 } 2342 2343 return ret; 2344 } 2345 2346 static RISCVException rmw_sie64(CPURISCVState *env, int csrno, 2347 uint64_t *ret_val, 2348 uint64_t new_val, uint64_t wr_mask) 2349 { 2350 RISCVException ret; 2351 uint64_t 
mask = env->mideleg & S_MODE_INTERRUPTS; 2352 2353 if (riscv_cpu_virt_enabled(env)) { 2354 if (env->hvictl & HVICTL_VTI) { 2355 return RISCV_EXCP_VIRT_INSTRUCTION_FAULT; 2356 } 2357 ret = rmw_vsie64(env, CSR_VSIE, ret_val, new_val, wr_mask); 2358 } else { 2359 ret = rmw_mie64(env, csrno, ret_val, new_val, wr_mask & mask); 2360 } 2361 2362 if (ret_val) { 2363 *ret_val &= mask; 2364 } 2365 2366 return ret; 2367 } 2368 2369 static RISCVException rmw_sie(CPURISCVState *env, int csrno, 2370 target_ulong *ret_val, 2371 target_ulong new_val, target_ulong wr_mask) 2372 { 2373 uint64_t rval; 2374 RISCVException ret; 2375 2376 ret = rmw_sie64(env, csrno, &rval, new_val, wr_mask); 2377 if (ret == RISCV_EXCP_NONE && ret_val) { 2378 *ret_val = rval; 2379 } 2380 2381 return ret; 2382 } 2383 2384 static RISCVException rmw_sieh(CPURISCVState *env, int csrno, 2385 target_ulong *ret_val, 2386 target_ulong new_val, target_ulong wr_mask) 2387 { 2388 uint64_t rval; 2389 RISCVException ret; 2390 2391 ret = rmw_sie64(env, csrno, &rval, 2392 ((uint64_t)new_val) << 32, ((uint64_t)wr_mask) << 32); 2393 if (ret_val) { 2394 *ret_val = rval >> 32; 2395 } 2396 2397 return ret; 2398 } 2399 2400 static RISCVException read_stvec(CPURISCVState *env, int csrno, 2401 target_ulong *val) 2402 { 2403 *val = env->stvec; 2404 return RISCV_EXCP_NONE; 2405 } 2406 2407 static RISCVException write_stvec(CPURISCVState *env, int csrno, 2408 target_ulong val) 2409 { 2410 /* bits [1:0] encode mode; 0 = direct, 1 = vectored, 2 >= reserved */ 2411 if ((val & 3) < 2) { 2412 env->stvec = val; 2413 } else { 2414 qemu_log_mask(LOG_UNIMP, "CSR_STVEC: reserved mode not supported\n"); 2415 } 2416 return RISCV_EXCP_NONE; 2417 } 2418 2419 static RISCVException read_scounteren(CPURISCVState *env, int csrno, 2420 target_ulong *val) 2421 { 2422 *val = env->scounteren; 2423 return RISCV_EXCP_NONE; 2424 } 2425 2426 static RISCVException write_scounteren(CPURISCVState *env, int csrno, 2427 target_ulong val) 2428 { 2429 env->scounteren = val; 2430 return RISCV_EXCP_NONE; 2431 } 2432 2433 /* Supervisor Trap Handling */ 2434 static RISCVException read_sscratch_i128(CPURISCVState *env, int csrno, 2435 Int128 *val) 2436 { 2437 *val = int128_make128(env->sscratch, env->sscratchh); 2438 return RISCV_EXCP_NONE; 2439 } 2440 2441 static RISCVException write_sscratch_i128(CPURISCVState *env, int csrno, 2442 Int128 val) 2443 { 2444 env->sscratch = int128_getlo(val); 2445 env->sscratchh = int128_gethi(val); 2446 return RISCV_EXCP_NONE; 2447 } 2448 2449 static RISCVException read_sscratch(CPURISCVState *env, int csrno, 2450 target_ulong *val) 2451 { 2452 *val = env->sscratch; 2453 return RISCV_EXCP_NONE; 2454 } 2455 2456 static RISCVException write_sscratch(CPURISCVState *env, int csrno, 2457 target_ulong val) 2458 { 2459 env->sscratch = val; 2460 return RISCV_EXCP_NONE; 2461 } 2462 2463 static RISCVException read_sepc(CPURISCVState *env, int csrno, 2464 target_ulong *val) 2465 { 2466 *val = env->sepc; 2467 return RISCV_EXCP_NONE; 2468 } 2469 2470 static RISCVException write_sepc(CPURISCVState *env, int csrno, 2471 target_ulong val) 2472 { 2473 env->sepc = val; 2474 return RISCV_EXCP_NONE; 2475 } 2476 2477 static RISCVException read_scause(CPURISCVState *env, int csrno, 2478 target_ulong *val) 2479 { 2480 *val = env->scause; 2481 return RISCV_EXCP_NONE; 2482 } 2483 2484 static RISCVException write_scause(CPURISCVState *env, int csrno, 2485 target_ulong val) 2486 { 2487 env->scause = val; 2488 return RISCV_EXCP_NONE; 2489 } 2490 2491 static RISCVException 
read_stval(CPURISCVState *env, int csrno, 2492 target_ulong *val) 2493 { 2494 *val = env->stval; 2495 return RISCV_EXCP_NONE; 2496 } 2497 2498 static RISCVException write_stval(CPURISCVState *env, int csrno, 2499 target_ulong val) 2500 { 2501 env->stval = val; 2502 return RISCV_EXCP_NONE; 2503 } 2504 2505 static RISCVException rmw_vsip64(CPURISCVState *env, int csrno, 2506 uint64_t *ret_val, 2507 uint64_t new_val, uint64_t wr_mask) 2508 { 2509 RISCVException ret; 2510 uint64_t rval, mask = env->hideleg & VS_MODE_INTERRUPTS; 2511 2512 /* Bring VS-level bits to correct position */ 2513 new_val = (new_val & (VS_MODE_INTERRUPTS >> 1)) << 1; 2514 wr_mask = (wr_mask & (VS_MODE_INTERRUPTS >> 1)) << 1; 2515 2516 ret = rmw_mip64(env, csrno, &rval, new_val, 2517 wr_mask & mask & vsip_writable_mask); 2518 if (ret_val) { 2519 *ret_val = (rval & mask) >> 1; 2520 } 2521 2522 return ret; 2523 } 2524 2525 static RISCVException rmw_vsip(CPURISCVState *env, int csrno, 2526 target_ulong *ret_val, 2527 target_ulong new_val, target_ulong wr_mask) 2528 { 2529 uint64_t rval; 2530 RISCVException ret; 2531 2532 ret = rmw_vsip64(env, csrno, &rval, new_val, wr_mask); 2533 if (ret_val) { 2534 *ret_val = rval; 2535 } 2536 2537 return ret; 2538 } 2539 2540 static RISCVException rmw_vsiph(CPURISCVState *env, int csrno, 2541 target_ulong *ret_val, 2542 target_ulong new_val, target_ulong wr_mask) 2543 { 2544 uint64_t rval; 2545 RISCVException ret; 2546 2547 ret = rmw_vsip64(env, csrno, &rval, 2548 ((uint64_t)new_val) << 32, ((uint64_t)wr_mask) << 32); 2549 if (ret_val) { 2550 *ret_val = rval >> 32; 2551 } 2552 2553 return ret; 2554 } 2555 2556 static RISCVException rmw_sip64(CPURISCVState *env, int csrno, 2557 uint64_t *ret_val, 2558 uint64_t new_val, uint64_t wr_mask) 2559 { 2560 RISCVException ret; 2561 uint64_t mask = env->mideleg & sip_writable_mask; 2562 2563 if (riscv_cpu_virt_enabled(env)) { 2564 if (env->hvictl & HVICTL_VTI) { 2565 return RISCV_EXCP_VIRT_INSTRUCTION_FAULT; 2566 } 2567 ret = rmw_vsip64(env, CSR_VSIP, ret_val, new_val, wr_mask); 2568 } else { 2569 ret = rmw_mip64(env, csrno, ret_val, new_val, wr_mask & mask); 2570 } 2571 2572 if (ret_val) { 2573 *ret_val &= env->mideleg & S_MODE_INTERRUPTS; 2574 } 2575 2576 return ret; 2577 } 2578 2579 static RISCVException rmw_sip(CPURISCVState *env, int csrno, 2580 target_ulong *ret_val, 2581 target_ulong new_val, target_ulong wr_mask) 2582 { 2583 uint64_t rval; 2584 RISCVException ret; 2585 2586 ret = rmw_sip64(env, csrno, &rval, new_val, wr_mask); 2587 if (ret_val) { 2588 *ret_val = rval; 2589 } 2590 2591 return ret; 2592 } 2593 2594 static RISCVException rmw_siph(CPURISCVState *env, int csrno, 2595 target_ulong *ret_val, 2596 target_ulong new_val, target_ulong wr_mask) 2597 { 2598 uint64_t rval; 2599 RISCVException ret; 2600 2601 ret = rmw_sip64(env, csrno, &rval, 2602 ((uint64_t)new_val) << 32, ((uint64_t)wr_mask) << 32); 2603 if (ret_val) { 2604 *ret_val = rval >> 32; 2605 } 2606 2607 return ret; 2608 } 2609 2610 /* Supervisor Protection and Translation */ 2611 static RISCVException read_satp(CPURISCVState *env, int csrno, 2612 target_ulong *val) 2613 { 2614 if (!riscv_cpu_cfg(env)->mmu) { 2615 *val = 0; 2616 return RISCV_EXCP_NONE; 2617 } 2618 2619 if (env->priv == PRV_S && get_field(env->mstatus, MSTATUS_TVM)) { 2620 return RISCV_EXCP_ILLEGAL_INST; 2621 } else { 2622 *val = env->satp; 2623 } 2624 2625 return RISCV_EXCP_NONE; 2626 } 2627 2628 static RISCVException write_satp(CPURISCVState *env, int csrno, 2629 target_ulong val) 2630 { 2631 target_ulong vm, 
mask; 2632 2633 if (!riscv_cpu_cfg(env)->mmu) { 2634 return RISCV_EXCP_NONE; 2635 } 2636 2637 if (riscv_cpu_mxl(env) == MXL_RV32) { 2638 vm = validate_vm(env, get_field(val, SATP32_MODE)); 2639 mask = (val ^ env->satp) & (SATP32_MODE | SATP32_ASID | SATP32_PPN); 2640 } else { 2641 vm = validate_vm(env, get_field(val, SATP64_MODE)); 2642 mask = (val ^ env->satp) & (SATP64_MODE | SATP64_ASID | SATP64_PPN); 2643 } 2644 2645 if (vm && mask) { 2646 if (env->priv == PRV_S && get_field(env->mstatus, MSTATUS_TVM)) { 2647 return RISCV_EXCP_ILLEGAL_INST; 2648 } else { 2649 /* 2650 * The ISA defines SATP.MODE=Bare as "no translation", but we still 2651 * pass these through QEMU's TLB emulation as it improves 2652 * performance. Flushing the TLB on SATP writes with paging 2653 * enabled avoids leaking those invalid cached mappings. 2654 */ 2655 tlb_flush(env_cpu(env)); 2656 env->satp = val; 2657 } 2658 } 2659 return RISCV_EXCP_NONE; 2660 } 2661 2662 static int read_vstopi(CPURISCVState *env, int csrno, target_ulong *val) 2663 { 2664 int irq, ret; 2665 target_ulong topei; 2666 uint64_t vseip, vsgein; 2667 uint32_t iid, iprio, hviid, hviprio, gein; 2668 uint32_t s, scount = 0, siid[VSTOPI_NUM_SRCS], siprio[VSTOPI_NUM_SRCS]; 2669 2670 gein = get_field(env->hstatus, HSTATUS_VGEIN); 2671 hviid = get_field(env->hvictl, HVICTL_IID); 2672 hviprio = get_field(env->hvictl, HVICTL_IPRIO); 2673 2674 if (gein) { 2675 vsgein = (env->hgeip & (1ULL << gein)) ? MIP_VSEIP : 0; 2676 vseip = env->mie & (env->mip | vsgein) & MIP_VSEIP; 2677 if (gein <= env->geilen && vseip) { 2678 siid[scount] = IRQ_S_EXT; 2679 siprio[scount] = IPRIO_MMAXIPRIO + 1; 2680 if (env->aia_ireg_rmw_fn[PRV_S]) { 2681 /* 2682 * Call machine specific IMSIC register emulation for 2683 * reading TOPEI. 2684 */ 2685 ret = env->aia_ireg_rmw_fn[PRV_S]( 2686 env->aia_ireg_rmw_fn_arg[PRV_S], 2687 AIA_MAKE_IREG(ISELECT_IMSIC_TOPEI, PRV_S, true, gein, 2688 riscv_cpu_mxl_bits(env)), 2689 &topei, 0, 0); 2690 if (!ret && topei) { 2691 siprio[scount] = topei & IMSIC_TOPEI_IPRIO_MASK; 2692 } 2693 } 2694 scount++; 2695 } 2696 } else { 2697 if (hviid == IRQ_S_EXT && hviprio) { 2698 siid[scount] = IRQ_S_EXT; 2699 siprio[scount] = hviprio; 2700 scount++; 2701 } 2702 } 2703 2704 if (env->hvictl & HVICTL_VTI) { 2705 if (hviid != IRQ_S_EXT) { 2706 siid[scount] = hviid; 2707 siprio[scount] = hviprio; 2708 scount++; 2709 } 2710 } else { 2711 irq = riscv_cpu_vsirq_pending(env); 2712 if (irq != IRQ_S_EXT && 0 < irq && irq <= 63) { 2713 siid[scount] = irq; 2714 siprio[scount] = env->hviprio[irq]; 2715 scount++; 2716 } 2717 } 2718 2719 iid = 0; 2720 iprio = UINT_MAX; 2721 for (s = 0; s < scount; s++) { 2722 if (siprio[s] < iprio) { 2723 iid = siid[s]; 2724 iprio = siprio[s]; 2725 } 2726 } 2727 2728 if (iid) { 2729 if (env->hvictl & HVICTL_IPRIOM) { 2730 if (iprio > IPRIO_MMAXIPRIO) { 2731 iprio = IPRIO_MMAXIPRIO; 2732 } 2733 if (!iprio) { 2734 if (riscv_cpu_default_priority(iid) > IPRIO_DEFAULT_S) { 2735 iprio = IPRIO_MMAXIPRIO; 2736 } 2737 } 2738 } else { 2739 iprio = 1; 2740 } 2741 } else { 2742 iprio = 0; 2743 } 2744 2745 *val = (iid & TOPI_IID_MASK) << TOPI_IID_SHIFT; 2746 *val |= iprio; 2747 return RISCV_EXCP_NONE; 2748 } 2749 2750 static int read_stopi(CPURISCVState *env, int csrno, target_ulong *val) 2751 { 2752 int irq; 2753 uint8_t iprio; 2754 2755 if (riscv_cpu_virt_enabled(env)) { 2756 return read_vstopi(env, CSR_VSTOPI, val); 2757 } 2758 2759 irq = riscv_cpu_sirq_pending(env); 2760 if (irq <= 0 || irq > 63) { 2761 *val = 0; 2762 } else { 2763 iprio = 
env->siprio[irq]; 2764 if (!iprio) { 2765 if (riscv_cpu_default_priority(irq) > IPRIO_DEFAULT_S) { 2766 iprio = IPRIO_MMAXIPRIO; 2767 } 2768 } 2769 *val = (irq & TOPI_IID_MASK) << TOPI_IID_SHIFT; 2770 *val |= iprio; 2771 } 2772 2773 return RISCV_EXCP_NONE; 2774 } 2775 2776 /* Hypervisor Extensions */ 2777 static RISCVException read_hstatus(CPURISCVState *env, int csrno, 2778 target_ulong *val) 2779 { 2780 *val = env->hstatus; 2781 if (riscv_cpu_mxl(env) != MXL_RV32) { 2782 /* We only support 64-bit VSXL */ 2783 *val = set_field(*val, HSTATUS_VSXL, 2); 2784 } 2785 /* We only support little endian */ 2786 *val = set_field(*val, HSTATUS_VSBE, 0); 2787 return RISCV_EXCP_NONE; 2788 } 2789 2790 static RISCVException write_hstatus(CPURISCVState *env, int csrno, 2791 target_ulong val) 2792 { 2793 env->hstatus = val; 2794 if (riscv_cpu_mxl(env) != MXL_RV32 && get_field(val, HSTATUS_VSXL) != 2) { 2795 qemu_log_mask(LOG_UNIMP, "QEMU does not support mixed HSXLEN options."); 2796 } 2797 if (get_field(val, HSTATUS_VSBE) != 0) { 2798 qemu_log_mask(LOG_UNIMP, "QEMU does not support big endian guests."); 2799 } 2800 return RISCV_EXCP_NONE; 2801 } 2802 2803 static RISCVException read_hedeleg(CPURISCVState *env, int csrno, 2804 target_ulong *val) 2805 { 2806 *val = env->hedeleg; 2807 return RISCV_EXCP_NONE; 2808 } 2809 2810 static RISCVException write_hedeleg(CPURISCVState *env, int csrno, 2811 target_ulong val) 2812 { 2813 env->hedeleg = val & vs_delegable_excps; 2814 return RISCV_EXCP_NONE; 2815 } 2816 2817 static RISCVException rmw_hideleg64(CPURISCVState *env, int csrno, 2818 uint64_t *ret_val, 2819 uint64_t new_val, uint64_t wr_mask) 2820 { 2821 uint64_t mask = wr_mask & vs_delegable_ints; 2822 2823 if (ret_val) { 2824 *ret_val = env->hideleg & vs_delegable_ints; 2825 } 2826 2827 env->hideleg = (env->hideleg & ~mask) | (new_val & mask); 2828 return RISCV_EXCP_NONE; 2829 } 2830 2831 static RISCVException rmw_hideleg(CPURISCVState *env, int csrno, 2832 target_ulong *ret_val, 2833 target_ulong new_val, target_ulong wr_mask) 2834 { 2835 uint64_t rval; 2836 RISCVException ret; 2837 2838 ret = rmw_hideleg64(env, csrno, &rval, new_val, wr_mask); 2839 if (ret_val) { 2840 *ret_val = rval; 2841 } 2842 2843 return ret; 2844 } 2845 2846 static RISCVException rmw_hidelegh(CPURISCVState *env, int csrno, 2847 target_ulong *ret_val, 2848 target_ulong new_val, target_ulong wr_mask) 2849 { 2850 uint64_t rval; 2851 RISCVException ret; 2852 2853 ret = rmw_hideleg64(env, csrno, &rval, 2854 ((uint64_t)new_val) << 32, ((uint64_t)wr_mask) << 32); 2855 if (ret_val) { 2856 *ret_val = rval >> 32; 2857 } 2858 2859 return ret; 2860 } 2861 2862 static RISCVException rmw_hvip64(CPURISCVState *env, int csrno, 2863 uint64_t *ret_val, 2864 uint64_t new_val, uint64_t wr_mask) 2865 { 2866 RISCVException ret; 2867 2868 ret = rmw_mip64(env, csrno, ret_val, new_val, 2869 wr_mask & hvip_writable_mask); 2870 if (ret_val) { 2871 *ret_val &= VS_MODE_INTERRUPTS; 2872 } 2873 2874 return ret; 2875 } 2876 2877 static RISCVException rmw_hvip(CPURISCVState *env, int csrno, 2878 target_ulong *ret_val, 2879 target_ulong new_val, target_ulong wr_mask) 2880 { 2881 uint64_t rval; 2882 RISCVException ret; 2883 2884 ret = rmw_hvip64(env, csrno, &rval, new_val, wr_mask); 2885 if (ret_val) { 2886 *ret_val = rval; 2887 } 2888 2889 return ret; 2890 } 2891 2892 static RISCVException rmw_hviph(CPURISCVState *env, int csrno, 2893 target_ulong *ret_val, 2894 target_ulong new_val, target_ulong wr_mask) 2895 { 2896 uint64_t rval; 2897 RISCVException ret; 2898 2899 
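/*
 * hviph is the high half of the 64-bit hvip view: shift the 32-bit
 * value and write mask up into bits 63:32 before the combined RMW
 * below, then shift the returned value back down.
 */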
ret = rmw_hvip64(env, csrno, &rval, 2900 ((uint64_t)new_val) << 32, ((uint64_t)wr_mask) << 32); 2901 if (ret_val) { 2902 *ret_val = rval >> 32; 2903 } 2904 2905 return ret; 2906 } 2907 2908 static RISCVException rmw_hip(CPURISCVState *env, int csrno, 2909 target_ulong *ret_value, 2910 target_ulong new_value, target_ulong write_mask) 2911 { 2912 int ret = rmw_mip(env, csrno, ret_value, new_value, 2913 write_mask & hip_writable_mask); 2914 2915 if (ret_value) { 2916 *ret_value &= HS_MODE_INTERRUPTS; 2917 } 2918 return ret; 2919 } 2920 2921 static RISCVException rmw_hie(CPURISCVState *env, int csrno, 2922 target_ulong *ret_val, 2923 target_ulong new_val, target_ulong wr_mask) 2924 { 2925 uint64_t rval; 2926 RISCVException ret; 2927 2928 ret = rmw_mie64(env, csrno, &rval, new_val, wr_mask & HS_MODE_INTERRUPTS); 2929 if (ret_val) { 2930 *ret_val = rval & HS_MODE_INTERRUPTS; 2931 } 2932 2933 return ret; 2934 } 2935 2936 static RISCVException read_hcounteren(CPURISCVState *env, int csrno, 2937 target_ulong *val) 2938 { 2939 *val = env->hcounteren; 2940 return RISCV_EXCP_NONE; 2941 } 2942 2943 static RISCVException write_hcounteren(CPURISCVState *env, int csrno, 2944 target_ulong val) 2945 { 2946 env->hcounteren = val; 2947 return RISCV_EXCP_NONE; 2948 } 2949 2950 static RISCVException read_hgeie(CPURISCVState *env, int csrno, 2951 target_ulong *val) 2952 { 2953 if (val) { 2954 *val = env->hgeie; 2955 } 2956 return RISCV_EXCP_NONE; 2957 } 2958 2959 static RISCVException write_hgeie(CPURISCVState *env, int csrno, 2960 target_ulong val) 2961 { 2962 /* Only GEILEN:1 bits implemented and BIT0 is never implemented */ 2963 val &= ((((target_ulong)1) << env->geilen) - 1) << 1; 2964 env->hgeie = val; 2965 /* Update mip.SGEIP bit */ 2966 riscv_cpu_update_mip(env_archcpu(env), MIP_SGEIP, 2967 BOOL_TO_MASK(!!(env->hgeie & env->hgeip))); 2968 return RISCV_EXCP_NONE; 2969 } 2970 2971 static RISCVException read_htval(CPURISCVState *env, int csrno, 2972 target_ulong *val) 2973 { 2974 *val = env->htval; 2975 return RISCV_EXCP_NONE; 2976 } 2977 2978 static RISCVException write_htval(CPURISCVState *env, int csrno, 2979 target_ulong val) 2980 { 2981 env->htval = val; 2982 return RISCV_EXCP_NONE; 2983 } 2984 2985 static RISCVException read_htinst(CPURISCVState *env, int csrno, 2986 target_ulong *val) 2987 { 2988 *val = env->htinst; 2989 return RISCV_EXCP_NONE; 2990 } 2991 2992 static RISCVException write_htinst(CPURISCVState *env, int csrno, 2993 target_ulong val) 2994 { 2995 return RISCV_EXCP_NONE; 2996 } 2997 2998 static RISCVException read_hgeip(CPURISCVState *env, int csrno, 2999 target_ulong *val) 3000 { 3001 if (val) { 3002 *val = env->hgeip; 3003 } 3004 return RISCV_EXCP_NONE; 3005 } 3006 3007 static RISCVException read_hgatp(CPURISCVState *env, int csrno, 3008 target_ulong *val) 3009 { 3010 *val = env->hgatp; 3011 return RISCV_EXCP_NONE; 3012 } 3013 3014 static RISCVException write_hgatp(CPURISCVState *env, int csrno, 3015 target_ulong val) 3016 { 3017 env->hgatp = val; 3018 return RISCV_EXCP_NONE; 3019 } 3020 3021 static RISCVException read_htimedelta(CPURISCVState *env, int csrno, 3022 target_ulong *val) 3023 { 3024 if (!env->rdtime_fn) { 3025 return RISCV_EXCP_ILLEGAL_INST; 3026 } 3027 3028 *val = env->htimedelta; 3029 return RISCV_EXCP_NONE; 3030 } 3031 3032 static RISCVException write_htimedelta(CPURISCVState *env, int csrno, 3033 target_ulong val) 3034 { 3035 RISCVCPU *cpu = env_archcpu(env); 3036 3037 if (!env->rdtime_fn) { 3038 return RISCV_EXCP_ILLEGAL_INST; 3039 } 3040 3041 if (riscv_cpu_mxl(env) == 
MXL_RV32) { 3042 env->htimedelta = deposit64(env->htimedelta, 0, 32, (uint64_t)val); 3043 } else { 3044 env->htimedelta = val; 3045 } 3046 3047 if (cpu->cfg.ext_sstc && env->rdtime_fn) { 3048 riscv_timer_write_timecmp(cpu, env->vstimer, env->vstimecmp, 3049 env->htimedelta, MIP_VSTIP); 3050 } 3051 3052 return RISCV_EXCP_NONE; 3053 } 3054 3055 static RISCVException read_htimedeltah(CPURISCVState *env, int csrno, 3056 target_ulong *val) 3057 { 3058 if (!env->rdtime_fn) { 3059 return RISCV_EXCP_ILLEGAL_INST; 3060 } 3061 3062 *val = env->htimedelta >> 32; 3063 return RISCV_EXCP_NONE; 3064 } 3065 3066 static RISCVException write_htimedeltah(CPURISCVState *env, int csrno, 3067 target_ulong val) 3068 { 3069 RISCVCPU *cpu = env_archcpu(env); 3070 3071 if (!env->rdtime_fn) { 3072 return RISCV_EXCP_ILLEGAL_INST; 3073 } 3074 3075 env->htimedelta = deposit64(env->htimedelta, 32, 32, (uint64_t)val); 3076 3077 if (cpu->cfg.ext_sstc && env->rdtime_fn) { 3078 riscv_timer_write_timecmp(cpu, env->vstimer, env->vstimecmp, 3079 env->htimedelta, MIP_VSTIP); 3080 } 3081 3082 return RISCV_EXCP_NONE; 3083 } 3084 3085 static int read_hvictl(CPURISCVState *env, int csrno, target_ulong *val) 3086 { 3087 *val = env->hvictl; 3088 return RISCV_EXCP_NONE; 3089 } 3090 3091 static int write_hvictl(CPURISCVState *env, int csrno, target_ulong val) 3092 { 3093 env->hvictl = val & HVICTL_VALID_MASK; 3094 return RISCV_EXCP_NONE; 3095 } 3096 3097 static int read_hvipriox(CPURISCVState *env, int first_index, 3098 uint8_t *iprio, target_ulong *val) 3099 { 3100 int i, irq, rdzero, num_irqs = 4 * (riscv_cpu_mxl_bits(env) / 32); 3101 3102 /* First index has to be a multiple of number of irqs per register */ 3103 if (first_index % num_irqs) { 3104 return (riscv_cpu_virt_enabled(env)) ? 3105 RISCV_EXCP_VIRT_INSTRUCTION_FAULT : RISCV_EXCP_ILLEGAL_INST; 3106 } 3107 3108 /* Fill-up return value */ 3109 *val = 0; 3110 for (i = 0; i < num_irqs; i++) { 3111 if (riscv_cpu_hviprio_index2irq(first_index + i, &irq, &rdzero)) { 3112 continue; 3113 } 3114 if (rdzero) { 3115 continue; 3116 } 3117 *val |= ((target_ulong)iprio[irq]) << (i * 8); 3118 } 3119 3120 return RISCV_EXCP_NONE; 3121 } 3122 3123 static int write_hvipriox(CPURISCVState *env, int first_index, 3124 uint8_t *iprio, target_ulong val) 3125 { 3126 int i, irq, rdzero, num_irqs = 4 * (riscv_cpu_mxl_bits(env) / 32); 3127 3128 /* First index has to be a multiple of number of irqs per register */ 3129 if (first_index % num_irqs) { 3130 return (riscv_cpu_virt_enabled(env)) ? 
3131 RISCV_EXCP_VIRT_INSTRUCTION_FAULT : RISCV_EXCP_ILLEGAL_INST; 3132 } 3133 3134 /* Fill-up priority array */ 3135 for (i = 0; i < num_irqs; i++) { 3136 if (riscv_cpu_hviprio_index2irq(first_index + i, &irq, &rdzero)) { 3137 continue; 3138 } 3139 if (rdzero) { 3140 iprio[irq] = 0; 3141 } else { 3142 iprio[irq] = (val >> (i * 8)) & 0xff; 3143 } 3144 } 3145 3146 return RISCV_EXCP_NONE; 3147 } 3148 3149 static int read_hviprio1(CPURISCVState *env, int csrno, target_ulong *val) 3150 { 3151 return read_hvipriox(env, 0, env->hviprio, val); 3152 } 3153 3154 static int write_hviprio1(CPURISCVState *env, int csrno, target_ulong val) 3155 { 3156 return write_hvipriox(env, 0, env->hviprio, val); 3157 } 3158 3159 static int read_hviprio1h(CPURISCVState *env, int csrno, target_ulong *val) 3160 { 3161 return read_hvipriox(env, 4, env->hviprio, val); 3162 } 3163 3164 static int write_hviprio1h(CPURISCVState *env, int csrno, target_ulong val) 3165 { 3166 return write_hvipriox(env, 4, env->hviprio, val); 3167 } 3168 3169 static int read_hviprio2(CPURISCVState *env, int csrno, target_ulong *val) 3170 { 3171 return read_hvipriox(env, 8, env->hviprio, val); 3172 } 3173 3174 static int write_hviprio2(CPURISCVState *env, int csrno, target_ulong val) 3175 { 3176 return write_hvipriox(env, 8, env->hviprio, val); 3177 } 3178 3179 static int read_hviprio2h(CPURISCVState *env, int csrno, target_ulong *val) 3180 { 3181 return read_hvipriox(env, 12, env->hviprio, val); 3182 } 3183 3184 static int write_hviprio2h(CPURISCVState *env, int csrno, target_ulong val) 3185 { 3186 return write_hvipriox(env, 12, env->hviprio, val); 3187 } 3188 3189 /* Virtual CSR Registers */ 3190 static RISCVException read_vsstatus(CPURISCVState *env, int csrno, 3191 target_ulong *val) 3192 { 3193 *val = env->vsstatus; 3194 return RISCV_EXCP_NONE; 3195 } 3196 3197 static RISCVException write_vsstatus(CPURISCVState *env, int csrno, 3198 target_ulong val) 3199 { 3200 uint64_t mask = (target_ulong)-1; 3201 if ((val & VSSTATUS64_UXL) == 0) { 3202 mask &= ~VSSTATUS64_UXL; 3203 } 3204 env->vsstatus = (env->vsstatus & ~mask) | (uint64_t)val; 3205 return RISCV_EXCP_NONE; 3206 } 3207 3208 static int read_vstvec(CPURISCVState *env, int csrno, target_ulong *val) 3209 { 3210 *val = env->vstvec; 3211 return RISCV_EXCP_NONE; 3212 } 3213 3214 static RISCVException write_vstvec(CPURISCVState *env, int csrno, 3215 target_ulong val) 3216 { 3217 env->vstvec = val; 3218 return RISCV_EXCP_NONE; 3219 } 3220 3221 static RISCVException read_vsscratch(CPURISCVState *env, int csrno, 3222 target_ulong *val) 3223 { 3224 *val = env->vsscratch; 3225 return RISCV_EXCP_NONE; 3226 } 3227 3228 static RISCVException write_vsscratch(CPURISCVState *env, int csrno, 3229 target_ulong val) 3230 { 3231 env->vsscratch = val; 3232 return RISCV_EXCP_NONE; 3233 } 3234 3235 static RISCVException read_vsepc(CPURISCVState *env, int csrno, 3236 target_ulong *val) 3237 { 3238 *val = env->vsepc; 3239 return RISCV_EXCP_NONE; 3240 } 3241 3242 static RISCVException write_vsepc(CPURISCVState *env, int csrno, 3243 target_ulong val) 3244 { 3245 env->vsepc = val; 3246 return RISCV_EXCP_NONE; 3247 } 3248 3249 static RISCVException read_vscause(CPURISCVState *env, int csrno, 3250 target_ulong *val) 3251 { 3252 *val = env->vscause; 3253 return RISCV_EXCP_NONE; 3254 } 3255 3256 static RISCVException write_vscause(CPURISCVState *env, int csrno, 3257 target_ulong val) 3258 { 3259 env->vscause = val; 3260 return RISCV_EXCP_NONE; 3261 } 3262 3263 static RISCVException read_vstval(CPURISCVState *env, int
csrno, 3264 target_ulong *val) 3265 { 3266 *val = env->vstval; 3267 return RISCV_EXCP_NONE; 3268 } 3269 3270 static RISCVException write_vstval(CPURISCVState *env, int csrno, 3271 target_ulong val) 3272 { 3273 env->vstval = val; 3274 return RISCV_EXCP_NONE; 3275 } 3276 3277 static RISCVException read_vsatp(CPURISCVState *env, int csrno, 3278 target_ulong *val) 3279 { 3280 *val = env->vsatp; 3281 return RISCV_EXCP_NONE; 3282 } 3283 3284 static RISCVException write_vsatp(CPURISCVState *env, int csrno, 3285 target_ulong val) 3286 { 3287 env->vsatp = val; 3288 return RISCV_EXCP_NONE; 3289 } 3290 3291 static RISCVException read_mtval2(CPURISCVState *env, int csrno, 3292 target_ulong *val) 3293 { 3294 *val = env->mtval2; 3295 return RISCV_EXCP_NONE; 3296 } 3297 3298 static RISCVException write_mtval2(CPURISCVState *env, int csrno, 3299 target_ulong val) 3300 { 3301 env->mtval2 = val; 3302 return RISCV_EXCP_NONE; 3303 } 3304 3305 static RISCVException read_mtinst(CPURISCVState *env, int csrno, 3306 target_ulong *val) 3307 { 3308 *val = env->mtinst; 3309 return RISCV_EXCP_NONE; 3310 } 3311 3312 static RISCVException write_mtinst(CPURISCVState *env, int csrno, 3313 target_ulong val) 3314 { 3315 env->mtinst = val; 3316 return RISCV_EXCP_NONE; 3317 } 3318 3319 /* Physical Memory Protection */ 3320 static RISCVException read_mseccfg(CPURISCVState *env, int csrno, 3321 target_ulong *val) 3322 { 3323 *val = mseccfg_csr_read(env); 3324 return RISCV_EXCP_NONE; 3325 } 3326 3327 static RISCVException write_mseccfg(CPURISCVState *env, int csrno, 3328 target_ulong val) 3329 { 3330 mseccfg_csr_write(env, val); 3331 return RISCV_EXCP_NONE; 3332 } 3333 3334 static bool check_pmp_reg_index(CPURISCVState *env, uint32_t reg_index) 3335 { 3336 /* TODO: RV128 restriction check */ 3337 if ((reg_index & 1) && (riscv_cpu_mxl(env) == MXL_RV64)) { 3338 return false; 3339 } 3340 return true; 3341 } 3342 3343 static RISCVException read_pmpcfg(CPURISCVState *env, int csrno, 3344 target_ulong *val) 3345 { 3346 uint32_t reg_index = csrno - CSR_PMPCFG0; 3347 3348 if (!check_pmp_reg_index(env, reg_index)) { 3349 return RISCV_EXCP_ILLEGAL_INST; 3350 } 3351 *val = pmpcfg_csr_read(env, reg_index); 3352 return RISCV_EXCP_NONE; 3353 } 3354 3355 static RISCVException write_pmpcfg(CPURISCVState *env, int csrno, 3356 target_ulong val) 3357 { 3358 uint32_t reg_index = csrno - CSR_PMPCFG0; 3359 3360 if (!check_pmp_reg_index(env, reg_index)) { 3361 return RISCV_EXCP_ILLEGAL_INST; 3362 } 3363 pmpcfg_csr_write(env, reg_index, val); 3364 return RISCV_EXCP_NONE; 3365 } 3366 3367 static RISCVException read_pmpaddr(CPURISCVState *env, int csrno, 3368 target_ulong *val) 3369 { 3370 *val = pmpaddr_csr_read(env, csrno - CSR_PMPADDR0); 3371 return RISCV_EXCP_NONE; 3372 } 3373 3374 static RISCVException write_pmpaddr(CPURISCVState *env, int csrno, 3375 target_ulong val) 3376 { 3377 pmpaddr_csr_write(env, csrno - CSR_PMPADDR0, val); 3378 return RISCV_EXCP_NONE; 3379 } 3380 3381 static RISCVException read_tselect(CPURISCVState *env, int csrno, 3382 target_ulong *val) 3383 { 3384 *val = tselect_csr_read(env); 3385 return RISCV_EXCP_NONE; 3386 } 3387 3388 static RISCVException write_tselect(CPURISCVState *env, int csrno, 3389 target_ulong val) 3390 { 3391 tselect_csr_write(env, val); 3392 return RISCV_EXCP_NONE; 3393 } 3394 3395 static RISCVException read_tdata(CPURISCVState *env, int csrno, 3396 target_ulong *val) 3397 { 3398 /* return 0 in tdata1 to end the trigger enumeration */ 3399 if (env->trigger_cur >= RV_MAX_TRIGGERS && csrno == CSR_TDATA1) { 
3400 *val = 0; 3401 return RISCV_EXCP_NONE; 3402 } 3403 3404 if (!tdata_available(env, csrno - CSR_TDATA1)) { 3405 return RISCV_EXCP_ILLEGAL_INST; 3406 } 3407 3408 *val = tdata_csr_read(env, csrno - CSR_TDATA1); 3409 return RISCV_EXCP_NONE; 3410 } 3411 3412 static RISCVException write_tdata(CPURISCVState *env, int csrno, 3413 target_ulong val) 3414 { 3415 if (!tdata_available(env, csrno - CSR_TDATA1)) { 3416 return RISCV_EXCP_ILLEGAL_INST; 3417 } 3418 3419 tdata_csr_write(env, csrno - CSR_TDATA1, val); 3420 return RISCV_EXCP_NONE; 3421 } 3422 3423 static RISCVException read_tinfo(CPURISCVState *env, int csrno, 3424 target_ulong *val) 3425 { 3426 *val = tinfo_csr_read(env); 3427 return RISCV_EXCP_NONE; 3428 } 3429 3430 /* 3431 * Functions to access Pointer Masking feature registers 3432 * We have to check if current priv lvl could modify 3433 * csr in given mode 3434 */ 3435 static bool check_pm_current_disabled(CPURISCVState *env, int csrno) 3436 { 3437 int csr_priv = get_field(csrno, 0x300); 3438 int pm_current; 3439 3440 if (env->debugger) { 3441 return false; 3442 } 3443 /* 3444 * If priv lvls differ that means we're accessing csr from higher priv lvl, 3445 * so allow the access 3446 */ 3447 if (env->priv != csr_priv) { 3448 return false; 3449 } 3450 switch (env->priv) { 3451 case PRV_M: 3452 pm_current = get_field(env->mmte, M_PM_CURRENT); 3453 break; 3454 case PRV_S: 3455 pm_current = get_field(env->mmte, S_PM_CURRENT); 3456 break; 3457 case PRV_U: 3458 pm_current = get_field(env->mmte, U_PM_CURRENT); 3459 break; 3460 default: 3461 g_assert_not_reached(); 3462 } 3463 /* It's same priv lvl, so we allow to modify csr only if pm.current==1 */ 3464 return !pm_current; 3465 } 3466 3467 static RISCVException read_mmte(CPURISCVState *env, int csrno, 3468 target_ulong *val) 3469 { 3470 *val = env->mmte & MMTE_MASK; 3471 return RISCV_EXCP_NONE; 3472 } 3473 3474 static RISCVException write_mmte(CPURISCVState *env, int csrno, 3475 target_ulong val) 3476 { 3477 uint64_t mstatus; 3478 target_ulong wpri_val = val & MMTE_MASK; 3479 3480 if (val != wpri_val) { 3481 qemu_log_mask(LOG_GUEST_ERROR, "%s" TARGET_FMT_lx " %s" TARGET_FMT_lx "\n", 3482 "MMTE: WPRI violation written 0x", val, 3483 "vs expected 0x", wpri_val); 3484 } 3485 /* for machine mode pm.current is hardwired to 1 */ 3486 wpri_val |= MMTE_M_PM_CURRENT; 3487 3488 /* hardwiring pm.instruction bit to 0, since it's not supported yet */ 3489 wpri_val &= ~(MMTE_M_PM_INSN | MMTE_S_PM_INSN | MMTE_U_PM_INSN); 3490 env->mmte = wpri_val | PM_EXT_DIRTY; 3491 riscv_cpu_update_mask(env); 3492 3493 /* Set XS and SD bits, since PM CSRs are dirty */ 3494 mstatus = env->mstatus | MSTATUS_XS; 3495 write_mstatus(env, csrno, mstatus); 3496 return RISCV_EXCP_NONE; 3497 } 3498 3499 static RISCVException read_smte(CPURISCVState *env, int csrno, 3500 target_ulong *val) 3501 { 3502 *val = env->mmte & SMTE_MASK; 3503 return RISCV_EXCP_NONE; 3504 } 3505 3506 static RISCVException write_smte(CPURISCVState *env, int csrno, 3507 target_ulong val) 3508 { 3509 target_ulong wpri_val = val & SMTE_MASK; 3510 3511 if (val != wpri_val) { 3512 qemu_log_mask(LOG_GUEST_ERROR, "%s" TARGET_FMT_lx " %s" TARGET_FMT_lx "\n", 3513 "SMTE: WPRI violation written 0x", val, 3514 "vs expected 0x", wpri_val); 3515 } 3516 3517 /* if pm.current==0 we can't modify current PM CSRs */ 3518 if (check_pm_current_disabled(env, csrno)) { 3519 return RISCV_EXCP_NONE; 3520 } 3521 3522 wpri_val |= (env->mmte & ~SMTE_MASK); 3523 write_mmte(env, csrno, wpri_val); 3524 return RISCV_EXCP_NONE; 3525 } 3526 
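/*
 * umte, like smte above, is a narrower view of the single mmte register:
 * its write handler merges the untouched mmte fields back in and then
 * funnels the result through write_mmte().
 */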
3527 static RISCVException read_umte(CPURISCVState *env, int csrno, 3528 target_ulong *val) 3529 { 3530 *val = env->mmte & UMTE_MASK; 3531 return RISCV_EXCP_NONE; 3532 } 3533 3534 static RISCVException write_umte(CPURISCVState *env, int csrno, 3535 target_ulong val) 3536 { 3537 target_ulong wpri_val = val & UMTE_MASK; 3538 3539 if (val != wpri_val) { 3540 qemu_log_mask(LOG_GUEST_ERROR, "%s" TARGET_FMT_lx " %s" TARGET_FMT_lx "\n", 3541 "UMTE: WPRI violation written 0x", val, 3542 "vs expected 0x", wpri_val); 3543 } 3544 3545 if (check_pm_current_disabled(env, csrno)) { 3546 return RISCV_EXCP_NONE; 3547 } 3548 3549 wpri_val |= (env->mmte & ~UMTE_MASK); 3550 write_mmte(env, csrno, wpri_val); 3551 return RISCV_EXCP_NONE; 3552 } 3553 3554 static RISCVException read_mpmmask(CPURISCVState *env, int csrno, 3555 target_ulong *val) 3556 { 3557 *val = env->mpmmask; 3558 return RISCV_EXCP_NONE; 3559 } 3560 3561 static RISCVException write_mpmmask(CPURISCVState *env, int csrno, 3562 target_ulong val) 3563 { 3564 uint64_t mstatus; 3565 3566 env->mpmmask = val; 3567 if ((env->priv == PRV_M) && (env->mmte & M_PM_ENABLE)) { 3568 env->cur_pmmask = val; 3569 } 3570 env->mmte |= PM_EXT_DIRTY; 3571 3572 /* Set XS and SD bits, since PM CSRs are dirty */ 3573 mstatus = env->mstatus | MSTATUS_XS; 3574 write_mstatus(env, csrno, mstatus); 3575 return RISCV_EXCP_NONE; 3576 } 3577 3578 static RISCVException read_spmmask(CPURISCVState *env, int csrno, 3579 target_ulong *val) 3580 { 3581 *val = env->spmmask; 3582 return RISCV_EXCP_NONE; 3583 } 3584 3585 static RISCVException write_spmmask(CPURISCVState *env, int csrno, 3586 target_ulong val) 3587 { 3588 uint64_t mstatus; 3589 3590 /* if pm.current==0 we can't modify current PM CSRs */ 3591 if (check_pm_current_disabled(env, csrno)) { 3592 return RISCV_EXCP_NONE; 3593 } 3594 env->spmmask = val; 3595 if ((env->priv == PRV_S) && (env->mmte & S_PM_ENABLE)) { 3596 env->cur_pmmask = val; 3597 } 3598 env->mmte |= PM_EXT_DIRTY; 3599 3600 /* Set XS and SD bits, since PM CSRs are dirty */ 3601 mstatus = env->mstatus | MSTATUS_XS; 3602 write_mstatus(env, csrno, mstatus); 3603 return RISCV_EXCP_NONE; 3604 } 3605 3606 static RISCVException read_upmmask(CPURISCVState *env, int csrno, 3607 target_ulong *val) 3608 { 3609 *val = env->upmmask; 3610 return RISCV_EXCP_NONE; 3611 } 3612 3613 static RISCVException write_upmmask(CPURISCVState *env, int csrno, 3614 target_ulong val) 3615 { 3616 uint64_t mstatus; 3617 3618 /* if pm.current==0 we can't modify current PM CSRs */ 3619 if (check_pm_current_disabled(env, csrno)) { 3620 return RISCV_EXCP_NONE; 3621 } 3622 env->upmmask = val; 3623 if ((env->priv == PRV_U) && (env->mmte & U_PM_ENABLE)) { 3624 env->cur_pmmask = val; 3625 } 3626 env->mmte |= PM_EXT_DIRTY; 3627 3628 /* Set XS and SD bits, since PM CSRs are dirty */ 3629 mstatus = env->mstatus | MSTATUS_XS; 3630 write_mstatus(env, csrno, mstatus); 3631 return RISCV_EXCP_NONE; 3632 } 3633 3634 static RISCVException read_mpmbase(CPURISCVState *env, int csrno, 3635 target_ulong *val) 3636 { 3637 *val = env->mpmbase; 3638 return RISCV_EXCP_NONE; 3639 } 3640 3641 static RISCVException write_mpmbase(CPURISCVState *env, int csrno, 3642 target_ulong val) 3643 { 3644 uint64_t mstatus; 3645 3646 env->mpmbase = val; 3647 if ((env->priv == PRV_M) && (env->mmte & M_PM_ENABLE)) { 3648 env->cur_pmbase = val; 3649 } 3650 env->mmte |= PM_EXT_DIRTY; 3651 3652 /* Set XS and SD bits, since PM CSRs are dirty */ 3653 mstatus = env->mstatus | MSTATUS_XS; 3654 write_mstatus(env, csrno, mstatus); 3655 return 
RISCV_EXCP_NONE; 3656 } 3657 3658 static RISCVException read_spmbase(CPURISCVState *env, int csrno, 3659 target_ulong *val) 3660 { 3661 *val = env->spmbase; 3662 return RISCV_EXCP_NONE; 3663 } 3664 3665 static RISCVException write_spmbase(CPURISCVState *env, int csrno, 3666 target_ulong val) 3667 { 3668 uint64_t mstatus; 3669 3670 /* if pm.current==0 we can't modify current PM CSRs */ 3671 if (check_pm_current_disabled(env, csrno)) { 3672 return RISCV_EXCP_NONE; 3673 } 3674 env->spmbase = val; 3675 if ((env->priv == PRV_S) && (env->mmte & S_PM_ENABLE)) { 3676 env->cur_pmbase = val; 3677 } 3678 env->mmte |= PM_EXT_DIRTY; 3679 3680 /* Set XS and SD bits, since PM CSRs are dirty */ 3681 mstatus = env->mstatus | MSTATUS_XS; 3682 write_mstatus(env, csrno, mstatus); 3683 return RISCV_EXCP_NONE; 3684 } 3685 3686 static RISCVException read_upmbase(CPURISCVState *env, int csrno, 3687 target_ulong *val) 3688 { 3689 *val = env->upmbase; 3690 return RISCV_EXCP_NONE; 3691 } 3692 3693 static RISCVException write_upmbase(CPURISCVState *env, int csrno, 3694 target_ulong val) 3695 { 3696 uint64_t mstatus; 3697 3698 /* if pm.current==0 we can't modify current PM CSRs */ 3699 if (check_pm_current_disabled(env, csrno)) { 3700 return RISCV_EXCP_NONE; 3701 } 3702 env->upmbase = val; 3703 if ((env->priv == PRV_U) && (env->mmte & U_PM_ENABLE)) { 3704 env->cur_pmbase = val; 3705 } 3706 env->mmte |= PM_EXT_DIRTY; 3707 3708 /* Set XS and SD bits, since PM CSRs are dirty */ 3709 mstatus = env->mstatus | MSTATUS_XS; 3710 write_mstatus(env, csrno, mstatus); 3711 return RISCV_EXCP_NONE; 3712 } 3713 3714 #endif 3715 3716 /* Crypto Extension */ 3717 static RISCVException rmw_seed(CPURISCVState *env, int csrno, 3718 target_ulong *ret_value, 3719 target_ulong new_value, 3720 target_ulong write_mask) 3721 { 3722 uint16_t random_v; 3723 Error *random_e = NULL; 3724 int random_r; 3725 target_ulong rval; 3726 3727 random_r = qemu_guest_getrandom(&random_v, 2, &random_e); 3728 if (unlikely(random_r < 0)) { 3729 /* 3730 * Failed, for unknown reasons in the crypto subsystem. 3731 * The best we can do is log the reason and return a 3732 * failure indication to the guest. There is no reason 3733 * we know to expect the failure to be transitory, so 3734 * indicate DEAD to avoid having the guest spin on WAIT. 
*/ 3736 qemu_log_mask(LOG_UNIMP, "%s: Crypto failure: %s", 3737 __func__, error_get_pretty(random_e)); 3738 error_free(random_e); 3739 rval = SEED_OPST_DEAD; 3740 } else { 3741 rval = random_v | SEED_OPST_ES16; 3742 } 3743 3744 if (ret_value) { 3745 *ret_value = rval; 3746 } 3747 3748 return RISCV_EXCP_NONE; 3749 } 3750 3751 /* 3752 * riscv_csrrw - read and/or update control and status register 3753 * 3754 * csrr <-> riscv_csrrw(env, csrno, ret_value, 0, 0); 3755 * csrrw <-> riscv_csrrw(env, csrno, ret_value, value, -1); 3756 * csrrs <-> riscv_csrrw(env, csrno, ret_value, -1, value); 3757 * csrrc <-> riscv_csrrw(env, csrno, ret_value, 0, value); 3758 */ 3759 3760 static inline RISCVException riscv_csrrw_check(CPURISCVState *env, 3761 int csrno, 3762 bool write_mask, 3763 RISCVCPU *cpu) 3764 { 3765 /* check privileges and return RISCV_EXCP_ILLEGAL_INST if the check fails */ 3766 bool read_only = get_field(csrno, 0xC00) == 3; 3767 int csr_min_priv = csr_ops[csrno].min_priv_ver; 3768 3769 /* ensure the CSR extension is enabled */ 3770 if (!cpu->cfg.ext_icsr) { 3771 return RISCV_EXCP_ILLEGAL_INST; 3772 } 3773 3774 /* privileged spec version check */ 3775 if (env->priv_ver < csr_min_priv) { 3776 return RISCV_EXCP_ILLEGAL_INST; 3777 } 3778 3779 /* read / write check */ 3780 if (write_mask && read_only) { 3781 return RISCV_EXCP_ILLEGAL_INST; 3782 } 3783 3784 /* 3785 * The predicate() not only checks for existence but also performs some 3786 * access control checks, which in some cases trigger a virtual instruction 3787 * exception. When writing read-only CSRs in those cases an illegal 3788 * instruction exception should be triggered instead of a virtual 3789 * instruction exception. Hence this comes after the read / write check. 3790 */ 3791 g_assert(csr_ops[csrno].predicate != NULL); 3792 RISCVException ret = csr_ops[csrno].predicate(env, csrno); 3793 if (ret != RISCV_EXCP_NONE) { 3794 return ret; 3795 } 3796 3797 #if !defined(CONFIG_USER_ONLY) 3798 int csr_priv, effective_priv = env->priv; 3799 3800 if (riscv_has_ext(env, RVH) && env->priv == PRV_S && 3801 !riscv_cpu_virt_enabled(env)) { 3802 /* 3803 * We are in HS mode. Add 1 to the effective privilege level to 3804 * allow us to access the Hypervisor CSRs.
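 * For example, hstatus (CSR 0x600) encodes csr_priv == PRV_S + 1 in
 * bits [9:8], so the comparison below passes once effective_priv has
 * been bumped to PRV_S + 1.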
3805 */ 3806 effective_priv++; 3807 } 3808 3809 csr_priv = get_field(csrno, 0x300); 3810 if (!env->debugger && (effective_priv < csr_priv)) { 3811 if (csr_priv == (PRV_S + 1) && riscv_cpu_virt_enabled(env)) { 3812 return RISCV_EXCP_VIRT_INSTRUCTION_FAULT; 3813 } 3814 return RISCV_EXCP_ILLEGAL_INST; 3815 } 3816 #endif 3817 return RISCV_EXCP_NONE; 3818 } 3819 3820 static RISCVException riscv_csrrw_do64(CPURISCVState *env, int csrno, 3821 target_ulong *ret_value, 3822 target_ulong new_value, 3823 target_ulong write_mask) 3824 { 3825 RISCVException ret; 3826 target_ulong old_value; 3827 3828 /* execute combined read/write operation if it exists */ 3829 if (csr_ops[csrno].op) { 3830 return csr_ops[csrno].op(env, csrno, ret_value, new_value, write_mask); 3831 } 3832 3833 /* if no accessor exists then return failure */ 3834 if (!csr_ops[csrno].read) { 3835 return RISCV_EXCP_ILLEGAL_INST; 3836 } 3837 /* read old value */ 3838 ret = csr_ops[csrno].read(env, csrno, &old_value); 3839 if (ret != RISCV_EXCP_NONE) { 3840 return ret; 3841 } 3842 3843 /* write value if writable and write mask set, otherwise drop writes */ 3844 if (write_mask) { 3845 new_value = (old_value & ~write_mask) | (new_value & write_mask); 3846 if (csr_ops[csrno].write) { 3847 ret = csr_ops[csrno].write(env, csrno, new_value); 3848 if (ret != RISCV_EXCP_NONE) { 3849 return ret; 3850 } 3851 } 3852 } 3853 3854 /* return old value */ 3855 if (ret_value) { 3856 *ret_value = old_value; 3857 } 3858 3859 return RISCV_EXCP_NONE; 3860 } 3861 3862 RISCVException riscv_csrrw(CPURISCVState *env, int csrno, 3863 target_ulong *ret_value, 3864 target_ulong new_value, target_ulong write_mask) 3865 { 3866 RISCVCPU *cpu = env_archcpu(env); 3867 3868 RISCVException ret = riscv_csrrw_check(env, csrno, write_mask, cpu); 3869 if (ret != RISCV_EXCP_NONE) { 3870 return ret; 3871 } 3872 3873 return riscv_csrrw_do64(env, csrno, ret_value, new_value, write_mask); 3874 } 3875 3876 static RISCVException riscv_csrrw_do128(CPURISCVState *env, int csrno, 3877 Int128 *ret_value, 3878 Int128 new_value, 3879 Int128 write_mask) 3880 { 3881 RISCVException ret; 3882 Int128 old_value; 3883 3884 /* read old value */ 3885 ret = csr_ops[csrno].read128(env, csrno, &old_value); 3886 if (ret != RISCV_EXCP_NONE) { 3887 return ret; 3888 } 3889 3890 /* write value if writable and write mask set, otherwise drop writes */ 3891 if (int128_nz(write_mask)) { 3892 new_value = int128_or(int128_and(old_value, int128_not(write_mask)), 3893 int128_and(new_value, write_mask)); 3894 if (csr_ops[csrno].write128) { 3895 ret = csr_ops[csrno].write128(env, csrno, new_value); 3896 if (ret != RISCV_EXCP_NONE) { 3897 return ret; 3898 } 3899 } else if (csr_ops[csrno].write) { 3900 /* avoids having to write wrappers for all registers */ 3901 ret = csr_ops[csrno].write(env, csrno, int128_getlo(new_value)); 3902 if (ret != RISCV_EXCP_NONE) { 3903 return ret; 3904 } 3905 } 3906 } 3907 3908 /* return old value */ 3909 if (ret_value) { 3910 *ret_value = old_value; 3911 } 3912 3913 return RISCV_EXCP_NONE; 3914 } 3915 3916 RISCVException riscv_csrrw_i128(CPURISCVState *env, int csrno, 3917 Int128 *ret_value, 3918 Int128 new_value, Int128 write_mask) 3919 { 3920 RISCVException ret; 3921 RISCVCPU *cpu = env_archcpu(env); 3922 3923 ret = riscv_csrrw_check(env, csrno, int128_nz(write_mask), cpu); 3924 if (ret != RISCV_EXCP_NONE) { 3925 return ret; 3926 } 3927 3928 if (csr_ops[csrno].read128) { 3929 return riscv_csrrw_do128(env, csrno, ret_value, new_value, write_mask); 3930 } 3931 3932 /* 3933 * Fall back to 
the 64-bit version for now, if the 128-bit alternative is not 3934 * defined at all. 3935 * Note that some CSRs do not need to extend to MXLEN (the upper 64 bits are 3936 * not significant); for those, this fallback handles the accesses correctly. 3937 */ 3938 target_ulong old_value; 3939 ret = riscv_csrrw_do64(env, csrno, &old_value, 3940 int128_getlo(new_value), 3941 int128_getlo(write_mask)); 3942 if (ret == RISCV_EXCP_NONE && ret_value) { 3943 *ret_value = int128_make64(old_value); 3944 } 3945 return ret; 3946 } 3947 3948 /* 3949 * Debugger support. If not in user mode, set env->debugger before the 3950 * riscv_csrrw call and clear it after the call. 3951 */ 3952 RISCVException riscv_csrrw_debug(CPURISCVState *env, int csrno, 3953 target_ulong *ret_value, 3954 target_ulong new_value, 3955 target_ulong write_mask) 3956 { 3957 RISCVException ret; 3958 #if !defined(CONFIG_USER_ONLY) 3959 env->debugger = true; 3960 #endif 3961 ret = riscv_csrrw(env, csrno, ret_value, new_value, write_mask); 3962 #if !defined(CONFIG_USER_ONLY) 3963 env->debugger = false; 3964 #endif 3965 return ret; 3966 } 3967 3968 /* Control and Status Register function table */ 3969 riscv_csr_operations csr_ops[CSR_TABLE_SIZE] = { 3970 /* User Floating-Point CSRs */ 3971 [CSR_FFLAGS] = { "fflags", fs, read_fflags, write_fflags }, 3972 [CSR_FRM] = { "frm", fs, read_frm, write_frm }, 3973 [CSR_FCSR] = { "fcsr", fs, read_fcsr, write_fcsr }, 3974 /* Vector CSRs */ 3975 [CSR_VSTART] = { "vstart", vs, read_vstart, write_vstart }, 3976 [CSR_VXSAT] = { "vxsat", vs, read_vxsat, write_vxsat }, 3977 [CSR_VXRM] = { "vxrm", vs, read_vxrm, write_vxrm }, 3978 [CSR_VCSR] = { "vcsr", vs, read_vcsr, write_vcsr }, 3979 [CSR_VL] = { "vl", vs, read_vl }, 3980 [CSR_VTYPE] = { "vtype", vs, read_vtype }, 3981 [CSR_VLENB] = { "vlenb", vs, read_vlenb }, 3982 /* User Timers and Counters */ 3983 [CSR_CYCLE] = { "cycle", ctr, read_hpmcounter }, 3984 [CSR_INSTRET] = { "instret", ctr, read_hpmcounter }, 3985 [CSR_CYCLEH] = { "cycleh", ctr32, read_hpmcounterh }, 3986 [CSR_INSTRETH] = { "instreth", ctr32, read_hpmcounterh }, 3987 3988 /* 3989 * In privileged mode, the monitor will have to emulate TIME CSRs only if 3990 * the rdtime callback is not provided by machine/platform emulation.
3991 */ 3992 [CSR_TIME] = { "time", ctr, read_time }, 3993 [CSR_TIMEH] = { "timeh", ctr32, read_timeh }, 3994 3995 /* Crypto Extension */ 3996 [CSR_SEED] = { "seed", seed, NULL, NULL, rmw_seed }, 3997 3998 #if !defined(CONFIG_USER_ONLY) 3999 /* Machine Timers and Counters */ 4000 [CSR_MCYCLE] = { "mcycle", any, read_hpmcounter, 4001 write_mhpmcounter }, 4002 [CSR_MINSTRET] = { "minstret", any, read_hpmcounter, 4003 write_mhpmcounter }, 4004 [CSR_MCYCLEH] = { "mcycleh", any32, read_hpmcounterh, 4005 write_mhpmcounterh }, 4006 [CSR_MINSTRETH] = { "minstreth", any32, read_hpmcounterh, 4007 write_mhpmcounterh }, 4008 4009 /* Machine Information Registers */ 4010 [CSR_MVENDORID] = { "mvendorid", any, read_mvendorid }, 4011 [CSR_MARCHID] = { "marchid", any, read_marchid }, 4012 [CSR_MIMPID] = { "mimpid", any, read_mimpid }, 4013 [CSR_MHARTID] = { "mhartid", any, read_mhartid }, 4014 4015 [CSR_MCONFIGPTR] = { "mconfigptr", any, read_zero, 4016 .min_priv_ver = PRIV_VERSION_1_12_0 }, 4017 /* Machine Trap Setup */ 4018 [CSR_MSTATUS] = { "mstatus", any, read_mstatus, write_mstatus, 4019 NULL, read_mstatus_i128 }, 4020 [CSR_MISA] = { "misa", any, read_misa, write_misa, 4021 NULL, read_misa_i128 }, 4022 [CSR_MIDELEG] = { "mideleg", any, NULL, NULL, rmw_mideleg }, 4023 [CSR_MEDELEG] = { "medeleg", any, read_medeleg, write_medeleg }, 4024 [CSR_MIE] = { "mie", any, NULL, NULL, rmw_mie }, 4025 [CSR_MTVEC] = { "mtvec", any, read_mtvec, write_mtvec }, 4026 [CSR_MCOUNTEREN] = { "mcounteren", umode, read_mcounteren, 4027 write_mcounteren }, 4028 4029 [CSR_MSTATUSH] = { "mstatush", any32, read_mstatush, 4030 write_mstatush }, 4031 4032 /* Machine Trap Handling */ 4033 [CSR_MSCRATCH] = { "mscratch", any, read_mscratch, write_mscratch, 4034 NULL, read_mscratch_i128, write_mscratch_i128 }, 4035 [CSR_MEPC] = { "mepc", any, read_mepc, write_mepc }, 4036 [CSR_MCAUSE] = { "mcause", any, read_mcause, write_mcause }, 4037 [CSR_MTVAL] = { "mtval", any, read_mtval, write_mtval }, 4038 [CSR_MIP] = { "mip", any, NULL, NULL, rmw_mip }, 4039 4040 /* Machine-Level Window to Indirectly Accessed Registers (AIA) */ 4041 [CSR_MISELECT] = { "miselect", aia_any, NULL, NULL, rmw_xiselect }, 4042 [CSR_MIREG] = { "mireg", aia_any, NULL, NULL, rmw_xireg }, 4043 4044 /* Machine-Level Interrupts (AIA) */ 4045 [CSR_MTOPEI] = { "mtopei", aia_any, NULL, NULL, rmw_xtopei }, 4046 [CSR_MTOPI] = { "mtopi", aia_any, read_mtopi }, 4047 4048 /* Virtual Interrupts for Supervisor Level (AIA) */ 4049 [CSR_MVIEN] = { "mvien", aia_any, read_zero, write_ignore }, 4050 [CSR_MVIP] = { "mvip", aia_any, read_zero, write_ignore }, 4051 4052 /* Machine-Level High-Half CSRs (AIA) */ 4053 [CSR_MIDELEGH] = { "midelegh", aia_any32, NULL, NULL, rmw_midelegh }, 4054 [CSR_MIEH] = { "mieh", aia_any32, NULL, NULL, rmw_mieh }, 4055 [CSR_MVIENH] = { "mvienh", aia_any32, read_zero, write_ignore }, 4056 [CSR_MVIPH] = { "mviph", aia_any32, read_zero, write_ignore }, 4057 [CSR_MIPH] = { "miph", aia_any32, NULL, NULL, rmw_miph }, 4058 4059 /* Execution environment configuration */ 4060 [CSR_MENVCFG] = { "menvcfg", umode, read_menvcfg, write_menvcfg, 4061 .min_priv_ver = PRIV_VERSION_1_12_0 }, 4062 [CSR_MENVCFGH] = { "menvcfgh", umode32, read_menvcfgh, write_menvcfgh, 4063 .min_priv_ver = PRIV_VERSION_1_12_0 }, 4064 [CSR_SENVCFG] = { "senvcfg", smode, read_senvcfg, write_senvcfg, 4065 .min_priv_ver = PRIV_VERSION_1_12_0 }, 4066 [CSR_HENVCFG] = { "henvcfg", hmode, read_henvcfg, write_henvcfg, 4067 .min_priv_ver = PRIV_VERSION_1_12_0 }, 4068 [CSR_HENVCFGH] = { "henvcfgh", 
hmode32, read_henvcfgh, write_henvcfgh, 4069 .min_priv_ver = PRIV_VERSION_1_12_0 }, 4070 4071 /* Smstateen extension CSRs */ 4072 [CSR_MSTATEEN0] = { "mstateen0", mstateen, read_mstateen, write_mstateen0, 4073 .min_priv_ver = PRIV_VERSION_1_12_0 }, 4074 [CSR_MSTATEEN0H] = { "mstateen0h", mstateen, read_mstateenh, 4075 write_mstateen0h, 4076 .min_priv_ver = PRIV_VERSION_1_12_0 }, 4077 [CSR_MSTATEEN1] = { "mstateen1", mstateen, read_mstateen, 4078 write_mstateen_1_3, 4079 .min_priv_ver = PRIV_VERSION_1_12_0 }, 4080 [CSR_MSTATEEN1H] = { "mstateen1h", mstateen, read_mstateenh, 4081 write_mstateenh_1_3, 4082 .min_priv_ver = PRIV_VERSION_1_12_0 }, 4083 [CSR_MSTATEEN2] = { "mstateen2", mstateen, read_mstateen, 4084 write_mstateen_1_3, 4085 .min_priv_ver = PRIV_VERSION_1_12_0 }, 4086 [CSR_MSTATEEN2H] = { "mstateen2h", mstateen, read_mstateenh, 4087 write_mstateenh_1_3, 4088 .min_priv_ver = PRIV_VERSION_1_12_0 }, 4089 [CSR_MSTATEEN3] = { "mstateen3", mstateen, read_mstateen, 4090 write_mstateen_1_3, 4091 .min_priv_ver = PRIV_VERSION_1_12_0 }, 4092 [CSR_MSTATEEN3H] = { "mstateen3h", mstateen, read_mstateenh, 4093 write_mstateenh_1_3, 4094 .min_priv_ver = PRIV_VERSION_1_12_0 }, 4095 [CSR_HSTATEEN0] = { "hstateen0", hstateen, read_hstateen, write_hstateen0, 4096 .min_priv_ver = PRIV_VERSION_1_12_0 }, 4097 [CSR_HSTATEEN0H] = { "hstateen0h", hstateenh, read_hstateenh, 4098 write_hstateen0h, 4099 .min_priv_ver = PRIV_VERSION_1_12_0 }, 4100 [CSR_HSTATEEN1] = { "hstateen1", hstateen, read_hstateen, 4101 write_hstateen_1_3, 4102 .min_priv_ver = PRIV_VERSION_1_12_0 }, 4103 [CSR_HSTATEEN1H] = { "hstateen1h", hstateenh, read_hstateenh, 4104 write_hstateenh_1_3, 4105 .min_priv_ver = PRIV_VERSION_1_12_0 }, 4106 [CSR_HSTATEEN2] = { "hstateen2", hstateen, read_hstateen, 4107 write_hstateen_1_3, 4108 .min_priv_ver = PRIV_VERSION_1_12_0 }, 4109 [CSR_HSTATEEN2H] = { "hstateen2h", hstateenh, read_hstateenh, 4110 write_hstateenh_1_3, 4111 .min_priv_ver = PRIV_VERSION_1_12_0 }, 4112 [CSR_HSTATEEN3] = { "hstateen3", hstateen, read_hstateen, 4113 write_hstateen_1_3, 4114 .min_priv_ver = PRIV_VERSION_1_12_0 }, 4115 [CSR_HSTATEEN3H] = { "hstateen3h", hstateenh, read_hstateenh, 4116 write_hstateenh_1_3, 4117 .min_priv_ver = PRIV_VERSION_1_12_0 }, 4118 [CSR_SSTATEEN0] = { "sstateen0", sstateen, read_sstateen, write_sstateen0, 4119 .min_priv_ver = PRIV_VERSION_1_12_0 }, 4120 [CSR_SSTATEEN1] = { "sstateen1", sstateen, read_sstateen, 4121 write_sstateen_1_3, 4122 .min_priv_ver = PRIV_VERSION_1_12_0 }, 4123 [CSR_SSTATEEN2] = { "sstateen2", sstateen, read_sstateen, 4124 write_sstateen_1_3, 4125 .min_priv_ver = PRIV_VERSION_1_12_0 }, 4126 [CSR_SSTATEEN3] = { "sstateen3", sstateen, read_sstateen, 4127 write_sstateen_1_3, 4128 .min_priv_ver = PRIV_VERSION_1_12_0 }, 4129 4130 /* Supervisor Trap Setup */ 4131 [CSR_SSTATUS] = { "sstatus", smode, read_sstatus, write_sstatus, 4132 NULL, read_sstatus_i128 }, 4133 [CSR_SIE] = { "sie", smode, NULL, NULL, rmw_sie }, 4134 [CSR_STVEC] = { "stvec", smode, read_stvec, write_stvec }, 4135 [CSR_SCOUNTEREN] = { "scounteren", smode, read_scounteren, 4136 write_scounteren }, 4137 4138 /* Supervisor Trap Handling */ 4139 [CSR_SSCRATCH] = { "sscratch", smode, read_sscratch, write_sscratch, 4140 NULL, read_sscratch_i128, write_sscratch_i128 }, 4141 [CSR_SEPC] = { "sepc", smode, read_sepc, write_sepc }, 4142 [CSR_SCAUSE] = { "scause", smode, read_scause, write_scause }, 4143 [CSR_STVAL] = { "stval", smode, read_stval, write_stval }, 4144 [CSR_SIP] = { "sip", smode, NULL, NULL, rmw_sip }, 4145 
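/* Supervisor and virtual-supervisor timer compare (Sstc extension) */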
[CSR_STIMECMP] = { "stimecmp", sstc, read_stimecmp, write_stimecmp, 4146 .min_priv_ver = PRIV_VERSION_1_12_0 }, 4147 [CSR_STIMECMPH] = { "stimecmph", sstc_32, read_stimecmph, write_stimecmph, 4148 .min_priv_ver = PRIV_VERSION_1_12_0 }, 4149 [CSR_VSTIMECMP] = { "vstimecmp", sstc, read_vstimecmp, 4150 write_vstimecmp, 4151 .min_priv_ver = PRIV_VERSION_1_12_0 }, 4152 [CSR_VSTIMECMPH] = { "vstimecmph", sstc_32, read_vstimecmph, 4153 write_vstimecmph, 4154 .min_priv_ver = PRIV_VERSION_1_12_0 }, 4155 4156 /* Supervisor Protection and Translation */ 4157 [CSR_SATP] = { "satp", smode, read_satp, write_satp }, 4158 4159 /* Supervisor-Level Window to Indirectly Accessed Registers (AIA) */ 4160 [CSR_SISELECT] = { "siselect", aia_smode, NULL, NULL, rmw_xiselect }, 4161 [CSR_SIREG] = { "sireg", aia_smode, NULL, NULL, rmw_xireg }, 4162 4163 /* Supervisor-Level Interrupts (AIA) */ 4164 [CSR_STOPEI] = { "stopei", aia_smode, NULL, NULL, rmw_xtopei }, 4165 [CSR_STOPI] = { "stopi", aia_smode, read_stopi }, 4166 4167 /* Supervisor-Level High-Half CSRs (AIA) */ 4168 [CSR_SIEH] = { "sieh", aia_smode32, NULL, NULL, rmw_sieh }, 4169 [CSR_SIPH] = { "siph", aia_smode32, NULL, NULL, rmw_siph }, 4170 4171 [CSR_HSTATUS] = { "hstatus", hmode, read_hstatus, write_hstatus, 4172 .min_priv_ver = PRIV_VERSION_1_12_0 }, 4173 [CSR_HEDELEG] = { "hedeleg", hmode, read_hedeleg, write_hedeleg, 4174 .min_priv_ver = PRIV_VERSION_1_12_0 }, 4175 [CSR_HIDELEG] = { "hideleg", hmode, NULL, NULL, rmw_hideleg, 4176 .min_priv_ver = PRIV_VERSION_1_12_0 }, 4177 [CSR_HVIP] = { "hvip", hmode, NULL, NULL, rmw_hvip, 4178 .min_priv_ver = PRIV_VERSION_1_12_0 }, 4179 [CSR_HIP] = { "hip", hmode, NULL, NULL, rmw_hip, 4180 .min_priv_ver = PRIV_VERSION_1_12_0 }, 4181 [CSR_HIE] = { "hie", hmode, NULL, NULL, rmw_hie, 4182 .min_priv_ver = PRIV_VERSION_1_12_0 }, 4183 [CSR_HCOUNTEREN] = { "hcounteren", hmode, read_hcounteren, 4184 write_hcounteren, 4185 .min_priv_ver = PRIV_VERSION_1_12_0 }, 4186 [CSR_HGEIE] = { "hgeie", hmode, read_hgeie, write_hgeie, 4187 .min_priv_ver = PRIV_VERSION_1_12_0 }, 4188 [CSR_HTVAL] = { "htval", hmode, read_htval, write_htval, 4189 .min_priv_ver = PRIV_VERSION_1_12_0 }, 4190 [CSR_HTINST] = { "htinst", hmode, read_htinst, write_htinst, 4191 .min_priv_ver = PRIV_VERSION_1_12_0 }, 4192 [CSR_HGEIP] = { "hgeip", hmode, read_hgeip, 4193 .min_priv_ver = PRIV_VERSION_1_12_0 }, 4194 [CSR_HGATP] = { "hgatp", hmode, read_hgatp, write_hgatp, 4195 .min_priv_ver = PRIV_VERSION_1_12_0 }, 4196 [CSR_HTIMEDELTA] = { "htimedelta", hmode, read_htimedelta, 4197 write_htimedelta, 4198 .min_priv_ver = PRIV_VERSION_1_12_0 }, 4199 [CSR_HTIMEDELTAH] = { "htimedeltah", hmode32, read_htimedeltah, 4200 write_htimedeltah, 4201 .min_priv_ver = PRIV_VERSION_1_12_0 }, 4202 4203 [CSR_VSSTATUS] = { "vsstatus", hmode, read_vsstatus, 4204 write_vsstatus, 4205 .min_priv_ver = PRIV_VERSION_1_12_0 }, 4206 [CSR_VSIP] = { "vsip", hmode, NULL, NULL, rmw_vsip, 4207 .min_priv_ver = PRIV_VERSION_1_12_0 }, 4208 [CSR_VSIE] = { "vsie", hmode, NULL, NULL, rmw_vsie , 4209 .min_priv_ver = PRIV_VERSION_1_12_0 }, 4210 [CSR_VSTVEC] = { "vstvec", hmode, read_vstvec, write_vstvec, 4211 .min_priv_ver = PRIV_VERSION_1_12_0 }, 4212 [CSR_VSSCRATCH] = { "vsscratch", hmode, read_vsscratch, 4213 write_vsscratch, 4214 .min_priv_ver = PRIV_VERSION_1_12_0 }, 4215 [CSR_VSEPC] = { "vsepc", hmode, read_vsepc, write_vsepc, 4216 .min_priv_ver = PRIV_VERSION_1_12_0 }, 4217 [CSR_VSCAUSE] = { "vscause", hmode, read_vscause, write_vscause, 4218 .min_priv_ver = PRIV_VERSION_1_12_0 }, 4219 [CSR_VSTVAL] = 

    [CSR_MTVAL2] = { "mtval2", hmode, read_mtval2, write_mtval2,
                     .min_priv_ver = PRIV_VERSION_1_12_0 },
    [CSR_MTINST] = { "mtinst", hmode, read_mtinst, write_mtinst,
                     .min_priv_ver = PRIV_VERSION_1_12_0 },

    /* Virtual Interrupts and Interrupt Priorities (H-extension with AIA) */
    [CSR_HVIEN] = { "hvien", aia_hmode, read_zero, write_ignore },
    [CSR_HVICTL] = { "hvictl", aia_hmode, read_hvictl, write_hvictl },
    [CSR_HVIPRIO1] = { "hviprio1", aia_hmode, read_hviprio1, write_hviprio1 },
    [CSR_HVIPRIO2] = { "hviprio2", aia_hmode, read_hviprio2, write_hviprio2 },

    /*
     * VS-Level Window to Indirectly Accessed Registers (H-extension with AIA)
     */
    [CSR_VSISELECT] = { "vsiselect", aia_hmode, NULL, NULL, rmw_xiselect },
    [CSR_VSIREG] = { "vsireg", aia_hmode, NULL, NULL, rmw_xireg },

    /* VS-Level Interrupts (H-extension with AIA) */
    [CSR_VSTOPEI] = { "vstopei", aia_hmode, NULL, NULL, rmw_xtopei },
    [CSR_VSTOPI] = { "vstopi", aia_hmode, read_vstopi },

    /* Hypervisor and VS-Level High-Half CSRs (H-extension with AIA) */
    [CSR_HIDELEGH] = { "hidelegh", aia_hmode32, NULL, NULL, rmw_hidelegh },
    [CSR_HVIENH] = { "hvienh", aia_hmode32, read_zero, write_ignore },
    [CSR_HVIPH] = { "hviph", aia_hmode32, NULL, NULL, rmw_hviph },
    [CSR_HVIPRIO1H] = { "hviprio1h", aia_hmode32, read_hviprio1h,
                        write_hviprio1h },
    [CSR_HVIPRIO2H] = { "hviprio2h", aia_hmode32, read_hviprio2h,
                        write_hviprio2h },
    [CSR_VSIEH] = { "vsieh", aia_hmode32, NULL, NULL, rmw_vsieh },
    [CSR_VSIPH] = { "vsiph", aia_hmode32, NULL, NULL, rmw_vsiph },

    /* Physical Memory Protection */
    [CSR_MSECCFG] = { "mseccfg", epmp, read_mseccfg, write_mseccfg,
                      .min_priv_ver = PRIV_VERSION_1_11_0 },
    [CSR_PMPCFG0] = { "pmpcfg0", pmp, read_pmpcfg, write_pmpcfg },
    [CSR_PMPCFG1] = { "pmpcfg1", pmp, read_pmpcfg, write_pmpcfg },
    [CSR_PMPCFG2] = { "pmpcfg2", pmp, read_pmpcfg, write_pmpcfg },
    [CSR_PMPCFG3] = { "pmpcfg3", pmp, read_pmpcfg, write_pmpcfg },
    [CSR_PMPADDR0] = { "pmpaddr0", pmp, read_pmpaddr, write_pmpaddr },
    [CSR_PMPADDR1] = { "pmpaddr1", pmp, read_pmpaddr, write_pmpaddr },
    [CSR_PMPADDR2] = { "pmpaddr2", pmp, read_pmpaddr, write_pmpaddr },
    [CSR_PMPADDR3] = { "pmpaddr3", pmp, read_pmpaddr, write_pmpaddr },
    [CSR_PMPADDR4] = { "pmpaddr4", pmp, read_pmpaddr, write_pmpaddr },
    [CSR_PMPADDR5] = { "pmpaddr5", pmp, read_pmpaddr, write_pmpaddr },
    [CSR_PMPADDR6] = { "pmpaddr6", pmp, read_pmpaddr, write_pmpaddr },
    [CSR_PMPADDR7] = { "pmpaddr7", pmp, read_pmpaddr, write_pmpaddr },
    [CSR_PMPADDR8] = { "pmpaddr8", pmp, read_pmpaddr, write_pmpaddr },
    [CSR_PMPADDR9] = { "pmpaddr9", pmp, read_pmpaddr, write_pmpaddr },
    [CSR_PMPADDR10] = { "pmpaddr10", pmp, read_pmpaddr, write_pmpaddr },
    [CSR_PMPADDR11] = { "pmpaddr11", pmp, read_pmpaddr, write_pmpaddr },
    [CSR_PMPADDR12] = { "pmpaddr12", pmp, read_pmpaddr, write_pmpaddr },
    [CSR_PMPADDR13] = { "pmpaddr13", pmp, read_pmpaddr, write_pmpaddr },
    [CSR_PMPADDR14] = { "pmpaddr14", pmp, read_pmpaddr, write_pmpaddr },
    [CSR_PMPADDR15] = { "pmpaddr15", pmp, read_pmpaddr, write_pmpaddr },
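
    /*
     * Note: the pmpcfg and pmpaddr entries above all share the same helpers;
     * read_pmpcfg, write_pmpcfg, read_pmpaddr and write_pmpaddr derive the
     * register index from the CSR number they are handed, so no per-register
     * callbacks are required.
     */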

    /* Debug CSRs */
    [CSR_TSELECT] = { "tselect", debug, read_tselect, write_tselect },
    [CSR_TDATA1] = { "tdata1", debug, read_tdata, write_tdata },
    [CSR_TDATA2] = { "tdata2", debug, read_tdata, write_tdata },
    [CSR_TDATA3] = { "tdata3", debug, read_tdata, write_tdata },
    [CSR_TINFO] = { "tinfo", debug, read_tinfo, write_ignore },

    /* User Pointer Masking */
    [CSR_UMTE] = { "umte", pointer_masking, read_umte, write_umte },
    [CSR_UPMMASK] = { "upmmask", pointer_masking, read_upmmask,
                      write_upmmask },
    [CSR_UPMBASE] = { "upmbase", pointer_masking, read_upmbase,
                      write_upmbase },
    /* Machine Pointer Masking */
    [CSR_MMTE] = { "mmte", pointer_masking, read_mmte, write_mmte },
    [CSR_MPMMASK] = { "mpmmask", pointer_masking, read_mpmmask,
                      write_mpmmask },
    [CSR_MPMBASE] = { "mpmbase", pointer_masking, read_mpmbase,
                      write_mpmbase },
    /* Supervisor Pointer Masking */
    [CSR_SMTE] = { "smte", pointer_masking, read_smte, write_smte },
    [CSR_SPMMASK] = { "spmmask", pointer_masking, read_spmmask,
                      write_spmmask },
    [CSR_SPMBASE] = { "spmbase", pointer_masking, read_spmbase,
                      write_spmbase },

    /* Performance Counters */
    [CSR_HPMCOUNTER3] = { "hpmcounter3", ctr, read_hpmcounter },
    [CSR_HPMCOUNTER4] = { "hpmcounter4", ctr, read_hpmcounter },
    [CSR_HPMCOUNTER5] = { "hpmcounter5", ctr, read_hpmcounter },
    [CSR_HPMCOUNTER6] = { "hpmcounter6", ctr, read_hpmcounter },
    [CSR_HPMCOUNTER7] = { "hpmcounter7", ctr, read_hpmcounter },
    [CSR_HPMCOUNTER8] = { "hpmcounter8", ctr, read_hpmcounter },
    [CSR_HPMCOUNTER9] = { "hpmcounter9", ctr, read_hpmcounter },
    [CSR_HPMCOUNTER10] = { "hpmcounter10", ctr, read_hpmcounter },
    [CSR_HPMCOUNTER11] = { "hpmcounter11", ctr, read_hpmcounter },
    [CSR_HPMCOUNTER12] = { "hpmcounter12", ctr, read_hpmcounter },
    [CSR_HPMCOUNTER13] = { "hpmcounter13", ctr, read_hpmcounter },
    [CSR_HPMCOUNTER14] = { "hpmcounter14", ctr, read_hpmcounter },
    [CSR_HPMCOUNTER15] = { "hpmcounter15", ctr, read_hpmcounter },
    [CSR_HPMCOUNTER16] = { "hpmcounter16", ctr, read_hpmcounter },
    [CSR_HPMCOUNTER17] = { "hpmcounter17", ctr, read_hpmcounter },
    [CSR_HPMCOUNTER18] = { "hpmcounter18", ctr, read_hpmcounter },
    [CSR_HPMCOUNTER19] = { "hpmcounter19", ctr, read_hpmcounter },
    [CSR_HPMCOUNTER20] = { "hpmcounter20", ctr, read_hpmcounter },
    [CSR_HPMCOUNTER21] = { "hpmcounter21", ctr, read_hpmcounter },
    [CSR_HPMCOUNTER22] = { "hpmcounter22", ctr, read_hpmcounter },
    [CSR_HPMCOUNTER23] = { "hpmcounter23", ctr, read_hpmcounter },
    [CSR_HPMCOUNTER24] = { "hpmcounter24", ctr, read_hpmcounter },
    [CSR_HPMCOUNTER25] = { "hpmcounter25", ctr, read_hpmcounter },
    [CSR_HPMCOUNTER26] = { "hpmcounter26", ctr, read_hpmcounter },
    [CSR_HPMCOUNTER27] = { "hpmcounter27", ctr, read_hpmcounter },
    [CSR_HPMCOUNTER28] = { "hpmcounter28", ctr, read_hpmcounter },
    [CSR_HPMCOUNTER29] = { "hpmcounter29", ctr, read_hpmcounter },
    [CSR_HPMCOUNTER30] = { "hpmcounter30", ctr, read_hpmcounter },
    [CSR_HPMCOUNTER31] = { "hpmcounter31", ctr, read_hpmcounter },
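
    /*
     * Note: the hpmcounterN entries above are the read-only, unprivileged
     * views of the programmable counters, gated by the ctr predicate; the
     * machine-level mhpmcounterN aliases below reuse the same
     * read_hpmcounter helper but add write_mhpmcounter and are gated by
     * mctr instead.
     */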

    [CSR_MHPMCOUNTER3] = { "mhpmcounter3", mctr, read_hpmcounter,
                           write_mhpmcounter },
    [CSR_MHPMCOUNTER4] = { "mhpmcounter4", mctr, read_hpmcounter,
                           write_mhpmcounter },
    [CSR_MHPMCOUNTER5] = { "mhpmcounter5", mctr, read_hpmcounter,
                           write_mhpmcounter },
    [CSR_MHPMCOUNTER6] = { "mhpmcounter6", mctr, read_hpmcounter,
                           write_mhpmcounter },
    [CSR_MHPMCOUNTER7] = { "mhpmcounter7", mctr, read_hpmcounter,
                           write_mhpmcounter },
    [CSR_MHPMCOUNTER8] = { "mhpmcounter8", mctr, read_hpmcounter,
                           write_mhpmcounter },
    [CSR_MHPMCOUNTER9] = { "mhpmcounter9", mctr, read_hpmcounter,
                           write_mhpmcounter },
    [CSR_MHPMCOUNTER10] = { "mhpmcounter10", mctr, read_hpmcounter,
                            write_mhpmcounter },
    [CSR_MHPMCOUNTER11] = { "mhpmcounter11", mctr, read_hpmcounter,
                            write_mhpmcounter },
    [CSR_MHPMCOUNTER12] = { "mhpmcounter12", mctr, read_hpmcounter,
                            write_mhpmcounter },
    [CSR_MHPMCOUNTER13] = { "mhpmcounter13", mctr, read_hpmcounter,
                            write_mhpmcounter },
    [CSR_MHPMCOUNTER14] = { "mhpmcounter14", mctr, read_hpmcounter,
                            write_mhpmcounter },
    [CSR_MHPMCOUNTER15] = { "mhpmcounter15", mctr, read_hpmcounter,
                            write_mhpmcounter },
    [CSR_MHPMCOUNTER16] = { "mhpmcounter16", mctr, read_hpmcounter,
                            write_mhpmcounter },
    [CSR_MHPMCOUNTER17] = { "mhpmcounter17", mctr, read_hpmcounter,
                            write_mhpmcounter },
    [CSR_MHPMCOUNTER18] = { "mhpmcounter18", mctr, read_hpmcounter,
                            write_mhpmcounter },
    [CSR_MHPMCOUNTER19] = { "mhpmcounter19", mctr, read_hpmcounter,
                            write_mhpmcounter },
    [CSR_MHPMCOUNTER20] = { "mhpmcounter20", mctr, read_hpmcounter,
                            write_mhpmcounter },
    [CSR_MHPMCOUNTER21] = { "mhpmcounter21", mctr, read_hpmcounter,
                            write_mhpmcounter },
    [CSR_MHPMCOUNTER22] = { "mhpmcounter22", mctr, read_hpmcounter,
                            write_mhpmcounter },
    [CSR_MHPMCOUNTER23] = { "mhpmcounter23", mctr, read_hpmcounter,
                            write_mhpmcounter },
    [CSR_MHPMCOUNTER24] = { "mhpmcounter24", mctr, read_hpmcounter,
                            write_mhpmcounter },
    [CSR_MHPMCOUNTER25] = { "mhpmcounter25", mctr, read_hpmcounter,
                            write_mhpmcounter },
    [CSR_MHPMCOUNTER26] = { "mhpmcounter26", mctr, read_hpmcounter,
                            write_mhpmcounter },
    [CSR_MHPMCOUNTER27] = { "mhpmcounter27", mctr, read_hpmcounter,
                            write_mhpmcounter },
    [CSR_MHPMCOUNTER28] = { "mhpmcounter28", mctr, read_hpmcounter,
                            write_mhpmcounter },
    [CSR_MHPMCOUNTER29] = { "mhpmcounter29", mctr, read_hpmcounter,
                            write_mhpmcounter },
    [CSR_MHPMCOUNTER30] = { "mhpmcounter30", mctr, read_hpmcounter,
                            write_mhpmcounter },
    [CSR_MHPMCOUNTER31] = { "mhpmcounter31", mctr, read_hpmcounter,
                            write_mhpmcounter },

    [CSR_MCOUNTINHIBIT] = { "mcountinhibit", any, read_mcountinhibit,
                            write_mcountinhibit,
                            .min_priv_ver = PRIV_VERSION_1_11_0 },

    [CSR_MHPMEVENT3] = { "mhpmevent3", any, read_mhpmevent, write_mhpmevent },
    [CSR_MHPMEVENT4] = { "mhpmevent4", any, read_mhpmevent, write_mhpmevent },
    [CSR_MHPMEVENT5] = { "mhpmevent5", any, read_mhpmevent, write_mhpmevent },
    [CSR_MHPMEVENT6] = { "mhpmevent6", any, read_mhpmevent, write_mhpmevent },
    [CSR_MHPMEVENT7] = { "mhpmevent7", any, read_mhpmevent, write_mhpmevent },
    [CSR_MHPMEVENT8] = { "mhpmevent8", any, read_mhpmevent, write_mhpmevent },
    [CSR_MHPMEVENT9] = { "mhpmevent9", any, read_mhpmevent, write_mhpmevent },
    [CSR_MHPMEVENT10] = { "mhpmevent10", any, read_mhpmevent,
                          write_mhpmevent },
    [CSR_MHPMEVENT11] = { "mhpmevent11", any, read_mhpmevent,
                          write_mhpmevent },
    [CSR_MHPMEVENT12] = { "mhpmevent12", any, read_mhpmevent,
                          write_mhpmevent },
    [CSR_MHPMEVENT13] = { "mhpmevent13", any, read_mhpmevent,
                          write_mhpmevent },
    [CSR_MHPMEVENT14] = { "mhpmevent14", any, read_mhpmevent,
                          write_mhpmevent },
    [CSR_MHPMEVENT15] = { "mhpmevent15", any, read_mhpmevent,
                          write_mhpmevent },
    [CSR_MHPMEVENT16] = { "mhpmevent16", any, read_mhpmevent,
                          write_mhpmevent },
    [CSR_MHPMEVENT17] = { "mhpmevent17", any, read_mhpmevent,
                          write_mhpmevent },
    [CSR_MHPMEVENT18] = { "mhpmevent18", any, read_mhpmevent,
                          write_mhpmevent },
    [CSR_MHPMEVENT19] = { "mhpmevent19", any, read_mhpmevent,
                          write_mhpmevent },
    [CSR_MHPMEVENT20] = { "mhpmevent20", any, read_mhpmevent,
                          write_mhpmevent },
    [CSR_MHPMEVENT21] = { "mhpmevent21", any, read_mhpmevent,
                          write_mhpmevent },
    [CSR_MHPMEVENT22] = { "mhpmevent22", any, read_mhpmevent,
                          write_mhpmevent },
    [CSR_MHPMEVENT23] = { "mhpmevent23", any, read_mhpmevent,
                          write_mhpmevent },
    [CSR_MHPMEVENT24] = { "mhpmevent24", any, read_mhpmevent,
                          write_mhpmevent },
    [CSR_MHPMEVENT25] = { "mhpmevent25", any, read_mhpmevent,
                          write_mhpmevent },
    [CSR_MHPMEVENT26] = { "mhpmevent26", any, read_mhpmevent,
                          write_mhpmevent },
    [CSR_MHPMEVENT27] = { "mhpmevent27", any, read_mhpmevent,
                          write_mhpmevent },
    [CSR_MHPMEVENT28] = { "mhpmevent28", any, read_mhpmevent,
                          write_mhpmevent },
    [CSR_MHPMEVENT29] = { "mhpmevent29", any, read_mhpmevent,
                          write_mhpmevent },
    [CSR_MHPMEVENT30] = { "mhpmevent30", any, read_mhpmevent,
                          write_mhpmevent },
    [CSR_MHPMEVENT31] = { "mhpmevent31", any, read_mhpmevent,
                          write_mhpmevent },

    [CSR_MHPMEVENT3H] = { "mhpmevent3h", sscofpmf, read_mhpmeventh,
                          write_mhpmeventh,
                          .min_priv_ver = PRIV_VERSION_1_12_0 },
    [CSR_MHPMEVENT4H] = { "mhpmevent4h", sscofpmf, read_mhpmeventh,
                          write_mhpmeventh,
                          .min_priv_ver = PRIV_VERSION_1_12_0 },
    [CSR_MHPMEVENT5H] = { "mhpmevent5h", sscofpmf, read_mhpmeventh,
                          write_mhpmeventh,
                          .min_priv_ver = PRIV_VERSION_1_12_0 },
    [CSR_MHPMEVENT6H] = { "mhpmevent6h", sscofpmf, read_mhpmeventh,
                          write_mhpmeventh,
                          .min_priv_ver = PRIV_VERSION_1_12_0 },
    [CSR_MHPMEVENT7H] = { "mhpmevent7h", sscofpmf, read_mhpmeventh,
                          write_mhpmeventh,
                          .min_priv_ver = PRIV_VERSION_1_12_0 },
    [CSR_MHPMEVENT8H] = { "mhpmevent8h", sscofpmf, read_mhpmeventh,
                          write_mhpmeventh,
                          .min_priv_ver = PRIV_VERSION_1_12_0 },
    [CSR_MHPMEVENT9H] = { "mhpmevent9h", sscofpmf, read_mhpmeventh,
                          write_mhpmeventh,
                          .min_priv_ver = PRIV_VERSION_1_12_0 },
    [CSR_MHPMEVENT10H] = { "mhpmevent10h", sscofpmf, read_mhpmeventh,
                           write_mhpmeventh,
                           .min_priv_ver = PRIV_VERSION_1_12_0 },
    [CSR_MHPMEVENT11H] = { "mhpmevent11h", sscofpmf, read_mhpmeventh,
                           write_mhpmeventh,
                           .min_priv_ver = PRIV_VERSION_1_12_0 },
    [CSR_MHPMEVENT12H] = { "mhpmevent12h", sscofpmf, read_mhpmeventh,
                           write_mhpmeventh,
                           .min_priv_ver = PRIV_VERSION_1_12_0 },
    [CSR_MHPMEVENT13H] = { "mhpmevent13h", sscofpmf, read_mhpmeventh,
                           write_mhpmeventh,
                           .min_priv_ver = PRIV_VERSION_1_12_0 },
    [CSR_MHPMEVENT14H] = { "mhpmevent14h", sscofpmf, read_mhpmeventh,
                           write_mhpmeventh,
                           .min_priv_ver = PRIV_VERSION_1_12_0 },
    [CSR_MHPMEVENT15H] = { "mhpmevent15h", sscofpmf, read_mhpmeventh,
                           write_mhpmeventh,
                           .min_priv_ver = PRIV_VERSION_1_12_0 },
    [CSR_MHPMEVENT16H] = { "mhpmevent16h", sscofpmf, read_mhpmeventh,
                           write_mhpmeventh,
                           .min_priv_ver = PRIV_VERSION_1_12_0 },
    [CSR_MHPMEVENT17H] = { "mhpmevent17h", sscofpmf, read_mhpmeventh,
                           write_mhpmeventh,
                           .min_priv_ver = PRIV_VERSION_1_12_0 },
    [CSR_MHPMEVENT18H] = { "mhpmevent18h", sscofpmf, read_mhpmeventh,
                           write_mhpmeventh,
                           .min_priv_ver = PRIV_VERSION_1_12_0 },
    [CSR_MHPMEVENT19H] = { "mhpmevent19h", sscofpmf, read_mhpmeventh,
                           write_mhpmeventh,
                           .min_priv_ver = PRIV_VERSION_1_12_0 },
    [CSR_MHPMEVENT20H] = { "mhpmevent20h", sscofpmf, read_mhpmeventh,
                           write_mhpmeventh,
                           .min_priv_ver = PRIV_VERSION_1_12_0 },
    [CSR_MHPMEVENT21H] = { "mhpmevent21h", sscofpmf, read_mhpmeventh,
                           write_mhpmeventh,
                           .min_priv_ver = PRIV_VERSION_1_12_0 },
    [CSR_MHPMEVENT22H] = { "mhpmevent22h", sscofpmf, read_mhpmeventh,
                           write_mhpmeventh,
                           .min_priv_ver = PRIV_VERSION_1_12_0 },
    [CSR_MHPMEVENT23H] = { "mhpmevent23h", sscofpmf, read_mhpmeventh,
                           write_mhpmeventh,
                           .min_priv_ver = PRIV_VERSION_1_12_0 },
    [CSR_MHPMEVENT24H] = { "mhpmevent24h", sscofpmf, read_mhpmeventh,
                           write_mhpmeventh,
                           .min_priv_ver = PRIV_VERSION_1_12_0 },
    [CSR_MHPMEVENT25H] = { "mhpmevent25h", sscofpmf, read_mhpmeventh,
                           write_mhpmeventh,
                           .min_priv_ver = PRIV_VERSION_1_12_0 },
    [CSR_MHPMEVENT26H] = { "mhpmevent26h", sscofpmf, read_mhpmeventh,
                           write_mhpmeventh,
                           .min_priv_ver = PRIV_VERSION_1_12_0 },
    [CSR_MHPMEVENT27H] = { "mhpmevent27h", sscofpmf, read_mhpmeventh,
                           write_mhpmeventh,
                           .min_priv_ver = PRIV_VERSION_1_12_0 },
    [CSR_MHPMEVENT28H] = { "mhpmevent28h", sscofpmf, read_mhpmeventh,
                           write_mhpmeventh,
                           .min_priv_ver = PRIV_VERSION_1_12_0 },
    [CSR_MHPMEVENT29H] = { "mhpmevent29h", sscofpmf, read_mhpmeventh,
                           write_mhpmeventh,
                           .min_priv_ver = PRIV_VERSION_1_12_0 },
    [CSR_MHPMEVENT30H] = { "mhpmevent30h", sscofpmf, read_mhpmeventh,
                           write_mhpmeventh,
                           .min_priv_ver = PRIV_VERSION_1_12_0 },
    [CSR_MHPMEVENT31H] = { "mhpmevent31h", sscofpmf, read_mhpmeventh,
                           write_mhpmeventh,
                           .min_priv_ver = PRIV_VERSION_1_12_0 },

    [CSR_HPMCOUNTER3H] = { "hpmcounter3h", ctr32, read_hpmcounterh },
    [CSR_HPMCOUNTER4H] = { "hpmcounter4h", ctr32, read_hpmcounterh },
    [CSR_HPMCOUNTER5H] = { "hpmcounter5h", ctr32, read_hpmcounterh },
    [CSR_HPMCOUNTER6H] = { "hpmcounter6h", ctr32, read_hpmcounterh },
    [CSR_HPMCOUNTER7H] = { "hpmcounter7h", ctr32, read_hpmcounterh },
    [CSR_HPMCOUNTER8H] = { "hpmcounter8h", ctr32, read_hpmcounterh },
    [CSR_HPMCOUNTER9H] = { "hpmcounter9h", ctr32, read_hpmcounterh },
    [CSR_HPMCOUNTER10H] = { "hpmcounter10h", ctr32, read_hpmcounterh },
    [CSR_HPMCOUNTER11H] = { "hpmcounter11h", ctr32, read_hpmcounterh },
    [CSR_HPMCOUNTER12H] = { "hpmcounter12h", ctr32, read_hpmcounterh },
    [CSR_HPMCOUNTER13H] = { "hpmcounter13h", ctr32, read_hpmcounterh },
    [CSR_HPMCOUNTER14H] = { "hpmcounter14h", ctr32, read_hpmcounterh },
    [CSR_HPMCOUNTER15H] = { "hpmcounter15h", ctr32, read_hpmcounterh },
    [CSR_HPMCOUNTER16H] = { "hpmcounter16h", ctr32, read_hpmcounterh },
    [CSR_HPMCOUNTER17H] = { "hpmcounter17h", ctr32, read_hpmcounterh },
    [CSR_HPMCOUNTER18H] = { "hpmcounter18h", ctr32, read_hpmcounterh },
    [CSR_HPMCOUNTER19H] = { "hpmcounter19h", ctr32, read_hpmcounterh },
    [CSR_HPMCOUNTER20H] = { "hpmcounter20h", ctr32, read_hpmcounterh },
    [CSR_HPMCOUNTER21H] = { "hpmcounter21h", ctr32, read_hpmcounterh },
    [CSR_HPMCOUNTER22H] = { "hpmcounter22h", ctr32, read_hpmcounterh },
    [CSR_HPMCOUNTER23H] = { "hpmcounter23h", ctr32, read_hpmcounterh },
    [CSR_HPMCOUNTER24H] = { "hpmcounter24h", ctr32, read_hpmcounterh },
    [CSR_HPMCOUNTER25H] = { "hpmcounter25h", ctr32, read_hpmcounterh },
    [CSR_HPMCOUNTER26H] = { "hpmcounter26h", ctr32, read_hpmcounterh },
    [CSR_HPMCOUNTER27H] = { "hpmcounter27h", ctr32, read_hpmcounterh },
    [CSR_HPMCOUNTER28H] = { "hpmcounter28h", ctr32, read_hpmcounterh },
    [CSR_HPMCOUNTER29H] = { "hpmcounter29h", ctr32, read_hpmcounterh },
    [CSR_HPMCOUNTER30H] = { "hpmcounter30h", ctr32, read_hpmcounterh },
    [CSR_HPMCOUNTER31H] = { "hpmcounter31h", ctr32, read_hpmcounterh },

    [CSR_MHPMCOUNTER3H] = { "mhpmcounter3h", mctr32, read_hpmcounterh,
                            write_mhpmcounterh },
    [CSR_MHPMCOUNTER4H] = { "mhpmcounter4h", mctr32, read_hpmcounterh,
                            write_mhpmcounterh },
    [CSR_MHPMCOUNTER5H] = { "mhpmcounter5h", mctr32, read_hpmcounterh,
                            write_mhpmcounterh },
    [CSR_MHPMCOUNTER6H] = { "mhpmcounter6h", mctr32, read_hpmcounterh,
                            write_mhpmcounterh },
    [CSR_MHPMCOUNTER7H] = { "mhpmcounter7h", mctr32, read_hpmcounterh,
                            write_mhpmcounterh },
    [CSR_MHPMCOUNTER8H] = { "mhpmcounter8h", mctr32, read_hpmcounterh,
                            write_mhpmcounterh },
    [CSR_MHPMCOUNTER9H] = { "mhpmcounter9h", mctr32, read_hpmcounterh,
                            write_mhpmcounterh },
    [CSR_MHPMCOUNTER10H] = { "mhpmcounter10h", mctr32, read_hpmcounterh,
                             write_mhpmcounterh },
    [CSR_MHPMCOUNTER11H] = { "mhpmcounter11h", mctr32, read_hpmcounterh,
                             write_mhpmcounterh },
    [CSR_MHPMCOUNTER12H] = { "mhpmcounter12h", mctr32, read_hpmcounterh,
                             write_mhpmcounterh },
    [CSR_MHPMCOUNTER13H] = { "mhpmcounter13h", mctr32, read_hpmcounterh,
                             write_mhpmcounterh },
    [CSR_MHPMCOUNTER14H] = { "mhpmcounter14h", mctr32, read_hpmcounterh,
                             write_mhpmcounterh },
    [CSR_MHPMCOUNTER15H] = { "mhpmcounter15h", mctr32, read_hpmcounterh,
                             write_mhpmcounterh },
    [CSR_MHPMCOUNTER16H] = { "mhpmcounter16h", mctr32, read_hpmcounterh,
                             write_mhpmcounterh },
    [CSR_MHPMCOUNTER17H] = { "mhpmcounter17h", mctr32, read_hpmcounterh,
                             write_mhpmcounterh },
    [CSR_MHPMCOUNTER18H] = { "mhpmcounter18h", mctr32, read_hpmcounterh,
                             write_mhpmcounterh },
    [CSR_MHPMCOUNTER19H] = { "mhpmcounter19h", mctr32, read_hpmcounterh,
                             write_mhpmcounterh },
    [CSR_MHPMCOUNTER20H] = { "mhpmcounter20h", mctr32, read_hpmcounterh,
                             write_mhpmcounterh },
    [CSR_MHPMCOUNTER21H] = { "mhpmcounter21h", mctr32, read_hpmcounterh,
                             write_mhpmcounterh },
    [CSR_MHPMCOUNTER22H] = { "mhpmcounter22h", mctr32, read_hpmcounterh,
                             write_mhpmcounterh },
    [CSR_MHPMCOUNTER23H] = { "mhpmcounter23h", mctr32, read_hpmcounterh,
                             write_mhpmcounterh },
    [CSR_MHPMCOUNTER24H] = { "mhpmcounter24h", mctr32, read_hpmcounterh,
                             write_mhpmcounterh },
    [CSR_MHPMCOUNTER25H] = { "mhpmcounter25h", mctr32, read_hpmcounterh,
                             write_mhpmcounterh },
    [CSR_MHPMCOUNTER26H] = { "mhpmcounter26h", mctr32, read_hpmcounterh,
                             write_mhpmcounterh },
    [CSR_MHPMCOUNTER27H] = { "mhpmcounter27h", mctr32, read_hpmcounterh,
                             write_mhpmcounterh },
    [CSR_MHPMCOUNTER28H] = { "mhpmcounter28h", mctr32, read_hpmcounterh,
                             write_mhpmcounterh },
    [CSR_MHPMCOUNTER29H] = { "mhpmcounter29h", mctr32, read_hpmcounterh,
                             write_mhpmcounterh },
    [CSR_MHPMCOUNTER30H] = { "mhpmcounter30h", mctr32, read_hpmcounterh,
                             write_mhpmcounterh },
    [CSR_MHPMCOUNTER31H] = { "mhpmcounter31h", mctr32, read_hpmcounterh,
                             write_mhpmcounterh },
    [CSR_SCOUNTOVF] = { "scountovf", sscofpmf, read_scountovf,
                        .min_priv_ver = PRIV_VERSION_1_12_0 },

#endif /* !CONFIG_USER_ONLY */
};
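
/*
 * Layout reminder for the table above: each entry is
 * { name, predicate, read, write, rmw-op }, optionally followed by a
 * designated .min_priv_ver. Entries that leave read and write as NULL and
 * supply the fifth member are serviced entirely through the
 * read-modify-write callback.
 *
 * As an illustrative sketch only (my_read_time is a hypothetical handler,
 * not part of this file), out-of-tree code could intercept a CSR by
 * patching its entry through the public accessors:
 *
 *     riscv_csr_operations ops;
 *
 *     riscv_get_csr_ops(CSR_TIME, &ops);
 *     ops.read = my_read_time;
 *     riscv_set_csr_ops(CSR_TIME, &ops);
 */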