/*
 * RISC-V Control and Status Registers.
 *
 * Copyright (c) 2016-2017 Sagar Karandikar, sagark@eecs.berkeley.edu
 * Copyright (c) 2017-2018 SiFive, Inc.
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms and conditions of the GNU General Public License,
 * version 2 or later, as published by the Free Software Foundation.
 *
 * This program is distributed in the hope it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
 * more details.
 *
 * You should have received a copy of the GNU General Public License along with
 * this program. If not, see <http://www.gnu.org/licenses/>.
 */

#include "qemu/osdep.h"
#include "qemu/log.h"
#include "qemu/timer.h"
#include "cpu.h"
#include "pmu.h"
#include "time_helper.h"
#include "qemu/main-loop.h"
#include "exec/exec-all.h"
#include "sysemu/cpu-timers.h"
#include "qemu/guest-random.h"
#include "qapi/error.h"

/* CSR function table public API */
void riscv_get_csr_ops(int csrno, riscv_csr_operations *ops)
{
    *ops = csr_ops[csrno & (CSR_TABLE_SIZE - 1)];
}

void riscv_set_csr_ops(int csrno, riscv_csr_operations *ops)
{
    csr_ops[csrno & (CSR_TABLE_SIZE - 1)] = *ops;
}

/* Predicates */
static RISCVException fs(CPURISCVState *env, int csrno)
{
#if !defined(CONFIG_USER_ONLY)
    if (!env->debugger && !riscv_cpu_fp_enabled(env) &&
        !RISCV_CPU(env_cpu(env))->cfg.ext_zfinx) {
        return RISCV_EXCP_ILLEGAL_INST;
    }
#endif
    return RISCV_EXCP_NONE;
}

static RISCVException vs(CPURISCVState *env, int csrno)
{
    CPUState *cs = env_cpu(env);
    RISCVCPU *cpu = RISCV_CPU(cs);

    if (env->misa_ext & RVV ||
        cpu->cfg.ext_zve32f || cpu->cfg.ext_zve64f) {
#if !defined(CONFIG_USER_ONLY)
        if (!env->debugger && !riscv_cpu_vector_enabled(env)) {
            return RISCV_EXCP_ILLEGAL_INST;
        }
#endif
        return RISCV_EXCP_NONE;
    }
    return RISCV_EXCP_ILLEGAL_INST;
}

static RISCVException ctr(CPURISCVState *env, int csrno)
{
#if !defined(CONFIG_USER_ONLY)
    CPUState *cs = env_cpu(env);
    RISCVCPU *cpu = RISCV_CPU(cs);
    int ctr_index;
    int base_csrno = CSR_CYCLE;
    bool rv32 = riscv_cpu_mxl(env) == MXL_RV32 ? true : false;

    if (rv32 && csrno >= CSR_CYCLEH) {
        /* Offset for RV32 hpmcounternh counters */
        base_csrno += 0x80;
    }
    ctr_index = csrno - base_csrno;

    if ((csrno >= CSR_CYCLE && csrno <= CSR_INSTRET) ||
        (csrno >= CSR_CYCLEH && csrno <= CSR_INSTRETH)) {
        goto skip_ext_pmu_check;
    }

    if ((!cpu->cfg.pmu_num || !(cpu->pmu_avail_ctrs & BIT(ctr_index)))) {
        /* No counter is enabled in PMU or the counter is out of range */
        return RISCV_EXCP_ILLEGAL_INST;
    }

 skip_ext_pmu_check:

    if (env->priv == PRV_S) {
        switch (csrno) {
        case CSR_CYCLE:
            if (!get_field(env->mcounteren, COUNTEREN_CY)) {
                return RISCV_EXCP_ILLEGAL_INST;
            }
            break;
        case CSR_TIME:
            if (!get_field(env->mcounteren, COUNTEREN_TM)) {
                return RISCV_EXCP_ILLEGAL_INST;
            }
            break;
        case CSR_INSTRET:
            if (!get_field(env->mcounteren, COUNTEREN_IR)) {
                return RISCV_EXCP_ILLEGAL_INST;
            }
            break;
        case CSR_HPMCOUNTER3...CSR_HPMCOUNTER31:
            if (!get_field(env->mcounteren, 1 << ctr_index)) {
                return RISCV_EXCP_ILLEGAL_INST;
            }
            break;
        }
        if (rv32) {
            switch (csrno) {
            case CSR_CYCLEH:
                if (!get_field(env->mcounteren, COUNTEREN_CY)) {
                    return RISCV_EXCP_ILLEGAL_INST;
                }
                break;
            case CSR_TIMEH:
                if (!get_field(env->mcounteren, COUNTEREN_TM)) {
                    return RISCV_EXCP_ILLEGAL_INST;
                }
                break;
            case CSR_INSTRETH:
                if (!get_field(env->mcounteren, COUNTEREN_IR)) {
                    return RISCV_EXCP_ILLEGAL_INST;
                }
                break;
            case CSR_HPMCOUNTER3H...CSR_HPMCOUNTER31H:
                if (!get_field(env->mcounteren, 1 << ctr_index)) {
                    return RISCV_EXCP_ILLEGAL_INST;
                }
                break;
            }
        }
    }

    if (riscv_cpu_virt_enabled(env)) {
        switch (csrno) {
        case CSR_CYCLE:
            if (!get_field(env->hcounteren, COUNTEREN_CY) &&
                get_field(env->mcounteren, COUNTEREN_CY)) {
                return RISCV_EXCP_VIRT_INSTRUCTION_FAULT;
            }
            break;
        case CSR_TIME:
            if (!get_field(env->hcounteren, COUNTEREN_TM) &&
                get_field(env->mcounteren, COUNTEREN_TM)) {
                return RISCV_EXCP_VIRT_INSTRUCTION_FAULT;
            }
            break;
        case CSR_INSTRET:
            if (!get_field(env->hcounteren, COUNTEREN_IR) &&
                get_field(env->mcounteren, COUNTEREN_IR)) {
                return RISCV_EXCP_VIRT_INSTRUCTION_FAULT;
            }
            break;
        case CSR_HPMCOUNTER3...CSR_HPMCOUNTER31:
            if (!get_field(env->hcounteren, 1 << ctr_index) &&
                get_field(env->mcounteren, 1 << ctr_index)) {
                return RISCV_EXCP_VIRT_INSTRUCTION_FAULT;
            }
            break;
        }
        if (rv32) {
            switch (csrno) {
            case CSR_CYCLEH:
                if (!get_field(env->hcounteren, COUNTEREN_CY) &&
                    get_field(env->mcounteren, COUNTEREN_CY)) {
                    return RISCV_EXCP_VIRT_INSTRUCTION_FAULT;
                }
                break;
            case CSR_TIMEH:
                if (!get_field(env->hcounteren, COUNTEREN_TM) &&
                    get_field(env->mcounteren, COUNTEREN_TM)) {
                    return RISCV_EXCP_VIRT_INSTRUCTION_FAULT;
                }
                break;
            case CSR_INSTRETH:
                if (!get_field(env->hcounteren, COUNTEREN_IR) &&
                    get_field(env->mcounteren, COUNTEREN_IR)) {
                    return RISCV_EXCP_VIRT_INSTRUCTION_FAULT;
                }
                break;
            case CSR_HPMCOUNTER3H...CSR_HPMCOUNTER31H:
                if (!get_field(env->hcounteren, 1 << ctr_index) &&
                    get_field(env->mcounteren, 1 << ctr_index)) {
                    return RISCV_EXCP_VIRT_INSTRUCTION_FAULT;
                }
                break;
            }
        }
    }
#endif
    return RISCV_EXCP_NONE;
}

static RISCVException ctr32(CPURISCVState *env, int csrno)
{
    if (riscv_cpu_mxl(env) != MXL_RV32) {
        return RISCV_EXCP_ILLEGAL_INST;
    }

    return ctr(env, csrno);
}

#if !defined(CONFIG_USER_ONLY)
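/* Check that the PMU implements the machine HPM counter being accessed */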
static RISCVException mctr(CPURISCVState *env, int csrno)
{
    CPUState *cs = env_cpu(env);
    RISCVCPU *cpu = RISCV_CPU(cs);
    int ctr_index;
    int base_csrno = CSR_MHPMCOUNTER3;

    if ((riscv_cpu_mxl(env) == MXL_RV32) && csrno >= CSR_MCYCLEH) {
        /* Offset for RV32 mhpmcounternh counters */
        base_csrno += 0x80;
    }
    ctr_index = csrno - base_csrno;
    if (!cpu->cfg.pmu_num || ctr_index >= cpu->cfg.pmu_num) {
        /* The PMU is not enabled or counter is out of range */
        return RISCV_EXCP_ILLEGAL_INST;
    }

    return RISCV_EXCP_NONE;
}

static RISCVException mctr32(CPURISCVState *env, int csrno)
{
    if (riscv_cpu_mxl(env) != MXL_RV32) {
        return RISCV_EXCP_ILLEGAL_INST;
    }

    return mctr(env, csrno);
}

static RISCVException sscofpmf(CPURISCVState *env, int csrno)
{
    CPUState *cs = env_cpu(env);
    RISCVCPU *cpu = RISCV_CPU(cs);

    if (!cpu->cfg.ext_sscofpmf) {
        return RISCV_EXCP_ILLEGAL_INST;
    }

    return RISCV_EXCP_NONE;
}

static RISCVException any(CPURISCVState *env, int csrno)
{
    return RISCV_EXCP_NONE;
}

static RISCVException any32(CPURISCVState *env, int csrno)
{
    if (riscv_cpu_mxl(env) != MXL_RV32) {
        return RISCV_EXCP_ILLEGAL_INST;
    }

    return any(env, csrno);

}

static int aia_any(CPURISCVState *env, int csrno)
{
    RISCVCPU *cpu = env_archcpu(env);

    if (!cpu->cfg.ext_smaia) {
        return RISCV_EXCP_ILLEGAL_INST;
    }

    return any(env, csrno);
}

static int aia_any32(CPURISCVState *env, int csrno)
{
    RISCVCPU *cpu = env_archcpu(env);

    if (!cpu->cfg.ext_smaia) {
        return RISCV_EXCP_ILLEGAL_INST;
    }

    return any32(env, csrno);
}

static RISCVException smode(CPURISCVState *env, int csrno)
{
    if (riscv_has_ext(env, RVS)) {
        return RISCV_EXCP_NONE;
    }

    return RISCV_EXCP_ILLEGAL_INST;
}

static int smode32(CPURISCVState *env, int csrno)
{
    if (riscv_cpu_mxl(env) != MXL_RV32) {
        return RISCV_EXCP_ILLEGAL_INST;
    }

    return smode(env, csrno);
}

static int aia_smode(CPURISCVState *env, int csrno)
{
    RISCVCPU *cpu = env_archcpu(env);

    if (!cpu->cfg.ext_ssaia) {
        return RISCV_EXCP_ILLEGAL_INST;
    }

    return smode(env, csrno);
}

static int aia_smode32(CPURISCVState *env, int csrno)
{
    RISCVCPU *cpu = env_archcpu(env);

    if (!cpu->cfg.ext_ssaia) {
        return RISCV_EXCP_ILLEGAL_INST;
    }

    return smode32(env, csrno);
}

static RISCVException hmode(CPURISCVState *env, int csrno)
{
    if (riscv_has_ext(env, RVH)) {
        return RISCV_EXCP_NONE;
    }

    return RISCV_EXCP_ILLEGAL_INST;
}

static RISCVException hmode32(CPURISCVState *env, int csrno)
{
    if (riscv_cpu_mxl(env) != MXL_RV32) {
        return RISCV_EXCP_ILLEGAL_INST;
    }

    return hmode(env, csrno);

}

static RISCVException umode(CPURISCVState *env, int csrno)
{
    if (riscv_has_ext(env, RVU)) {
        return RISCV_EXCP_NONE;
    }

    return RISCV_EXCP_ILLEGAL_INST;
}

static RISCVException umode32(CPURISCVState *env, int csrno)
{
    if (riscv_cpu_mxl(env) != MXL_RV32) {
        return RISCV_EXCP_ILLEGAL_INST;
    }

    return umode(env, csrno);
}

/* Checks if PointerMasking registers could be accessed */
static RISCVException pointer_masking(CPURISCVState *env, int csrno)
{
    /* Check if j-ext is present */
    if (riscv_has_ext(env, RVJ)) {
        return RISCV_EXCP_NONE;
    }
    return RISCV_EXCP_ILLEGAL_INST;
}

static int aia_hmode(CPURISCVState *env, int csrno)
{
    RISCVCPU *cpu = env_archcpu(env);

    if (!cpu->cfg.ext_ssaia) {
        return RISCV_EXCP_ILLEGAL_INST;
    }

    return hmode(env, csrno);
}

static int aia_hmode32(CPURISCVState *env, int csrno)
{
    RISCVCPU *cpu = env_archcpu(env);

    if (!cpu->cfg.ext_ssaia) {
        return RISCV_EXCP_ILLEGAL_INST;
    }

    return hmode32(env, csrno);
}

static RISCVException pmp(CPURISCVState *env, int csrno)
{
    if (riscv_feature(env, RISCV_FEATURE_PMP)) {
        return RISCV_EXCP_NONE;
    }

    return RISCV_EXCP_ILLEGAL_INST;
}

static RISCVException epmp(CPURISCVState *env, int csrno)
{
    if (env->priv == PRV_M && riscv_feature(env, RISCV_FEATURE_EPMP)) {
        return RISCV_EXCP_NONE;
    }

    return RISCV_EXCP_ILLEGAL_INST;
}

static RISCVException debug(CPURISCVState *env, int csrno)
{
    if (riscv_feature(env, RISCV_FEATURE_DEBUG)) {
        return RISCV_EXCP_NONE;
    }

    return RISCV_EXCP_ILLEGAL_INST;
}
#endif

static RISCVException seed(CPURISCVState *env, int csrno)
{
    RISCVCPU *cpu = env_archcpu(env);

    if (!cpu->cfg.ext_zkr) {
        return RISCV_EXCP_ILLEGAL_INST;
    }

#if !defined(CONFIG_USER_ONLY)
    /*
     * With a CSR read-write instruction:
     * 1) The seed CSR is always available in machine mode as normal.
     * 2) Attempted access to seed from virtual modes VS and VU always raises
     * an exception(virtual instruction exception only if mseccfg.sseed=1).
     * 3) Without the corresponding access control bit set to 1, any attempted
     * access to seed from U, S or HS modes will raise an illegal instruction
     * exception.
     */
    if (env->priv == PRV_M) {
        return RISCV_EXCP_NONE;
    } else if (riscv_cpu_virt_enabled(env)) {
        if (env->mseccfg & MSECCFG_SSEED) {
            return RISCV_EXCP_VIRT_INSTRUCTION_FAULT;
        } else {
            return RISCV_EXCP_ILLEGAL_INST;
        }
    } else {
        if (env->priv == PRV_S && (env->mseccfg & MSECCFG_SSEED)) {
            return RISCV_EXCP_NONE;
        } else if (env->priv == PRV_U && (env->mseccfg & MSECCFG_USEED)) {
            return RISCV_EXCP_NONE;
        } else {
            return RISCV_EXCP_ILLEGAL_INST;
        }
    }
#else
    return RISCV_EXCP_NONE;
#endif
}

/* User Floating-Point CSRs */
static RISCVException read_fflags(CPURISCVState *env, int csrno,
                                  target_ulong *val)
{
    *val = riscv_cpu_get_fflags(env);
    return RISCV_EXCP_NONE;
}

static RISCVException write_fflags(CPURISCVState *env, int csrno,
                                   target_ulong val)
{
#if !defined(CONFIG_USER_ONLY)
    if (riscv_has_ext(env, RVF)) {
        env->mstatus |= MSTATUS_FS;
    }
#endif
    riscv_cpu_set_fflags(env, val & (FSR_AEXC >> FSR_AEXC_SHIFT));
    return RISCV_EXCP_NONE;
}

static RISCVException read_frm(CPURISCVState *env, int csrno,
                               target_ulong *val)
{
    *val = env->frm;
    return RISCV_EXCP_NONE;
}

static RISCVException write_frm(CPURISCVState *env, int csrno,
                                target_ulong val)
{
#if !defined(CONFIG_USER_ONLY)
    if (riscv_has_ext(env, RVF)) {
        env->mstatus |= MSTATUS_FS;
    }
#endif
    env->frm = val & (FSR_RD >> FSR_RD_SHIFT);
    return RISCV_EXCP_NONE;
}

static RISCVException read_fcsr(CPURISCVState *env, int csrno,
                                target_ulong *val)
{
    *val = (riscv_cpu_get_fflags(env) << FSR_AEXC_SHIFT)
           | (env->frm << FSR_RD_SHIFT);
    return RISCV_EXCP_NONE;
}

static RISCVException write_fcsr(CPURISCVState *env, int csrno,
                                 target_ulong val)
{
#if !defined(CONFIG_USER_ONLY)
    if (riscv_has_ext(env, RVF)) {
        env->mstatus |= MSTATUS_FS;
    }
#endif
    env->frm = (val & FSR_RD) >> FSR_RD_SHIFT;
    riscv_cpu_set_fflags(env, (val & FSR_AEXC) >> FSR_AEXC_SHIFT);
    return RISCV_EXCP_NONE;
}

static RISCVException read_vtype(CPURISCVState *env, int csrno,
                                 target_ulong *val)
{
    uint64_t vill;
    switch (env->xl) {
    case MXL_RV32:
        vill = (uint32_t)env->vill << 31;
        break;
    case MXL_RV64:
        vill = (uint64_t)env->vill << 63;
        break;
    default:
        g_assert_not_reached();
    }
    *val = (target_ulong)vill | env->vtype;
    return RISCV_EXCP_NONE;
}

static RISCVException read_vl(CPURISCVState *env, int csrno,
                              target_ulong *val)
{
    *val = env->vl;
    return RISCV_EXCP_NONE;
}

static int read_vlenb(CPURISCVState *env, int csrno, target_ulong *val)
{
    *val = env_archcpu(env)->cfg.vlen >> 3;
    return RISCV_EXCP_NONE;
}

static RISCVException read_vxrm(CPURISCVState *env, int csrno,
                                target_ulong *val)
{
    *val = env->vxrm;
    return RISCV_EXCP_NONE;
}

static RISCVException write_vxrm(CPURISCVState *env, int csrno,
                                 target_ulong val)
{
#if !defined(CONFIG_USER_ONLY)
    env->mstatus |= MSTATUS_VS;
#endif
    env->vxrm = val;
    return RISCV_EXCP_NONE;
}

static RISCVException read_vxsat(CPURISCVState *env, int csrno,
                                 target_ulong *val)
{
    *val = env->vxsat;
    return RISCV_EXCP_NONE;
}

static RISCVException write_vxsat(CPURISCVState *env, int csrno,
                                  target_ulong val)
{
#if !defined(CONFIG_USER_ONLY)
    env->mstatus |= MSTATUS_VS;
#endif
    env->vxsat = val;
    return RISCV_EXCP_NONE;
}

static RISCVException read_vstart(CPURISCVState *env, int csrno,
                                  target_ulong *val)
{
    *val = env->vstart;
    return RISCV_EXCP_NONE;
}

static RISCVException write_vstart(CPURISCVState *env, int csrno,
                                   target_ulong val)
{
#if !defined(CONFIG_USER_ONLY)
    env->mstatus |= MSTATUS_VS;
#endif
    /*
     * The vstart CSR is defined to have only enough writable bits
     * to hold the largest element index, i.e. lg2(VLEN) bits.
     */
    env->vstart = val & ~(~0ULL << ctzl(env_archcpu(env)->cfg.vlen));
    return RISCV_EXCP_NONE;
}

static int read_vcsr(CPURISCVState *env, int csrno, target_ulong *val)
{
    *val = (env->vxrm << VCSR_VXRM_SHIFT) | (env->vxsat << VCSR_VXSAT_SHIFT);
    return RISCV_EXCP_NONE;
}

static int write_vcsr(CPURISCVState *env, int csrno, target_ulong val)
{
#if !defined(CONFIG_USER_ONLY)
    env->mstatus |= MSTATUS_VS;
#endif
    env->vxrm = (val & VCSR_VXRM) >> VCSR_VXRM_SHIFT;
    env->vxsat = (val & VCSR_VXSAT) >> VCSR_VXSAT_SHIFT;
    return RISCV_EXCP_NONE;
}

/* User Timers and Counters */
static target_ulong get_ticks(bool shift)
{
    int64_t val;
    target_ulong result;

#if !defined(CONFIG_USER_ONLY)
    if (icount_enabled()) {
        val = icount_get();
    } else {
        val = cpu_get_host_ticks();
    }
#else
    val = cpu_get_host_ticks();
#endif

    if (shift) {
        result = val >> 32;
    } else {
        result = val;
    }

    return result;
}

#if defined(CONFIG_USER_ONLY)
static RISCVException read_time(CPURISCVState *env, int csrno,
                                target_ulong *val)
{
    *val = cpu_get_host_ticks();
    return RISCV_EXCP_NONE;
}

static RISCVException read_timeh(CPURISCVState *env, int csrno,
                                 target_ulong *val)
{
    *val = cpu_get_host_ticks() >> 32;
    return RISCV_EXCP_NONE;
}

static int read_hpmcounter(CPURISCVState *env, int csrno, target_ulong *val)
{
    *val = get_ticks(false);
    return RISCV_EXCP_NONE;
}

static int read_hpmcounterh(CPURISCVState *env, int csrno, target_ulong *val)
{
    *val = get_ticks(true);
    return RISCV_EXCP_NONE;
}

#else /* CONFIG_USER_ONLY */

static int read_mhpmevent(CPURISCVState *env, int csrno, target_ulong *val)
{
    int evt_index = csrno - CSR_MCOUNTINHIBIT;

    *val = env->mhpmevent_val[evt_index];

    return RISCV_EXCP_NONE;
}

static int write_mhpmevent(CPURISCVState *env, int csrno, target_ulong val)
{
    int evt_index = csrno - CSR_MCOUNTINHIBIT;
    uint64_t mhpmevt_val = val;

    env->mhpmevent_val[evt_index] = val;

    if (riscv_cpu_mxl(env) == MXL_RV32) {
        mhpmevt_val = mhpmevt_val |
                      ((uint64_t)env->mhpmeventh_val[evt_index] << 32);
    }
    riscv_pmu_update_event_map(env, mhpmevt_val, evt_index);

    return RISCV_EXCP_NONE;
}

static int read_mhpmeventh(CPURISCVState *env, int csrno, target_ulong *val)
{
    int evt_index = csrno - CSR_MHPMEVENT3H + 3;

    *val = env->mhpmeventh_val[evt_index];

    return RISCV_EXCP_NONE;
}

static int write_mhpmeventh(CPURISCVState *env, int csrno, target_ulong val)
{
    int evt_index = csrno - CSR_MHPMEVENT3H + 3;
    uint64_t mhpmevth_val = val;
    uint64_t mhpmevt_val = env->mhpmevent_val[evt_index];

    mhpmevt_val = mhpmevt_val | (mhpmevth_val << 32);
    env->mhpmeventh_val[evt_index] = val;

    riscv_pmu_update_event_map(env, mhpmevt_val, evt_index);

    return RISCV_EXCP_NONE;
}

static int write_mhpmcounter(CPURISCVState *env, int csrno, target_ulong val)
{
    int ctr_idx = csrno - CSR_MCYCLE;
    PMUCTRState *counter = &env->pmu_ctrs[ctr_idx];
    uint64_t mhpmctr_val = val;

    counter->mhpmcounter_val = val;
    if (riscv_pmu_ctr_monitor_cycles(env, ctr_idx) ||
        riscv_pmu_ctr_monitor_instructions(env, ctr_idx)) {
        counter->mhpmcounter_prev = get_ticks(false);
        if (ctr_idx > 2) {
            if (riscv_cpu_mxl(env) == MXL_RV32) {
                mhpmctr_val = mhpmctr_val |
                              ((uint64_t)counter->mhpmcounterh_val << 32);
            }
            riscv_pmu_setup_timer(env, mhpmctr_val, ctr_idx);
        }
    } else {
        /* Other counters can keep incrementing from the given value */
        counter->mhpmcounter_prev = val;
    }

    return RISCV_EXCP_NONE;
}

static int write_mhpmcounterh(CPURISCVState *env, int csrno, target_ulong val)
{
    int ctr_idx = csrno - CSR_MCYCLEH;
    PMUCTRState *counter = &env->pmu_ctrs[ctr_idx];
    uint64_t mhpmctr_val = counter->mhpmcounter_val;
    uint64_t mhpmctrh_val = val;

    counter->mhpmcounterh_val = val;
    mhpmctr_val = mhpmctr_val | (mhpmctrh_val << 32);
    if (riscv_pmu_ctr_monitor_cycles(env, ctr_idx) ||
        riscv_pmu_ctr_monitor_instructions(env, ctr_idx)) {
        counter->mhpmcounterh_prev = get_ticks(true);
        if (ctr_idx > 2) {
            riscv_pmu_setup_timer(env, mhpmctr_val, ctr_idx);
        }
    } else {
        counter->mhpmcounterh_prev = val;
    }

    return RISCV_EXCP_NONE;
}

static RISCVException riscv_pmu_read_ctr(CPURISCVState *env, target_ulong *val,
                                         bool upper_half, uint32_t ctr_idx)
{
    PMUCTRState counter = env->pmu_ctrs[ctr_idx];
    target_ulong ctr_prev = upper_half ? counter.mhpmcounterh_prev :
                                         counter.mhpmcounter_prev;
    target_ulong ctr_val = upper_half ? counter.mhpmcounterh_val :
                                        counter.mhpmcounter_val;

    if (get_field(env->mcountinhibit, BIT(ctr_idx))) {
        /**
         * Counter should not increment if inhibit bit is set. We can't really
         * stop the icount counting. Just return the counter value written by
         * the supervisor to indicate that counter was not incremented.
         */
        if (!counter.started) {
            *val = ctr_val;
            return RISCV_EXCP_NONE;
        } else {
            /* Mark that the counter has been stopped */
            counter.started = false;
        }
    }

    /**
     * The kernel computes the perf delta by subtracting the current value from
     * the value it initialized previously (ctr_val).
     */
    if (riscv_pmu_ctr_monitor_cycles(env, ctr_idx) ||
        riscv_pmu_ctr_monitor_instructions(env, ctr_idx)) {
        *val = get_ticks(upper_half) - ctr_prev + ctr_val;
    } else {
        *val = ctr_val;
    }

    return RISCV_EXCP_NONE;
}

static int read_hpmcounter(CPURISCVState *env, int csrno, target_ulong *val)
{
    uint16_t ctr_index;

    if (csrno >= CSR_MCYCLE && csrno <= CSR_MHPMCOUNTER31) {
        ctr_index = csrno - CSR_MCYCLE;
    } else if (csrno >= CSR_CYCLE && csrno <= CSR_HPMCOUNTER31) {
        ctr_index = csrno - CSR_CYCLE;
    } else {
        return RISCV_EXCP_ILLEGAL_INST;
    }

    return riscv_pmu_read_ctr(env, val, false, ctr_index);
}

static int read_hpmcounterh(CPURISCVState *env, int csrno, target_ulong *val)
{
    uint16_t ctr_index;

    if (csrno >= CSR_MCYCLEH && csrno <= CSR_MHPMCOUNTER31H) {
        ctr_index = csrno - CSR_MCYCLEH;
    } else if (csrno >= CSR_CYCLEH && csrno <= CSR_HPMCOUNTER31H) {
        ctr_index = csrno - CSR_CYCLEH;
    } else {
        return RISCV_EXCP_ILLEGAL_INST;
    }

    return riscv_pmu_read_ctr(env, val, true, ctr_index);
}

static int read_scountovf(CPURISCVState *env, int csrno, target_ulong *val)
{
    int mhpmevt_start = CSR_MHPMEVENT3 - CSR_MCOUNTINHIBIT;
    int i;
    *val = 0;
    target_ulong *mhpm_evt_val;
    uint64_t of_bit_mask;

    if (riscv_cpu_mxl(env) == MXL_RV32) {
        mhpm_evt_val = env->mhpmeventh_val;
        of_bit_mask = MHPMEVENTH_BIT_OF;
    } else {
        mhpm_evt_val = env->mhpmevent_val;
        of_bit_mask = MHPMEVENT_BIT_OF;
    }

    for (i = mhpmevt_start; i < RV_MAX_MHPMEVENTS; i++) {
        if ((get_field(env->mcounteren, BIT(i))) &&
            (mhpm_evt_val[i] & of_bit_mask)) {
            *val |= BIT(i);
        }
    }

    return RISCV_EXCP_NONE;
}

static RISCVException read_time(CPURISCVState *env, int csrno,
                                target_ulong *val)
{
    uint64_t delta = riscv_cpu_virt_enabled(env) ? env->htimedelta : 0;

    if (!env->rdtime_fn) {
        return RISCV_EXCP_ILLEGAL_INST;
    }

    *val = env->rdtime_fn(env->rdtime_fn_arg) + delta;
    return RISCV_EXCP_NONE;
}

static RISCVException read_timeh(CPURISCVState *env, int csrno,
                                 target_ulong *val)
{
    uint64_t delta = riscv_cpu_virt_enabled(env) ? env->htimedelta : 0;

    if (!env->rdtime_fn) {
        return RISCV_EXCP_ILLEGAL_INST;
    }

    *val = (env->rdtime_fn(env->rdtime_fn_arg) + delta) >> 32;
    return RISCV_EXCP_NONE;
}

static RISCVException sstc(CPURISCVState *env, int csrno)
{
    CPUState *cs = env_cpu(env);
    RISCVCPU *cpu = RISCV_CPU(cs);
    bool hmode_check = false;

    if (!cpu->cfg.ext_sstc || !env->rdtime_fn) {
        return RISCV_EXCP_ILLEGAL_INST;
    }

    if (env->priv == PRV_M) {
        return RISCV_EXCP_NONE;
    }

    /*
     * No need of a separate function for rv32 as menvcfg stores both menvcfg
     * and menvcfgh for RV32.
     */
    if (!(get_field(env->mcounteren, COUNTEREN_TM) &&
          get_field(env->menvcfg, MENVCFG_STCE))) {
        return RISCV_EXCP_ILLEGAL_INST;
    }

    if (riscv_cpu_virt_enabled(env)) {
        if (!(get_field(env->hcounteren, COUNTEREN_TM) &
              get_field(env->henvcfg, HENVCFG_STCE))) {
            return RISCV_EXCP_VIRT_INSTRUCTION_FAULT;
        }
    }

    if ((csrno == CSR_VSTIMECMP) || (csrno == CSR_VSTIMECMPH)) {
        hmode_check = true;
    }

    return hmode_check ? hmode(env, csrno) : smode(env, csrno);
}

static RISCVException sstc_32(CPURISCVState *env, int csrno)
{
    if (riscv_cpu_mxl(env) != MXL_RV32) {
        return RISCV_EXCP_ILLEGAL_INST;
    }

    return sstc(env, csrno);
}

static RISCVException read_vstimecmp(CPURISCVState *env, int csrno,
                                     target_ulong *val)
{
    *val = env->vstimecmp;

    return RISCV_EXCP_NONE;
}

static RISCVException read_vstimecmph(CPURISCVState *env, int csrno,
                                      target_ulong *val)
{
    *val = env->vstimecmp >> 32;

    return RISCV_EXCP_NONE;
}

static RISCVException write_vstimecmp(CPURISCVState *env, int csrno,
                                      target_ulong val)
{
    RISCVCPU *cpu = env_archcpu(env);

    if (riscv_cpu_mxl(env) == MXL_RV32) {
        env->vstimecmp = deposit64(env->vstimecmp, 0, 32, (uint64_t)val);
    } else {
        env->vstimecmp = val;
    }

    riscv_timer_write_timecmp(cpu, env->vstimer, env->vstimecmp,
                              env->htimedelta, MIP_VSTIP);

    return RISCV_EXCP_NONE;
}

static RISCVException write_vstimecmph(CPURISCVState *env, int csrno,
                                       target_ulong val)
{
    RISCVCPU *cpu = env_archcpu(env);

    env->vstimecmp = deposit64(env->vstimecmp, 32, 32, (uint64_t)val);
    riscv_timer_write_timecmp(cpu, env->vstimer, env->vstimecmp,
                              env->htimedelta, MIP_VSTIP);

    return RISCV_EXCP_NONE;
}

static RISCVException read_stimecmp(CPURISCVState *env, int csrno,
                                    target_ulong *val)
{
    if (riscv_cpu_virt_enabled(env)) {
        *val = env->vstimecmp;
    } else {
        *val = env->stimecmp;
    }

    return RISCV_EXCP_NONE;
}

static RISCVException read_stimecmph(CPURISCVState *env, int csrno,
                                     target_ulong *val)
{
    if (riscv_cpu_virt_enabled(env)) {
        *val = env->vstimecmp >> 32;
    } else {
        *val = env->stimecmp >> 32;
    }

    return RISCV_EXCP_NONE;
}

static RISCVException write_stimecmp(CPURISCVState *env, int csrno,
                                     target_ulong val)
{
    RISCVCPU *cpu = env_archcpu(env);

    if (riscv_cpu_virt_enabled(env)) {
        return write_vstimecmp(env, csrno, val);
    }

    if (riscv_cpu_mxl(env) == MXL_RV32) {
        env->stimecmp = deposit64(env->stimecmp, 0, 32, (uint64_t)val);
    } else {
        env->stimecmp = val;
    }

    riscv_timer_write_timecmp(cpu, env->stimer, env->stimecmp, 0, MIP_STIP);

    return RISCV_EXCP_NONE;
}

static RISCVException write_stimecmph(CPURISCVState *env, int csrno,
                                      target_ulong val)
{
    RISCVCPU *cpu = env_archcpu(env);

    if (riscv_cpu_virt_enabled(env)) {
        return write_vstimecmph(env, csrno, val);
    }

    env->stimecmp = deposit64(env->stimecmp, 32, 32, (uint64_t)val);
    riscv_timer_write_timecmp(cpu, env->stimer, env->stimecmp, 0, MIP_STIP);

    return RISCV_EXCP_NONE;
}

/* Machine constants */

#define M_MODE_INTERRUPTS  ((uint64_t)(MIP_MSIP | MIP_MTIP | MIP_MEIP))
#define S_MODE_INTERRUPTS  ((uint64_t)(MIP_SSIP | MIP_STIP | MIP_SEIP | \
                                      MIP_LCOFIP))
#define VS_MODE_INTERRUPTS ((uint64_t)(MIP_VSSIP | MIP_VSTIP | MIP_VSEIP))
#define HS_MODE_INTERRUPTS ((uint64_t)(MIP_SGEIP | VS_MODE_INTERRUPTS))

#define VSTOPI_NUM_SRCS 5

static const uint64_t delegable_ints = S_MODE_INTERRUPTS |
                                       VS_MODE_INTERRUPTS;
static const uint64_t vs_delegable_ints = VS_MODE_INTERRUPTS;
static const uint64_t all_ints = M_MODE_INTERRUPTS | S_MODE_INTERRUPTS |
                                 HS_MODE_INTERRUPTS;
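/* Synchronous exceptions that can be delegated via medeleg */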
#define DELEGABLE_EXCPS ((1ULL << (RISCV_EXCP_INST_ADDR_MIS)) | \
                         (1ULL << (RISCV_EXCP_INST_ACCESS_FAULT)) | \
                         (1ULL << (RISCV_EXCP_ILLEGAL_INST)) | \
                         (1ULL << (RISCV_EXCP_BREAKPOINT)) | \
                         (1ULL << (RISCV_EXCP_LOAD_ADDR_MIS)) | \
                         (1ULL << (RISCV_EXCP_LOAD_ACCESS_FAULT)) | \
                         (1ULL << (RISCV_EXCP_STORE_AMO_ADDR_MIS)) | \
                         (1ULL << (RISCV_EXCP_STORE_AMO_ACCESS_FAULT)) | \
                         (1ULL << (RISCV_EXCP_U_ECALL)) | \
                         (1ULL << (RISCV_EXCP_S_ECALL)) | \
                         (1ULL << (RISCV_EXCP_VS_ECALL)) | \
                         (1ULL << (RISCV_EXCP_M_ECALL)) | \
                         (1ULL << (RISCV_EXCP_INST_PAGE_FAULT)) | \
                         (1ULL << (RISCV_EXCP_LOAD_PAGE_FAULT)) | \
                         (1ULL << (RISCV_EXCP_STORE_PAGE_FAULT)) | \
                         (1ULL << (RISCV_EXCP_INST_GUEST_PAGE_FAULT)) | \
                         (1ULL << (RISCV_EXCP_LOAD_GUEST_ACCESS_FAULT)) | \
                         (1ULL << (RISCV_EXCP_VIRT_INSTRUCTION_FAULT)) | \
                         (1ULL << (RISCV_EXCP_STORE_GUEST_AMO_ACCESS_FAULT)))
static const target_ulong vs_delegable_excps = DELEGABLE_EXCPS &
    ~((1ULL << (RISCV_EXCP_S_ECALL)) |
      (1ULL << (RISCV_EXCP_VS_ECALL)) |
      (1ULL << (RISCV_EXCP_M_ECALL)) |
      (1ULL << (RISCV_EXCP_INST_GUEST_PAGE_FAULT)) |
      (1ULL << (RISCV_EXCP_LOAD_GUEST_ACCESS_FAULT)) |
      (1ULL << (RISCV_EXCP_VIRT_INSTRUCTION_FAULT)) |
      (1ULL << (RISCV_EXCP_STORE_GUEST_AMO_ACCESS_FAULT)));
static const target_ulong sstatus_v1_10_mask = SSTATUS_SIE | SSTATUS_SPIE |
    SSTATUS_UIE | SSTATUS_UPIE | SSTATUS_SPP | SSTATUS_FS | SSTATUS_XS |
    SSTATUS_SUM | SSTATUS_MXR | SSTATUS_VS;
static const target_ulong sip_writable_mask = SIP_SSIP | MIP_USIP | MIP_UEIP |
                                              SIP_LCOFIP;
static const target_ulong hip_writable_mask = MIP_VSSIP;
static const target_ulong hvip_writable_mask = MIP_VSSIP | MIP_VSTIP | MIP_VSEIP;
static const target_ulong vsip_writable_mask = MIP_VSSIP;

static const char valid_vm_1_10_32[16] = {
    [VM_1_10_MBARE] = 1,
    [VM_1_10_SV32] = 1
};

static const char valid_vm_1_10_64[16] = {
    [VM_1_10_MBARE] = 1,
    [VM_1_10_SV39] = 1,
    [VM_1_10_SV48] = 1,
    [VM_1_10_SV57] = 1
};

/* Machine Information Registers */
static RISCVException read_zero(CPURISCVState *env, int csrno,
                                target_ulong *val)
{
    *val = 0;
    return RISCV_EXCP_NONE;
}

static RISCVException write_ignore(CPURISCVState *env, int csrno,
                                   target_ulong val)
{
    return RISCV_EXCP_NONE;
}

static RISCVException read_mvendorid(CPURISCVState *env, int csrno,
                                     target_ulong *val)
{
    CPUState *cs = env_cpu(env);
    RISCVCPU *cpu = RISCV_CPU(cs);

    *val = cpu->cfg.mvendorid;
    return RISCV_EXCP_NONE;
}

static RISCVException read_marchid(CPURISCVState *env, int csrno,
                                   target_ulong *val)
{
    CPUState *cs = env_cpu(env);
    RISCVCPU *cpu = RISCV_CPU(cs);

    *val = cpu->cfg.marchid;
    return RISCV_EXCP_NONE;
}

static RISCVException read_mimpid(CPURISCVState *env, int csrno,
                                  target_ulong *val)
{
    CPUState *cs = env_cpu(env);
    RISCVCPU *cpu = RISCV_CPU(cs);

    *val = cpu->cfg.mimpid;
    return RISCV_EXCP_NONE;
}

static RISCVException read_mhartid(CPURISCVState *env, int csrno,
                                   target_ulong *val)
{
    *val = env->mhartid;
    return RISCV_EXCP_NONE;
}

/* Machine Trap Setup */

/* We do not store SD explicitly, only compute it on demand. */
static uint64_t add_status_sd(RISCVMXL xl, uint64_t status)
{
    if ((status & MSTATUS_FS) == MSTATUS_FS ||
        (status & MSTATUS_VS) == MSTATUS_VS ||
        (status & MSTATUS_XS) == MSTATUS_XS) {
        switch (xl) {
        case MXL_RV32:
            return status | MSTATUS32_SD;
        case MXL_RV64:
            return status | MSTATUS64_SD;
        case MXL_RV128:
            return MSTATUSH128_SD;
        default:
            g_assert_not_reached();
        }
    }
    return status;
}

static RISCVException read_mstatus(CPURISCVState *env, int csrno,
                                   target_ulong *val)
{
    *val = add_status_sd(riscv_cpu_mxl(env), env->mstatus);
    return RISCV_EXCP_NONE;
}

static int validate_vm(CPURISCVState *env, target_ulong vm)
{
    if (riscv_cpu_mxl(env) == MXL_RV32) {
        return valid_vm_1_10_32[vm & 0xf];
    } else {
        return valid_vm_1_10_64[vm & 0xf];
    }
}

static RISCVException write_mstatus(CPURISCVState *env, int csrno,
                                    target_ulong val)
{
    uint64_t mstatus = env->mstatus;
    uint64_t mask = 0;
    RISCVMXL xl = riscv_cpu_mxl(env);

    /* flush tlb on mstatus fields that affect VM */
    if ((val ^ mstatus) & (MSTATUS_MXR | MSTATUS_MPP | MSTATUS_MPV |
                           MSTATUS_MPRV | MSTATUS_SUM)) {
        tlb_flush(env_cpu(env));
    }
    mask = MSTATUS_SIE | MSTATUS_SPIE | MSTATUS_MIE | MSTATUS_MPIE |
           MSTATUS_SPP | MSTATUS_MPRV | MSTATUS_SUM |
           MSTATUS_MPP | MSTATUS_MXR | MSTATUS_TVM | MSTATUS_TSR |
           MSTATUS_TW | MSTATUS_VS;

    if (riscv_has_ext(env, RVF)) {
        mask |= MSTATUS_FS;
    }

    if (xl != MXL_RV32 || env->debugger) {
        /*
         * RV32: MPV and GVA are not in mstatus. The current plan is to
         * add them to mstatush. For now, we just don't support it.
         */
        mask |= MSTATUS_MPV | MSTATUS_GVA;
        if ((val & MSTATUS64_UXL) != 0) {
            mask |= MSTATUS64_UXL;
        }
    }

    mstatus = (mstatus & ~mask) | (val & mask);

    if (xl > MXL_RV32) {
        /* SXL field is for now read only */
        mstatus = set_field(mstatus, MSTATUS64_SXL, xl);
    }
    env->mstatus = mstatus;
    env->xl = cpu_recompute_xl(env);

    return RISCV_EXCP_NONE;
}

static RISCVException read_mstatush(CPURISCVState *env, int csrno,
                                    target_ulong *val)
{
    *val = env->mstatus >> 32;
    return RISCV_EXCP_NONE;
}

static RISCVException write_mstatush(CPURISCVState *env, int csrno,
                                     target_ulong val)
{
    uint64_t valh = (uint64_t)val << 32;
    uint64_t mask = MSTATUS_MPV | MSTATUS_GVA;

    if ((valh ^ env->mstatus) & (MSTATUS_MPV)) {
        tlb_flush(env_cpu(env));
    }

    env->mstatus = (env->mstatus & ~mask) | (valh & mask);

    return RISCV_EXCP_NONE;
}

static RISCVException read_mstatus_i128(CPURISCVState *env, int csrno,
                                        Int128 *val)
{
    *val = int128_make128(env->mstatus, add_status_sd(MXL_RV128, env->mstatus));
    return RISCV_EXCP_NONE;
}

static RISCVException read_misa_i128(CPURISCVState *env, int csrno,
                                     Int128 *val)
{
    *val = int128_make128(env->misa_ext, (uint64_t)MXL_RV128 << 62);
    return RISCV_EXCP_NONE;
}

static RISCVException read_misa(CPURISCVState *env, int csrno,
                                target_ulong *val)
{
    target_ulong misa;

    switch (env->misa_mxl) {
    case MXL_RV32:
        misa = (target_ulong)MXL_RV32 << 30;
        break;
#ifdef TARGET_RISCV64
    case MXL_RV64:
        misa = (target_ulong)MXL_RV64 << 62;
        break;
#endif
    default:
        g_assert_not_reached();
    }

    *val = misa | env->misa_ext;
    return RISCV_EXCP_NONE;
}

static RISCVException write_misa(CPURISCVState *env, int csrno,
                                 target_ulong val)
{
    if (!riscv_feature(env, RISCV_FEATURE_MISA)) {
        /* drop write to misa */
        return RISCV_EXCP_NONE;
    }

    /* 'I' or 'E' must be present */
    if (!(val & (RVI | RVE))) {
        /* It is not, drop write to misa */
        return RISCV_EXCP_NONE;
    }

    /* 'E' excludes all other extensions */
    if (val & RVE) {
        /* when we support 'E' we can do "val = RVE;" however
         * for now we just drop writes if 'E' is present.
         */
        return RISCV_EXCP_NONE;
    }

    /*
     * misa.MXL writes are not supported by QEMU.
     * Drop writes to those bits.
     */

    /* Mask extensions that are not supported by this hart */
    val &= env->misa_ext_mask;

    /* Mask extensions that are not supported by QEMU */
    val &= (RVI | RVE | RVM | RVA | RVF | RVD | RVC | RVS | RVU | RVV);

    /* 'D' depends on 'F', so clear 'D' if 'F' is not present */
    if ((val & RVD) && !(val & RVF)) {
        val &= ~RVD;
    }

    /* Suppress 'C' if next instruction is not aligned
     * TODO: this should check next_pc
     */
    if ((val & RVC) && (GETPC() & ~3) != 0) {
        val &= ~RVC;
    }

    /* If nothing changed, do nothing. */
    if (val == env->misa_ext) {
        return RISCV_EXCP_NONE;
    }

    if (!(val & RVF)) {
        env->mstatus &= ~MSTATUS_FS;
    }

    /* flush translation cache */
    tb_flush(env_cpu(env));
    env->misa_ext = val;
    env->xl = riscv_cpu_mxl(env);
    return RISCV_EXCP_NONE;
}

static RISCVException read_medeleg(CPURISCVState *env, int csrno,
                                   target_ulong *val)
{
    *val = env->medeleg;
    return RISCV_EXCP_NONE;
}

static RISCVException write_medeleg(CPURISCVState *env, int csrno,
                                    target_ulong val)
{
    env->medeleg = (env->medeleg & ~DELEGABLE_EXCPS) | (val & DELEGABLE_EXCPS);
    return RISCV_EXCP_NONE;
}

static RISCVException rmw_mideleg64(CPURISCVState *env, int csrno,
                                    uint64_t *ret_val,
                                    uint64_t new_val, uint64_t wr_mask)
{
    uint64_t mask = wr_mask & delegable_ints;

    if (ret_val) {
        *ret_val = env->mideleg;
    }

    env->mideleg = (env->mideleg & ~mask) | (new_val & mask);

    if (riscv_has_ext(env, RVH)) {
        env->mideleg |= HS_MODE_INTERRUPTS;
    }

    return RISCV_EXCP_NONE;
}

static RISCVException rmw_mideleg(CPURISCVState *env, int csrno,
                                  target_ulong *ret_val,
                                  target_ulong new_val, target_ulong wr_mask)
{
    uint64_t rval;
    RISCVException ret;

    ret = rmw_mideleg64(env, csrno, &rval, new_val, wr_mask);
    if (ret_val) {
        *ret_val = rval;
    }

    return ret;
}

static RISCVException rmw_midelegh(CPURISCVState *env, int csrno,
                                   target_ulong *ret_val,
                                   target_ulong new_val,
                                   target_ulong wr_mask)
{
    uint64_t rval;
    RISCVException ret;

    ret = rmw_mideleg64(env, csrno, &rval,
        ((uint64_t)new_val) << 32, ((uint64_t)wr_mask) << 32);
    if (ret_val) {
        *ret_val = rval >> 32;
    }

    return ret;
}

static RISCVException rmw_mie64(CPURISCVState *env, int csrno,
                                uint64_t *ret_val,
                                uint64_t new_val, uint64_t wr_mask)
{
    uint64_t mask = wr_mask & all_ints;

    if (ret_val) {
        *ret_val = env->mie;
    }

    env->mie = (env->mie & ~mask) | (new_val & mask);

    if (!riscv_has_ext(env, RVH)) {
        env->mie &= ~((uint64_t)MIP_SGEIP);
    }

    return RISCV_EXCP_NONE;
}

static RISCVException rmw_mie(CPURISCVState *env, int csrno,
                              target_ulong *ret_val,
                              target_ulong new_val, target_ulong wr_mask)
{
    uint64_t rval;
    RISCVException ret;

    ret = rmw_mie64(env, csrno, &rval, new_val, wr_mask);
    if (ret_val) {
        *ret_val = rval;
    }

    return ret;
}

static RISCVException rmw_mieh(CPURISCVState *env, int csrno,
                               target_ulong *ret_val,
                               target_ulong new_val, target_ulong wr_mask)
{
    uint64_t rval;
    RISCVException ret;

    ret = rmw_mie64(env, csrno, &rval,
        ((uint64_t)new_val) << 32, ((uint64_t)wr_mask) << 32);
    if (ret_val) {
        *ret_val = rval >> 32;
    }

    return ret;
}

static int read_mtopi(CPURISCVState *env, int csrno, target_ulong *val)
{
    int irq;
    uint8_t iprio;

    irq = riscv_cpu_mirq_pending(env);
    if (irq <= 0 || irq > 63) {
        *val = 0;
    } else {
        iprio = env->miprio[irq];
        if (!iprio) {
            if (riscv_cpu_default_priority(irq) > IPRIO_DEFAULT_M) {
                iprio = IPRIO_MMAXIPRIO;
            }
        }
        *val = (irq & TOPI_IID_MASK) << TOPI_IID_SHIFT;
        *val |= iprio;
    }

    return RISCV_EXCP_NONE;
}

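/* Translate an S-mode AIA CSR number to its VS-mode equivalent when V=1 */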
static int aia_xlate_vs_csrno(CPURISCVState *env, int csrno)
{
    if (!riscv_cpu_virt_enabled(env)) {
        return csrno;
    }

    switch (csrno) {
    case CSR_SISELECT:
        return CSR_VSISELECT;
    case CSR_SIREG:
        return CSR_VSIREG;
    case CSR_STOPEI:
        return CSR_VSTOPEI;
    default:
        return csrno;
    };
}

static int rmw_xiselect(CPURISCVState *env, int csrno, target_ulong *val,
                        target_ulong new_val, target_ulong wr_mask)
{
    target_ulong *iselect;

    /* Translate CSR number for VS-mode */
    csrno = aia_xlate_vs_csrno(env, csrno);

    /* Find the iselect CSR based on CSR number */
    switch (csrno) {
    case CSR_MISELECT:
        iselect = &env->miselect;
        break;
    case CSR_SISELECT:
        iselect = &env->siselect;
        break;
    case CSR_VSISELECT:
        iselect = &env->vsiselect;
        break;
    default:
        return RISCV_EXCP_ILLEGAL_INST;
    };

    if (val) {
        *val = *iselect;
    }

    wr_mask &= ISELECT_MASK;
    if (wr_mask) {
        *iselect = (*iselect & ~wr_mask) | (new_val & wr_mask);
    }

    return RISCV_EXCP_NONE;
}

static int rmw_iprio(target_ulong xlen,
                     target_ulong iselect, uint8_t *iprio,
                     target_ulong *val, target_ulong new_val,
                     target_ulong wr_mask, int ext_irq_no)
{
    int i, firq, nirqs;
    target_ulong old_val;

    if (iselect < ISELECT_IPRIO0 || ISELECT_IPRIO15 < iselect) {
        return -EINVAL;
    }
    if (xlen != 32 && iselect & 0x1) {
        return -EINVAL;
    }

    nirqs = 4 * (xlen / 32);
    firq = ((iselect - ISELECT_IPRIO0) / (xlen / 32)) * (nirqs);

    old_val = 0;
    for (i = 0; i < nirqs; i++) {
        old_val |= ((target_ulong)iprio[firq + i]) << (IPRIO_IRQ_BITS * i);
    }

    if (val) {
        *val = old_val;
    }

    if (wr_mask) {
        new_val = (old_val & ~wr_mask) | (new_val & wr_mask);
        for (i = 0; i < nirqs; i++) {
            /*
             * M-level and S-level external IRQ priority is always read-only
             * zero. This means the default priority order is always preferred
             * for M-level and S-level external IRQs.
             */
            if ((firq + i) == ext_irq_no) {
                continue;
            }
            iprio[firq + i] = (new_val >> (IPRIO_IRQ_BITS * i)) & 0xff;
        }
    }

    return 0;
}

static int rmw_xireg(CPURISCVState *env, int csrno, target_ulong *val,
                     target_ulong new_val, target_ulong wr_mask)
{
    bool virt;
    uint8_t *iprio;
    int ret = -EINVAL;
    target_ulong priv, isel, vgein;

    /* Translate CSR number for VS-mode */
    csrno = aia_xlate_vs_csrno(env, csrno);

    /* Decode register details from CSR number */
    virt = false;
    switch (csrno) {
    case CSR_MIREG:
        iprio = env->miprio;
        isel = env->miselect;
        priv = PRV_M;
        break;
    case CSR_SIREG:
        iprio = env->siprio;
        isel = env->siselect;
        priv = PRV_S;
        break;
    case CSR_VSIREG:
        iprio = env->hviprio;
        isel = env->vsiselect;
        priv = PRV_S;
        virt = true;
        break;
    default:
        goto done;
    };

    /* Find the selected guest interrupt file */
    vgein = (virt) ? get_field(env->hstatus, HSTATUS_VGEIN) : 0;

    if (ISELECT_IPRIO0 <= isel && isel <= ISELECT_IPRIO15) {
        /* Local interrupt priority registers not available for VS-mode */
        if (!virt) {
            ret = rmw_iprio(riscv_cpu_mxl_bits(env),
                            isel, iprio, val, new_val, wr_mask,
                            (priv == PRV_M) ? IRQ_M_EXT : IRQ_S_EXT);
        }
    } else if (ISELECT_IMSIC_FIRST <= isel && isel <= ISELECT_IMSIC_LAST) {
        /* IMSIC registers only available when machine implements it. */
        if (env->aia_ireg_rmw_fn[priv]) {
            /* Selected guest interrupt file should not be zero */
            if (virt && (!vgein || env->geilen < vgein)) {
                goto done;
            }
            /* Call machine specific IMSIC register emulation */
            ret = env->aia_ireg_rmw_fn[priv](env->aia_ireg_rmw_fn_arg[priv],
                                             AIA_MAKE_IREG(isel, priv, virt,
                                                 vgein,
                                                 riscv_cpu_mxl_bits(env)),
                                             val, new_val, wr_mask);
        }
    }

done:
    if (ret) {
        return (riscv_cpu_virt_enabled(env) && virt) ?
                RISCV_EXCP_VIRT_INSTRUCTION_FAULT : RISCV_EXCP_ILLEGAL_INST;
    }
    return RISCV_EXCP_NONE;
}

static int rmw_xtopei(CPURISCVState *env, int csrno, target_ulong *val,
                      target_ulong new_val, target_ulong wr_mask)
{
    bool virt;
    int ret = -EINVAL;
    target_ulong priv, vgein;

    /* Translate CSR number for VS-mode */
    csrno = aia_xlate_vs_csrno(env, csrno);

    /* Decode register details from CSR number */
    virt = false;
    switch (csrno) {
    case CSR_MTOPEI:
        priv = PRV_M;
        break;
    case CSR_STOPEI:
        priv = PRV_S;
        break;
    case CSR_VSTOPEI:
        priv = PRV_S;
        virt = true;
        break;
    default:
        goto done;
    };

    /* IMSIC CSRs only available when machine implements IMSIC. */
    if (!env->aia_ireg_rmw_fn[priv]) {
        goto done;
    }

    /* Find the selected guest interrupt file */
    vgein = (virt) ? get_field(env->hstatus, HSTATUS_VGEIN) : 0;

    /* Selected guest interrupt file should be valid */
    if (virt && (!vgein || env->geilen < vgein)) {
        goto done;
    }

    /* Call machine specific IMSIC register emulation for TOPEI */
    ret = env->aia_ireg_rmw_fn[priv](env->aia_ireg_rmw_fn_arg[priv],
                                     AIA_MAKE_IREG(ISELECT_IMSIC_TOPEI, priv,
                                         virt, vgein,
                                         riscv_cpu_mxl_bits(env)),
                                     val, new_val, wr_mask);

done:
    if (ret) {
        return (riscv_cpu_virt_enabled(env) && virt) ?
                RISCV_EXCP_VIRT_INSTRUCTION_FAULT : RISCV_EXCP_ILLEGAL_INST;
    }
    return RISCV_EXCP_NONE;
}

static RISCVException read_mtvec(CPURISCVState *env, int csrno,
                                 target_ulong *val)
{
    *val = env->mtvec;
    return RISCV_EXCP_NONE;
}

static RISCVException write_mtvec(CPURISCVState *env, int csrno,
                                  target_ulong val)
{
    /* bits [1:0] encode mode; 0 = direct, 1 = vectored, 2 >= reserved */
    if ((val & 3) < 2) {
        env->mtvec = val;
    } else {
        qemu_log_mask(LOG_UNIMP, "CSR_MTVEC: reserved mode not supported\n");
    }
    return RISCV_EXCP_NONE;
}

static RISCVException read_mcountinhibit(CPURISCVState *env, int csrno,
                                         target_ulong *val)
{
    *val = env->mcountinhibit;
    return RISCV_EXCP_NONE;
}

static RISCVException write_mcountinhibit(CPURISCVState *env, int csrno,
                                          target_ulong val)
{
    int cidx;
    PMUCTRState *counter;

    env->mcountinhibit = val;

    /* Check if any other counter is also monitoring cycles/instructions */
    for (cidx = 0; cidx < RV_MAX_MHPMCOUNTERS; cidx++) {
        if (!get_field(env->mcountinhibit, BIT(cidx))) {
            counter = &env->pmu_ctrs[cidx];
            counter->started = true;
        }
    }

    return RISCV_EXCP_NONE;
}

static RISCVException read_mcounteren(CPURISCVState *env, int csrno,
                                      target_ulong *val)
{
    *val = env->mcounteren;
    return RISCV_EXCP_NONE;
}

static RISCVException write_mcounteren(CPURISCVState *env, int csrno,
                                       target_ulong val)
{
    env->mcounteren = val;
    return RISCV_EXCP_NONE;
}

/* Machine Trap Handling */
static RISCVException read_mscratch_i128(CPURISCVState *env, int csrno,
                                         Int128 *val)
{
    *val = int128_make128(env->mscratch, env->mscratchh);
    return RISCV_EXCP_NONE;
}

static RISCVException write_mscratch_i128(CPURISCVState *env, int csrno,
                                          Int128 val)
{
    env->mscratch = int128_getlo(val);
    env->mscratchh = int128_gethi(val);
    return RISCV_EXCP_NONE;
}

static RISCVException read_mscratch(CPURISCVState *env, int csrno,
                                    target_ulong *val)
{
    *val = env->mscratch;
    return RISCV_EXCP_NONE;
}

static RISCVException write_mscratch(CPURISCVState *env, int csrno,
                                     target_ulong val)
{
    env->mscratch = val;
    return RISCV_EXCP_NONE;
}

static RISCVException read_mepc(CPURISCVState *env, int csrno,
                                target_ulong *val)
{
    *val = env->mepc;
    return RISCV_EXCP_NONE;
}

static RISCVException write_mepc(CPURISCVState *env, int csrno,
                                 target_ulong val)
{
    env->mepc = val;
    return RISCV_EXCP_NONE;
}

static RISCVException read_mcause(CPURISCVState *env, int csrno,
                                  target_ulong *val)
{
    *val = env->mcause;
    return RISCV_EXCP_NONE;
}

static RISCVException write_mcause(CPURISCVState *env, int csrno,
                                   target_ulong val)
{
    env->mcause = val;
    return RISCV_EXCP_NONE;
}

static RISCVException read_mtval(CPURISCVState *env, int csrno,
                                 target_ulong *val)
{
    *val = env->mtval;
    return RISCV_EXCP_NONE;
}

static RISCVException write_mtval(CPURISCVState *env, int csrno,
                                  target_ulong val)
{
    env->mtval = val;
    return RISCV_EXCP_NONE;
}

/* Execution environment configuration setup */
static RISCVException read_menvcfg(CPURISCVState *env, int csrno,
                                   target_ulong *val)
{
    *val = env->menvcfg;
    return RISCV_EXCP_NONE;
}

static RISCVException write_menvcfg(CPURISCVState *env, int csrno,
                                    target_ulong val)
{
    uint64_t mask = MENVCFG_FIOM | MENVCFG_CBIE | MENVCFG_CBCFE | MENVCFG_CBZE;

    if (riscv_cpu_mxl(env) == MXL_RV64) {
        mask |= MENVCFG_PBMTE | MENVCFG_STCE;
    }
    env->menvcfg = (env->menvcfg & ~mask) | (val & mask);

    return RISCV_EXCP_NONE;
}

static RISCVException read_menvcfgh(CPURISCVState *env, int csrno,
                                    target_ulong *val)
{
    *val = env->menvcfg >> 32;
    return RISCV_EXCP_NONE;
}

static RISCVException write_menvcfgh(CPURISCVState *env, int csrno,
                                     target_ulong val)
{
    uint64_t mask = MENVCFG_PBMTE | MENVCFG_STCE;
    uint64_t valh = (uint64_t)val << 32;

    env->menvcfg = (env->menvcfg & ~mask) | (valh & mask);

    return RISCV_EXCP_NONE;
}

static RISCVException read_senvcfg(CPURISCVState *env, int csrno,
                                   target_ulong *val)
{
    *val = env->senvcfg;
    return RISCV_EXCP_NONE;
}

static RISCVException write_senvcfg(CPURISCVState *env, int csrno,
                                    target_ulong val)
{
    uint64_t mask = SENVCFG_FIOM | SENVCFG_CBIE | SENVCFG_CBCFE | SENVCFG_CBZE;

    env->senvcfg = (env->senvcfg & ~mask) | (val & mask);

    return RISCV_EXCP_NONE;
}

static RISCVException read_henvcfg(CPURISCVState *env, int csrno,
                                   target_ulong *val)
{
    *val = env->henvcfg;
    return RISCV_EXCP_NONE;
}

static RISCVException write_henvcfg(CPURISCVState *env, int csrno,
                                    target_ulong val)
{
    uint64_t mask = HENVCFG_FIOM | HENVCFG_CBIE | HENVCFG_CBCFE | HENVCFG_CBZE;

    if (riscv_cpu_mxl(env) == MXL_RV64) {
        mask |= HENVCFG_PBMTE | HENVCFG_STCE;
    }

    env->henvcfg = (env->henvcfg & ~mask) | (val & mask);

    return RISCV_EXCP_NONE;
}

static RISCVException read_henvcfgh(CPURISCVState *env, int csrno,
                                    target_ulong *val)
{
    *val = env->henvcfg >> 32;
    return RISCV_EXCP_NONE;
}

static RISCVException write_henvcfgh(CPURISCVState *env, int csrno,
                                     target_ulong val)
{
    uint64_t mask = HENVCFG_PBMTE | HENVCFG_STCE;
    uint64_t valh = (uint64_t)val << 32;

    env->henvcfg = (env->henvcfg & ~mask) | (valh & mask);

    return RISCV_EXCP_NONE;
}

static RISCVException rmw_mip64(CPURISCVState *env, int csrno,
                                uint64_t *ret_val,
                                uint64_t new_val, uint64_t wr_mask)
{
    RISCVCPU *cpu = env_archcpu(env);
    uint64_t old_mip, mask = wr_mask & delegable_ints;
    uint32_t gin;

    if (mask & MIP_SEIP) {
        env->software_seip = new_val & MIP_SEIP;
        new_val |= env->external_seip * MIP_SEIP;
    }

    if (cpu->cfg.ext_sstc && (env->priv == PRV_M) &&
        get_field(env->menvcfg, MENVCFG_STCE)) {
        /* sstc extension forbids STIP & VSTIP to be writeable in mip */
        mask = mask & ~(MIP_STIP | MIP_VSTIP);
    }

    if (mask) {
        old_mip = riscv_cpu_update_mip(cpu, mask, (new_val & mask));
    } else {
        old_mip = env->mip;
    }

    if (csrno != CSR_HVIP) {
        gin = get_field(env->hstatus, HSTATUS_VGEIN);
        old_mip |= (env->hgeip & ((target_ulong)1 << gin)) ? MIP_VSEIP : 0;
        old_mip |= env->vstime_irq ? MIP_VSTIP : 0;
    }

    if (ret_val) {
        *ret_val = old_mip;
    }

    return RISCV_EXCP_NONE;
}

static RISCVException rmw_mip(CPURISCVState *env, int csrno,
                              target_ulong *ret_val,
                              target_ulong new_val, target_ulong wr_mask)
{
    uint64_t rval;
    RISCVException ret;

    ret = rmw_mip64(env, csrno, &rval, new_val, wr_mask);
    if (ret_val) {
        *ret_val = rval;
    }

    return ret;
}

static RISCVException rmw_miph(CPURISCVState *env, int csrno,
                               target_ulong *ret_val,
                               target_ulong new_val, target_ulong wr_mask)
{
    uint64_t rval;
    RISCVException ret;

    ret = rmw_mip64(env, csrno, &rval,
        ((uint64_t)new_val) << 32, ((uint64_t)wr_mask) << 32);
    if (ret_val) {
        *ret_val = rval >> 32;
    }

    return ret;
}

/* Supervisor Trap Setup */
static RISCVException read_sstatus_i128(CPURISCVState *env, int csrno,
                                        Int128 *val)
{
    uint64_t mask = sstatus_v1_10_mask;
    uint64_t sstatus = env->mstatus & mask;
    if (env->xl != MXL_RV32 || env->debugger) {
        mask |= SSTATUS64_UXL;
    }

    *val = int128_make128(sstatus, add_status_sd(MXL_RV128, sstatus));
    return RISCV_EXCP_NONE;
}

static RISCVException read_sstatus(CPURISCVState *env, int csrno,
                                   target_ulong *val)
{
    target_ulong mask = (sstatus_v1_10_mask);
    if (env->xl != MXL_RV32 || env->debugger) {
        mask |= SSTATUS64_UXL;
    }
    /* TODO: Use SXL not MXL. */
    *val = add_status_sd(riscv_cpu_mxl(env), env->mstatus & mask);
    return RISCV_EXCP_NONE;
}

static RISCVException write_sstatus(CPURISCVState *env, int csrno,
                                    target_ulong val)
{
    target_ulong mask = (sstatus_v1_10_mask);

    if (env->xl != MXL_RV32 || env->debugger) {
        if ((val & SSTATUS64_UXL) != 0) {
            mask |= SSTATUS64_UXL;
        }
    }
    target_ulong newval = (env->mstatus & ~mask) | (val & mask);
    return write_mstatus(env, CSR_MSTATUS, newval);
}

static RISCVException rmw_vsie64(CPURISCVState *env, int csrno,
                                 uint64_t *ret_val,
                                 uint64_t new_val, uint64_t wr_mask)
{
    RISCVException ret;
    uint64_t rval, vsbits, mask = env->hideleg & VS_MODE_INTERRUPTS;

    /* Bring VS-level bits to correct position */
    vsbits = new_val & (VS_MODE_INTERRUPTS >> 1);
    new_val &= ~(VS_MODE_INTERRUPTS >> 1);
    new_val |= vsbits << 1;
    vsbits = wr_mask & (VS_MODE_INTERRUPTS >> 1);
    wr_mask &= ~(VS_MODE_INTERRUPTS >> 1);
    wr_mask |= vsbits << 1;

    ret = rmw_mie64(env, csrno, &rval, new_val, wr_mask & mask);
    if (ret_val) {
        rval &= mask;
        vsbits = rval & VS_MODE_INTERRUPTS;
        rval &= ~VS_MODE_INTERRUPTS;
        *ret_val = rval | (vsbits >> 1);
    }

    return ret;
}

static RISCVException rmw_vsie(CPURISCVState *env, int csrno,
                               target_ulong *ret_val,
                               target_ulong new_val, target_ulong wr_mask)
{
    uint64_t rval;
    RISCVException ret;

    ret = rmw_vsie64(env, csrno, &rval, new_val, wr_mask);
    if (ret_val) {
        *ret_val = rval;
    }

    return ret;
}

static RISCVException rmw_vsieh(CPURISCVState *env, int csrno,
                                target_ulong *ret_val,
                                target_ulong new_val, target_ulong wr_mask)
{
    uint64_t rval;
    RISCVException ret;

    ret = rmw_vsie64(env, csrno, &rval,
        ((uint64_t)new_val) << 32, ((uint64_t)wr_mask) << 32);
    if (ret_val) {
        *ret_val = rval >> 32;
    }

    return ret;
}

static RISCVException rmw_sie64(CPURISCVState *env, int csrno,
                                uint64_t *ret_val,
                                uint64_t new_val, uint64_t wr_mask)
{
    RISCVException ret;
    uint64_t mask = env->mideleg & S_MODE_INTERRUPTS;

    if (riscv_cpu_virt_enabled(env)) {
        if (env->hvictl & HVICTL_VTI) {
            return RISCV_EXCP_VIRT_INSTRUCTION_FAULT;
        }
        ret = rmw_vsie64(env, CSR_VSIE, ret_val, new_val, wr_mask);
    } else {
        ret = rmw_mie64(env, csrno, ret_val, new_val, wr_mask & mask);
    }

    if (ret_val) {
        *ret_val &= mask;
    }

    return ret;
}

static RISCVException rmw_sie(CPURISCVState *env, int csrno,
                              target_ulong *ret_val,
                              target_ulong new_val, target_ulong wr_mask)
{
    uint64_t rval;
    RISCVException ret;

    ret = rmw_sie64(env, csrno, &rval, new_val, wr_mask);
    if (ret == RISCV_EXCP_NONE && ret_val) {
        *ret_val = rval;
    }

    return ret;
}

static RISCVException rmw_sieh(CPURISCVState *env, int csrno,
                               target_ulong *ret_val,
                               target_ulong new_val, target_ulong wr_mask)
{
    uint64_t rval;
    RISCVException ret;

    ret = rmw_sie64(env, csrno, &rval,
        ((uint64_t)new_val) << 32, ((uint64_t)wr_mask) << 32);
    if (ret_val) {
        *ret_val = rval >> 32;
    }

    return ret;
}

static RISCVException read_stvec(CPURISCVState *env, int csrno,
                                 target_ulong *val)
{
    *val = env->stvec;
    return RISCV_EXCP_NONE;
}

static RISCVException write_stvec(CPURISCVState *env, int csrno,
                                  target_ulong val)
{
    /* bits [1:0] encode mode; 0 = direct, 1 = vectored, 2 >= reserved */
    if ((val & 3) < 2) {
        env->stvec = val;
    } else {
        qemu_log_mask(LOG_UNIMP, "CSR_STVEC: reserved mode not supported\n");
    }
    return RISCV_EXCP_NONE;
}

static RISCVException read_scounteren(CPURISCVState *env, int csrno,
                                      target_ulong *val)
{
    *val = env->scounteren;
    return RISCV_EXCP_NONE;
}

static RISCVException write_scounteren(CPURISCVState *env, int csrno,
                                       target_ulong val)
{
    env->scounteren = val;
    return RISCV_EXCP_NONE;
}

/* Supervisor Trap Handling */
static RISCVException read_sscratch_i128(CPURISCVState *env, int csrno,
                                         Int128 *val)
{
    *val = int128_make128(env->sscratch, env->sscratchh);
    return RISCV_EXCP_NONE;
}

static RISCVException write_sscratch_i128(CPURISCVState *env, int csrno,
                                          Int128 val)
{
    env->sscratch = int128_getlo(val);
    env->sscratchh = int128_gethi(val);
    return RISCV_EXCP_NONE;
}

static RISCVException read_sscratch(CPURISCVState *env, int csrno,
                                    target_ulong *val)
{
    *val = env->sscratch;
    return RISCV_EXCP_NONE;
}

static RISCVException write_sscratch(CPURISCVState *env, int csrno,
                                     target_ulong val)
{
    env->sscratch = val;
    return RISCV_EXCP_NONE;
}

static RISCVException read_sepc(CPURISCVState *env, int csrno,
                                target_ulong *val)
{
    *val = env->sepc;
    return RISCV_EXCP_NONE;
}

static RISCVException write_sepc(CPURISCVState *env, int csrno,
                                 target_ulong val)
{
    env->sepc = val;
    return RISCV_EXCP_NONE;
}

static RISCVException read_scause(CPURISCVState *env, int csrno,
                                  target_ulong *val)
{
    *val = env->scause;
    return RISCV_EXCP_NONE;
RISCV_EXCP_NONE; 2253 } 2254 2255 static RISCVException write_scause(CPURISCVState *env, int csrno, 2256 target_ulong val) 2257 { 2258 env->scause = val; 2259 return RISCV_EXCP_NONE; 2260 } 2261 2262 static RISCVException read_stval(CPURISCVState *env, int csrno, 2263 target_ulong *val) 2264 { 2265 *val = env->stval; 2266 return RISCV_EXCP_NONE; 2267 } 2268 2269 static RISCVException write_stval(CPURISCVState *env, int csrno, 2270 target_ulong val) 2271 { 2272 env->stval = val; 2273 return RISCV_EXCP_NONE; 2274 } 2275 2276 static RISCVException rmw_vsip64(CPURISCVState *env, int csrno, 2277 uint64_t *ret_val, 2278 uint64_t new_val, uint64_t wr_mask) 2279 { 2280 RISCVException ret; 2281 uint64_t rval, vsbits, mask = env->hideleg & vsip_writable_mask; 2282 2283 /* Bring VS-level bits to correct position */ 2284 vsbits = new_val & (VS_MODE_INTERRUPTS >> 1); 2285 new_val &= ~(VS_MODE_INTERRUPTS >> 1); 2286 new_val |= vsbits << 1; 2287 vsbits = wr_mask & (VS_MODE_INTERRUPTS >> 1); 2288 wr_mask &= ~(VS_MODE_INTERRUPTS >> 1); 2289 wr_mask |= vsbits << 1; 2290 2291 ret = rmw_mip64(env, csrno, &rval, new_val, wr_mask & mask); 2292 if (ret_val) { 2293 rval &= mask; 2294 vsbits = rval & VS_MODE_INTERRUPTS; 2295 rval &= ~VS_MODE_INTERRUPTS; 2296 *ret_val = rval | (vsbits >> 1); 2297 } 2298 2299 return ret; 2300 } 2301 2302 static RISCVException rmw_vsip(CPURISCVState *env, int csrno, 2303 target_ulong *ret_val, 2304 target_ulong new_val, target_ulong wr_mask) 2305 { 2306 uint64_t rval; 2307 RISCVException ret; 2308 2309 ret = rmw_vsip64(env, csrno, &rval, new_val, wr_mask); 2310 if (ret_val) { 2311 *ret_val = rval; 2312 } 2313 2314 return ret; 2315 } 2316 2317 static RISCVException rmw_vsiph(CPURISCVState *env, int csrno, 2318 target_ulong *ret_val, 2319 target_ulong new_val, target_ulong wr_mask) 2320 { 2321 uint64_t rval; 2322 RISCVException ret; 2323 2324 ret = rmw_vsip64(env, csrno, &rval, 2325 ((uint64_t)new_val) << 32, ((uint64_t)wr_mask) << 32); 2326 if (ret_val) { 2327 *ret_val = rval >> 32; 2328 } 2329 2330 return ret; 2331 } 2332 2333 static RISCVException rmw_sip64(CPURISCVState *env, int csrno, 2334 uint64_t *ret_val, 2335 uint64_t new_val, uint64_t wr_mask) 2336 { 2337 RISCVException ret; 2338 uint64_t mask = env->mideleg & sip_writable_mask; 2339 2340 if (riscv_cpu_virt_enabled(env)) { 2341 if (env->hvictl & HVICTL_VTI) { 2342 return RISCV_EXCP_VIRT_INSTRUCTION_FAULT; 2343 } 2344 ret = rmw_vsip64(env, CSR_VSIP, ret_val, new_val, wr_mask); 2345 } else { 2346 ret = rmw_mip64(env, csrno, ret_val, new_val, wr_mask & mask); 2347 } 2348 2349 if (ret_val) { 2350 *ret_val &= env->mideleg & S_MODE_INTERRUPTS; 2351 } 2352 2353 return ret; 2354 } 2355 2356 static RISCVException rmw_sip(CPURISCVState *env, int csrno, 2357 target_ulong *ret_val, 2358 target_ulong new_val, target_ulong wr_mask) 2359 { 2360 uint64_t rval; 2361 RISCVException ret; 2362 2363 ret = rmw_sip64(env, csrno, &rval, new_val, wr_mask); 2364 if (ret_val) { 2365 *ret_val = rval; 2366 } 2367 2368 return ret; 2369 } 2370 2371 static RISCVException rmw_siph(CPURISCVState *env, int csrno, 2372 target_ulong *ret_val, 2373 target_ulong new_val, target_ulong wr_mask) 2374 { 2375 uint64_t rval; 2376 RISCVException ret; 2377 2378 ret = rmw_sip64(env, csrno, &rval, 2379 ((uint64_t)new_val) << 32, ((uint64_t)wr_mask) << 32); 2380 if (ret_val) { 2381 *ret_val = rval >> 32; 2382 } 2383 2384 return ret; 2385 } 2386 2387 /* Supervisor Protection and Translation */ 2388 static RISCVException read_satp(CPURISCVState *env, int csrno, 2389 
target_ulong *val) 2390 { 2391 if (!riscv_feature(env, RISCV_FEATURE_MMU)) { 2392 *val = 0; 2393 return RISCV_EXCP_NONE; 2394 } 2395 2396 if (env->priv == PRV_S && get_field(env->mstatus, MSTATUS_TVM)) { 2397 return RISCV_EXCP_ILLEGAL_INST; 2398 } else { 2399 *val = env->satp; 2400 } 2401 2402 return RISCV_EXCP_NONE; 2403 } 2404 2405 static RISCVException write_satp(CPURISCVState *env, int csrno, 2406 target_ulong val) 2407 { 2408 target_ulong vm, mask; 2409 2410 if (!riscv_feature(env, RISCV_FEATURE_MMU)) { 2411 return RISCV_EXCP_NONE; 2412 } 2413 2414 if (riscv_cpu_mxl(env) == MXL_RV32) { 2415 vm = validate_vm(env, get_field(val, SATP32_MODE)); 2416 mask = (val ^ env->satp) & (SATP32_MODE | SATP32_ASID | SATP32_PPN); 2417 } else { 2418 vm = validate_vm(env, get_field(val, SATP64_MODE)); 2419 mask = (val ^ env->satp) & (SATP64_MODE | SATP64_ASID | SATP64_PPN); 2420 } 2421 2422 if (vm && mask) { 2423 if (env->priv == PRV_S && get_field(env->mstatus, MSTATUS_TVM)) { 2424 return RISCV_EXCP_ILLEGAL_INST; 2425 } else { 2426 /* 2427 * The ISA defines SATP.MODE=Bare as "no translation", but we still 2428 * pass these through QEMU's TLB emulation as it improves 2429 * performance. Flushing the TLB on SATP writes with paging 2430 * enabled avoids leaking those invalid cached mappings. 2431 */ 2432 tlb_flush(env_cpu(env)); 2433 env->satp = val; 2434 } 2435 } 2436 return RISCV_EXCP_NONE; 2437 } 2438 2439 static int read_vstopi(CPURISCVState *env, int csrno, target_ulong *val) 2440 { 2441 int irq, ret; 2442 target_ulong topei; 2443 uint64_t vseip, vsgein; 2444 uint32_t iid, iprio, hviid, hviprio, gein; 2445 uint32_t s, scount = 0, siid[VSTOPI_NUM_SRCS], siprio[VSTOPI_NUM_SRCS]; 2446 2447 gein = get_field(env->hstatus, HSTATUS_VGEIN); 2448 hviid = get_field(env->hvictl, HVICTL_IID); 2449 hviprio = get_field(env->hvictl, HVICTL_IPRIO); 2450 2451 if (gein) { 2452 vsgein = (env->hgeip & (1ULL << gein)) ? MIP_VSEIP : 0; 2453 vseip = env->mie & (env->mip | vsgein) & MIP_VSEIP; 2454 if (gein <= env->geilen && vseip) { 2455 siid[scount] = IRQ_S_EXT; 2456 siprio[scount] = IPRIO_MMAXIPRIO + 1; 2457 if (env->aia_ireg_rmw_fn[PRV_S]) { 2458 /* 2459 * Call machine specific IMSIC register emulation for 2460 * reading TOPEI. 
2461 */ 2462 ret = env->aia_ireg_rmw_fn[PRV_S]( 2463 env->aia_ireg_rmw_fn_arg[PRV_S], 2464 AIA_MAKE_IREG(ISELECT_IMSIC_TOPEI, PRV_S, true, gein, 2465 riscv_cpu_mxl_bits(env)), 2466 &topei, 0, 0); 2467 if (!ret && topei) { 2468 siprio[scount] = topei & IMSIC_TOPEI_IPRIO_MASK; 2469 } 2470 } 2471 scount++; 2472 } 2473 } else { 2474 if (hviid == IRQ_S_EXT && hviprio) { 2475 siid[scount] = IRQ_S_EXT; 2476 siprio[scount] = hviprio; 2477 scount++; 2478 } 2479 } 2480 2481 if (env->hvictl & HVICTL_VTI) { 2482 if (hviid != IRQ_S_EXT) { 2483 siid[scount] = hviid; 2484 siprio[scount] = hviprio; 2485 scount++; 2486 } 2487 } else { 2488 irq = riscv_cpu_vsirq_pending(env); 2489 if (irq != IRQ_S_EXT && 0 < irq && irq <= 63) { 2490 siid[scount] = irq; 2491 siprio[scount] = env->hviprio[irq]; 2492 scount++; 2493 } 2494 } 2495 2496 iid = 0; 2497 iprio = UINT_MAX; 2498 for (s = 0; s < scount; s++) { 2499 if (siprio[s] < iprio) { 2500 iid = siid[s]; 2501 iprio = siprio[s]; 2502 } 2503 } 2504 2505 if (iid) { 2506 if (env->hvictl & HVICTL_IPRIOM) { 2507 if (iprio > IPRIO_MMAXIPRIO) { 2508 iprio = IPRIO_MMAXIPRIO; 2509 } 2510 if (!iprio) { 2511 if (riscv_cpu_default_priority(iid) > IPRIO_DEFAULT_S) { 2512 iprio = IPRIO_MMAXIPRIO; 2513 } 2514 } 2515 } else { 2516 iprio = 1; 2517 } 2518 } else { 2519 iprio = 0; 2520 } 2521 2522 *val = (iid & TOPI_IID_MASK) << TOPI_IID_SHIFT; 2523 *val |= iprio; 2524 return RISCV_EXCP_NONE; 2525 } 2526 2527 static int read_stopi(CPURISCVState *env, int csrno, target_ulong *val) 2528 { 2529 int irq; 2530 uint8_t iprio; 2531 2532 if (riscv_cpu_virt_enabled(env)) { 2533 return read_vstopi(env, CSR_VSTOPI, val); 2534 } 2535 2536 irq = riscv_cpu_sirq_pending(env); 2537 if (irq <= 0 || irq > 63) { 2538 *val = 0; 2539 } else { 2540 iprio = env->siprio[irq]; 2541 if (!iprio) { 2542 if (riscv_cpu_default_priority(irq) > IPRIO_DEFAULT_S) { 2543 iprio = IPRIO_MMAXIPRIO; 2544 } 2545 } 2546 *val = (irq & TOPI_IID_MASK) << TOPI_IID_SHIFT; 2547 *val |= iprio; 2548 } 2549 2550 return RISCV_EXCP_NONE; 2551 } 2552 2553 /* Hypervisor Extensions */ 2554 static RISCVException read_hstatus(CPURISCVState *env, int csrno, 2555 target_ulong *val) 2556 { 2557 *val = env->hstatus; 2558 if (riscv_cpu_mxl(env) != MXL_RV32) { 2559 /* We only support 64-bit VSXL */ 2560 *val = set_field(*val, HSTATUS_VSXL, 2); 2561 } 2562 /* We only support little endian */ 2563 *val = set_field(*val, HSTATUS_VSBE, 0); 2564 return RISCV_EXCP_NONE; 2565 } 2566 2567 static RISCVException write_hstatus(CPURISCVState *env, int csrno, 2568 target_ulong val) 2569 { 2570 env->hstatus = val; 2571 if (riscv_cpu_mxl(env) != MXL_RV32 && get_field(val, HSTATUS_VSXL) != 2) { 2572 qemu_log_mask(LOG_UNIMP, "QEMU does not support mixed HSXLEN options."); 2573 } 2574 if (get_field(val, HSTATUS_VSBE) != 0) { 2575 qemu_log_mask(LOG_UNIMP, "QEMU does not support big endian guests."); 2576 } 2577 return RISCV_EXCP_NONE; 2578 } 2579 2580 static RISCVException read_hedeleg(CPURISCVState *env, int csrno, 2581 target_ulong *val) 2582 { 2583 *val = env->hedeleg; 2584 return RISCV_EXCP_NONE; 2585 } 2586 2587 static RISCVException write_hedeleg(CPURISCVState *env, int csrno, 2588 target_ulong val) 2589 { 2590 env->hedeleg = val & vs_delegable_excps; 2591 return RISCV_EXCP_NONE; 2592 } 2593 2594 static RISCVException rmw_hideleg64(CPURISCVState *env, int csrno, 2595 uint64_t *ret_val, 2596 uint64_t new_val, uint64_t wr_mask) 2597 { 2598 uint64_t mask = wr_mask & vs_delegable_ints; 2599 2600 if (ret_val) { 2601 *ret_val = env->hideleg & vs_delegable_ints; 
2602 } 2603 2604 env->hideleg = (env->hideleg & ~mask) | (new_val & mask); 2605 return RISCV_EXCP_NONE; 2606 } 2607 2608 static RISCVException rmw_hideleg(CPURISCVState *env, int csrno, 2609 target_ulong *ret_val, 2610 target_ulong new_val, target_ulong wr_mask) 2611 { 2612 uint64_t rval; 2613 RISCVException ret; 2614 2615 ret = rmw_hideleg64(env, csrno, &rval, new_val, wr_mask); 2616 if (ret_val) { 2617 *ret_val = rval; 2618 } 2619 2620 return ret; 2621 } 2622 2623 static RISCVException rmw_hidelegh(CPURISCVState *env, int csrno, 2624 target_ulong *ret_val, 2625 target_ulong new_val, target_ulong wr_mask) 2626 { 2627 uint64_t rval; 2628 RISCVException ret; 2629 2630 ret = rmw_hideleg64(env, csrno, &rval, 2631 ((uint64_t)new_val) << 32, ((uint64_t)wr_mask) << 32); 2632 if (ret_val) { 2633 *ret_val = rval >> 32; 2634 } 2635 2636 return ret; 2637 } 2638 2639 static RISCVException rmw_hvip64(CPURISCVState *env, int csrno, 2640 uint64_t *ret_val, 2641 uint64_t new_val, uint64_t wr_mask) 2642 { 2643 RISCVException ret; 2644 2645 ret = rmw_mip64(env, csrno, ret_val, new_val, 2646 wr_mask & hvip_writable_mask); 2647 if (ret_val) { 2648 *ret_val &= VS_MODE_INTERRUPTS; 2649 } 2650 2651 return ret; 2652 } 2653 2654 static RISCVException rmw_hvip(CPURISCVState *env, int csrno, 2655 target_ulong *ret_val, 2656 target_ulong new_val, target_ulong wr_mask) 2657 { 2658 uint64_t rval; 2659 RISCVException ret; 2660 2661 ret = rmw_hvip64(env, csrno, &rval, new_val, wr_mask); 2662 if (ret_val) { 2663 *ret_val = rval; 2664 } 2665 2666 return ret; 2667 } 2668 2669 static RISCVException rmw_hviph(CPURISCVState *env, int csrno, 2670 target_ulong *ret_val, 2671 target_ulong new_val, target_ulong wr_mask) 2672 { 2673 uint64_t rval; 2674 RISCVException ret; 2675 2676 ret = rmw_hvip64(env, csrno, &rval, 2677 ((uint64_t)new_val) << 32, ((uint64_t)wr_mask) << 32); 2678 if (ret_val) { 2679 *ret_val = rval >> 32; 2680 } 2681 2682 return ret; 2683 } 2684 2685 static RISCVException rmw_hip(CPURISCVState *env, int csrno, 2686 target_ulong *ret_value, 2687 target_ulong new_value, target_ulong write_mask) 2688 { 2689 int ret = rmw_mip(env, csrno, ret_value, new_value, 2690 write_mask & hip_writable_mask); 2691 2692 if (ret_value) { 2693 *ret_value &= HS_MODE_INTERRUPTS; 2694 } 2695 return ret; 2696 } 2697 2698 static RISCVException rmw_hie(CPURISCVState *env, int csrno, 2699 target_ulong *ret_val, 2700 target_ulong new_val, target_ulong wr_mask) 2701 { 2702 uint64_t rval; 2703 RISCVException ret; 2704 2705 ret = rmw_mie64(env, csrno, &rval, new_val, wr_mask & HS_MODE_INTERRUPTS); 2706 if (ret_val) { 2707 *ret_val = rval & HS_MODE_INTERRUPTS; 2708 } 2709 2710 return ret; 2711 } 2712 2713 static RISCVException read_hcounteren(CPURISCVState *env, int csrno, 2714 target_ulong *val) 2715 { 2716 *val = env->hcounteren; 2717 return RISCV_EXCP_NONE; 2718 } 2719 2720 static RISCVException write_hcounteren(CPURISCVState *env, int csrno, 2721 target_ulong val) 2722 { 2723 env->hcounteren = val; 2724 return RISCV_EXCP_NONE; 2725 } 2726 2727 static RISCVException read_hgeie(CPURISCVState *env, int csrno, 2728 target_ulong *val) 2729 { 2730 if (val) { 2731 *val = env->hgeie; 2732 } 2733 return RISCV_EXCP_NONE; 2734 } 2735 2736 static RISCVException write_hgeie(CPURISCVState *env, int csrno, 2737 target_ulong val) 2738 { 2739 /* Only GEILEN:1 bits implemented and BIT0 is never implemented */ 2740 val &= ((((target_ulong)1) << env->geilen) - 1) << 1; 2741 env->hgeie = val; 2742 /* Update mip.SGEIP bit */ 2743 
riscv_cpu_update_mip(env_archcpu(env), MIP_SGEIP, 2744 BOOL_TO_MASK(!!(env->hgeie & env->hgeip))); 2745 return RISCV_EXCP_NONE; 2746 } 2747 2748 static RISCVException read_htval(CPURISCVState *env, int csrno, 2749 target_ulong *val) 2750 { 2751 *val = env->htval; 2752 return RISCV_EXCP_NONE; 2753 } 2754 2755 static RISCVException write_htval(CPURISCVState *env, int csrno, 2756 target_ulong val) 2757 { 2758 env->htval = val; 2759 return RISCV_EXCP_NONE; 2760 } 2761 2762 static RISCVException read_htinst(CPURISCVState *env, int csrno, 2763 target_ulong *val) 2764 { 2765 *val = env->htinst; 2766 return RISCV_EXCP_NONE; 2767 } 2768 2769 static RISCVException write_htinst(CPURISCVState *env, int csrno, 2770 target_ulong val) 2771 { 2772 return RISCV_EXCP_NONE; 2773 } 2774 2775 static RISCVException read_hgeip(CPURISCVState *env, int csrno, 2776 target_ulong *val) 2777 { 2778 if (val) { 2779 *val = env->hgeip; 2780 } 2781 return RISCV_EXCP_NONE; 2782 } 2783 2784 static RISCVException read_hgatp(CPURISCVState *env, int csrno, 2785 target_ulong *val) 2786 { 2787 *val = env->hgatp; 2788 return RISCV_EXCP_NONE; 2789 } 2790 2791 static RISCVException write_hgatp(CPURISCVState *env, int csrno, 2792 target_ulong val) 2793 { 2794 env->hgatp = val; 2795 return RISCV_EXCP_NONE; 2796 } 2797 2798 static RISCVException read_htimedelta(CPURISCVState *env, int csrno, 2799 target_ulong *val) 2800 { 2801 if (!env->rdtime_fn) { 2802 return RISCV_EXCP_ILLEGAL_INST; 2803 } 2804 2805 *val = env->htimedelta; 2806 return RISCV_EXCP_NONE; 2807 } 2808 2809 static RISCVException write_htimedelta(CPURISCVState *env, int csrno, 2810 target_ulong val) 2811 { 2812 if (!env->rdtime_fn) { 2813 return RISCV_EXCP_ILLEGAL_INST; 2814 } 2815 2816 if (riscv_cpu_mxl(env) == MXL_RV32) { 2817 env->htimedelta = deposit64(env->htimedelta, 0, 32, (uint64_t)val); 2818 } else { 2819 env->htimedelta = val; 2820 } 2821 return RISCV_EXCP_NONE; 2822 } 2823 2824 static RISCVException read_htimedeltah(CPURISCVState *env, int csrno, 2825 target_ulong *val) 2826 { 2827 if (!env->rdtime_fn) { 2828 return RISCV_EXCP_ILLEGAL_INST; 2829 } 2830 2831 *val = env->htimedelta >> 32; 2832 return RISCV_EXCP_NONE; 2833 } 2834 2835 static RISCVException write_htimedeltah(CPURISCVState *env, int csrno, 2836 target_ulong val) 2837 { 2838 if (!env->rdtime_fn) { 2839 return RISCV_EXCP_ILLEGAL_INST; 2840 } 2841 2842 env->htimedelta = deposit64(env->htimedelta, 32, 32, (uint64_t)val); 2843 return RISCV_EXCP_NONE; 2844 } 2845 2846 static int read_hvictl(CPURISCVState *env, int csrno, target_ulong *val) 2847 { 2848 *val = env->hvictl; 2849 return RISCV_EXCP_NONE; 2850 } 2851 2852 static int write_hvictl(CPURISCVState *env, int csrno, target_ulong val) 2853 { 2854 env->hvictl = val & HVICTL_VALID_MASK; 2855 return RISCV_EXCP_NONE; 2856 } 2857 2858 static int read_hvipriox(CPURISCVState *env, int first_index, 2859 uint8_t *iprio, target_ulong *val) 2860 { 2861 int i, irq, rdzero, num_irqs = 4 * (riscv_cpu_mxl_bits(env) / 32); 2862 2863 /* First index has to be a multiple of number of irqs per register */ 2864 if (first_index % num_irqs) { 2865 return (riscv_cpu_virt_enabled(env)) ? 
               RISCV_EXCP_VIRT_INSTRUCTION_FAULT : RISCV_EXCP_ILLEGAL_INST;
    }

    /* Fill up the return value */
    *val = 0;
    for (i = 0; i < num_irqs; i++) {
        if (riscv_cpu_hviprio_index2irq(first_index + i, &irq, &rdzero)) {
            continue;
        }
        if (rdzero) {
            continue;
        }
        *val |= ((target_ulong)iprio[irq]) << (i * 8);
    }

    return RISCV_EXCP_NONE;
}

static int write_hvipriox(CPURISCVState *env, int first_index,
                          uint8_t *iprio, target_ulong val)
{
    int i, irq, rdzero, num_irqs = 4 * (riscv_cpu_mxl_bits(env) / 32);

    /* First index has to be a multiple of number of irqs per register */
    if (first_index % num_irqs) {
        return (riscv_cpu_virt_enabled(env)) ?
               RISCV_EXCP_VIRT_INSTRUCTION_FAULT : RISCV_EXCP_ILLEGAL_INST;
    }

    /* Fill up the priority array */
    for (i = 0; i < num_irqs; i++) {
        if (riscv_cpu_hviprio_index2irq(first_index + i, &irq, &rdzero)) {
            continue;
        }
        if (rdzero) {
            iprio[irq] = 0;
        } else {
            iprio[irq] = (val >> (i * 8)) & 0xff;
        }
    }

    return RISCV_EXCP_NONE;
}

static int read_hviprio1(CPURISCVState *env, int csrno, target_ulong *val)
{
    return read_hvipriox(env, 0, env->hviprio, val);
}

static int write_hviprio1(CPURISCVState *env, int csrno, target_ulong val)
{
    return write_hvipriox(env, 0, env->hviprio, val);
}

static int read_hviprio1h(CPURISCVState *env, int csrno, target_ulong *val)
{
    return read_hvipriox(env, 4, env->hviprio, val);
}

static int write_hviprio1h(CPURISCVState *env, int csrno, target_ulong val)
{
    return write_hvipriox(env, 4, env->hviprio, val);
}

static int read_hviprio2(CPURISCVState *env, int csrno, target_ulong *val)
{
    return read_hvipriox(env, 8, env->hviprio, val);
}

static int write_hviprio2(CPURISCVState *env, int csrno, target_ulong val)
{
    return write_hvipriox(env, 8, env->hviprio, val);
}

static int read_hviprio2h(CPURISCVState *env, int csrno, target_ulong *val)
{
    return read_hvipriox(env, 12, env->hviprio, val);
}

static int write_hviprio2h(CPURISCVState *env, int csrno, target_ulong val)
{
    return write_hvipriox(env, 12, env->hviprio, val);
}

/* Virtual CSR Registers */
static RISCVException read_vsstatus(CPURISCVState *env, int csrno,
                                    target_ulong *val)
{
    *val = env->vsstatus;
    return RISCV_EXCP_NONE;
}

static RISCVException write_vsstatus(CPURISCVState *env, int csrno,
                                     target_ulong val)
{
    uint64_t mask = (target_ulong)-1;
    if ((val & VSSTATUS64_UXL) == 0) {
        mask &= ~VSSTATUS64_UXL;
    }
    env->vsstatus = (env->vsstatus & ~mask) | (uint64_t)val;
    return RISCV_EXCP_NONE;
}

static int read_vstvec(CPURISCVState *env, int csrno, target_ulong *val)
{
    *val = env->vstvec;
    return RISCV_EXCP_NONE;
}

static RISCVException write_vstvec(CPURISCVState *env, int csrno,
                                   target_ulong val)
{
    env->vstvec = val;
    return RISCV_EXCP_NONE;
}

static RISCVException read_vsscratch(CPURISCVState *env, int csrno,
                                     target_ulong *val)
{
    *val = env->vsscratch;
    return RISCV_EXCP_NONE;
}

static RISCVException write_vsscratch(CPURISCVState *env, int csrno,
                                      target_ulong val)
{
    env->vsscratch = val;
    return
RISCV_EXCP_NONE; 2994 } 2995 2996 static RISCVException read_vsepc(CPURISCVState *env, int csrno, 2997 target_ulong *val) 2998 { 2999 *val = env->vsepc; 3000 return RISCV_EXCP_NONE; 3001 } 3002 3003 static RISCVException write_vsepc(CPURISCVState *env, int csrno, 3004 target_ulong val) 3005 { 3006 env->vsepc = val; 3007 return RISCV_EXCP_NONE; 3008 } 3009 3010 static RISCVException read_vscause(CPURISCVState *env, int csrno, 3011 target_ulong *val) 3012 { 3013 *val = env->vscause; 3014 return RISCV_EXCP_NONE; 3015 } 3016 3017 static RISCVException write_vscause(CPURISCVState *env, int csrno, 3018 target_ulong val) 3019 { 3020 env->vscause = val; 3021 return RISCV_EXCP_NONE; 3022 } 3023 3024 static RISCVException read_vstval(CPURISCVState *env, int csrno, 3025 target_ulong *val) 3026 { 3027 *val = env->vstval; 3028 return RISCV_EXCP_NONE; 3029 } 3030 3031 static RISCVException write_vstval(CPURISCVState *env, int csrno, 3032 target_ulong val) 3033 { 3034 env->vstval = val; 3035 return RISCV_EXCP_NONE; 3036 } 3037 3038 static RISCVException read_vsatp(CPURISCVState *env, int csrno, 3039 target_ulong *val) 3040 { 3041 *val = env->vsatp; 3042 return RISCV_EXCP_NONE; 3043 } 3044 3045 static RISCVException write_vsatp(CPURISCVState *env, int csrno, 3046 target_ulong val) 3047 { 3048 env->vsatp = val; 3049 return RISCV_EXCP_NONE; 3050 } 3051 3052 static RISCVException read_mtval2(CPURISCVState *env, int csrno, 3053 target_ulong *val) 3054 { 3055 *val = env->mtval2; 3056 return RISCV_EXCP_NONE; 3057 } 3058 3059 static RISCVException write_mtval2(CPURISCVState *env, int csrno, 3060 target_ulong val) 3061 { 3062 env->mtval2 = val; 3063 return RISCV_EXCP_NONE; 3064 } 3065 3066 static RISCVException read_mtinst(CPURISCVState *env, int csrno, 3067 target_ulong *val) 3068 { 3069 *val = env->mtinst; 3070 return RISCV_EXCP_NONE; 3071 } 3072 3073 static RISCVException write_mtinst(CPURISCVState *env, int csrno, 3074 target_ulong val) 3075 { 3076 env->mtinst = val; 3077 return RISCV_EXCP_NONE; 3078 } 3079 3080 /* Physical Memory Protection */ 3081 static RISCVException read_mseccfg(CPURISCVState *env, int csrno, 3082 target_ulong *val) 3083 { 3084 *val = mseccfg_csr_read(env); 3085 return RISCV_EXCP_NONE; 3086 } 3087 3088 static RISCVException write_mseccfg(CPURISCVState *env, int csrno, 3089 target_ulong val) 3090 { 3091 mseccfg_csr_write(env, val); 3092 return RISCV_EXCP_NONE; 3093 } 3094 3095 static bool check_pmp_reg_index(CPURISCVState *env, uint32_t reg_index) 3096 { 3097 /* TODO: RV128 restriction check */ 3098 if ((reg_index & 1) && (riscv_cpu_mxl(env) == MXL_RV64)) { 3099 return false; 3100 } 3101 return true; 3102 } 3103 3104 static RISCVException read_pmpcfg(CPURISCVState *env, int csrno, 3105 target_ulong *val) 3106 { 3107 uint32_t reg_index = csrno - CSR_PMPCFG0; 3108 3109 if (!check_pmp_reg_index(env, reg_index)) { 3110 return RISCV_EXCP_ILLEGAL_INST; 3111 } 3112 *val = pmpcfg_csr_read(env, csrno - CSR_PMPCFG0); 3113 return RISCV_EXCP_NONE; 3114 } 3115 3116 static RISCVException write_pmpcfg(CPURISCVState *env, int csrno, 3117 target_ulong val) 3118 { 3119 uint32_t reg_index = csrno - CSR_PMPCFG0; 3120 3121 if (!check_pmp_reg_index(env, reg_index)) { 3122 return RISCV_EXCP_ILLEGAL_INST; 3123 } 3124 pmpcfg_csr_write(env, csrno - CSR_PMPCFG0, val); 3125 return RISCV_EXCP_NONE; 3126 } 3127 3128 static RISCVException read_pmpaddr(CPURISCVState *env, int csrno, 3129 target_ulong *val) 3130 { 3131 *val = pmpaddr_csr_read(env, csrno - CSR_PMPADDR0); 3132 return RISCV_EXCP_NONE; 3133 } 3134 3135 
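/*
 * For illustration only: a guest "csrw pmpaddr7, t0" reaches write_pmpaddr()
 * below with csrno == CSR_PMPADDR7, so the PMP entry index handed to the
 * pmp backend is csrno - CSR_PMPADDR0 == 7.  Per-entry behaviour such as
 * ignoring writes to locked entries is assumed to be handled inside
 * pmpaddr_csr_write() rather than in these thin accessors.
 */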
static RISCVException write_pmpaddr(CPURISCVState *env, int csrno,
                                    target_ulong val)
{
    pmpaddr_csr_write(env, csrno - CSR_PMPADDR0, val);
    return RISCV_EXCP_NONE;
}

static RISCVException read_tselect(CPURISCVState *env, int csrno,
                                   target_ulong *val)
{
    *val = tselect_csr_read(env);
    return RISCV_EXCP_NONE;
}

static RISCVException write_tselect(CPURISCVState *env, int csrno,
                                    target_ulong val)
{
    tselect_csr_write(env, val);
    return RISCV_EXCP_NONE;
}

static RISCVException read_tdata(CPURISCVState *env, int csrno,
                                 target_ulong *val)
{
    /* return 0 in tdata1 to end the trigger enumeration */
    if (env->trigger_cur >= TRIGGER_NUM && csrno == CSR_TDATA1) {
        *val = 0;
        return RISCV_EXCP_NONE;
    }

    if (!tdata_available(env, csrno - CSR_TDATA1)) {
        return RISCV_EXCP_ILLEGAL_INST;
    }

    *val = tdata_csr_read(env, csrno - CSR_TDATA1);
    return RISCV_EXCP_NONE;
}

static RISCVException write_tdata(CPURISCVState *env, int csrno,
                                  target_ulong val)
{
    if (!tdata_available(env, csrno - CSR_TDATA1)) {
        return RISCV_EXCP_ILLEGAL_INST;
    }

    tdata_csr_write(env, csrno - CSR_TDATA1, val);
    return RISCV_EXCP_NONE;
}

/*
 * Functions to access Pointer Masking feature registers.
 * We have to check whether the current privilege level is allowed to
 * modify the CSR in the given mode.
 */
static bool check_pm_current_disabled(CPURISCVState *env, int csrno)
{
    int csr_priv = get_field(csrno, 0x300);
    int pm_current;

    if (env->debugger) {
        return false;
    }
    /*
     * If the privilege levels differ, we are accessing the CSR from a
     * higher privilege level, so allow the access.
     */
    if (env->priv != csr_priv) {
        return false;
    }
    switch (env->priv) {
    case PRV_M:
        pm_current = get_field(env->mmte, M_PM_CURRENT);
        break;
    case PRV_S:
        pm_current = get_field(env->mmte, S_PM_CURRENT);
        break;
    case PRV_U:
        pm_current = get_field(env->mmte, U_PM_CURRENT);
        break;
    default:
        g_assert_not_reached();
    }
    /* Same privilege level, so only allow modifying the CSR if pm.current==1 */
    return !pm_current;
}

static RISCVException read_mmte(CPURISCVState *env, int csrno,
                                target_ulong *val)
{
    *val = env->mmte & MMTE_MASK;
    return RISCV_EXCP_NONE;
}

static RISCVException write_mmte(CPURISCVState *env, int csrno,
                                 target_ulong val)
{
    uint64_t mstatus;
    target_ulong wpri_val = val & MMTE_MASK;

    if (val != wpri_val) {
        qemu_log_mask(LOG_GUEST_ERROR, "%s" TARGET_FMT_lx " %s" TARGET_FMT_lx "\n",
                      "MMTE: WPRI violation written 0x", val,
                      "vs expected 0x", wpri_val);
    }
    /* for machine mode pm.current is hardwired to 1 */
    wpri_val |= MMTE_M_PM_CURRENT;

    /* hardwiring pm.instruction bit to 0, since it's not supported yet */
    wpri_val &= ~(MMTE_M_PM_INSN | MMTE_S_PM_INSN | MMTE_U_PM_INSN);
    env->mmte = wpri_val | PM_EXT_DIRTY;
    riscv_cpu_update_mask(env);

    /* Set XS and SD bits, since PM CSRs are dirty */
    mstatus = env->mstatus | MSTATUS_XS;
    write_mstatus(env, csrno, mstatus);
    return RISCV_EXCP_NONE;
}

static RISCVException read_smte(CPURISCVState *env, int csrno,
                                target_ulong *val)
{
    *val = env->mmte & SMTE_MASK;
    return RISCV_EXCP_NONE;
}

static
RISCVException write_smte(CPURISCVState *env, int csrno, 3261 target_ulong val) 3262 { 3263 target_ulong wpri_val = val & SMTE_MASK; 3264 3265 if (val != wpri_val) { 3266 qemu_log_mask(LOG_GUEST_ERROR, "%s" TARGET_FMT_lx " %s" TARGET_FMT_lx "\n", 3267 "SMTE: WPRI violation written 0x", val, 3268 "vs expected 0x", wpri_val); 3269 } 3270 3271 /* if pm.current==0 we can't modify current PM CSRs */ 3272 if (check_pm_current_disabled(env, csrno)) { 3273 return RISCV_EXCP_NONE; 3274 } 3275 3276 wpri_val |= (env->mmte & ~SMTE_MASK); 3277 write_mmte(env, csrno, wpri_val); 3278 return RISCV_EXCP_NONE; 3279 } 3280 3281 static RISCVException read_umte(CPURISCVState *env, int csrno, 3282 target_ulong *val) 3283 { 3284 *val = env->mmte & UMTE_MASK; 3285 return RISCV_EXCP_NONE; 3286 } 3287 3288 static RISCVException write_umte(CPURISCVState *env, int csrno, 3289 target_ulong val) 3290 { 3291 target_ulong wpri_val = val & UMTE_MASK; 3292 3293 if (val != wpri_val) { 3294 qemu_log_mask(LOG_GUEST_ERROR, "%s" TARGET_FMT_lx " %s" TARGET_FMT_lx "\n", 3295 "UMTE: WPRI violation written 0x", val, 3296 "vs expected 0x", wpri_val); 3297 } 3298 3299 if (check_pm_current_disabled(env, csrno)) { 3300 return RISCV_EXCP_NONE; 3301 } 3302 3303 wpri_val |= (env->mmte & ~UMTE_MASK); 3304 write_mmte(env, csrno, wpri_val); 3305 return RISCV_EXCP_NONE; 3306 } 3307 3308 static RISCVException read_mpmmask(CPURISCVState *env, int csrno, 3309 target_ulong *val) 3310 { 3311 *val = env->mpmmask; 3312 return RISCV_EXCP_NONE; 3313 } 3314 3315 static RISCVException write_mpmmask(CPURISCVState *env, int csrno, 3316 target_ulong val) 3317 { 3318 uint64_t mstatus; 3319 3320 env->mpmmask = val; 3321 if ((env->priv == PRV_M) && (env->mmte & M_PM_ENABLE)) { 3322 env->cur_pmmask = val; 3323 } 3324 env->mmte |= PM_EXT_DIRTY; 3325 3326 /* Set XS and SD bits, since PM CSRs are dirty */ 3327 mstatus = env->mstatus | MSTATUS_XS; 3328 write_mstatus(env, csrno, mstatus); 3329 return RISCV_EXCP_NONE; 3330 } 3331 3332 static RISCVException read_spmmask(CPURISCVState *env, int csrno, 3333 target_ulong *val) 3334 { 3335 *val = env->spmmask; 3336 return RISCV_EXCP_NONE; 3337 } 3338 3339 static RISCVException write_spmmask(CPURISCVState *env, int csrno, 3340 target_ulong val) 3341 { 3342 uint64_t mstatus; 3343 3344 /* if pm.current==0 we can't modify current PM CSRs */ 3345 if (check_pm_current_disabled(env, csrno)) { 3346 return RISCV_EXCP_NONE; 3347 } 3348 env->spmmask = val; 3349 if ((env->priv == PRV_S) && (env->mmte & S_PM_ENABLE)) { 3350 env->cur_pmmask = val; 3351 } 3352 env->mmte |= PM_EXT_DIRTY; 3353 3354 /* Set XS and SD bits, since PM CSRs are dirty */ 3355 mstatus = env->mstatus | MSTATUS_XS; 3356 write_mstatus(env, csrno, mstatus); 3357 return RISCV_EXCP_NONE; 3358 } 3359 3360 static RISCVException read_upmmask(CPURISCVState *env, int csrno, 3361 target_ulong *val) 3362 { 3363 *val = env->upmmask; 3364 return RISCV_EXCP_NONE; 3365 } 3366 3367 static RISCVException write_upmmask(CPURISCVState *env, int csrno, 3368 target_ulong val) 3369 { 3370 uint64_t mstatus; 3371 3372 /* if pm.current==0 we can't modify current PM CSRs */ 3373 if (check_pm_current_disabled(env, csrno)) { 3374 return RISCV_EXCP_NONE; 3375 } 3376 env->upmmask = val; 3377 if ((env->priv == PRV_U) && (env->mmte & U_PM_ENABLE)) { 3378 env->cur_pmmask = val; 3379 } 3380 env->mmte |= PM_EXT_DIRTY; 3381 3382 /* Set XS and SD bits, since PM CSRs are dirty */ 3383 mstatus = env->mstatus | MSTATUS_XS; 3384 write_mstatus(env, csrno, mstatus); 3385 return RISCV_EXCP_NONE; 3386 } 3387 
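/*
 * For illustration only: the *pmmask CSRs above and the *pmbase CSRs below
 * feed env->cur_pmmask and env->cur_pmbase, which the pointer-masking
 * address adjustment combines roughly as
 *     masked_addr = (addr & env->cur_pmmask) | env->cur_pmbase;
 * i.e. the mask selects which address bits an access keeps and the base
 * supplies the remaining bits.  The adjustment itself is not done in these
 * accessors; it lives in the memory-access path (cpu_helper.c).
 */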
3388 static RISCVException read_mpmbase(CPURISCVState *env, int csrno, 3389 target_ulong *val) 3390 { 3391 *val = env->mpmbase; 3392 return RISCV_EXCP_NONE; 3393 } 3394 3395 static RISCVException write_mpmbase(CPURISCVState *env, int csrno, 3396 target_ulong val) 3397 { 3398 uint64_t mstatus; 3399 3400 env->mpmbase = val; 3401 if ((env->priv == PRV_M) && (env->mmte & M_PM_ENABLE)) { 3402 env->cur_pmbase = val; 3403 } 3404 env->mmte |= PM_EXT_DIRTY; 3405 3406 /* Set XS and SD bits, since PM CSRs are dirty */ 3407 mstatus = env->mstatus | MSTATUS_XS; 3408 write_mstatus(env, csrno, mstatus); 3409 return RISCV_EXCP_NONE; 3410 } 3411 3412 static RISCVException read_spmbase(CPURISCVState *env, int csrno, 3413 target_ulong *val) 3414 { 3415 *val = env->spmbase; 3416 return RISCV_EXCP_NONE; 3417 } 3418 3419 static RISCVException write_spmbase(CPURISCVState *env, int csrno, 3420 target_ulong val) 3421 { 3422 uint64_t mstatus; 3423 3424 /* if pm.current==0 we can't modify current PM CSRs */ 3425 if (check_pm_current_disabled(env, csrno)) { 3426 return RISCV_EXCP_NONE; 3427 } 3428 env->spmbase = val; 3429 if ((env->priv == PRV_S) && (env->mmte & S_PM_ENABLE)) { 3430 env->cur_pmbase = val; 3431 } 3432 env->mmte |= PM_EXT_DIRTY; 3433 3434 /* Set XS and SD bits, since PM CSRs are dirty */ 3435 mstatus = env->mstatus | MSTATUS_XS; 3436 write_mstatus(env, csrno, mstatus); 3437 return RISCV_EXCP_NONE; 3438 } 3439 3440 static RISCVException read_upmbase(CPURISCVState *env, int csrno, 3441 target_ulong *val) 3442 { 3443 *val = env->upmbase; 3444 return RISCV_EXCP_NONE; 3445 } 3446 3447 static RISCVException write_upmbase(CPURISCVState *env, int csrno, 3448 target_ulong val) 3449 { 3450 uint64_t mstatus; 3451 3452 /* if pm.current==0 we can't modify current PM CSRs */ 3453 if (check_pm_current_disabled(env, csrno)) { 3454 return RISCV_EXCP_NONE; 3455 } 3456 env->upmbase = val; 3457 if ((env->priv == PRV_U) && (env->mmte & U_PM_ENABLE)) { 3458 env->cur_pmbase = val; 3459 } 3460 env->mmte |= PM_EXT_DIRTY; 3461 3462 /* Set XS and SD bits, since PM CSRs are dirty */ 3463 mstatus = env->mstatus | MSTATUS_XS; 3464 write_mstatus(env, csrno, mstatus); 3465 return RISCV_EXCP_NONE; 3466 } 3467 3468 #endif 3469 3470 /* Crypto Extension */ 3471 static RISCVException rmw_seed(CPURISCVState *env, int csrno, 3472 target_ulong *ret_value, 3473 target_ulong new_value, 3474 target_ulong write_mask) 3475 { 3476 uint16_t random_v; 3477 Error *random_e = NULL; 3478 int random_r; 3479 target_ulong rval; 3480 3481 random_r = qemu_guest_getrandom(&random_v, 2, &random_e); 3482 if (unlikely(random_r < 0)) { 3483 /* 3484 * Failed, for unknown reasons in the crypto subsystem. 3485 * The best we can do is log the reason and return a 3486 * failure indication to the guest. There is no reason 3487 * we know to expect the failure to be transitory, so 3488 * indicate DEAD to avoid having the guest spin on WAIT. 
         */
        qemu_log_mask(LOG_UNIMP, "%s: Crypto failure: %s",
                      __func__, error_get_pretty(random_e));
        error_free(random_e);
        rval = SEED_OPST_DEAD;
    } else {
        rval = random_v | SEED_OPST_ES16;
    }

    if (ret_value) {
        *ret_value = rval;
    }

    return RISCV_EXCP_NONE;
}

/*
 * riscv_csrrw - read and/or update control and status register
 *
 * csrr  <-> riscv_csrrw(env, csrno, ret_value, 0, 0);
 * csrrw <-> riscv_csrrw(env, csrno, ret_value, value, -1);
 * csrrs <-> riscv_csrrw(env, csrno, ret_value, -1, value);
 * csrrc <-> riscv_csrrw(env, csrno, ret_value, 0, value);
 */

static inline RISCVException riscv_csrrw_check(CPURISCVState *env,
                                               int csrno,
                                               bool write_mask,
                                               RISCVCPU *cpu)
{
    /* check privileges and return RISCV_EXCP_ILLEGAL_INST if check fails */
    int read_only = get_field(csrno, 0xC00) == 3;
    int csr_min_priv = csr_ops[csrno].min_priv_ver;

    /* ensure the CSR extension is enabled. */
    if (!cpu->cfg.ext_icsr) {
        return RISCV_EXCP_ILLEGAL_INST;
    }

    if (env->priv_ver < csr_min_priv) {
        return RISCV_EXCP_ILLEGAL_INST;
    }

    /* check predicate */
    if (!csr_ops[csrno].predicate) {
        return RISCV_EXCP_ILLEGAL_INST;
    }

    if (write_mask && read_only) {
        return RISCV_EXCP_ILLEGAL_INST;
    }

    RISCVException ret = csr_ops[csrno].predicate(env, csrno);
    if (ret != RISCV_EXCP_NONE) {
        return ret;
    }

#if !defined(CONFIG_USER_ONLY)
    int csr_priv, effective_priv = env->priv;

    if (riscv_has_ext(env, RVH) && env->priv == PRV_S &&
        !riscv_cpu_virt_enabled(env)) {
        /*
         * We are in HS mode. Add 1 to the effective privilege level to
         * allow us to access the Hypervisor CSRs.
3554 */ 3555 effective_priv++; 3556 } 3557 3558 csr_priv = get_field(csrno, 0x300); 3559 if (!env->debugger && (effective_priv < csr_priv)) { 3560 if (csr_priv == (PRV_S + 1) && riscv_cpu_virt_enabled(env)) { 3561 return RISCV_EXCP_VIRT_INSTRUCTION_FAULT; 3562 } 3563 return RISCV_EXCP_ILLEGAL_INST; 3564 } 3565 #endif 3566 return RISCV_EXCP_NONE; 3567 } 3568 3569 static RISCVException riscv_csrrw_do64(CPURISCVState *env, int csrno, 3570 target_ulong *ret_value, 3571 target_ulong new_value, 3572 target_ulong write_mask) 3573 { 3574 RISCVException ret; 3575 target_ulong old_value; 3576 3577 /* execute combined read/write operation if it exists */ 3578 if (csr_ops[csrno].op) { 3579 return csr_ops[csrno].op(env, csrno, ret_value, new_value, write_mask); 3580 } 3581 3582 /* if no accessor exists then return failure */ 3583 if (!csr_ops[csrno].read) { 3584 return RISCV_EXCP_ILLEGAL_INST; 3585 } 3586 /* read old value */ 3587 ret = csr_ops[csrno].read(env, csrno, &old_value); 3588 if (ret != RISCV_EXCP_NONE) { 3589 return ret; 3590 } 3591 3592 /* write value if writable and write mask set, otherwise drop writes */ 3593 if (write_mask) { 3594 new_value = (old_value & ~write_mask) | (new_value & write_mask); 3595 if (csr_ops[csrno].write) { 3596 ret = csr_ops[csrno].write(env, csrno, new_value); 3597 if (ret != RISCV_EXCP_NONE) { 3598 return ret; 3599 } 3600 } 3601 } 3602 3603 /* return old value */ 3604 if (ret_value) { 3605 *ret_value = old_value; 3606 } 3607 3608 return RISCV_EXCP_NONE; 3609 } 3610 3611 RISCVException riscv_csrrw(CPURISCVState *env, int csrno, 3612 target_ulong *ret_value, 3613 target_ulong new_value, target_ulong write_mask) 3614 { 3615 RISCVCPU *cpu = env_archcpu(env); 3616 3617 RISCVException ret = riscv_csrrw_check(env, csrno, write_mask, cpu); 3618 if (ret != RISCV_EXCP_NONE) { 3619 return ret; 3620 } 3621 3622 return riscv_csrrw_do64(env, csrno, ret_value, new_value, write_mask); 3623 } 3624 3625 static RISCVException riscv_csrrw_do128(CPURISCVState *env, int csrno, 3626 Int128 *ret_value, 3627 Int128 new_value, 3628 Int128 write_mask) 3629 { 3630 RISCVException ret; 3631 Int128 old_value; 3632 3633 /* read old value */ 3634 ret = csr_ops[csrno].read128(env, csrno, &old_value); 3635 if (ret != RISCV_EXCP_NONE) { 3636 return ret; 3637 } 3638 3639 /* write value if writable and write mask set, otherwise drop writes */ 3640 if (int128_nz(write_mask)) { 3641 new_value = int128_or(int128_and(old_value, int128_not(write_mask)), 3642 int128_and(new_value, write_mask)); 3643 if (csr_ops[csrno].write128) { 3644 ret = csr_ops[csrno].write128(env, csrno, new_value); 3645 if (ret != RISCV_EXCP_NONE) { 3646 return ret; 3647 } 3648 } else if (csr_ops[csrno].write) { 3649 /* avoids having to write wrappers for all registers */ 3650 ret = csr_ops[csrno].write(env, csrno, int128_getlo(new_value)); 3651 if (ret != RISCV_EXCP_NONE) { 3652 return ret; 3653 } 3654 } 3655 } 3656 3657 /* return old value */ 3658 if (ret_value) { 3659 *ret_value = old_value; 3660 } 3661 3662 return RISCV_EXCP_NONE; 3663 } 3664 3665 RISCVException riscv_csrrw_i128(CPURISCVState *env, int csrno, 3666 Int128 *ret_value, 3667 Int128 new_value, Int128 write_mask) 3668 { 3669 RISCVException ret; 3670 RISCVCPU *cpu = env_archcpu(env); 3671 3672 ret = riscv_csrrw_check(env, csrno, int128_nz(write_mask), cpu); 3673 if (ret != RISCV_EXCP_NONE) { 3674 return ret; 3675 } 3676 3677 if (csr_ops[csrno].read128) { 3678 return riscv_csrrw_do128(env, csrno, ret_value, new_value, write_mask); 3679 } 3680 3681 /* 3682 * Fall back to 
64-bit version for now, if the 128-bit alternative isn't 3683 * at all defined. 3684 * Note, some CSRs don't need to extend to MXLEN (64 upper bits non 3685 * significant), for those, this fallback is correctly handling the accesses 3686 */ 3687 target_ulong old_value; 3688 ret = riscv_csrrw_do64(env, csrno, &old_value, 3689 int128_getlo(new_value), 3690 int128_getlo(write_mask)); 3691 if (ret == RISCV_EXCP_NONE && ret_value) { 3692 *ret_value = int128_make64(old_value); 3693 } 3694 return ret; 3695 } 3696 3697 /* 3698 * Debugger support. If not in user mode, set env->debugger before the 3699 * riscv_csrrw call and clear it after the call. 3700 */ 3701 RISCVException riscv_csrrw_debug(CPURISCVState *env, int csrno, 3702 target_ulong *ret_value, 3703 target_ulong new_value, 3704 target_ulong write_mask) 3705 { 3706 RISCVException ret; 3707 #if !defined(CONFIG_USER_ONLY) 3708 env->debugger = true; 3709 #endif 3710 ret = riscv_csrrw(env, csrno, ret_value, new_value, write_mask); 3711 #if !defined(CONFIG_USER_ONLY) 3712 env->debugger = false; 3713 #endif 3714 return ret; 3715 } 3716 3717 /* Control and Status Register function table */ 3718 riscv_csr_operations csr_ops[CSR_TABLE_SIZE] = { 3719 /* User Floating-Point CSRs */ 3720 [CSR_FFLAGS] = { "fflags", fs, read_fflags, write_fflags }, 3721 [CSR_FRM] = { "frm", fs, read_frm, write_frm }, 3722 [CSR_FCSR] = { "fcsr", fs, read_fcsr, write_fcsr }, 3723 /* Vector CSRs */ 3724 [CSR_VSTART] = { "vstart", vs, read_vstart, write_vstart, 3725 .min_priv_ver = PRIV_VERSION_1_12_0 }, 3726 [CSR_VXSAT] = { "vxsat", vs, read_vxsat, write_vxsat, 3727 .min_priv_ver = PRIV_VERSION_1_12_0 }, 3728 [CSR_VXRM] = { "vxrm", vs, read_vxrm, write_vxrm, 3729 .min_priv_ver = PRIV_VERSION_1_12_0 }, 3730 [CSR_VCSR] = { "vcsr", vs, read_vcsr, write_vcsr, 3731 .min_priv_ver = PRIV_VERSION_1_12_0 }, 3732 [CSR_VL] = { "vl", vs, read_vl, 3733 .min_priv_ver = PRIV_VERSION_1_12_0 }, 3734 [CSR_VTYPE] = { "vtype", vs, read_vtype, 3735 .min_priv_ver = PRIV_VERSION_1_12_0 }, 3736 [CSR_VLENB] = { "vlenb", vs, read_vlenb, 3737 .min_priv_ver = PRIV_VERSION_1_12_0 }, 3738 /* User Timers and Counters */ 3739 [CSR_CYCLE] = { "cycle", ctr, read_hpmcounter }, 3740 [CSR_INSTRET] = { "instret", ctr, read_hpmcounter }, 3741 [CSR_CYCLEH] = { "cycleh", ctr32, read_hpmcounterh }, 3742 [CSR_INSTRETH] = { "instreth", ctr32, read_hpmcounterh }, 3743 3744 /* 3745 * In privileged mode, the monitor will have to emulate TIME CSRs only if 3746 * rdtime callback is not provided by machine/platform emulation. 
3747 */ 3748 [CSR_TIME] = { "time", ctr, read_time }, 3749 [CSR_TIMEH] = { "timeh", ctr32, read_timeh }, 3750 3751 /* Crypto Extension */ 3752 [CSR_SEED] = { "seed", seed, NULL, NULL, rmw_seed }, 3753 3754 #if !defined(CONFIG_USER_ONLY) 3755 /* Machine Timers and Counters */ 3756 [CSR_MCYCLE] = { "mcycle", any, read_hpmcounter, 3757 write_mhpmcounter }, 3758 [CSR_MINSTRET] = { "minstret", any, read_hpmcounter, 3759 write_mhpmcounter }, 3760 [CSR_MCYCLEH] = { "mcycleh", any32, read_hpmcounterh, 3761 write_mhpmcounterh }, 3762 [CSR_MINSTRETH] = { "minstreth", any32, read_hpmcounterh, 3763 write_mhpmcounterh }, 3764 3765 /* Machine Information Registers */ 3766 [CSR_MVENDORID] = { "mvendorid", any, read_mvendorid }, 3767 [CSR_MARCHID] = { "marchid", any, read_marchid }, 3768 [CSR_MIMPID] = { "mimpid", any, read_mimpid }, 3769 [CSR_MHARTID] = { "mhartid", any, read_mhartid }, 3770 3771 [CSR_MCONFIGPTR] = { "mconfigptr", any, read_zero, 3772 .min_priv_ver = PRIV_VERSION_1_12_0 }, 3773 /* Machine Trap Setup */ 3774 [CSR_MSTATUS] = { "mstatus", any, read_mstatus, write_mstatus, 3775 NULL, read_mstatus_i128 }, 3776 [CSR_MISA] = { "misa", any, read_misa, write_misa, 3777 NULL, read_misa_i128 }, 3778 [CSR_MIDELEG] = { "mideleg", any, NULL, NULL, rmw_mideleg }, 3779 [CSR_MEDELEG] = { "medeleg", any, read_medeleg, write_medeleg }, 3780 [CSR_MIE] = { "mie", any, NULL, NULL, rmw_mie }, 3781 [CSR_MTVEC] = { "mtvec", any, read_mtvec, write_mtvec }, 3782 [CSR_MCOUNTEREN] = { "mcounteren", umode, read_mcounteren, 3783 write_mcounteren }, 3784 3785 [CSR_MSTATUSH] = { "mstatush", any32, read_mstatush, 3786 write_mstatush }, 3787 3788 /* Machine Trap Handling */ 3789 [CSR_MSCRATCH] = { "mscratch", any, read_mscratch, write_mscratch, 3790 NULL, read_mscratch_i128, write_mscratch_i128 }, 3791 [CSR_MEPC] = { "mepc", any, read_mepc, write_mepc }, 3792 [CSR_MCAUSE] = { "mcause", any, read_mcause, write_mcause }, 3793 [CSR_MTVAL] = { "mtval", any, read_mtval, write_mtval }, 3794 [CSR_MIP] = { "mip", any, NULL, NULL, rmw_mip }, 3795 3796 /* Machine-Level Window to Indirectly Accessed Registers (AIA) */ 3797 [CSR_MISELECT] = { "miselect", aia_any, NULL, NULL, rmw_xiselect }, 3798 [CSR_MIREG] = { "mireg", aia_any, NULL, NULL, rmw_xireg }, 3799 3800 /* Machine-Level Interrupts (AIA) */ 3801 [CSR_MTOPEI] = { "mtopei", aia_any, NULL, NULL, rmw_xtopei }, 3802 [CSR_MTOPI] = { "mtopi", aia_any, read_mtopi }, 3803 3804 /* Virtual Interrupts for Supervisor Level (AIA) */ 3805 [CSR_MVIEN] = { "mvien", aia_any, read_zero, write_ignore }, 3806 [CSR_MVIP] = { "mvip", aia_any, read_zero, write_ignore }, 3807 3808 /* Machine-Level High-Half CSRs (AIA) */ 3809 [CSR_MIDELEGH] = { "midelegh", aia_any32, NULL, NULL, rmw_midelegh }, 3810 [CSR_MIEH] = { "mieh", aia_any32, NULL, NULL, rmw_mieh }, 3811 [CSR_MVIENH] = { "mvienh", aia_any32, read_zero, write_ignore }, 3812 [CSR_MVIPH] = { "mviph", aia_any32, read_zero, write_ignore }, 3813 [CSR_MIPH] = { "miph", aia_any32, NULL, NULL, rmw_miph }, 3814 3815 /* Execution environment configuration */ 3816 [CSR_MENVCFG] = { "menvcfg", umode, read_menvcfg, write_menvcfg, 3817 .min_priv_ver = PRIV_VERSION_1_12_0 }, 3818 [CSR_MENVCFGH] = { "menvcfgh", umode32, read_menvcfgh, write_menvcfgh, 3819 .min_priv_ver = PRIV_VERSION_1_12_0 }, 3820 [CSR_SENVCFG] = { "senvcfg", smode, read_senvcfg, write_senvcfg, 3821 .min_priv_ver = PRIV_VERSION_1_12_0 }, 3822 [CSR_HENVCFG] = { "henvcfg", hmode, read_henvcfg, write_henvcfg, 3823 .min_priv_ver = PRIV_VERSION_1_12_0 }, 3824 [CSR_HENVCFGH] = { "henvcfgh", 
hmode32, read_henvcfgh, write_henvcfgh, 3825 .min_priv_ver = PRIV_VERSION_1_12_0 }, 3826 3827 /* Supervisor Trap Setup */ 3828 [CSR_SSTATUS] = { "sstatus", smode, read_sstatus, write_sstatus, 3829 NULL, read_sstatus_i128 }, 3830 [CSR_SIE] = { "sie", smode, NULL, NULL, rmw_sie }, 3831 [CSR_STVEC] = { "stvec", smode, read_stvec, write_stvec }, 3832 [CSR_SCOUNTEREN] = { "scounteren", smode, read_scounteren, 3833 write_scounteren }, 3834 3835 /* Supervisor Trap Handling */ 3836 [CSR_SSCRATCH] = { "sscratch", smode, read_sscratch, write_sscratch, 3837 NULL, read_sscratch_i128, write_sscratch_i128 }, 3838 [CSR_SEPC] = { "sepc", smode, read_sepc, write_sepc }, 3839 [CSR_SCAUSE] = { "scause", smode, read_scause, write_scause }, 3840 [CSR_STVAL] = { "stval", smode, read_stval, write_stval }, 3841 [CSR_SIP] = { "sip", smode, NULL, NULL, rmw_sip }, 3842 [CSR_STIMECMP] = { "stimecmp", sstc, read_stimecmp, write_stimecmp, 3843 .min_priv_ver = PRIV_VERSION_1_12_0 }, 3844 [CSR_STIMECMPH] = { "stimecmph", sstc_32, read_stimecmph, write_stimecmph, 3845 .min_priv_ver = PRIV_VERSION_1_12_0 }, 3846 [CSR_VSTIMECMP] = { "vstimecmp", sstc, read_vstimecmp, 3847 write_vstimecmp, 3848 .min_priv_ver = PRIV_VERSION_1_12_0 }, 3849 [CSR_VSTIMECMPH] = { "vstimecmph", sstc_32, read_vstimecmph, 3850 write_vstimecmph, 3851 .min_priv_ver = PRIV_VERSION_1_12_0 }, 3852 3853 /* Supervisor Protection and Translation */ 3854 [CSR_SATP] = { "satp", smode, read_satp, write_satp }, 3855 3856 /* Supervisor-Level Window to Indirectly Accessed Registers (AIA) */ 3857 [CSR_SISELECT] = { "siselect", aia_smode, NULL, NULL, rmw_xiselect }, 3858 [CSR_SIREG] = { "sireg", aia_smode, NULL, NULL, rmw_xireg }, 3859 3860 /* Supervisor-Level Interrupts (AIA) */ 3861 [CSR_STOPEI] = { "stopei", aia_smode, NULL, NULL, rmw_xtopei }, 3862 [CSR_STOPI] = { "stopi", aia_smode, read_stopi }, 3863 3864 /* Supervisor-Level High-Half CSRs (AIA) */ 3865 [CSR_SIEH] = { "sieh", aia_smode32, NULL, NULL, rmw_sieh }, 3866 [CSR_SIPH] = { "siph", aia_smode32, NULL, NULL, rmw_siph }, 3867 3868 [CSR_HSTATUS] = { "hstatus", hmode, read_hstatus, write_hstatus, 3869 .min_priv_ver = PRIV_VERSION_1_12_0 }, 3870 [CSR_HEDELEG] = { "hedeleg", hmode, read_hedeleg, write_hedeleg, 3871 .min_priv_ver = PRIV_VERSION_1_12_0 }, 3872 [CSR_HIDELEG] = { "hideleg", hmode, NULL, NULL, rmw_hideleg, 3873 .min_priv_ver = PRIV_VERSION_1_12_0 }, 3874 [CSR_HVIP] = { "hvip", hmode, NULL, NULL, rmw_hvip, 3875 .min_priv_ver = PRIV_VERSION_1_12_0 }, 3876 [CSR_HIP] = { "hip", hmode, NULL, NULL, rmw_hip, 3877 .min_priv_ver = PRIV_VERSION_1_12_0 }, 3878 [CSR_HIE] = { "hie", hmode, NULL, NULL, rmw_hie, 3879 .min_priv_ver = PRIV_VERSION_1_12_0 }, 3880 [CSR_HCOUNTEREN] = { "hcounteren", hmode, read_hcounteren, 3881 write_hcounteren, 3882 .min_priv_ver = PRIV_VERSION_1_12_0 }, 3883 [CSR_HGEIE] = { "hgeie", hmode, read_hgeie, write_hgeie, 3884 .min_priv_ver = PRIV_VERSION_1_12_0 }, 3885 [CSR_HTVAL] = { "htval", hmode, read_htval, write_htval, 3886 .min_priv_ver = PRIV_VERSION_1_12_0 }, 3887 [CSR_HTINST] = { "htinst", hmode, read_htinst, write_htinst, 3888 .min_priv_ver = PRIV_VERSION_1_12_0 }, 3889 [CSR_HGEIP] = { "hgeip", hmode, read_hgeip, 3890 .min_priv_ver = PRIV_VERSION_1_12_0 }, 3891 [CSR_HGATP] = { "hgatp", hmode, read_hgatp, write_hgatp, 3892 .min_priv_ver = PRIV_VERSION_1_12_0 }, 3893 [CSR_HTIMEDELTA] = { "htimedelta", hmode, read_htimedelta, 3894 write_htimedelta, 3895 .min_priv_ver = PRIV_VERSION_1_12_0 }, 3896 [CSR_HTIMEDELTAH] = { "htimedeltah", hmode32, read_htimedeltah, 3897 
write_htimedeltah, 3898 .min_priv_ver = PRIV_VERSION_1_12_0 }, 3899 3900 [CSR_VSSTATUS] = { "vsstatus", hmode, read_vsstatus, 3901 write_vsstatus, 3902 .min_priv_ver = PRIV_VERSION_1_12_0 }, 3903 [CSR_VSIP] = { "vsip", hmode, NULL, NULL, rmw_vsip, 3904 .min_priv_ver = PRIV_VERSION_1_12_0 }, 3905 [CSR_VSIE] = { "vsie", hmode, NULL, NULL, rmw_vsie , 3906 .min_priv_ver = PRIV_VERSION_1_12_0 }, 3907 [CSR_VSTVEC] = { "vstvec", hmode, read_vstvec, write_vstvec, 3908 .min_priv_ver = PRIV_VERSION_1_12_0 }, 3909 [CSR_VSSCRATCH] = { "vsscratch", hmode, read_vsscratch, 3910 write_vsscratch, 3911 .min_priv_ver = PRIV_VERSION_1_12_0 }, 3912 [CSR_VSEPC] = { "vsepc", hmode, read_vsepc, write_vsepc, 3913 .min_priv_ver = PRIV_VERSION_1_12_0 }, 3914 [CSR_VSCAUSE] = { "vscause", hmode, read_vscause, write_vscause, 3915 .min_priv_ver = PRIV_VERSION_1_12_0 }, 3916 [CSR_VSTVAL] = { "vstval", hmode, read_vstval, write_vstval, 3917 .min_priv_ver = PRIV_VERSION_1_12_0 }, 3918 [CSR_VSATP] = { "vsatp", hmode, read_vsatp, write_vsatp, 3919 .min_priv_ver = PRIV_VERSION_1_12_0 }, 3920 3921 [CSR_MTVAL2] = { "mtval2", hmode, read_mtval2, write_mtval2, 3922 .min_priv_ver = PRIV_VERSION_1_12_0 }, 3923 [CSR_MTINST] = { "mtinst", hmode, read_mtinst, write_mtinst, 3924 .min_priv_ver = PRIV_VERSION_1_12_0 }, 3925 3926 /* Virtual Interrupts and Interrupt Priorities (H-extension with AIA) */ 3927 [CSR_HVIEN] = { "hvien", aia_hmode, read_zero, write_ignore }, 3928 [CSR_HVICTL] = { "hvictl", aia_hmode, read_hvictl, 3929 write_hvictl }, 3930 [CSR_HVIPRIO1] = { "hviprio1", aia_hmode, read_hviprio1, 3931 write_hviprio1 }, 3932 [CSR_HVIPRIO2] = { "hviprio2", aia_hmode, read_hviprio2, 3933 write_hviprio2 }, 3934 3935 /* 3936 * VS-Level Window to Indirectly Accessed Registers (H-extension with AIA) 3937 */ 3938 [CSR_VSISELECT] = { "vsiselect", aia_hmode, NULL, NULL, 3939 rmw_xiselect }, 3940 [CSR_VSIREG] = { "vsireg", aia_hmode, NULL, NULL, rmw_xireg }, 3941 3942 /* VS-Level Interrupts (H-extension with AIA) */ 3943 [CSR_VSTOPEI] = { "vstopei", aia_hmode, NULL, NULL, rmw_xtopei }, 3944 [CSR_VSTOPI] = { "vstopi", aia_hmode, read_vstopi }, 3945 3946 /* Hypervisor and VS-Level High-Half CSRs (H-extension with AIA) */ 3947 [CSR_HIDELEGH] = { "hidelegh", aia_hmode32, NULL, NULL, 3948 rmw_hidelegh }, 3949 [CSR_HVIENH] = { "hvienh", aia_hmode32, read_zero, 3950 write_ignore }, 3951 [CSR_HVIPH] = { "hviph", aia_hmode32, NULL, NULL, rmw_hviph }, 3952 [CSR_HVIPRIO1H] = { "hviprio1h", aia_hmode32, read_hviprio1h, 3953 write_hviprio1h }, 3954 [CSR_HVIPRIO2H] = { "hviprio2h", aia_hmode32, read_hviprio2h, 3955 write_hviprio2h }, 3956 [CSR_VSIEH] = { "vsieh", aia_hmode32, NULL, NULL, rmw_vsieh }, 3957 [CSR_VSIPH] = { "vsiph", aia_hmode32, NULL, NULL, rmw_vsiph }, 3958 3959 /* Physical Memory Protection */ 3960 [CSR_MSECCFG] = { "mseccfg", epmp, read_mseccfg, write_mseccfg, 3961 .min_priv_ver = PRIV_VERSION_1_11_0 }, 3962 [CSR_PMPCFG0] = { "pmpcfg0", pmp, read_pmpcfg, write_pmpcfg }, 3963 [CSR_PMPCFG1] = { "pmpcfg1", pmp, read_pmpcfg, write_pmpcfg }, 3964 [CSR_PMPCFG2] = { "pmpcfg2", pmp, read_pmpcfg, write_pmpcfg }, 3965 [CSR_PMPCFG3] = { "pmpcfg3", pmp, read_pmpcfg, write_pmpcfg }, 3966 [CSR_PMPADDR0] = { "pmpaddr0", pmp, read_pmpaddr, write_pmpaddr }, 3967 [CSR_PMPADDR1] = { "pmpaddr1", pmp, read_pmpaddr, write_pmpaddr }, 3968 [CSR_PMPADDR2] = { "pmpaddr2", pmp, read_pmpaddr, write_pmpaddr }, 3969 [CSR_PMPADDR3] = { "pmpaddr3", pmp, read_pmpaddr, write_pmpaddr }, 3970 [CSR_PMPADDR4] = { "pmpaddr4", pmp, read_pmpaddr, write_pmpaddr }, 3971 
[CSR_PMPADDR5] = { "pmpaddr5", pmp, read_pmpaddr, write_pmpaddr }, 3972 [CSR_PMPADDR6] = { "pmpaddr6", pmp, read_pmpaddr, write_pmpaddr }, 3973 [CSR_PMPADDR7] = { "pmpaddr7", pmp, read_pmpaddr, write_pmpaddr }, 3974 [CSR_PMPADDR8] = { "pmpaddr8", pmp, read_pmpaddr, write_pmpaddr }, 3975 [CSR_PMPADDR9] = { "pmpaddr9", pmp, read_pmpaddr, write_pmpaddr }, 3976 [CSR_PMPADDR10] = { "pmpaddr10", pmp, read_pmpaddr, write_pmpaddr }, 3977 [CSR_PMPADDR11] = { "pmpaddr11", pmp, read_pmpaddr, write_pmpaddr }, 3978 [CSR_PMPADDR12] = { "pmpaddr12", pmp, read_pmpaddr, write_pmpaddr }, 3979 [CSR_PMPADDR13] = { "pmpaddr13", pmp, read_pmpaddr, write_pmpaddr }, 3980 [CSR_PMPADDR14] = { "pmpaddr14", pmp, read_pmpaddr, write_pmpaddr }, 3981 [CSR_PMPADDR15] = { "pmpaddr15", pmp, read_pmpaddr, write_pmpaddr }, 3982 3983 /* Debug CSRs */ 3984 [CSR_TSELECT] = { "tselect", debug, read_tselect, write_tselect }, 3985 [CSR_TDATA1] = { "tdata1", debug, read_tdata, write_tdata }, 3986 [CSR_TDATA2] = { "tdata2", debug, read_tdata, write_tdata }, 3987 [CSR_TDATA3] = { "tdata3", debug, read_tdata, write_tdata }, 3988 3989 /* User Pointer Masking */ 3990 [CSR_UMTE] = { "umte", pointer_masking, read_umte, write_umte }, 3991 [CSR_UPMMASK] = { "upmmask", pointer_masking, read_upmmask, 3992 write_upmmask }, 3993 [CSR_UPMBASE] = { "upmbase", pointer_masking, read_upmbase, 3994 write_upmbase }, 3995 /* Machine Pointer Masking */ 3996 [CSR_MMTE] = { "mmte", pointer_masking, read_mmte, write_mmte }, 3997 [CSR_MPMMASK] = { "mpmmask", pointer_masking, read_mpmmask, 3998 write_mpmmask }, 3999 [CSR_MPMBASE] = { "mpmbase", pointer_masking, read_mpmbase, 4000 write_mpmbase }, 4001 /* Supervisor Pointer Masking */ 4002 [CSR_SMTE] = { "smte", pointer_masking, read_smte, write_smte }, 4003 [CSR_SPMMASK] = { "spmmask", pointer_masking, read_spmmask, 4004 write_spmmask }, 4005 [CSR_SPMBASE] = { "spmbase", pointer_masking, read_spmbase, 4006 write_spmbase }, 4007 4008 /* Performance Counters */ 4009 [CSR_HPMCOUNTER3] = { "hpmcounter3", ctr, read_hpmcounter }, 4010 [CSR_HPMCOUNTER4] = { "hpmcounter4", ctr, read_hpmcounter }, 4011 [CSR_HPMCOUNTER5] = { "hpmcounter5", ctr, read_hpmcounter }, 4012 [CSR_HPMCOUNTER6] = { "hpmcounter6", ctr, read_hpmcounter }, 4013 [CSR_HPMCOUNTER7] = { "hpmcounter7", ctr, read_hpmcounter }, 4014 [CSR_HPMCOUNTER8] = { "hpmcounter8", ctr, read_hpmcounter }, 4015 [CSR_HPMCOUNTER9] = { "hpmcounter9", ctr, read_hpmcounter }, 4016 [CSR_HPMCOUNTER10] = { "hpmcounter10", ctr, read_hpmcounter }, 4017 [CSR_HPMCOUNTER11] = { "hpmcounter11", ctr, read_hpmcounter }, 4018 [CSR_HPMCOUNTER12] = { "hpmcounter12", ctr, read_hpmcounter }, 4019 [CSR_HPMCOUNTER13] = { "hpmcounter13", ctr, read_hpmcounter }, 4020 [CSR_HPMCOUNTER14] = { "hpmcounter14", ctr, read_hpmcounter }, 4021 [CSR_HPMCOUNTER15] = { "hpmcounter15", ctr, read_hpmcounter }, 4022 [CSR_HPMCOUNTER16] = { "hpmcounter16", ctr, read_hpmcounter }, 4023 [CSR_HPMCOUNTER17] = { "hpmcounter17", ctr, read_hpmcounter }, 4024 [CSR_HPMCOUNTER18] = { "hpmcounter18", ctr, read_hpmcounter }, 4025 [CSR_HPMCOUNTER19] = { "hpmcounter19", ctr, read_hpmcounter }, 4026 [CSR_HPMCOUNTER20] = { "hpmcounter20", ctr, read_hpmcounter }, 4027 [CSR_HPMCOUNTER21] = { "hpmcounter21", ctr, read_hpmcounter }, 4028 [CSR_HPMCOUNTER22] = { "hpmcounter22", ctr, read_hpmcounter }, 4029 [CSR_HPMCOUNTER23] = { "hpmcounter23", ctr, read_hpmcounter }, 4030 [CSR_HPMCOUNTER24] = { "hpmcounter24", ctr, read_hpmcounter }, 4031 [CSR_HPMCOUNTER25] = { "hpmcounter25", ctr, read_hpmcounter }, 4032 [CSR_HPMCOUNTER26] = { 
"hpmcounter26", ctr, read_hpmcounter }, 4033 [CSR_HPMCOUNTER27] = { "hpmcounter27", ctr, read_hpmcounter }, 4034 [CSR_HPMCOUNTER28] = { "hpmcounter28", ctr, read_hpmcounter }, 4035 [CSR_HPMCOUNTER29] = { "hpmcounter29", ctr, read_hpmcounter }, 4036 [CSR_HPMCOUNTER30] = { "hpmcounter30", ctr, read_hpmcounter }, 4037 [CSR_HPMCOUNTER31] = { "hpmcounter31", ctr, read_hpmcounter }, 4038 4039 [CSR_MHPMCOUNTER3] = { "mhpmcounter3", mctr, read_hpmcounter, 4040 write_mhpmcounter }, 4041 [CSR_MHPMCOUNTER4] = { "mhpmcounter4", mctr, read_hpmcounter, 4042 write_mhpmcounter }, 4043 [CSR_MHPMCOUNTER5] = { "mhpmcounter5", mctr, read_hpmcounter, 4044 write_mhpmcounter }, 4045 [CSR_MHPMCOUNTER6] = { "mhpmcounter6", mctr, read_hpmcounter, 4046 write_mhpmcounter }, 4047 [CSR_MHPMCOUNTER7] = { "mhpmcounter7", mctr, read_hpmcounter, 4048 write_mhpmcounter }, 4049 [CSR_MHPMCOUNTER8] = { "mhpmcounter8", mctr, read_hpmcounter, 4050 write_mhpmcounter }, 4051 [CSR_MHPMCOUNTER9] = { "mhpmcounter9", mctr, read_hpmcounter, 4052 write_mhpmcounter }, 4053 [CSR_MHPMCOUNTER10] = { "mhpmcounter10", mctr, read_hpmcounter, 4054 write_mhpmcounter }, 4055 [CSR_MHPMCOUNTER11] = { "mhpmcounter11", mctr, read_hpmcounter, 4056 write_mhpmcounter }, 4057 [CSR_MHPMCOUNTER12] = { "mhpmcounter12", mctr, read_hpmcounter, 4058 write_mhpmcounter }, 4059 [CSR_MHPMCOUNTER13] = { "mhpmcounter13", mctr, read_hpmcounter, 4060 write_mhpmcounter }, 4061 [CSR_MHPMCOUNTER14] = { "mhpmcounter14", mctr, read_hpmcounter, 4062 write_mhpmcounter }, 4063 [CSR_MHPMCOUNTER15] = { "mhpmcounter15", mctr, read_hpmcounter, 4064 write_mhpmcounter }, 4065 [CSR_MHPMCOUNTER16] = { "mhpmcounter16", mctr, read_hpmcounter, 4066 write_mhpmcounter }, 4067 [CSR_MHPMCOUNTER17] = { "mhpmcounter17", mctr, read_hpmcounter, 4068 write_mhpmcounter }, 4069 [CSR_MHPMCOUNTER18] = { "mhpmcounter18", mctr, read_hpmcounter, 4070 write_mhpmcounter }, 4071 [CSR_MHPMCOUNTER19] = { "mhpmcounter19", mctr, read_hpmcounter, 4072 write_mhpmcounter }, 4073 [CSR_MHPMCOUNTER20] = { "mhpmcounter20", mctr, read_hpmcounter, 4074 write_mhpmcounter }, 4075 [CSR_MHPMCOUNTER21] = { "mhpmcounter21", mctr, read_hpmcounter, 4076 write_mhpmcounter }, 4077 [CSR_MHPMCOUNTER22] = { "mhpmcounter22", mctr, read_hpmcounter, 4078 write_mhpmcounter }, 4079 [CSR_MHPMCOUNTER23] = { "mhpmcounter23", mctr, read_hpmcounter, 4080 write_mhpmcounter }, 4081 [CSR_MHPMCOUNTER24] = { "mhpmcounter24", mctr, read_hpmcounter, 4082 write_mhpmcounter }, 4083 [CSR_MHPMCOUNTER25] = { "mhpmcounter25", mctr, read_hpmcounter, 4084 write_mhpmcounter }, 4085 [CSR_MHPMCOUNTER26] = { "mhpmcounter26", mctr, read_hpmcounter, 4086 write_mhpmcounter }, 4087 [CSR_MHPMCOUNTER27] = { "mhpmcounter27", mctr, read_hpmcounter, 4088 write_mhpmcounter }, 4089 [CSR_MHPMCOUNTER28] = { "mhpmcounter28", mctr, read_hpmcounter, 4090 write_mhpmcounter }, 4091 [CSR_MHPMCOUNTER29] = { "mhpmcounter29", mctr, read_hpmcounter, 4092 write_mhpmcounter }, 4093 [CSR_MHPMCOUNTER30] = { "mhpmcounter30", mctr, read_hpmcounter, 4094 write_mhpmcounter }, 4095 [CSR_MHPMCOUNTER31] = { "mhpmcounter31", mctr, read_hpmcounter, 4096 write_mhpmcounter }, 4097 4098 [CSR_MCOUNTINHIBIT] = { "mcountinhibit", any, read_mcountinhibit, 4099 write_mcountinhibit, 4100 .min_priv_ver = PRIV_VERSION_1_11_0 }, 4101 4102 [CSR_MHPMEVENT3] = { "mhpmevent3", any, read_mhpmevent, 4103 write_mhpmevent }, 4104 [CSR_MHPMEVENT4] = { "mhpmevent4", any, read_mhpmevent, 4105 write_mhpmevent }, 4106 [CSR_MHPMEVENT5] = { "mhpmevent5", any, read_mhpmevent, 4107 write_mhpmevent }, 4108 [CSR_MHPMEVENT6] 
= { "mhpmevent6", any, read_mhpmevent, 4109 write_mhpmevent }, 4110 [CSR_MHPMEVENT7] = { "mhpmevent7", any, read_mhpmevent, 4111 write_mhpmevent }, 4112 [CSR_MHPMEVENT8] = { "mhpmevent8", any, read_mhpmevent, 4113 write_mhpmevent }, 4114 [CSR_MHPMEVENT9] = { "mhpmevent9", any, read_mhpmevent, 4115 write_mhpmevent }, 4116 [CSR_MHPMEVENT10] = { "mhpmevent10", any, read_mhpmevent, 4117 write_mhpmevent }, 4118 [CSR_MHPMEVENT11] = { "mhpmevent11", any, read_mhpmevent, 4119 write_mhpmevent }, 4120 [CSR_MHPMEVENT12] = { "mhpmevent12", any, read_mhpmevent, 4121 write_mhpmevent }, 4122 [CSR_MHPMEVENT13] = { "mhpmevent13", any, read_mhpmevent, 4123 write_mhpmevent }, 4124 [CSR_MHPMEVENT14] = { "mhpmevent14", any, read_mhpmevent, 4125 write_mhpmevent }, 4126 [CSR_MHPMEVENT15] = { "mhpmevent15", any, read_mhpmevent, 4127 write_mhpmevent }, 4128 [CSR_MHPMEVENT16] = { "mhpmevent16", any, read_mhpmevent, 4129 write_mhpmevent }, 4130 [CSR_MHPMEVENT17] = { "mhpmevent17", any, read_mhpmevent, 4131 write_mhpmevent }, 4132 [CSR_MHPMEVENT18] = { "mhpmevent18", any, read_mhpmevent, 4133 write_mhpmevent }, 4134 [CSR_MHPMEVENT19] = { "mhpmevent19", any, read_mhpmevent, 4135 write_mhpmevent }, 4136 [CSR_MHPMEVENT20] = { "mhpmevent20", any, read_mhpmevent, 4137 write_mhpmevent }, 4138 [CSR_MHPMEVENT21] = { "mhpmevent21", any, read_mhpmevent, 4139 write_mhpmevent }, 4140 [CSR_MHPMEVENT22] = { "mhpmevent22", any, read_mhpmevent, 4141 write_mhpmevent }, 4142 [CSR_MHPMEVENT23] = { "mhpmevent23", any, read_mhpmevent, 4143 write_mhpmevent }, 4144 [CSR_MHPMEVENT24] = { "mhpmevent24", any, read_mhpmevent, 4145 write_mhpmevent }, 4146 [CSR_MHPMEVENT25] = { "mhpmevent25", any, read_mhpmevent, 4147 write_mhpmevent }, 4148 [CSR_MHPMEVENT26] = { "mhpmevent26", any, read_mhpmevent, 4149 write_mhpmevent }, 4150 [CSR_MHPMEVENT27] = { "mhpmevent27", any, read_mhpmevent, 4151 write_mhpmevent }, 4152 [CSR_MHPMEVENT28] = { "mhpmevent28", any, read_mhpmevent, 4153 write_mhpmevent }, 4154 [CSR_MHPMEVENT29] = { "mhpmevent29", any, read_mhpmevent, 4155 write_mhpmevent }, 4156 [CSR_MHPMEVENT30] = { "mhpmevent30", any, read_mhpmevent, 4157 write_mhpmevent }, 4158 [CSR_MHPMEVENT31] = { "mhpmevent31", any, read_mhpmevent, 4159 write_mhpmevent }, 4160 4161 [CSR_MHPMEVENT3H] = { "mhpmevent3h", sscofpmf, read_mhpmeventh, 4162 write_mhpmeventh }, 4163 [CSR_MHPMEVENT4H] = { "mhpmevent4h", sscofpmf, read_mhpmeventh, 4164 write_mhpmeventh }, 4165 [CSR_MHPMEVENT5H] = { "mhpmevent5h", sscofpmf, read_mhpmeventh, 4166 write_mhpmeventh }, 4167 [CSR_MHPMEVENT6H] = { "mhpmevent6h", sscofpmf, read_mhpmeventh, 4168 write_mhpmeventh }, 4169 [CSR_MHPMEVENT7H] = { "mhpmevent7h", sscofpmf, read_mhpmeventh, 4170 write_mhpmeventh }, 4171 [CSR_MHPMEVENT8H] = { "mhpmevent8h", sscofpmf, read_mhpmeventh, 4172 write_mhpmeventh }, 4173 [CSR_MHPMEVENT9H] = { "mhpmevent9h", sscofpmf, read_mhpmeventh, 4174 write_mhpmeventh }, 4175 [CSR_MHPMEVENT10H] = { "mhpmevent10h", sscofpmf, read_mhpmeventh, 4176 write_mhpmeventh }, 4177 [CSR_MHPMEVENT11H] = { "mhpmevent11h", sscofpmf, read_mhpmeventh, 4178 write_mhpmeventh }, 4179 [CSR_MHPMEVENT12H] = { "mhpmevent12h", sscofpmf, read_mhpmeventh, 4180 write_mhpmeventh }, 4181 [CSR_MHPMEVENT13H] = { "mhpmevent13h", sscofpmf, read_mhpmeventh, 4182 write_mhpmeventh }, 4183 [CSR_MHPMEVENT14H] = { "mhpmevent14h", sscofpmf, read_mhpmeventh, 4184 write_mhpmeventh }, 4185 [CSR_MHPMEVENT15H] = { "mhpmevent15h", sscofpmf, read_mhpmeventh, 4186 write_mhpmeventh }, 4187 [CSR_MHPMEVENT16H] = { "mhpmevent16h", sscofpmf, read_mhpmeventh, 4188 
write_mhpmeventh }, 4189 [CSR_MHPMEVENT17H] = { "mhpmevent17h", sscofpmf, read_mhpmeventh, 4190 write_mhpmeventh }, 4191 [CSR_MHPMEVENT18H] = { "mhpmevent18h", sscofpmf, read_mhpmeventh, 4192 write_mhpmeventh }, 4193 [CSR_MHPMEVENT19H] = { "mhpmevent19h", sscofpmf, read_mhpmeventh, 4194 write_mhpmeventh }, 4195 [CSR_MHPMEVENT20H] = { "mhpmevent20h", sscofpmf, read_mhpmeventh, 4196 write_mhpmeventh }, 4197 [CSR_MHPMEVENT21H] = { "mhpmevent21h", sscofpmf, read_mhpmeventh, 4198 write_mhpmeventh }, 4199 [CSR_MHPMEVENT22H] = { "mhpmevent22h", sscofpmf, read_mhpmeventh, 4200 write_mhpmeventh }, 4201 [CSR_MHPMEVENT23H] = { "mhpmevent23h", sscofpmf, read_mhpmeventh, 4202 write_mhpmeventh }, 4203 [CSR_MHPMEVENT24H] = { "mhpmevent24h", sscofpmf, read_mhpmeventh, 4204 write_mhpmeventh }, 4205 [CSR_MHPMEVENT25H] = { "mhpmevent25h", sscofpmf, read_mhpmeventh, 4206 write_mhpmeventh }, 4207 [CSR_MHPMEVENT26H] = { "mhpmevent26h", sscofpmf, read_mhpmeventh, 4208 write_mhpmeventh }, 4209 [CSR_MHPMEVENT27H] = { "mhpmevent27h", sscofpmf, read_mhpmeventh, 4210 write_mhpmeventh }, 4211 [CSR_MHPMEVENT28H] = { "mhpmevent28h", sscofpmf, read_mhpmeventh, 4212 write_mhpmeventh }, 4213 [CSR_MHPMEVENT29H] = { "mhpmevent29h", sscofpmf, read_mhpmeventh, 4214 write_mhpmeventh }, 4215 [CSR_MHPMEVENT30H] = { "mhpmevent30h", sscofpmf, read_mhpmeventh, 4216 write_mhpmeventh }, 4217 [CSR_MHPMEVENT31H] = { "mhpmevent31h", sscofpmf, read_mhpmeventh, 4218 write_mhpmeventh }, 4219 4220 [CSR_HPMCOUNTER3H] = { "hpmcounter3h", ctr32, read_hpmcounterh }, 4221 [CSR_HPMCOUNTER4H] = { "hpmcounter4h", ctr32, read_hpmcounterh }, 4222 [CSR_HPMCOUNTER5H] = { "hpmcounter5h", ctr32, read_hpmcounterh }, 4223 [CSR_HPMCOUNTER6H] = { "hpmcounter6h", ctr32, read_hpmcounterh }, 4224 [CSR_HPMCOUNTER7H] = { "hpmcounter7h", ctr32, read_hpmcounterh }, 4225 [CSR_HPMCOUNTER8H] = { "hpmcounter8h", ctr32, read_hpmcounterh }, 4226 [CSR_HPMCOUNTER9H] = { "hpmcounter9h", ctr32, read_hpmcounterh }, 4227 [CSR_HPMCOUNTER10H] = { "hpmcounter10h", ctr32, read_hpmcounterh }, 4228 [CSR_HPMCOUNTER11H] = { "hpmcounter11h", ctr32, read_hpmcounterh }, 4229 [CSR_HPMCOUNTER12H] = { "hpmcounter12h", ctr32, read_hpmcounterh }, 4230 [CSR_HPMCOUNTER13H] = { "hpmcounter13h", ctr32, read_hpmcounterh }, 4231 [CSR_HPMCOUNTER14H] = { "hpmcounter14h", ctr32, read_hpmcounterh }, 4232 [CSR_HPMCOUNTER15H] = { "hpmcounter15h", ctr32, read_hpmcounterh }, 4233 [CSR_HPMCOUNTER16H] = { "hpmcounter16h", ctr32, read_hpmcounterh }, 4234 [CSR_HPMCOUNTER17H] = { "hpmcounter17h", ctr32, read_hpmcounterh }, 4235 [CSR_HPMCOUNTER18H] = { "hpmcounter18h", ctr32, read_hpmcounterh }, 4236 [CSR_HPMCOUNTER19H] = { "hpmcounter19h", ctr32, read_hpmcounterh }, 4237 [CSR_HPMCOUNTER20H] = { "hpmcounter20h", ctr32, read_hpmcounterh }, 4238 [CSR_HPMCOUNTER21H] = { "hpmcounter21h", ctr32, read_hpmcounterh }, 4239 [CSR_HPMCOUNTER22H] = { "hpmcounter22h", ctr32, read_hpmcounterh }, 4240 [CSR_HPMCOUNTER23H] = { "hpmcounter23h", ctr32, read_hpmcounterh }, 4241 [CSR_HPMCOUNTER24H] = { "hpmcounter24h", ctr32, read_hpmcounterh }, 4242 [CSR_HPMCOUNTER25H] = { "hpmcounter25h", ctr32, read_hpmcounterh }, 4243 [CSR_HPMCOUNTER26H] = { "hpmcounter26h", ctr32, read_hpmcounterh }, 4244 [CSR_HPMCOUNTER27H] = { "hpmcounter27h", ctr32, read_hpmcounterh }, 4245 [CSR_HPMCOUNTER28H] = { "hpmcounter28h", ctr32, read_hpmcounterh }, 4246 [CSR_HPMCOUNTER29H] = { "hpmcounter29h", ctr32, read_hpmcounterh }, 4247 [CSR_HPMCOUNTER30H] = { "hpmcounter30h", ctr32, read_hpmcounterh }, 4248 [CSR_HPMCOUNTER31H] = { "hpmcounter31h", ctr32, 
read_hpmcounterh }, 4249 4250 [CSR_MHPMCOUNTER3H] = { "mhpmcounter3h", mctr32, read_hpmcounterh, 4251 write_mhpmcounterh }, 4252 [CSR_MHPMCOUNTER4H] = { "mhpmcounter4h", mctr32, read_hpmcounterh, 4253 write_mhpmcounterh }, 4254 [CSR_MHPMCOUNTER5H] = { "mhpmcounter5h", mctr32, read_hpmcounterh, 4255 write_mhpmcounterh }, 4256 [CSR_MHPMCOUNTER6H] = { "mhpmcounter6h", mctr32, read_hpmcounterh, 4257 write_mhpmcounterh }, 4258 [CSR_MHPMCOUNTER7H] = { "mhpmcounter7h", mctr32, read_hpmcounterh, 4259 write_mhpmcounterh }, 4260 [CSR_MHPMCOUNTER8H] = { "mhpmcounter8h", mctr32, read_hpmcounterh, 4261 write_mhpmcounterh }, 4262 [CSR_MHPMCOUNTER9H] = { "mhpmcounter9h", mctr32, read_hpmcounterh, 4263 write_mhpmcounterh }, 4264 [CSR_MHPMCOUNTER10H] = { "mhpmcounter10h", mctr32, read_hpmcounterh, 4265 write_mhpmcounterh }, 4266 [CSR_MHPMCOUNTER11H] = { "mhpmcounter11h", mctr32, read_hpmcounterh, 4267 write_mhpmcounterh }, 4268 [CSR_MHPMCOUNTER12H] = { "mhpmcounter12h", mctr32, read_hpmcounterh, 4269 write_mhpmcounterh }, 4270 [CSR_MHPMCOUNTER13H] = { "mhpmcounter13h", mctr32, read_hpmcounterh, 4271 write_mhpmcounterh }, 4272 [CSR_MHPMCOUNTER14H] = { "mhpmcounter14h", mctr32, read_hpmcounterh, 4273 write_mhpmcounterh }, 4274 [CSR_MHPMCOUNTER15H] = { "mhpmcounter15h", mctr32, read_hpmcounterh, 4275 write_mhpmcounterh }, 4276 [CSR_MHPMCOUNTER16H] = { "mhpmcounter16h", mctr32, read_hpmcounterh, 4277 write_mhpmcounterh }, 4278 [CSR_MHPMCOUNTER17H] = { "mhpmcounter17h", mctr32, read_hpmcounterh, 4279 write_mhpmcounterh }, 4280 [CSR_MHPMCOUNTER18H] = { "mhpmcounter18h", mctr32, read_hpmcounterh, 4281 write_mhpmcounterh }, 4282 [CSR_MHPMCOUNTER19H] = { "mhpmcounter19h", mctr32, read_hpmcounterh, 4283 write_mhpmcounterh }, 4284 [CSR_MHPMCOUNTER20H] = { "mhpmcounter20h", mctr32, read_hpmcounterh, 4285 write_mhpmcounterh }, 4286 [CSR_MHPMCOUNTER21H] = { "mhpmcounter21h", mctr32, read_hpmcounterh, 4287 write_mhpmcounterh }, 4288 [CSR_MHPMCOUNTER22H] = { "mhpmcounter22h", mctr32, read_hpmcounterh, 4289 write_mhpmcounterh }, 4290 [CSR_MHPMCOUNTER23H] = { "mhpmcounter23h", mctr32, read_hpmcounterh, 4291 write_mhpmcounterh }, 4292 [CSR_MHPMCOUNTER24H] = { "mhpmcounter24h", mctr32, read_hpmcounterh, 4293 write_mhpmcounterh }, 4294 [CSR_MHPMCOUNTER25H] = { "mhpmcounter25h", mctr32, read_hpmcounterh, 4295 write_mhpmcounterh }, 4296 [CSR_MHPMCOUNTER26H] = { "mhpmcounter26h", mctr32, read_hpmcounterh, 4297 write_mhpmcounterh }, 4298 [CSR_MHPMCOUNTER27H] = { "mhpmcounter27h", mctr32, read_hpmcounterh, 4299 write_mhpmcounterh }, 4300 [CSR_MHPMCOUNTER28H] = { "mhpmcounter28h", mctr32, read_hpmcounterh, 4301 write_mhpmcounterh }, 4302 [CSR_MHPMCOUNTER29H] = { "mhpmcounter29h", mctr32, read_hpmcounterh, 4303 write_mhpmcounterh }, 4304 [CSR_MHPMCOUNTER30H] = { "mhpmcounter30h", mctr32, read_hpmcounterh, 4305 write_mhpmcounterh }, 4306 [CSR_MHPMCOUNTER31H] = { "mhpmcounter31h", mctr32, read_hpmcounterh, 4307 write_mhpmcounterh }, 4308 [CSR_SCOUNTOVF] = { "scountovf", sscofpmf, read_scountovf }, 4309 4310 #endif /* !CONFIG_USER_ONLY */ 4311 }; 4312