/*
 * RISC-V Control and Status Registers.
 *
 * Copyright (c) 2016-2017 Sagar Karandikar, sagark@eecs.berkeley.edu
 * Copyright (c) 2017-2018 SiFive, Inc.
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms and conditions of the GNU General Public License,
 * version 2 or later, as published by the Free Software Foundation.
 *
 * This program is distributed in the hope it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
 * more details.
 *
 * You should have received a copy of the GNU General Public License along with
 * this program. If not, see <http://www.gnu.org/licenses/>.
 */

#include "qemu/osdep.h"
#include "qemu/log.h"
#include "qemu/timer.h"
#include "cpu.h"
#include "qemu/main-loop.h"
#include "exec/exec-all.h"
#include "sysemu/cpu-timers.h"

/* CSR function table public API */
void riscv_get_csr_ops(int csrno, riscv_csr_operations *ops)
{
    *ops = csr_ops[csrno & (CSR_TABLE_SIZE - 1)];
}

void riscv_set_csr_ops(int csrno, riscv_csr_operations *ops)
{
    csr_ops[csrno & (CSR_TABLE_SIZE - 1)] = *ops;
}

/* Predicates */
static RISCVException fs(CPURISCVState *env, int csrno)
{
#if !defined(CONFIG_USER_ONLY)
    if (!env->debugger && !riscv_cpu_fp_enabled(env)) {
        return RISCV_EXCP_ILLEGAL_INST;
    }
#endif
    return RISCV_EXCP_NONE;
}

static RISCVException vs(CPURISCVState *env, int csrno)
{
    CPUState *cs = env_cpu(env);
    RISCVCPU *cpu = RISCV_CPU(cs);

    if (env->misa_ext & RVV ||
        cpu->cfg.ext_zve32f || cpu->cfg.ext_zve64f) {
#if !defined(CONFIG_USER_ONLY)
        if (!env->debugger && !riscv_cpu_vector_enabled(env)) {
            return RISCV_EXCP_ILLEGAL_INST;
        }
#endif
        return RISCV_EXCP_NONE;
    }
    return RISCV_EXCP_ILLEGAL_INST;
}

static RISCVException ctr(CPURISCVState *env, int csrno)
{
#if !defined(CONFIG_USER_ONLY)
    CPUState *cs = env_cpu(env);
    RISCVCPU *cpu = RISCV_CPU(cs);

    if (!cpu->cfg.ext_counters) {
        /* The Counters extension is not enabled */
        return RISCV_EXCP_ILLEGAL_INST;
    }

    if (riscv_cpu_virt_enabled(env)) {
        switch (csrno) {
        case CSR_CYCLE:
            if (!get_field(env->hcounteren, COUNTEREN_CY) &&
                get_field(env->mcounteren, COUNTEREN_CY)) {
                return RISCV_EXCP_VIRT_INSTRUCTION_FAULT;
            }
            break;
        case CSR_TIME:
            if (!get_field(env->hcounteren, COUNTEREN_TM) &&
                get_field(env->mcounteren, COUNTEREN_TM)) {
                return RISCV_EXCP_VIRT_INSTRUCTION_FAULT;
            }
            break;
        case CSR_INSTRET:
            if (!get_field(env->hcounteren, COUNTEREN_IR) &&
                get_field(env->mcounteren, COUNTEREN_IR)) {
                return RISCV_EXCP_VIRT_INSTRUCTION_FAULT;
            }
            break;
        case CSR_HPMCOUNTER3...CSR_HPMCOUNTER31:
            if (!get_field(env->hcounteren, 1 << (csrno - CSR_HPMCOUNTER3)) &&
                get_field(env->mcounteren, 1 << (csrno - CSR_HPMCOUNTER3))) {
                return RISCV_EXCP_VIRT_INSTRUCTION_FAULT;
            }
            break;
        }
        if (riscv_cpu_mxl(env) == MXL_RV32) {
            switch (csrno) {
            case CSR_CYCLEH:
                if (!get_field(env->hcounteren, COUNTEREN_CY) &&
                    get_field(env->mcounteren, COUNTEREN_CY)) {
                    return RISCV_EXCP_VIRT_INSTRUCTION_FAULT;
                }
                break;
            case CSR_TIMEH:
                if (!get_field(env->hcounteren, COUNTEREN_TM) &&
                    get_field(env->mcounteren, COUNTEREN_TM)) {
                    return RISCV_EXCP_VIRT_INSTRUCTION_FAULT;
                }
                break;
            case CSR_INSTRETH:
                if (!get_field(env->hcounteren, COUNTEREN_IR) &&
                    get_field(env->mcounteren, COUNTEREN_IR)) {
                    return RISCV_EXCP_VIRT_INSTRUCTION_FAULT;
                }
                break;
            case CSR_HPMCOUNTER3H...CSR_HPMCOUNTER31H:
                if (!get_field(env->hcounteren, 1 << (csrno - CSR_HPMCOUNTER3H)) &&
                    get_field(env->mcounteren, 1 << (csrno - CSR_HPMCOUNTER3H))) {
                    return RISCV_EXCP_VIRT_INSTRUCTION_FAULT;
                }
                break;
            }
        }
    }
#endif
    return RISCV_EXCP_NONE;
}

static RISCVException ctr32(CPURISCVState *env, int csrno)
{
    if (riscv_cpu_mxl(env) != MXL_RV32) {
        return RISCV_EXCP_ILLEGAL_INST;
    }

    return ctr(env, csrno);
}

#if !defined(CONFIG_USER_ONLY)
static RISCVException any(CPURISCVState *env, int csrno)
{
    return RISCV_EXCP_NONE;
}

static RISCVException any32(CPURISCVState *env, int csrno)
{
    if (riscv_cpu_mxl(env) != MXL_RV32) {
        return RISCV_EXCP_ILLEGAL_INST;
    }

    return any(env, csrno);
}

static RISCVException smode(CPURISCVState *env, int csrno)
{
    if (riscv_has_ext(env, RVS)) {
        return RISCV_EXCP_NONE;
    }

    return RISCV_EXCP_ILLEGAL_INST;
}

static RISCVException hmode(CPURISCVState *env, int csrno)
{
    if (riscv_has_ext(env, RVS) &&
        riscv_has_ext(env, RVH)) {
        /* Hypervisor extension is supported */
        if ((env->priv == PRV_S && !riscv_cpu_virt_enabled(env)) ||
            env->priv == PRV_M) {
            return RISCV_EXCP_NONE;
        } else {
            return RISCV_EXCP_VIRT_INSTRUCTION_FAULT;
        }
    }

    return RISCV_EXCP_ILLEGAL_INST;
}

static RISCVException hmode32(CPURISCVState *env, int csrno)
{
    if (riscv_cpu_mxl(env) != MXL_RV32) {
        if (!riscv_cpu_virt_enabled(env)) {
            return RISCV_EXCP_ILLEGAL_INST;
        } else {
            return RISCV_EXCP_VIRT_INSTRUCTION_FAULT;
        }
    }

    return hmode(env, csrno);
}

/* Checks if PointerMasking registers could be accessed */
static RISCVException pointer_masking(CPURISCVState *env, int csrno)
{
    /* Check if j-ext is present */
    if (riscv_has_ext(env, RVJ)) {
        return RISCV_EXCP_NONE;
    }
    return RISCV_EXCP_ILLEGAL_INST;
}

static RISCVException pmp(CPURISCVState *env, int csrno)
{
    if (riscv_feature(env, RISCV_FEATURE_PMP)) {
        return RISCV_EXCP_NONE;
    }

    return RISCV_EXCP_ILLEGAL_INST;
}

static RISCVException epmp(CPURISCVState *env, int csrno)
{
    if (env->priv == PRV_M && riscv_feature(env, RISCV_FEATURE_EPMP)) {
        return RISCV_EXCP_NONE;
    }

    return RISCV_EXCP_ILLEGAL_INST;
}
#endif

/* User Floating-Point CSRs */
static RISCVException read_fflags(CPURISCVState *env, int csrno,
                                  target_ulong *val)
{
    *val = riscv_cpu_get_fflags(env);
    return RISCV_EXCP_NONE;
}

static RISCVException write_fflags(CPURISCVState *env, int csrno,
                                   target_ulong val)
{
#if !defined(CONFIG_USER_ONLY)
    env->mstatus |= MSTATUS_FS;
#endif
    riscv_cpu_set_fflags(env, val & (FSR_AEXC >> FSR_AEXC_SHIFT));
    return RISCV_EXCP_NONE;
}

static RISCVException read_frm(CPURISCVState *env, int csrno,
                               target_ulong *val)
{
    *val = env->frm;
    return RISCV_EXCP_NONE;
}

static RISCVException write_frm(CPURISCVState *env, int csrno,
                                target_ulong val)
{
#if !defined(CONFIG_USER_ONLY)
    env->mstatus |= MSTATUS_FS;
#endif
    env->frm = val & (FSR_RD >> FSR_RD_SHIFT);
    return RISCV_EXCP_NONE;
}
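
/*
 * fcsr aggregates the two CSRs above: the accrued exception flags (fflags)
 * occupy bits [4:0] and the dynamic rounding mode (frm) bits [7:5], which
 * is what the FSR_AEXC/FSR_RD masks and shifts used below encode.
 */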

static RISCVException read_fcsr(CPURISCVState *env, int csrno,
                                target_ulong *val)
{
    *val = (riscv_cpu_get_fflags(env) << FSR_AEXC_SHIFT)
        | (env->frm << FSR_RD_SHIFT);
    return RISCV_EXCP_NONE;
}

static RISCVException write_fcsr(CPURISCVState *env, int csrno,
                                 target_ulong val)
{
#if !defined(CONFIG_USER_ONLY)
    env->mstatus |= MSTATUS_FS;
#endif
    env->frm = (val & FSR_RD) >> FSR_RD_SHIFT;
    riscv_cpu_set_fflags(env, (val & FSR_AEXC) >> FSR_AEXC_SHIFT);
    return RISCV_EXCP_NONE;
}

static RISCVException read_vtype(CPURISCVState *env, int csrno,
                                 target_ulong *val)
{
    uint64_t vill;
    switch (env->xl) {
    case MXL_RV32:
        vill = (uint32_t)env->vill << 31;
        break;
    case MXL_RV64:
        vill = (uint64_t)env->vill << 63;
        break;
    default:
        g_assert_not_reached();
    }
    *val = (target_ulong)vill | env->vtype;
    return RISCV_EXCP_NONE;
}

static RISCVException read_vl(CPURISCVState *env, int csrno,
                              target_ulong *val)
{
    *val = env->vl;
    return RISCV_EXCP_NONE;
}

static int read_vlenb(CPURISCVState *env, int csrno, target_ulong *val)
{
    *val = env_archcpu(env)->cfg.vlen >> 3;
    return RISCV_EXCP_NONE;
}

static RISCVException read_vxrm(CPURISCVState *env, int csrno,
                                target_ulong *val)
{
    *val = env->vxrm;
    return RISCV_EXCP_NONE;
}

static RISCVException write_vxrm(CPURISCVState *env, int csrno,
                                 target_ulong val)
{
#if !defined(CONFIG_USER_ONLY)
    env->mstatus |= MSTATUS_VS;
#endif
    env->vxrm = val;
    return RISCV_EXCP_NONE;
}

static RISCVException read_vxsat(CPURISCVState *env, int csrno,
                                 target_ulong *val)
{
    *val = env->vxsat;
    return RISCV_EXCP_NONE;
}

static RISCVException write_vxsat(CPURISCVState *env, int csrno,
                                  target_ulong val)
{
#if !defined(CONFIG_USER_ONLY)
    env->mstatus |= MSTATUS_VS;
#endif
    env->vxsat = val;
    return RISCV_EXCP_NONE;
}

static RISCVException read_vstart(CPURISCVState *env, int csrno,
                                  target_ulong *val)
{
    *val = env->vstart;
    return RISCV_EXCP_NONE;
}

static RISCVException write_vstart(CPURISCVState *env, int csrno,
                                   target_ulong val)
{
#if !defined(CONFIG_USER_ONLY)
    env->mstatus |= MSTATUS_VS;
#endif
    /*
     * The vstart CSR is defined to have only enough writable bits
     * to hold the largest element index, i.e. lg2(VLEN) bits.
     */
    env->vstart = val & ~(~0ULL << ctzl(env_archcpu(env)->cfg.vlen));
    return RISCV_EXCP_NONE;
}

static int read_vcsr(CPURISCVState *env, int csrno, target_ulong *val)
{
    *val = (env->vxrm << VCSR_VXRM_SHIFT) | (env->vxsat << VCSR_VXSAT_SHIFT);
    return RISCV_EXCP_NONE;
}

static int write_vcsr(CPURISCVState *env, int csrno, target_ulong val)
{
#if !defined(CONFIG_USER_ONLY)
    env->mstatus |= MSTATUS_VS;
#endif
    env->vxrm = (val & VCSR_VXRM) >> VCSR_VXRM_SHIFT;
    env->vxsat = (val & VCSR_VXSAT) >> VCSR_VXSAT_SHIFT;
    return RISCV_EXCP_NONE;
}

/* User Timers and Counters */
static RISCVException read_instret(CPURISCVState *env, int csrno,
                                   target_ulong *val)
{
#if !defined(CONFIG_USER_ONLY)
    if (icount_enabled()) {
        *val = icount_get();
    } else {
        *val = cpu_get_host_ticks();
    }
#else
    *val = cpu_get_host_ticks();
#endif
    return RISCV_EXCP_NONE;
}

static RISCVException read_instreth(CPURISCVState *env, int csrno,
                                    target_ulong *val)
{
#if !defined(CONFIG_USER_ONLY)
    if (icount_enabled()) {
        *val = icount_get() >> 32;
    } else {
        *val = cpu_get_host_ticks() >> 32;
    }
#else
    *val = cpu_get_host_ticks() >> 32;
#endif
    return RISCV_EXCP_NONE;
}

#if defined(CONFIG_USER_ONLY)
static RISCVException read_time(CPURISCVState *env, int csrno,
                                target_ulong *val)
{
    *val = cpu_get_host_ticks();
    return RISCV_EXCP_NONE;
}

static RISCVException read_timeh(CPURISCVState *env, int csrno,
                                 target_ulong *val)
{
    *val = cpu_get_host_ticks() >> 32;
    return RISCV_EXCP_NONE;
}

#else /* CONFIG_USER_ONLY */

static RISCVException read_time(CPURISCVState *env, int csrno,
                                target_ulong *val)
{
    uint64_t delta = riscv_cpu_virt_enabled(env) ? env->htimedelta : 0;

    if (!env->rdtime_fn) {
        return RISCV_EXCP_ILLEGAL_INST;
    }

    *val = env->rdtime_fn(env->rdtime_fn_arg) + delta;
    return RISCV_EXCP_NONE;
}
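
/*
 * RV32 counterpart: timeh returns the upper 32 bits of the same
 * htimedelta-adjusted timer value.
 */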

static RISCVException read_timeh(CPURISCVState *env, int csrno,
                                 target_ulong *val)
{
    uint64_t delta = riscv_cpu_virt_enabled(env) ? env->htimedelta : 0;

    if (!env->rdtime_fn) {
        return RISCV_EXCP_ILLEGAL_INST;
    }

    *val = (env->rdtime_fn(env->rdtime_fn_arg) + delta) >> 32;
    return RISCV_EXCP_NONE;
}

/* Machine constants */

#define M_MODE_INTERRUPTS  (MIP_MSIP | MIP_MTIP | MIP_MEIP)
#define S_MODE_INTERRUPTS  (MIP_SSIP | MIP_STIP | MIP_SEIP)
#define VS_MODE_INTERRUPTS (MIP_VSSIP | MIP_VSTIP | MIP_VSEIP)
#define HS_MODE_INTERRUPTS (MIP_SGEIP | VS_MODE_INTERRUPTS)

static const target_ulong delegable_ints = S_MODE_INTERRUPTS |
                                           VS_MODE_INTERRUPTS;
static const target_ulong vs_delegable_ints = VS_MODE_INTERRUPTS;
static const target_ulong all_ints = M_MODE_INTERRUPTS | S_MODE_INTERRUPTS |
                                     HS_MODE_INTERRUPTS;
#define DELEGABLE_EXCPS ((1ULL << (RISCV_EXCP_INST_ADDR_MIS)) | \
                         (1ULL << (RISCV_EXCP_INST_ACCESS_FAULT)) | \
                         (1ULL << (RISCV_EXCP_ILLEGAL_INST)) | \
                         (1ULL << (RISCV_EXCP_BREAKPOINT)) | \
                         (1ULL << (RISCV_EXCP_LOAD_ADDR_MIS)) | \
                         (1ULL << (RISCV_EXCP_LOAD_ACCESS_FAULT)) | \
                         (1ULL << (RISCV_EXCP_STORE_AMO_ADDR_MIS)) | \
                         (1ULL << (RISCV_EXCP_STORE_AMO_ACCESS_FAULT)) | \
                         (1ULL << (RISCV_EXCP_U_ECALL)) | \
                         (1ULL << (RISCV_EXCP_S_ECALL)) | \
                         (1ULL << (RISCV_EXCP_VS_ECALL)) | \
                         (1ULL << (RISCV_EXCP_M_ECALL)) | \
                         (1ULL << (RISCV_EXCP_INST_PAGE_FAULT)) | \
                         (1ULL << (RISCV_EXCP_LOAD_PAGE_FAULT)) | \
                         (1ULL << (RISCV_EXCP_STORE_PAGE_FAULT)) | \
                         (1ULL << (RISCV_EXCP_INST_GUEST_PAGE_FAULT)) | \
                         (1ULL << (RISCV_EXCP_LOAD_GUEST_ACCESS_FAULT)) | \
                         (1ULL << (RISCV_EXCP_VIRT_INSTRUCTION_FAULT)) | \
                         (1ULL << (RISCV_EXCP_STORE_GUEST_AMO_ACCESS_FAULT)))
static const target_ulong vs_delegable_excps = DELEGABLE_EXCPS &
    ~((1ULL << (RISCV_EXCP_S_ECALL)) |
      (1ULL << (RISCV_EXCP_VS_ECALL)) |
      (1ULL << (RISCV_EXCP_M_ECALL)) |
      (1ULL << (RISCV_EXCP_INST_GUEST_PAGE_FAULT)) |
      (1ULL << (RISCV_EXCP_LOAD_GUEST_ACCESS_FAULT)) |
      (1ULL << (RISCV_EXCP_VIRT_INSTRUCTION_FAULT)) |
      (1ULL << (RISCV_EXCP_STORE_GUEST_AMO_ACCESS_FAULT)));
static const target_ulong sstatus_v1_10_mask = SSTATUS_SIE | SSTATUS_SPIE |
    SSTATUS_UIE | SSTATUS_UPIE | SSTATUS_SPP | SSTATUS_FS | SSTATUS_XS |
    SSTATUS_SUM | SSTATUS_MXR | SSTATUS_VS;
static const target_ulong sip_writable_mask = SIP_SSIP | MIP_USIP | MIP_UEIP;
static const target_ulong hip_writable_mask = MIP_VSSIP;
static const target_ulong hvip_writable_mask = MIP_VSSIP | MIP_VSTIP | MIP_VSEIP;
static const target_ulong vsip_writable_mask = MIP_VSSIP;

static const char valid_vm_1_10_32[16] = {
    [VM_1_10_MBARE] = 1,
    [VM_1_10_SV32] = 1
};

static const char valid_vm_1_10_64[16] = {
    [VM_1_10_MBARE] = 1,
    [VM_1_10_SV39] = 1,
    [VM_1_10_SV48] = 1,
    [VM_1_10_SV57] = 1
};

/* Machine Information Registers */
static RISCVException read_zero(CPURISCVState *env, int csrno,
                                target_ulong *val)
{
    *val = 0;
    return RISCV_EXCP_NONE;
}

static RISCVException read_mhartid(CPURISCVState *env, int csrno,
                                   target_ulong *val)
{
    *val = env->mhartid;
    return RISCV_EXCP_NONE;
}

/* Machine Trap Setup */

/* We do not store SD explicitly, only compute it on demand. */
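/*
 * SD reads as 1 whenever FS, VS or XS indicates dirty state; its position
 * depends on the effective XLEN, which is why callers pass it explicitly.
 */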
static uint64_t add_status_sd(RISCVMXL xl, uint64_t status)
{
    if ((status & MSTATUS_FS) == MSTATUS_FS ||
        (status & MSTATUS_VS) == MSTATUS_VS ||
        (status & MSTATUS_XS) == MSTATUS_XS) {
        switch (xl) {
        case MXL_RV32:
            return status | MSTATUS32_SD;
        case MXL_RV64:
            return status | MSTATUS64_SD;
        case MXL_RV128:
            return MSTATUSH128_SD;
        default:
            g_assert_not_reached();
        }
    }
    return status;
}

static RISCVException read_mstatus(CPURISCVState *env, int csrno,
                                   target_ulong *val)
{
    *val = add_status_sd(riscv_cpu_mxl(env), env->mstatus);
    return RISCV_EXCP_NONE;
}

static int validate_vm(CPURISCVState *env, target_ulong vm)
{
    if (riscv_cpu_mxl(env) == MXL_RV32) {
        return valid_vm_1_10_32[vm & 0xf];
    } else {
        return valid_vm_1_10_64[vm & 0xf];
    }
}

static RISCVException write_mstatus(CPURISCVState *env, int csrno,
                                    target_ulong val)
{
    uint64_t mstatus = env->mstatus;
    uint64_t mask = 0;
    RISCVMXL xl = riscv_cpu_mxl(env);

    /* flush tlb on mstatus fields that affect VM */
    if ((val ^ mstatus) & (MSTATUS_MXR | MSTATUS_MPP | MSTATUS_MPV |
            MSTATUS_MPRV | MSTATUS_SUM)) {
        tlb_flush(env_cpu(env));
    }
    mask = MSTATUS_SIE | MSTATUS_SPIE | MSTATUS_MIE | MSTATUS_MPIE |
        MSTATUS_SPP | MSTATUS_FS | MSTATUS_MPRV | MSTATUS_SUM |
        MSTATUS_MPP | MSTATUS_MXR | MSTATUS_TVM | MSTATUS_TSR |
        MSTATUS_TW | MSTATUS_VS;

    if (xl != MXL_RV32 || env->debugger) {
        /*
         * RV32: MPV and GVA are not in mstatus. The current plan is to
         * add them to mstatush. For now, we just don't support it.
         */
        mask |= MSTATUS_MPV | MSTATUS_GVA;
        if ((val & MSTATUS64_UXL) != 0) {
            mask |= MSTATUS64_UXL;
        }
    }

    mstatus = (mstatus & ~mask) | (val & mask);

    if (xl > MXL_RV32) {
        /* SXL field is for now read only */
        mstatus = set_field(mstatus, MSTATUS64_SXL, xl);
    }
    env->mstatus = mstatus;
    env->xl = cpu_recompute_xl(env);

    return RISCV_EXCP_NONE;
}

static RISCVException read_mstatush(CPURISCVState *env, int csrno,
                                    target_ulong *val)
{
    *val = env->mstatus >> 32;
    return RISCV_EXCP_NONE;
}

static RISCVException write_mstatush(CPURISCVState *env, int csrno,
                                     target_ulong val)
{
    uint64_t valh = (uint64_t)val << 32;
    uint64_t mask = MSTATUS_MPV | MSTATUS_GVA;

    if ((valh ^ env->mstatus) & (MSTATUS_MPV)) {
        tlb_flush(env_cpu(env));
    }

    env->mstatus = (env->mstatus & ~mask) | (valh & mask);

    return RISCV_EXCP_NONE;
}

static RISCVException read_mstatus_i128(CPURISCVState *env, int csrno,
                                        Int128 *val)
{
    *val = int128_make128(env->mstatus, add_status_sd(MXL_RV128, env->mstatus));
    return RISCV_EXCP_NONE;
}

static RISCVException read_misa_i128(CPURISCVState *env, int csrno,
                                     Int128 *val)
{
    *val = int128_make128(env->misa_ext, (uint64_t)MXL_RV128 << 62);
    return RISCV_EXCP_NONE;
}

static RISCVException read_misa(CPURISCVState *env, int csrno,
                                target_ulong *val)
{
    target_ulong misa;

    switch (env->misa_mxl) {
    case MXL_RV32:
        misa = (target_ulong)MXL_RV32 << 30;
        break;
#ifdef TARGET_RISCV64
    case MXL_RV64:
        misa = (target_ulong)MXL_RV64 << 62;
        break;
#endif
    default:
        g_assert_not_reached();
    }

    *val = misa | env->misa_ext;
    return RISCV_EXCP_NONE;
}
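
/*
 * misa places the MXL field in the two most-significant bits (hence the
 * << 30 / << 62 shifts above) and the single-letter extension bitmap in
 * the low 26 bits, with 'A' at bit 0.
 */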

static RISCVException write_misa(CPURISCVState *env, int csrno,
                                 target_ulong val)
{
    if (!riscv_feature(env, RISCV_FEATURE_MISA)) {
        /* drop write to misa */
        return RISCV_EXCP_NONE;
    }

    /* 'I' or 'E' must be present */
    if (!(val & (RVI | RVE))) {
        /* It is not, drop write to misa */
        return RISCV_EXCP_NONE;
    }

    /* 'E' excludes all other extensions */
    if (val & RVE) {
        /* when we support 'E' we can do "val = RVE;" however
         * for now we just drop writes if 'E' is present.
         */
        return RISCV_EXCP_NONE;
    }

    /*
     * misa.MXL writes are not supported by QEMU.
     * Drop writes to those bits.
     */

    /* Mask extensions that are not supported by this hart */
    val &= env->misa_ext_mask;

    /* Mask extensions that are not supported by QEMU */
    val &= (RVI | RVE | RVM | RVA | RVF | RVD | RVC | RVS | RVU | RVV);

    /* 'D' depends on 'F', so clear 'D' if 'F' is not present */
    if ((val & RVD) && !(val & RVF)) {
        val &= ~RVD;
    }

    /* Suppress 'C' if next instruction is not aligned
     * TODO: this should check next_pc
     */
    if ((val & RVC) && (GETPC() & ~3) != 0) {
        val &= ~RVC;
    }

    /* If nothing changed, do nothing. */
    if (val == env->misa_ext) {
        return RISCV_EXCP_NONE;
    }

    /* flush translation cache */
    tb_flush(env_cpu(env));
    env->misa_ext = val;
    env->xl = riscv_cpu_mxl(env);
    return RISCV_EXCP_NONE;
}

static RISCVException read_medeleg(CPURISCVState *env, int csrno,
                                   target_ulong *val)
{
    *val = env->medeleg;
    return RISCV_EXCP_NONE;
}

static RISCVException write_medeleg(CPURISCVState *env, int csrno,
                                    target_ulong val)
{
    env->medeleg = (env->medeleg & ~DELEGABLE_EXCPS) | (val & DELEGABLE_EXCPS);
    return RISCV_EXCP_NONE;
}

static RISCVException read_mideleg(CPURISCVState *env, int csrno,
                                   target_ulong *val)
{
    *val = env->mideleg;
    return RISCV_EXCP_NONE;
}

static RISCVException write_mideleg(CPURISCVState *env, int csrno,
                                    target_ulong val)
{
    env->mideleg = (env->mideleg & ~delegable_ints) | (val & delegable_ints);
    if (riscv_has_ext(env, RVH)) {
        env->mideleg |= HS_MODE_INTERRUPTS;
    }
    return RISCV_EXCP_NONE;
}

static RISCVException read_mie(CPURISCVState *env, int csrno,
                               target_ulong *val)
{
    *val = env->mie;
    return RISCV_EXCP_NONE;
}

static RISCVException write_mie(CPURISCVState *env, int csrno,
                                target_ulong val)
{
    env->mie = (env->mie & ~all_ints) | (val & all_ints);
    if (!riscv_has_ext(env, RVH)) {
        env->mie &= ~MIP_SGEIP;
    }
    return RISCV_EXCP_NONE;
}

static RISCVException read_mtvec(CPURISCVState *env, int csrno,
                                 target_ulong *val)
{
    *val = env->mtvec;
    return RISCV_EXCP_NONE;
}

static RISCVException write_mtvec(CPURISCVState *env, int csrno,
                                  target_ulong val)
{
    /* bits [1:0] encode mode; 0 = direct, 1 = vectored, 2 >= reserved */
    if ((val & 3) < 2) {
        env->mtvec = val;
    } else {
        qemu_log_mask(LOG_UNIMP, "CSR_MTVEC: reserved mode not supported\n");
    }
    return RISCV_EXCP_NONE;
}

static RISCVException read_mcounteren(CPURISCVState *env, int csrno,
                                      target_ulong *val)
{
    *val = env->mcounteren;
    return RISCV_EXCP_NONE;
}

static RISCVException write_mcounteren(CPURISCVState *env, int csrno,
                                       target_ulong val)
{
    env->mcounteren = val;
    return RISCV_EXCP_NONE;
}

/* Machine Trap Handling */
static RISCVException read_mscratch_i128(CPURISCVState *env, int csrno,
                                         Int128 *val)
{
    *val = int128_make128(env->mscratch, env->mscratchh);
    return RISCV_EXCP_NONE;
}

static RISCVException write_mscratch_i128(CPURISCVState *env, int csrno,
                                          Int128 val)
{
    env->mscratch = int128_getlo(val);
    env->mscratchh = int128_gethi(val);
    return RISCV_EXCP_NONE;
}

static RISCVException read_mscratch(CPURISCVState *env, int csrno,
                                    target_ulong *val)
{
    *val = env->mscratch;
    return RISCV_EXCP_NONE;
}

static RISCVException write_mscratch(CPURISCVState *env, int csrno,
                                     target_ulong val)
{
    env->mscratch = val;
    return RISCV_EXCP_NONE;
}

static RISCVException read_mepc(CPURISCVState *env, int csrno,
                                target_ulong *val)
{
    *val = env->mepc;
    return RISCV_EXCP_NONE;
}

static RISCVException write_mepc(CPURISCVState *env, int csrno,
                                 target_ulong val)
{
    env->mepc = val;
    return RISCV_EXCP_NONE;
}

static RISCVException read_mcause(CPURISCVState *env, int csrno,
                                  target_ulong *val)
{
    *val = env->mcause;
    return RISCV_EXCP_NONE;
}

static RISCVException write_mcause(CPURISCVState *env, int csrno,
                                   target_ulong val)
{
    env->mcause = val;
    return RISCV_EXCP_NONE;
}

static RISCVException read_mtval(CPURISCVState *env, int csrno,
                                 target_ulong *val)
{
    *val = env->mtval;
    return RISCV_EXCP_NONE;
}

static RISCVException write_mtval(CPURISCVState *env, int csrno,
                                  target_ulong val)
{
    env->mtval = val;
    return RISCV_EXCP_NONE;
}

static RISCVException rmw_mip(CPURISCVState *env, int csrno,
                              target_ulong *ret_value,
                              target_ulong new_value, target_ulong write_mask)
{
    RISCVCPU *cpu = env_archcpu(env);
    /* Allow software control of delegable interrupts not claimed by hardware */
    target_ulong mask = write_mask & delegable_ints & ~env->miclaim;
    uint32_t old_mip;

    if (mask) {
        old_mip = riscv_cpu_update_mip(cpu, mask, (new_value & mask));
    } else {
        old_mip = env->mip;
    }

    if (ret_value) {
        *ret_value = old_mip;
    }

    return RISCV_EXCP_NONE;
}

/* Supervisor Trap Setup */
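/*
 * sstatus has no storage of its own; it is a restricted view of mstatus,
 * so the accessors below filter env->mstatus through sstatus_v1_10_mask.
 */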
static RISCVException read_sstatus_i128(CPURISCVState *env, int csrno,
                                        Int128 *val)
{
    uint64_t mask = sstatus_v1_10_mask;
    uint64_t sstatus = env->mstatus & mask;
    if (env->xl != MXL_RV32 || env->debugger) {
        mask |= SSTATUS64_UXL;
    }

    *val = int128_make128(sstatus, add_status_sd(MXL_RV128, sstatus));
    return RISCV_EXCP_NONE;
}

static RISCVException read_sstatus(CPURISCVState *env, int csrno,
                                   target_ulong *val)
{
    target_ulong mask = (sstatus_v1_10_mask);
    if (env->xl != MXL_RV32 || env->debugger) {
        mask |= SSTATUS64_UXL;
    }
    /* TODO: Use SXL not MXL. */
    *val = add_status_sd(riscv_cpu_mxl(env), env->mstatus & mask);
    return RISCV_EXCP_NONE;
}

static RISCVException write_sstatus(CPURISCVState *env, int csrno,
                                    target_ulong val)
{
    target_ulong mask = (sstatus_v1_10_mask);

    if (env->xl != MXL_RV32 || env->debugger) {
        if ((val & SSTATUS64_UXL) != 0) {
            mask |= SSTATUS64_UXL;
        }
    }
    target_ulong newval = (env->mstatus & ~mask) | (val & mask);
    return write_mstatus(env, CSR_MSTATUS, newval);
}

static RISCVException read_vsie(CPURISCVState *env, int csrno,
                                target_ulong *val)
{
    /* Shift the VS bits to their S bit location in vsie */
    *val = (env->mie & env->hideleg & VS_MODE_INTERRUPTS) >> 1;
    return RISCV_EXCP_NONE;
}

static RISCVException read_sie(CPURISCVState *env, int csrno,
                               target_ulong *val)
{
    if (riscv_cpu_virt_enabled(env)) {
        read_vsie(env, CSR_VSIE, val);
    } else {
        *val = env->mie & env->mideleg;
    }
    return RISCV_EXCP_NONE;
}

static RISCVException write_vsie(CPURISCVState *env, int csrno,
                                 target_ulong val)
{
    /* Shift the S bits to their VS bit location in mie */
    target_ulong newval = (env->mie & ~VS_MODE_INTERRUPTS) |
                          ((val << 1) & env->hideleg & VS_MODE_INTERRUPTS);
    return write_mie(env, CSR_MIE, newval);
}

static int write_sie(CPURISCVState *env, int csrno, target_ulong val)
{
    if (riscv_cpu_virt_enabled(env)) {
        write_vsie(env, CSR_VSIE, val);
    } else {
        target_ulong newval = (env->mie & ~S_MODE_INTERRUPTS) |
                              (val & S_MODE_INTERRUPTS);
        write_mie(env, CSR_MIE, newval);
    }

    return RISCV_EXCP_NONE;
}

static RISCVException read_stvec(CPURISCVState *env, int csrno,
                                 target_ulong *val)
{
    *val = env->stvec;
    return RISCV_EXCP_NONE;
}

static RISCVException write_stvec(CPURISCVState *env, int csrno,
                                  target_ulong val)
{
    /* bits [1:0] encode mode; 0 = direct, 1 = vectored, 2 >= reserved */
    if ((val & 3) < 2) {
        env->stvec = val;
    } else {
        qemu_log_mask(LOG_UNIMP, "CSR_STVEC: reserved mode not supported\n");
    }
    return RISCV_EXCP_NONE;
}

static RISCVException read_scounteren(CPURISCVState *env, int csrno,
                                      target_ulong *val)
{
    *val = env->scounteren;
    return RISCV_EXCP_NONE;
}

static RISCVException write_scounteren(CPURISCVState *env, int csrno,
                                       target_ulong val)
{
    env->scounteren = val;
    return RISCV_EXCP_NONE;
}

/* Supervisor Trap Handling */
static RISCVException read_sscratch_i128(CPURISCVState *env, int csrno,
                                         Int128 *val)
{
    *val = int128_make128(env->sscratch, env->sscratchh);
    return RISCV_EXCP_NONE;
}

static RISCVException write_sscratch_i128(CPURISCVState *env, int csrno,
                                          Int128 val)
{
    env->sscratch = int128_getlo(val);
    env->sscratchh = int128_gethi(val);
    return RISCV_EXCP_NONE;
}

static RISCVException read_sscratch(CPURISCVState *env, int csrno,
                                    target_ulong *val)
{
    *val = env->sscratch;
    return RISCV_EXCP_NONE;
}

static RISCVException write_sscratch(CPURISCVState *env, int csrno,
                                     target_ulong val)
{
    env->sscratch = val;
    return RISCV_EXCP_NONE;
}

static RISCVException read_sepc(CPURISCVState *env, int csrno,
                                target_ulong *val)
{
    *val = env->sepc;
    return RISCV_EXCP_NONE;
}

static RISCVException write_sepc(CPURISCVState *env, int csrno,
                                 target_ulong val)
{
    env->sepc = val;
    return RISCV_EXCP_NONE;
}

static RISCVException read_scause(CPURISCVState *env, int csrno,
                                  target_ulong *val)
{
    *val = env->scause;
    return RISCV_EXCP_NONE;
}

static RISCVException write_scause(CPURISCVState *env, int csrno,
                                   target_ulong val)
{
    env->scause = val;
    return RISCV_EXCP_NONE;
}

static RISCVException read_stval(CPURISCVState *env, int csrno,
                                 target_ulong *val)
{
    *val = env->stval;
    return RISCV_EXCP_NONE;
}

static RISCVException write_stval(CPURISCVState *env, int csrno,
                                  target_ulong val)
{
    env->stval = val;
    return RISCV_EXCP_NONE;
}
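
/*
 * The VS-level interrupt bits (VSSIP/VSTIP/VSEIP) sit one position above
 * their S-level counterparts in mip/mie, so the vsip/vsie accessors shift
 * by one when converting between the two layouts.
 */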
static RISCVException rmw_vsip(CPURISCVState *env, int csrno,
                               target_ulong *ret_value,
                               target_ulong new_value, target_ulong write_mask)
{
    /* Shift the S bits to their VS bit location in mip */
    int ret = rmw_mip(env, 0, ret_value, new_value << 1,
                      (write_mask << 1) & vsip_writable_mask & env->hideleg);

    if (ret_value) {
        *ret_value &= VS_MODE_INTERRUPTS;
        /* Shift the VS bits to their S bit location in vsip */
        *ret_value >>= 1;
    }
    return ret;
}

static RISCVException rmw_sip(CPURISCVState *env, int csrno,
                              target_ulong *ret_value,
                              target_ulong new_value, target_ulong write_mask)
{
    int ret;

    if (riscv_cpu_virt_enabled(env)) {
        ret = rmw_vsip(env, CSR_VSIP, ret_value, new_value, write_mask);
    } else {
        ret = rmw_mip(env, CSR_MSTATUS, ret_value, new_value,
                      write_mask & env->mideleg & sip_writable_mask);
    }

    if (ret_value) {
        *ret_value &= env->mideleg & S_MODE_INTERRUPTS;
    }
    return ret;
}

/* Supervisor Protection and Translation */
static RISCVException read_satp(CPURISCVState *env, int csrno,
                                target_ulong *val)
{
    if (!riscv_feature(env, RISCV_FEATURE_MMU)) {
        *val = 0;
        return RISCV_EXCP_NONE;
    }

    if (env->priv == PRV_S && get_field(env->mstatus, MSTATUS_TVM)) {
        return RISCV_EXCP_ILLEGAL_INST;
    } else {
        *val = env->satp;
    }

    return RISCV_EXCP_NONE;
}

static RISCVException write_satp(CPURISCVState *env, int csrno,
                                 target_ulong val)
{
    target_ulong vm, mask, asid;

    if (!riscv_feature(env, RISCV_FEATURE_MMU)) {
        return RISCV_EXCP_NONE;
    }

    if (riscv_cpu_mxl(env) == MXL_RV32) {
        vm = validate_vm(env, get_field(val, SATP32_MODE));
        mask = (val ^ env->satp) & (SATP32_MODE | SATP32_ASID | SATP32_PPN);
        asid = (val ^ env->satp) & SATP32_ASID;
    } else {
        vm = validate_vm(env, get_field(val, SATP64_MODE));
        mask = (val ^ env->satp) & (SATP64_MODE | SATP64_ASID | SATP64_PPN);
        asid = (val ^ env->satp) & SATP64_ASID;
    }

    if (vm && mask) {
        if (env->priv == PRV_S && get_field(env->mstatus, MSTATUS_TVM)) {
            return RISCV_EXCP_ILLEGAL_INST;
        } else {
            if (asid) {
                tlb_flush(env_cpu(env));
            }
            env->satp = val;
        }
    }
    return RISCV_EXCP_NONE;
}

/* Hypervisor Extensions */
static RISCVException read_hstatus(CPURISCVState *env, int csrno,
                                   target_ulong *val)
{
    *val = env->hstatus;
    if (riscv_cpu_mxl(env) != MXL_RV32) {
        /* We only support 64-bit VSXL */
        *val = set_field(*val, HSTATUS_VSXL, 2);
    }
    /* We only support little endian */
    *val = set_field(*val, HSTATUS_VSBE, 0);
    return RISCV_EXCP_NONE;
}

static RISCVException write_hstatus(CPURISCVState *env, int csrno,
                                    target_ulong val)
{
    env->hstatus = val;
    if (riscv_cpu_mxl(env) != MXL_RV32 && get_field(val, HSTATUS_VSXL) != 2) {
        qemu_log_mask(LOG_UNIMP, "QEMU does not support mixed HSXLEN options.");
    }
    if (get_field(val, HSTATUS_VSBE) != 0) {
        qemu_log_mask(LOG_UNIMP, "QEMU does not support big endian guests.");
    }
    return RISCV_EXCP_NONE;
}

static RISCVException read_hedeleg(CPURISCVState *env, int csrno,
                                   target_ulong *val)
{
    *val = env->hedeleg;
    return RISCV_EXCP_NONE;
}

static RISCVException write_hedeleg(CPURISCVState *env, int csrno,
                                    target_ulong val)
{
    env->hedeleg = val & vs_delegable_excps;
    return RISCV_EXCP_NONE;
}

static RISCVException read_hideleg(CPURISCVState *env, int csrno,
                                   target_ulong *val)
{
    *val = env->hideleg;
    return RISCV_EXCP_NONE;
}

static RISCVException write_hideleg(CPURISCVState *env, int csrno,
                                    target_ulong val)
{
    env->hideleg = val & vs_delegable_ints;
    return RISCV_EXCP_NONE;
}

static RISCVException rmw_hvip(CPURISCVState *env, int csrno,
                               target_ulong *ret_value,
                               target_ulong new_value, target_ulong write_mask)
{
    int ret = rmw_mip(env, 0, ret_value, new_value,
                      write_mask & hvip_writable_mask);

    if (ret_value) {
        *ret_value &= VS_MODE_INTERRUPTS;
    }
    return ret;
}

static RISCVException rmw_hip(CPURISCVState *env, int csrno,
                              target_ulong *ret_value,
                              target_ulong new_value, target_ulong write_mask)
{
    int ret = rmw_mip(env, 0, ret_value, new_value,
                      write_mask & hip_writable_mask);

    if (ret_value) {
        *ret_value &= HS_MODE_INTERRUPTS;
    }
    return ret;
}

static RISCVException read_hie(CPURISCVState *env, int csrno,
                               target_ulong *val)
{
    *val = env->mie & HS_MODE_INTERRUPTS;
    return RISCV_EXCP_NONE;
}

static RISCVException write_hie(CPURISCVState *env, int csrno,
                                target_ulong val)
{
    target_ulong newval = (env->mie & ~HS_MODE_INTERRUPTS) |
                          (val & HS_MODE_INTERRUPTS);
    return write_mie(env, CSR_MIE, newval);
}

static RISCVException read_hcounteren(CPURISCVState *env, int csrno,
                                      target_ulong *val)
{
    *val = env->hcounteren;
    return RISCV_EXCP_NONE;
}

static RISCVException write_hcounteren(CPURISCVState *env, int csrno,
                                       target_ulong val)
{
    env->hcounteren = val;
    return RISCV_EXCP_NONE;
}

static RISCVException write_hgeie(CPURISCVState *env, int csrno,
                                  target_ulong val)
{
    if (val) {
        qemu_log_mask(LOG_UNIMP, "No support for a non-zero GEILEN.");
    }
    return RISCV_EXCP_NONE;
}

static RISCVException read_htval(CPURISCVState *env, int csrno,
                                 target_ulong *val)
{
    *val = env->htval;
    return RISCV_EXCP_NONE;
}

static RISCVException write_htval(CPURISCVState *env, int csrno,
                                  target_ulong val)
{
    env->htval = val;
    return RISCV_EXCP_NONE;
}

static RISCVException read_htinst(CPURISCVState *env, int csrno,
                                  target_ulong *val)
{
    *val = env->htinst;
    return RISCV_EXCP_NONE;
}

static RISCVException write_htinst(CPURISCVState *env, int csrno,
                                   target_ulong val)
{
    return RISCV_EXCP_NONE;
}

static RISCVException write_hgeip(CPURISCVState *env, int csrno,
                                  target_ulong val)
{
    if (val) {
        qemu_log_mask(LOG_UNIMP, "No support for a non-zero GEILEN.");
    }
    return RISCV_EXCP_NONE;
}

static RISCVException read_hgatp(CPURISCVState *env, int csrno,
                                 target_ulong *val)
{
    *val = env->hgatp;
    return RISCV_EXCP_NONE;
}

static RISCVException write_hgatp(CPURISCVState *env, int csrno,
                                  target_ulong val)
{
    env->hgatp = val;
    return RISCV_EXCP_NONE;
}

static RISCVException read_htimedelta(CPURISCVState *env, int csrno,
                                      target_ulong *val)
{
    if (!env->rdtime_fn) {
        return RISCV_EXCP_ILLEGAL_INST;
    }

    *val = env->htimedelta;
    return RISCV_EXCP_NONE;
}

static RISCVException write_htimedelta(CPURISCVState *env, int csrno,
                                       target_ulong val)
{
    if (!env->rdtime_fn) {
        return RISCV_EXCP_ILLEGAL_INST;
    }

    if (riscv_cpu_mxl(env) == MXL_RV32) {
        env->htimedelta = deposit64(env->htimedelta, 0, 32, (uint64_t)val);
    } else {
        env->htimedelta = val;
    }
    return RISCV_EXCP_NONE;
}

static RISCVException read_htimedeltah(CPURISCVState *env, int csrno,
                                       target_ulong *val)
{
    if (!env->rdtime_fn) {
        return RISCV_EXCP_ILLEGAL_INST;
    }

    *val = env->htimedelta >> 32;
    return RISCV_EXCP_NONE;
}

static RISCVException write_htimedeltah(CPURISCVState *env, int csrno,
                                        target_ulong val)
{
    if (!env->rdtime_fn) {
        return RISCV_EXCP_ILLEGAL_INST;
    }

    env->htimedelta = deposit64(env->htimedelta, 32, 32, (uint64_t)val);
    return RISCV_EXCP_NONE;
}

/* Virtual CSR Registers */
static RISCVException read_vsstatus(CPURISCVState *env, int csrno,
                                    target_ulong *val)
{
    *val = env->vsstatus;
    return RISCV_EXCP_NONE;
}

static RISCVException write_vsstatus(CPURISCVState *env, int csrno,
                                     target_ulong val)
{
    uint64_t mask = (target_ulong)-1;
    if ((val & VSSTATUS64_UXL) == 0) {
        mask &= ~VSSTATUS64_UXL;
    }
    env->vsstatus = (env->vsstatus & ~mask) | (uint64_t)val;
    return RISCV_EXCP_NONE;
}

static int read_vstvec(CPURISCVState *env, int csrno, target_ulong *val)
{
    *val = env->vstvec;
    return RISCV_EXCP_NONE;
}

static RISCVException write_vstvec(CPURISCVState *env, int csrno,
                                   target_ulong val)
{
    env->vstvec = val;
    return RISCV_EXCP_NONE;
}

static RISCVException read_vsscratch(CPURISCVState *env, int csrno,
                                     target_ulong *val)
{
    *val = env->vsscratch;
    return RISCV_EXCP_NONE;
}

static RISCVException write_vsscratch(CPURISCVState *env, int csrno,
                                      target_ulong val)
{
    env->vsscratch = val;
    return RISCV_EXCP_NONE;
}

static RISCVException read_vsepc(CPURISCVState *env, int csrno,
                                 target_ulong *val)
{
    *val = env->vsepc;
    return RISCV_EXCP_NONE;
}

static RISCVException write_vsepc(CPURISCVState *env, int csrno,
                                  target_ulong val)
{
    env->vsepc = val;
    return RISCV_EXCP_NONE;
}

static RISCVException read_vscause(CPURISCVState *env, int csrno,
                                   target_ulong *val)
{
    *val = env->vscause;
    return RISCV_EXCP_NONE;
}

static RISCVException write_vscause(CPURISCVState *env, int csrno,
                                    target_ulong val)
{
    env->vscause = val;
    return RISCV_EXCP_NONE;
}

static RISCVException read_vstval(CPURISCVState *env, int csrno,
                                  target_ulong *val)
{
    *val = env->vstval;
    return RISCV_EXCP_NONE;
}

static RISCVException write_vstval(CPURISCVState *env, int csrno,
                                   target_ulong val)
{
    env->vstval = val;
    return RISCV_EXCP_NONE;
}

static RISCVException read_vsatp(CPURISCVState *env, int csrno,
                                 target_ulong *val)
{
    *val = env->vsatp;
    return RISCV_EXCP_NONE;
}

static RISCVException write_vsatp(CPURISCVState *env, int csrno,
                                  target_ulong val)
{
    env->vsatp = val;
    return RISCV_EXCP_NONE;
}

static RISCVException read_mtval2(CPURISCVState *env, int csrno,
                                  target_ulong *val)
{
    *val = env->mtval2;
    return RISCV_EXCP_NONE;
}

static RISCVException write_mtval2(CPURISCVState *env, int csrno,
                                   target_ulong val)
{
    env->mtval2 = val;
    return RISCV_EXCP_NONE;
}

static RISCVException read_mtinst(CPURISCVState *env, int csrno,
                                  target_ulong *val)
{
    *val = env->mtinst;
    return RISCV_EXCP_NONE;
}

static RISCVException write_mtinst(CPURISCVState *env, int csrno,
                                   target_ulong val)
{
    env->mtinst = val;
    return RISCV_EXCP_NONE;
}

/* Physical Memory Protection */
static RISCVException read_mseccfg(CPURISCVState *env, int csrno,
                                   target_ulong *val)
{
    *val = mseccfg_csr_read(env);
    return RISCV_EXCP_NONE;
}

static RISCVException write_mseccfg(CPURISCVState *env, int csrno,
                                    target_ulong val)
{
    mseccfg_csr_write(env, val);
    return RISCV_EXCP_NONE;
}

static bool check_pmp_reg_index(CPURISCVState *env, uint32_t reg_index)
{
    /* TODO: RV128 restriction check */
    if ((reg_index & 1) && (riscv_cpu_mxl(env) == MXL_RV64)) {
        return false;
    }
    return true;
}

static RISCVException read_pmpcfg(CPURISCVState *env, int csrno,
                                  target_ulong *val)
{
    uint32_t reg_index = csrno - CSR_PMPCFG0;

    if (!check_pmp_reg_index(env, reg_index)) {
        return RISCV_EXCP_ILLEGAL_INST;
    }
    *val = pmpcfg_csr_read(env, csrno - CSR_PMPCFG0);
    return RISCV_EXCP_NONE;
}

static RISCVException write_pmpcfg(CPURISCVState *env, int csrno,
                                   target_ulong val)
{
    uint32_t reg_index = csrno - CSR_PMPCFG0;

    if (!check_pmp_reg_index(env, reg_index)) {
        return RISCV_EXCP_ILLEGAL_INST;
    }
    pmpcfg_csr_write(env, csrno - CSR_PMPCFG0, val);
    return RISCV_EXCP_NONE;
}

static RISCVException read_pmpaddr(CPURISCVState *env, int csrno,
                                   target_ulong *val)
{
    *val = pmpaddr_csr_read(env, csrno - CSR_PMPADDR0);
    return RISCV_EXCP_NONE;
}

static RISCVException write_pmpaddr(CPURISCVState *env, int csrno,
                                    target_ulong val)
{
    pmpaddr_csr_write(env, csrno - CSR_PMPADDR0, val);
    return RISCV_EXCP_NONE;
}

/*
 * Functions to access Pointer Masking feature registers
 * We have to check if current priv lvl could modify
 * csr in given mode
 */
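/*
 * pm.current in mmte selects, per privilege level, whether that level may
 * update its own pointer-masking CSRs; accesses from a higher privilege
 * level or from the debugger are always allowed.
 */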
static bool check_pm_current_disabled(CPURISCVState *env, int csrno)
{
    int csr_priv = get_field(csrno, 0x300);
    int pm_current;

    if (env->debugger) {
        return false;
    }
    /*
     * If priv lvls differ that means we're accessing csr from higher priv lvl,
     * so allow the access
     */
    if (env->priv != csr_priv) {
        return false;
    }
    switch (env->priv) {
    case PRV_M:
        pm_current = get_field(env->mmte, M_PM_CURRENT);
        break;
    case PRV_S:
        pm_current = get_field(env->mmte, S_PM_CURRENT);
        break;
    case PRV_U:
        pm_current = get_field(env->mmte, U_PM_CURRENT);
        break;
    default:
        g_assert_not_reached();
    }
    /* It's same priv lvl, so we allow to modify csr only if pm.current==1 */
    return !pm_current;
}

static RISCVException read_mmte(CPURISCVState *env, int csrno,
                                target_ulong *val)
{
    *val = env->mmte & MMTE_MASK;
    return RISCV_EXCP_NONE;
}

static RISCVException write_mmte(CPURISCVState *env, int csrno,
                                 target_ulong val)
{
    uint64_t mstatus;
    target_ulong wpri_val = val & MMTE_MASK;

    if (val != wpri_val) {
        qemu_log_mask(LOG_GUEST_ERROR, "%s" TARGET_FMT_lx " %s" TARGET_FMT_lx "\n",
                      "MMTE: WPRI violation written 0x", val,
                      "vs expected 0x", wpri_val);
    }
    /* for machine mode pm.current is hardwired to 1 */
    wpri_val |= MMTE_M_PM_CURRENT;

    /* hardwiring pm.instruction bit to 0, since it's not supported yet */
    wpri_val &= ~(MMTE_M_PM_INSN | MMTE_S_PM_INSN | MMTE_U_PM_INSN);
    env->mmte = wpri_val | PM_EXT_DIRTY;
    riscv_cpu_update_mask(env);

    /* Set XS and SD bits, since PM CSRs are dirty */
    mstatus = env->mstatus | MSTATUS_XS;
    write_mstatus(env, csrno, mstatus);
    return RISCV_EXCP_NONE;
}

static RISCVException read_smte(CPURISCVState *env, int csrno,
                                target_ulong *val)
{
    *val = env->mmte & SMTE_MASK;
    return RISCV_EXCP_NONE;
}

static RISCVException write_smte(CPURISCVState *env, int csrno,
                                 target_ulong val)
{
    target_ulong wpri_val = val & SMTE_MASK;

    if (val != wpri_val) {
        qemu_log_mask(LOG_GUEST_ERROR, "%s" TARGET_FMT_lx " %s" TARGET_FMT_lx "\n",
                      "SMTE: WPRI violation written 0x", val,
                      "vs expected 0x", wpri_val);
    }

    /* if pm.current==0 we can't modify current PM CSRs */
    if (check_pm_current_disabled(env, csrno)) {
        return RISCV_EXCP_NONE;
    }

    wpri_val |= (env->mmte & ~SMTE_MASK);
    write_mmte(env, csrno, wpri_val);
    return RISCV_EXCP_NONE;
}

static RISCVException read_umte(CPURISCVState *env, int csrno,
                                target_ulong *val)
{
    *val = env->mmte & UMTE_MASK;
    return RISCV_EXCP_NONE;
}

static RISCVException write_umte(CPURISCVState *env, int csrno,
                                 target_ulong val)
{
    target_ulong wpri_val = val & UMTE_MASK;

    if (val != wpri_val) {
        qemu_log_mask(LOG_GUEST_ERROR, "%s" TARGET_FMT_lx " %s" TARGET_FMT_lx "\n",
                      "UMTE: WPRI violation written 0x", val,
                      "vs expected 0x", wpri_val);
    }

    if (check_pm_current_disabled(env, csrno)) {
        return RISCV_EXCP_NONE;
    }

    wpri_val |= (env->mmte & ~UMTE_MASK);
    write_mmte(env, csrno, wpri_val);
    return RISCV_EXCP_NONE;
}

static RISCVException read_mpmmask(CPURISCVState *env, int csrno,
                                   target_ulong *val)
{
    *val = env->mpmmask;
    return RISCV_EXCP_NONE;
}

static RISCVException write_mpmmask(CPURISCVState *env, int csrno,
                                    target_ulong val)
{
    uint64_t mstatus;

    env->mpmmask = val;
    if ((env->priv == PRV_M) && (env->mmte & M_PM_ENABLE)) {
        env->cur_pmmask = val;
    }
    env->mmte |= PM_EXT_DIRTY;

    /* Set XS and SD bits, since PM CSRs are dirty */
    mstatus = env->mstatus | MSTATUS_XS;
    write_mstatus(env, csrno, mstatus);
    return RISCV_EXCP_NONE;
}

static RISCVException read_spmmask(CPURISCVState *env, int csrno,
                                   target_ulong *val)
{
    *val = env->spmmask;
    return RISCV_EXCP_NONE;
}

static RISCVException write_spmmask(CPURISCVState *env, int csrno,
                                    target_ulong val)
{
    uint64_t mstatus;

    /* if pm.current==0 we can't modify current PM CSRs */
    if (check_pm_current_disabled(env, csrno)) {
        return RISCV_EXCP_NONE;
    }
    env->spmmask = val;
    if ((env->priv == PRV_S) && (env->mmte & S_PM_ENABLE)) {
        env->cur_pmmask = val;
    }
    env->mmte |= PM_EXT_DIRTY;

    /* Set XS and SD bits, since PM CSRs are dirty */
    mstatus = env->mstatus | MSTATUS_XS;
    write_mstatus(env, csrno, mstatus);
    return RISCV_EXCP_NONE;
}

static RISCVException read_upmmask(CPURISCVState *env, int csrno,
                                   target_ulong *val)
{
    *val = env->upmmask;
    return RISCV_EXCP_NONE;
}

static RISCVException write_upmmask(CPURISCVState *env, int csrno,
                                    target_ulong val)
{
    uint64_t mstatus;

    /* if pm.current==0 we can't modify current PM CSRs */
    if (check_pm_current_disabled(env, csrno)) {
        return RISCV_EXCP_NONE;
    }
    env->upmmask = val;
    if ((env->priv == PRV_U) && (env->mmte & U_PM_ENABLE)) {
        env->cur_pmmask = val;
    }
    env->mmte |= PM_EXT_DIRTY;

    /* Set XS and SD bits, since PM CSRs are dirty */
    mstatus = env->mstatus | MSTATUS_XS;
    write_mstatus(env, csrno, mstatus);
    return RISCV_EXCP_NONE;
}

static RISCVException read_mpmbase(CPURISCVState *env, int csrno,
                                   target_ulong *val)
{
    *val = env->mpmbase;
    return RISCV_EXCP_NONE;
}

static RISCVException write_mpmbase(CPURISCVState *env, int csrno,
                                    target_ulong val)
{
    uint64_t mstatus;

    env->mpmbase = val;
    if ((env->priv == PRV_M) && (env->mmte & M_PM_ENABLE)) {
        env->cur_pmbase = val;
    }
    env->mmte |= PM_EXT_DIRTY;

    /* Set XS and SD bits, since PM CSRs are dirty */
    mstatus = env->mstatus | MSTATUS_XS;
    write_mstatus(env, csrno, mstatus);
    return RISCV_EXCP_NONE;
}

static RISCVException read_spmbase(CPURISCVState *env, int csrno,
                                   target_ulong *val)
{
    *val = env->spmbase;
    return RISCV_EXCP_NONE;
}

static RISCVException write_spmbase(CPURISCVState *env, int csrno,
                                    target_ulong val)
{
    uint64_t mstatus;

    /* if pm.current==0 we can't modify current PM CSRs */
    if (check_pm_current_disabled(env, csrno)) {
        return RISCV_EXCP_NONE;
    }
    env->spmbase = val;
    if ((env->priv == PRV_S) && (env->mmte & S_PM_ENABLE)) {
        env->cur_pmbase = val;
    }
    env->mmte |= PM_EXT_DIRTY;

    /* Set XS and SD bits, since PM CSRs are dirty */
    mstatus = env->mstatus | MSTATUS_XS;
    write_mstatus(env, csrno, mstatus);
    return RISCV_EXCP_NONE;
}

static RISCVException read_upmbase(CPURISCVState *env, int csrno,
                                   target_ulong *val)
{
    *val = env->upmbase;
    return RISCV_EXCP_NONE;
}

static RISCVException write_upmbase(CPURISCVState *env, int csrno,
                                    target_ulong val)
{
    uint64_t mstatus;

    /* if pm.current==0 we can't modify current PM CSRs */
    if (check_pm_current_disabled(env, csrno)) {
        return RISCV_EXCP_NONE;
    }
    env->upmbase = val;
    if ((env->priv == PRV_U) && (env->mmte & U_PM_ENABLE)) {
        env->cur_pmbase = val;
    }
    env->mmte |= PM_EXT_DIRTY;

    /* Set XS and SD bits, since PM CSRs are dirty */
    mstatus = env->mstatus | MSTATUS_XS;
    write_mstatus(env, csrno, mstatus);
    return RISCV_EXCP_NONE;
}

#endif

/*
 * riscv_csrrw - read and/or update control and status register
 *
 * csrr  <->  riscv_csrrw(env, csrno, ret_value, 0, 0);
 * csrrw <->  riscv_csrrw(env, csrno, ret_value, value, -1);
 * csrrs <->  riscv_csrrw(env, csrno, ret_value, -1, value);
 * csrrc <->  riscv_csrrw(env, csrno, ret_value, 0, value);
 */
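/*
 * For example, csrrc rd, mstatus, rs1 maps to
 * riscv_csrrw(env, CSR_MSTATUS, &rd_val, 0, rs1_val): bits set in the write
 * mask are cleared (new_value is zero there) and rd_val receives the old CSR
 * value. (Illustrative only; rd_val/rs1_val stand for the decoded operand
 * values.)
 */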

static inline RISCVException riscv_csrrw_check(CPURISCVState *env,
                                               int csrno,
                                               bool write_mask,
                                               RISCVCPU *cpu)
{
    /* check privileges and return RISCV_EXCP_ILLEGAL_INST if check fails */
    int read_only = get_field(csrno, 0xC00) == 3;
#if !defined(CONFIG_USER_ONLY)
    int effective_priv = env->priv;

    if (riscv_has_ext(env, RVH) &&
        env->priv == PRV_S &&
        !riscv_cpu_virt_enabled(env)) {
        /*
         * We are in S mode without virtualisation, therefore we are in HS Mode.
         * Add 1 to the effective privilege level to allow us to access the
         * Hypervisor CSRs.
         */
        effective_priv++;
    }

    if (!env->debugger && (effective_priv < get_field(csrno, 0x300))) {
        return RISCV_EXCP_ILLEGAL_INST;
    }
#endif
    if (write_mask && read_only) {
        return RISCV_EXCP_ILLEGAL_INST;
    }

    /* ensure the CSR extension is enabled. */
    if (!cpu->cfg.ext_icsr) {
        return RISCV_EXCP_ILLEGAL_INST;
    }

    /* check predicate */
    if (!csr_ops[csrno].predicate) {
        return RISCV_EXCP_ILLEGAL_INST;
    }

    return csr_ops[csrno].predicate(env, csrno);
}

static RISCVException riscv_csrrw_do64(CPURISCVState *env, int csrno,
                                       target_ulong *ret_value,
                                       target_ulong new_value,
                                       target_ulong write_mask)
{
    RISCVException ret;
    target_ulong old_value;

    /* execute combined read/write operation if it exists */
    if (csr_ops[csrno].op) {
        return csr_ops[csrno].op(env, csrno, ret_value, new_value, write_mask);
    }

    /* if no accessor exists then return failure */
    if (!csr_ops[csrno].read) {
        return RISCV_EXCP_ILLEGAL_INST;
    }
    /* read old value */
    ret = csr_ops[csrno].read(env, csrno, &old_value);
    if (ret != RISCV_EXCP_NONE) {
        return ret;
    }

    /* write value if writable and write mask set, otherwise drop writes */
    if (write_mask) {
        new_value = (old_value & ~write_mask) | (new_value & write_mask);
        if (csr_ops[csrno].write) {
            ret = csr_ops[csrno].write(env, csrno, new_value);
            if (ret != RISCV_EXCP_NONE) {
                return ret;
            }
        }
    }

    /* return old value */
    if (ret_value) {
        *ret_value = old_value;
    }

    return RISCV_EXCP_NONE;
}

RISCVException riscv_csrrw(CPURISCVState *env, int csrno,
                           target_ulong *ret_value,
                           target_ulong new_value, target_ulong write_mask)
{
    RISCVCPU *cpu = env_archcpu(env);

    RISCVException ret = riscv_csrrw_check(env, csrno, write_mask, cpu);
    if (ret != RISCV_EXCP_NONE) {
        return ret;
    }

    return riscv_csrrw_do64(env, csrno, ret_value, new_value, write_mask);
}

static RISCVException riscv_csrrw_do128(CPURISCVState *env, int csrno,
                                        Int128 *ret_value,
                                        Int128 new_value,
                                        Int128 write_mask)
{
    RISCVException ret;
    Int128 old_value;

    /* read old value */
    ret = csr_ops[csrno].read128(env, csrno, &old_value);
    if (ret != RISCV_EXCP_NONE) {
        return ret;
    }

    /* write value if writable and write mask set, otherwise drop writes */
    if (int128_nz(write_mask)) {
        new_value = int128_or(int128_and(old_value, int128_not(write_mask)),
                              int128_and(new_value, write_mask));
        if (csr_ops[csrno].write128) {
            ret = csr_ops[csrno].write128(env, csrno, new_value);
            if (ret != RISCV_EXCP_NONE) {
                return ret;
            }
        } else if (csr_ops[csrno].write) {
            /* avoids having to write wrappers for all registers */
            ret = csr_ops[csrno].write(env, csrno, int128_getlo(new_value));
            if (ret != RISCV_EXCP_NONE) {
                return ret;
            }
        }
    }

    /* return old value */
    if (ret_value) {
        *ret_value = old_value;
    }

    return RISCV_EXCP_NONE;
}

RISCVException riscv_csrrw_i128(CPURISCVState *env, int csrno,
                                Int128 *ret_value,
                                Int128 new_value, Int128 write_mask)
{
    RISCVException ret;
    RISCVCPU *cpu = env_archcpu(env);

    ret = riscv_csrrw_check(env, csrno, int128_nz(write_mask), cpu);
    if (ret != RISCV_EXCP_NONE) {
        return ret;
    }

    if (csr_ops[csrno].read128) {
        return riscv_csrrw_do128(env, csrno, ret_value, new_value, write_mask);
    }

    /*
     * Fall back to 64-bit version for now, if the 128-bit alternative isn't
     * defined at all.
     * Note that some CSRs do not need to extend to MXLEN (the upper 64 bits
     * are not significant); for those, this fallback handles the accesses
     * correctly.
     */
    target_ulong old_value;
    ret = riscv_csrrw_do64(env, csrno, &old_value,
                           int128_getlo(new_value),
                           int128_getlo(write_mask));
    if (ret == RISCV_EXCP_NONE && ret_value) {
        *ret_value = int128_make64(old_value);
    }
    return ret;
}

/*
 * Debugger support. If not in user mode, set env->debugger before the
 * riscv_csrrw call and clear it after the call.
 */
RISCVException riscv_csrrw_debug(CPURISCVState *env, int csrno,
                                 target_ulong *ret_value,
                                 target_ulong new_value,
                                 target_ulong write_mask)
{
    RISCVException ret;
#if !defined(CONFIG_USER_ONLY)
    env->debugger = true;
#endif
    ret = riscv_csrrw(env, csrno, ret_value, new_value, write_mask);
#if !defined(CONFIG_USER_ONLY)
    env->debugger = false;
#endif
    return ret;
}

/* Control and Status Register function table */
riscv_csr_operations csr_ops[CSR_TABLE_SIZE] = {
    /* User Floating-Point CSRs */
    [CSR_FFLAGS] = { "fflags", fs, read_fflags, write_fflags },
    [CSR_FRM] = { "frm", fs, read_frm, write_frm },
    [CSR_FCSR] = { "fcsr", fs, read_fcsr, write_fcsr },
    /* Vector CSRs */
    [CSR_VSTART] = { "vstart", vs, read_vstart, write_vstart },
    [CSR_VXSAT] = { "vxsat", vs, read_vxsat, write_vxsat },
    [CSR_VXRM] = { "vxrm", vs, read_vxrm, write_vxrm },
    [CSR_VCSR] = { "vcsr", vs, read_vcsr, write_vcsr },
    [CSR_VL] = { "vl", vs, read_vl },
    [CSR_VTYPE] = { "vtype", vs, read_vtype },
    [CSR_VLENB] = { "vlenb", vs, read_vlenb },
    /* User Timers and Counters */
    [CSR_CYCLE] = { "cycle", ctr, read_instret },
    [CSR_INSTRET] = { "instret", ctr, read_instret },
    [CSR_CYCLEH] = { "cycleh", ctr32, read_instreth },
    [CSR_INSTRETH] = { "instreth", ctr32, read_instreth },

    /*
     * In privileged mode, the monitor will have to emulate TIME CSRs only if
     * rdtime callback is not provided by machine/platform emulation.
/* Control and Status Register function table */
riscv_csr_operations csr_ops[CSR_TABLE_SIZE] = {
    /* User Floating-Point CSRs */
    [CSR_FFLAGS] = { "fflags", fs, read_fflags, write_fflags },
    [CSR_FRM] = { "frm", fs, read_frm, write_frm },
    [CSR_FCSR] = { "fcsr", fs, read_fcsr, write_fcsr },
    /* Vector CSRs */
    [CSR_VSTART] = { "vstart", vs, read_vstart, write_vstart },
    [CSR_VXSAT] = { "vxsat", vs, read_vxsat, write_vxsat },
    [CSR_VXRM] = { "vxrm", vs, read_vxrm, write_vxrm },
    [CSR_VCSR] = { "vcsr", vs, read_vcsr, write_vcsr },
    [CSR_VL] = { "vl", vs, read_vl },
    [CSR_VTYPE] = { "vtype", vs, read_vtype },
    [CSR_VLENB] = { "vlenb", vs, read_vlenb },
    /* User Timers and Counters */
    [CSR_CYCLE] = { "cycle", ctr, read_instret },
    [CSR_INSTRET] = { "instret", ctr, read_instret },
    [CSR_CYCLEH] = { "cycleh", ctr32, read_instreth },
    [CSR_INSTRETH] = { "instreth", ctr32, read_instreth },

    /*
     * In privileged mode, the monitor will have to emulate TIME CSRs only if
     * the rdtime callback is not provided by machine/platform emulation.
     */
    [CSR_TIME] = { "time", ctr, read_time },
    [CSR_TIMEH] = { "timeh", ctr32, read_timeh },

#if !defined(CONFIG_USER_ONLY)
    /* Machine Timers and Counters */
    [CSR_MCYCLE] = { "mcycle", any, read_instret },
    [CSR_MINSTRET] = { "minstret", any, read_instret },
    [CSR_MCYCLEH] = { "mcycleh", any32, read_instreth },
    [CSR_MINSTRETH] = { "minstreth", any32, read_instreth },

    /* Machine Information Registers */
    [CSR_MVENDORID] = { "mvendorid", any, read_zero },
    [CSR_MARCHID] = { "marchid", any, read_zero },
    [CSR_MIMPID] = { "mimpid", any, read_zero },
    [CSR_MHARTID] = { "mhartid", any, read_mhartid },

    /* Machine Trap Setup */
    [CSR_MSTATUS] = { "mstatus", any, read_mstatus, write_mstatus, NULL,
                      read_mstatus_i128 },
    [CSR_MISA] = { "misa", any, read_misa, write_misa, NULL,
                   read_misa_i128 },
    [CSR_MIDELEG] = { "mideleg", any, read_mideleg, write_mideleg },
    [CSR_MEDELEG] = { "medeleg", any, read_medeleg, write_medeleg },
    [CSR_MIE] = { "mie", any, read_mie, write_mie },
    [CSR_MTVEC] = { "mtvec", any, read_mtvec, write_mtvec },
    [CSR_MCOUNTEREN] = { "mcounteren", any, read_mcounteren, write_mcounteren },

    [CSR_MSTATUSH] = { "mstatush", any32, read_mstatush, write_mstatush },

    /* Machine Trap Handling */
    [CSR_MSCRATCH] = { "mscratch", any, read_mscratch, write_mscratch, NULL,
                       read_mscratch_i128, write_mscratch_i128 },
    [CSR_MEPC] = { "mepc", any, read_mepc, write_mepc },
    [CSR_MCAUSE] = { "mcause", any, read_mcause, write_mcause },
    [CSR_MTVAL] = { "mtval", any, read_mtval, write_mtval },
    [CSR_MIP] = { "mip", any, NULL, NULL, rmw_mip },

    /* Supervisor Trap Setup */
    [CSR_SSTATUS] = { "sstatus", smode, read_sstatus, write_sstatus, NULL,
                      read_sstatus_i128 },
    [CSR_SIE] = { "sie", smode, read_sie, write_sie },
    [CSR_STVEC] = { "stvec", smode, read_stvec, write_stvec },
    [CSR_SCOUNTEREN] = { "scounteren", smode, read_scounteren, write_scounteren },

    /* Supervisor Trap Handling */
    [CSR_SSCRATCH] = { "sscratch", smode, read_sscratch, write_sscratch, NULL,
                       read_sscratch_i128, write_sscratch_i128 },
    [CSR_SEPC] = { "sepc", smode, read_sepc, write_sepc },
    [CSR_SCAUSE] = { "scause", smode, read_scause, write_scause },
    [CSR_STVAL] = { "stval", smode, read_stval, write_stval },
    [CSR_SIP] = { "sip", smode, NULL, NULL, rmw_sip },

    /* Supervisor Protection and Translation */
    [CSR_SATP] = { "satp", smode, read_satp, write_satp },

    [CSR_HSTATUS] = { "hstatus", hmode, read_hstatus, write_hstatus },
    [CSR_HEDELEG] = { "hedeleg", hmode, read_hedeleg, write_hedeleg },
    [CSR_HIDELEG] = { "hideleg", hmode, read_hideleg, write_hideleg },
    [CSR_HVIP] = { "hvip", hmode, NULL, NULL, rmw_hvip },
    [CSR_HIP] = { "hip", hmode, NULL, NULL, rmw_hip },
    [CSR_HIE] = { "hie", hmode, read_hie, write_hie },
    [CSR_HCOUNTEREN] = { "hcounteren", hmode, read_hcounteren, write_hcounteren },
    [CSR_HGEIE] = { "hgeie", hmode, read_zero, write_hgeie },
    [CSR_HTVAL] = { "htval", hmode, read_htval, write_htval },
    [CSR_HTINST] = { "htinst", hmode, read_htinst, write_htinst },
    [CSR_HGEIP] = { "hgeip", hmode, read_zero, write_hgeip },
    [CSR_HGATP] = { "hgatp", hmode, read_hgatp, write_hgatp },
    [CSR_HTIMEDELTA] = { "htimedelta", hmode, read_htimedelta, write_htimedelta },
    [CSR_HTIMEDELTAH] = { "htimedeltah", hmode32, read_htimedeltah,
                          write_htimedeltah },

    [CSR_VSSTATUS] = { "vsstatus", hmode, read_vsstatus, write_vsstatus },
    [CSR_VSIP] = { "vsip", hmode, NULL, NULL, rmw_vsip },
    [CSR_VSIE] = { "vsie", hmode, read_vsie, write_vsie },
    [CSR_VSTVEC] = { "vstvec", hmode, read_vstvec, write_vstvec },
    [CSR_VSSCRATCH] = { "vsscratch", hmode, read_vsscratch, write_vsscratch },
    [CSR_VSEPC] = { "vsepc", hmode, read_vsepc, write_vsepc },
    [CSR_VSCAUSE] = { "vscause", hmode, read_vscause, write_vscause },
    [CSR_VSTVAL] = { "vstval", hmode, read_vstval, write_vstval },
    [CSR_VSATP] = { "vsatp", hmode, read_vsatp, write_vsatp },

    [CSR_MTVAL2] = { "mtval2", hmode, read_mtval2, write_mtval2 },
    [CSR_MTINST] = { "mtinst", hmode, read_mtinst, write_mtinst },

    /* Physical Memory Protection */
    [CSR_MSECCFG] = { "mseccfg", epmp, read_mseccfg, write_mseccfg },
    [CSR_PMPCFG0] = { "pmpcfg0", pmp, read_pmpcfg, write_pmpcfg },
    [CSR_PMPCFG1] = { "pmpcfg1", pmp, read_pmpcfg, write_pmpcfg },
    [CSR_PMPCFG2] = { "pmpcfg2", pmp, read_pmpcfg, write_pmpcfg },
    [CSR_PMPCFG3] = { "pmpcfg3", pmp, read_pmpcfg, write_pmpcfg },
    [CSR_PMPADDR0] = { "pmpaddr0", pmp, read_pmpaddr, write_pmpaddr },
    [CSR_PMPADDR1] = { "pmpaddr1", pmp, read_pmpaddr, write_pmpaddr },
    [CSR_PMPADDR2] = { "pmpaddr2", pmp, read_pmpaddr, write_pmpaddr },
    [CSR_PMPADDR3] = { "pmpaddr3", pmp, read_pmpaddr, write_pmpaddr },
    [CSR_PMPADDR4] = { "pmpaddr4", pmp, read_pmpaddr, write_pmpaddr },
    [CSR_PMPADDR5] = { "pmpaddr5", pmp, read_pmpaddr, write_pmpaddr },
    [CSR_PMPADDR6] = { "pmpaddr6", pmp, read_pmpaddr, write_pmpaddr },
    [CSR_PMPADDR7] = { "pmpaddr7", pmp, read_pmpaddr, write_pmpaddr },
    [CSR_PMPADDR8] = { "pmpaddr8", pmp, read_pmpaddr, write_pmpaddr },
    [CSR_PMPADDR9] = { "pmpaddr9", pmp, read_pmpaddr, write_pmpaddr },
    [CSR_PMPADDR10] = { "pmpaddr10", pmp, read_pmpaddr, write_pmpaddr },
    [CSR_PMPADDR11] = { "pmpaddr11", pmp, read_pmpaddr, write_pmpaddr },
    [CSR_PMPADDR12] = { "pmpaddr12", pmp, read_pmpaddr, write_pmpaddr },
    [CSR_PMPADDR13] = { "pmpaddr13", pmp, read_pmpaddr, write_pmpaddr },
    [CSR_PMPADDR14] = { "pmpaddr14", pmp, read_pmpaddr, write_pmpaddr },
    [CSR_PMPADDR15] = { "pmpaddr15", pmp, read_pmpaddr, write_pmpaddr },

    /* User Pointer Masking */
    [CSR_UMTE] = { "umte", pointer_masking, read_umte, write_umte },
    [CSR_UPMMASK] = { "upmmask", pointer_masking, read_upmmask, write_upmmask },
    [CSR_UPMBASE] = { "upmbase", pointer_masking, read_upmbase, write_upmbase },
    /* Machine Pointer Masking */
    [CSR_MMTE] = { "mmte", pointer_masking, read_mmte, write_mmte },
    [CSR_MPMMASK] = { "mpmmask", pointer_masking, read_mpmmask, write_mpmmask },
    [CSR_MPMBASE] = { "mpmbase", pointer_masking, read_mpmbase, write_mpmbase },
    /* Supervisor Pointer Masking */
    [CSR_SMTE] = { "smte", pointer_masking, read_smte, write_smte },
    [CSR_SPMMASK] = { "spmmask", pointer_masking, read_spmmask, write_spmmask },
    [CSR_SPMBASE] = { "spmbase", pointer_masking, read_spmbase, write_spmbase },

    /* Performance Counters */
    [CSR_HPMCOUNTER3] = { "hpmcounter3", ctr, read_zero },
    [CSR_HPMCOUNTER4] = { "hpmcounter4", ctr, read_zero },
    [CSR_HPMCOUNTER5] = { "hpmcounter5", ctr, read_zero },
    [CSR_HPMCOUNTER6] = { "hpmcounter6", ctr, read_zero },
    [CSR_HPMCOUNTER7] = { "hpmcounter7", ctr, read_zero },
    [CSR_HPMCOUNTER8] = { "hpmcounter8", ctr, read_zero },
    [CSR_HPMCOUNTER9] = { "hpmcounter9", ctr, read_zero },
    [CSR_HPMCOUNTER10] = { "hpmcounter10", ctr, read_zero },
    [CSR_HPMCOUNTER11] = { "hpmcounter11", ctr, read_zero },
    [CSR_HPMCOUNTER12] = { "hpmcounter12", ctr, read_zero },
    [CSR_HPMCOUNTER13] = { "hpmcounter13", ctr, read_zero },
    [CSR_HPMCOUNTER14] = { "hpmcounter14", ctr, read_zero },
    [CSR_HPMCOUNTER15] = { "hpmcounter15", ctr, read_zero },
    [CSR_HPMCOUNTER16] = { "hpmcounter16", ctr, read_zero },
    [CSR_HPMCOUNTER17] = { "hpmcounter17", ctr, read_zero },
    [CSR_HPMCOUNTER18] = { "hpmcounter18", ctr, read_zero },
    [CSR_HPMCOUNTER19] = { "hpmcounter19", ctr, read_zero },
    [CSR_HPMCOUNTER20] = { "hpmcounter20", ctr, read_zero },
    [CSR_HPMCOUNTER21] = { "hpmcounter21", ctr, read_zero },
    [CSR_HPMCOUNTER22] = { "hpmcounter22", ctr, read_zero },
    [CSR_HPMCOUNTER23] = { "hpmcounter23", ctr, read_zero },
    [CSR_HPMCOUNTER24] = { "hpmcounter24", ctr, read_zero },
    [CSR_HPMCOUNTER25] = { "hpmcounter25", ctr, read_zero },
    [CSR_HPMCOUNTER26] = { "hpmcounter26", ctr, read_zero },
    [CSR_HPMCOUNTER27] = { "hpmcounter27", ctr, read_zero },
    [CSR_HPMCOUNTER28] = { "hpmcounter28", ctr, read_zero },
    [CSR_HPMCOUNTER29] = { "hpmcounter29", ctr, read_zero },
    [CSR_HPMCOUNTER30] = { "hpmcounter30", ctr, read_zero },
    [CSR_HPMCOUNTER31] = { "hpmcounter31", ctr, read_zero },

    [CSR_MHPMCOUNTER3] = { "mhpmcounter3", any, read_zero },
    [CSR_MHPMCOUNTER4] = { "mhpmcounter4", any, read_zero },
    [CSR_MHPMCOUNTER5] = { "mhpmcounter5", any, read_zero },
    [CSR_MHPMCOUNTER6] = { "mhpmcounter6", any, read_zero },
    [CSR_MHPMCOUNTER7] = { "mhpmcounter7", any, read_zero },
    [CSR_MHPMCOUNTER8] = { "mhpmcounter8", any, read_zero },
    [CSR_MHPMCOUNTER9] = { "mhpmcounter9", any, read_zero },
    [CSR_MHPMCOUNTER10] = { "mhpmcounter10", any, read_zero },
    [CSR_MHPMCOUNTER11] = { "mhpmcounter11", any, read_zero },
    [CSR_MHPMCOUNTER12] = { "mhpmcounter12", any, read_zero },
    [CSR_MHPMCOUNTER13] = { "mhpmcounter13", any, read_zero },
    [CSR_MHPMCOUNTER14] = { "mhpmcounter14", any, read_zero },
    [CSR_MHPMCOUNTER15] = { "mhpmcounter15", any, read_zero },
    [CSR_MHPMCOUNTER16] = { "mhpmcounter16", any, read_zero },
    [CSR_MHPMCOUNTER17] = { "mhpmcounter17", any, read_zero },
    [CSR_MHPMCOUNTER18] = { "mhpmcounter18", any, read_zero },
    [CSR_MHPMCOUNTER19] = { "mhpmcounter19", any, read_zero },
    [CSR_MHPMCOUNTER20] = { "mhpmcounter20", any, read_zero },
    [CSR_MHPMCOUNTER21] = { "mhpmcounter21", any, read_zero },
    [CSR_MHPMCOUNTER22] = { "mhpmcounter22", any, read_zero },
    [CSR_MHPMCOUNTER23] = { "mhpmcounter23", any, read_zero },
    [CSR_MHPMCOUNTER24] = { "mhpmcounter24", any, read_zero },
    [CSR_MHPMCOUNTER25] = { "mhpmcounter25", any, read_zero },
    [CSR_MHPMCOUNTER26] = { "mhpmcounter26", any, read_zero },
    [CSR_MHPMCOUNTER27] = { "mhpmcounter27", any, read_zero },
    [CSR_MHPMCOUNTER28] = { "mhpmcounter28", any, read_zero },
    [CSR_MHPMCOUNTER29] = { "mhpmcounter29", any, read_zero },
    [CSR_MHPMCOUNTER30] = { "mhpmcounter30", any, read_zero },
    [CSR_MHPMCOUNTER31] = { "mhpmcounter31", any, read_zero },

    [CSR_MHPMEVENT3] = { "mhpmevent3", any, read_zero },
    [CSR_MHPMEVENT4] = { "mhpmevent4", any, read_zero },
    [CSR_MHPMEVENT5] = { "mhpmevent5", any, read_zero },
    [CSR_MHPMEVENT6] = { "mhpmevent6", any, read_zero },
    [CSR_MHPMEVENT7] = { "mhpmevent7", any, read_zero },
    [CSR_MHPMEVENT8] = { "mhpmevent8", any, read_zero },
    [CSR_MHPMEVENT9] = { "mhpmevent9", any, read_zero },
    [CSR_MHPMEVENT10] = { "mhpmevent10", any, read_zero },
    [CSR_MHPMEVENT11] = { "mhpmevent11", any, read_zero },
    [CSR_MHPMEVENT12] = { "mhpmevent12", any, read_zero },
    [CSR_MHPMEVENT13] = { "mhpmevent13", any, read_zero },
    [CSR_MHPMEVENT14] = { "mhpmevent14", any, read_zero },
    [CSR_MHPMEVENT15] = { "mhpmevent15", any, read_zero },
    [CSR_MHPMEVENT16] = { "mhpmevent16", any, read_zero },
    [CSR_MHPMEVENT17] = { "mhpmevent17", any, read_zero },
    [CSR_MHPMEVENT18] = { "mhpmevent18", any, read_zero },
    [CSR_MHPMEVENT19] = { "mhpmevent19", any, read_zero },
    [CSR_MHPMEVENT20] = { "mhpmevent20", any, read_zero },
    [CSR_MHPMEVENT21] = { "mhpmevent21", any, read_zero },
    [CSR_MHPMEVENT22] = { "mhpmevent22", any, read_zero },
    [CSR_MHPMEVENT23] = { "mhpmevent23", any, read_zero },
    [CSR_MHPMEVENT24] = { "mhpmevent24", any, read_zero },
    [CSR_MHPMEVENT25] = { "mhpmevent25", any, read_zero },
    [CSR_MHPMEVENT26] = { "mhpmevent26", any, read_zero },
    [CSR_MHPMEVENT27] = { "mhpmevent27", any, read_zero },
    [CSR_MHPMEVENT28] = { "mhpmevent28", any, read_zero },
    [CSR_MHPMEVENT29] = { "mhpmevent29", any, read_zero },
    [CSR_MHPMEVENT30] = { "mhpmevent30", any, read_zero },
    [CSR_MHPMEVENT31] = { "mhpmevent31", any, read_zero },

    [CSR_HPMCOUNTER3H] = { "hpmcounter3h", ctr32, read_zero },
    [CSR_HPMCOUNTER4H] = { "hpmcounter4h", ctr32, read_zero },
    [CSR_HPMCOUNTER5H] = { "hpmcounter5h", ctr32, read_zero },
    [CSR_HPMCOUNTER6H] = { "hpmcounter6h", ctr32, read_zero },
    [CSR_HPMCOUNTER7H] = { "hpmcounter7h", ctr32, read_zero },
    [CSR_HPMCOUNTER8H] = { "hpmcounter8h", ctr32, read_zero },
    [CSR_HPMCOUNTER9H] = { "hpmcounter9h", ctr32, read_zero },
    [CSR_HPMCOUNTER10H] = { "hpmcounter10h", ctr32, read_zero },
    [CSR_HPMCOUNTER11H] = { "hpmcounter11h", ctr32, read_zero },
    [CSR_HPMCOUNTER12H] = { "hpmcounter12h", ctr32, read_zero },
    [CSR_HPMCOUNTER13H] = { "hpmcounter13h", ctr32, read_zero },
    [CSR_HPMCOUNTER14H] = { "hpmcounter14h", ctr32, read_zero },
    [CSR_HPMCOUNTER15H] = { "hpmcounter15h", ctr32, read_zero },
    [CSR_HPMCOUNTER16H] = { "hpmcounter16h", ctr32, read_zero },
    [CSR_HPMCOUNTER17H] = { "hpmcounter17h", ctr32, read_zero },
    [CSR_HPMCOUNTER18H] = { "hpmcounter18h", ctr32, read_zero },
    [CSR_HPMCOUNTER19H] = { "hpmcounter19h", ctr32, read_zero },
    [CSR_HPMCOUNTER20H] = { "hpmcounter20h", ctr32, read_zero },
    [CSR_HPMCOUNTER21H] = { "hpmcounter21h", ctr32, read_zero },
    [CSR_HPMCOUNTER22H] = { "hpmcounter22h", ctr32, read_zero },
    [CSR_HPMCOUNTER23H] = { "hpmcounter23h", ctr32, read_zero },
    [CSR_HPMCOUNTER24H] = { "hpmcounter24h", ctr32, read_zero },
    [CSR_HPMCOUNTER25H] = { "hpmcounter25h", ctr32, read_zero },
    [CSR_HPMCOUNTER26H] = { "hpmcounter26h", ctr32, read_zero },
    [CSR_HPMCOUNTER27H] = { "hpmcounter27h", ctr32, read_zero },
    [CSR_HPMCOUNTER28H] = { "hpmcounter28h", ctr32, read_zero },
    [CSR_HPMCOUNTER29H] = { "hpmcounter29h", ctr32, read_zero },
    [CSR_HPMCOUNTER30H] = { "hpmcounter30h", ctr32, read_zero },
    [CSR_HPMCOUNTER31H] = { "hpmcounter31h", ctr32, read_zero },

    [CSR_MHPMCOUNTER3H] = { "mhpmcounter3h", any32, read_zero },
    [CSR_MHPMCOUNTER4H] = { "mhpmcounter4h", any32, read_zero },
    [CSR_MHPMCOUNTER5H] = { "mhpmcounter5h", any32, read_zero },
    [CSR_MHPMCOUNTER6H] = { "mhpmcounter6h", any32, read_zero },
    [CSR_MHPMCOUNTER7H] = { "mhpmcounter7h", any32, read_zero },
    [CSR_MHPMCOUNTER8H] = { "mhpmcounter8h", any32, read_zero },
    [CSR_MHPMCOUNTER9H] = { "mhpmcounter9h", any32, read_zero },
    [CSR_MHPMCOUNTER10H] = { "mhpmcounter10h", any32, read_zero },
    [CSR_MHPMCOUNTER11H] = { "mhpmcounter11h", any32, read_zero },
    [CSR_MHPMCOUNTER12H] = { "mhpmcounter12h", any32, read_zero },
    [CSR_MHPMCOUNTER13H] = { "mhpmcounter13h", any32, read_zero },
    [CSR_MHPMCOUNTER14H] = { "mhpmcounter14h", any32, read_zero },
    [CSR_MHPMCOUNTER15H] = { "mhpmcounter15h", any32, read_zero },
    [CSR_MHPMCOUNTER16H] = { "mhpmcounter16h", any32, read_zero },
    [CSR_MHPMCOUNTER17H] = { "mhpmcounter17h", any32, read_zero },
    [CSR_MHPMCOUNTER18H] = { "mhpmcounter18h", any32, read_zero },
    [CSR_MHPMCOUNTER19H] = { "mhpmcounter19h", any32, read_zero },
    [CSR_MHPMCOUNTER20H] = { "mhpmcounter20h", any32, read_zero },
    [CSR_MHPMCOUNTER21H] = { "mhpmcounter21h", any32, read_zero },
    [CSR_MHPMCOUNTER22H] = { "mhpmcounter22h", any32, read_zero },
    [CSR_MHPMCOUNTER23H] = { "mhpmcounter23h", any32, read_zero },
    [CSR_MHPMCOUNTER24H] = { "mhpmcounter24h", any32, read_zero },
    [CSR_MHPMCOUNTER25H] = { "mhpmcounter25h", any32, read_zero },
    [CSR_MHPMCOUNTER26H] = { "mhpmcounter26h", any32, read_zero },
    [CSR_MHPMCOUNTER27H] = { "mhpmcounter27h", any32, read_zero },
    [CSR_MHPMCOUNTER28H] = { "mhpmcounter28h", any32, read_zero },
    [CSR_MHPMCOUNTER29H] = { "mhpmcounter29h", any32, read_zero },
    [CSR_MHPMCOUNTER30H] = { "mhpmcounter30h", any32, read_zero },
    [CSR_MHPMCOUNTER31H] = { "mhpmcounter31h", any32, read_zero },
#endif /* !CONFIG_USER_ONLY */
};
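
/*
 * Editor's note (illustrative usage sketch, not part of the original source):
 * a debugger front end could read a CSR without modifying it by going
 * through riscv_csrrw_debug() defined above with a zero write mask, which
 * guarantees the write path in riscv_csrrw_do64() is skipped:
 *
 *     target_ulong val;
 *     if (riscv_csrrw_debug(env, CSR_MSTATUS, &val, 0, 0) == RISCV_EXCP_NONE) {
 *         // val now holds the current mstatus contents
 *     }
 *
 * Setting env->debugger around the access lets predicates such as fs()
 * accept the read even when the corresponding unit is disabled.
 */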