1 /* 2 * RISC-V CPU helpers for qemu. 3 * 4 * Copyright (c) 2016-2017 Sagar Karandikar, sagark@eecs.berkeley.edu 5 * Copyright (c) 2017-2018 SiFive, Inc. 6 * 7 * This program is free software; you can redistribute it and/or modify it 8 * under the terms and conditions of the GNU General Public License, 9 * version 2 or later, as published by the Free Software Foundation. 10 * 11 * This program is distributed in the hope it will be useful, but WITHOUT 12 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or 13 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for 14 * more details. 15 * 16 * You should have received a copy of the GNU General Public License along with 17 * this program. If not, see <http://www.gnu.org/licenses/>. 18 */ 19 20 #include "qemu/osdep.h" 21 #include "qemu/log.h" 22 #include "qemu/main-loop.h" 23 #include "cpu.h" 24 #include "pmu.h" 25 #include "exec/exec-all.h" 26 #include "instmap.h" 27 #include "tcg/tcg-op.h" 28 #include "trace.h" 29 #include "semihosting/common-semi.h" 30 #include "sysemu/cpu-timers.h" 31 #include "cpu_bits.h" 32 #include "debug.h" 33 34 int riscv_cpu_mmu_index(CPURISCVState *env, bool ifetch) 35 { 36 #ifdef CONFIG_USER_ONLY 37 return 0; 38 #else 39 return env->priv; 40 #endif 41 } 42 43 void cpu_get_tb_cpu_state(CPURISCVState *env, target_ulong *pc, 44 target_ulong *cs_base, uint32_t *pflags) 45 { 46 CPUState *cs = env_cpu(env); 47 RISCVCPU *cpu = RISCV_CPU(cs); 48 49 uint32_t flags = 0; 50 51 *pc = env->xl == MXL_RV32 ? env->pc & UINT32_MAX : env->pc; 52 *cs_base = 0; 53 54 if (cpu->cfg.ext_zve32f) { 55 /* 56 * If env->vl equals to VLMAX, we can use generic vector operation 57 * expanders (GVEC) to accerlate the vector operations. 58 * However, as LMUL could be a fractional number. The maximum 59 * vector size can be operated might be less than 8 bytes, 60 * which is not supported by GVEC. So we set vl_eq_vlmax flag to true 61 * only when maxsz >= 8 bytes. 62 */ 63 uint32_t vlmax = vext_get_vlmax(cpu, env->vtype); 64 uint32_t sew = FIELD_EX64(env->vtype, VTYPE, VSEW); 65 uint32_t maxsz = vlmax << sew; 66 bool vl_eq_vlmax = (env->vstart == 0) && (vlmax == env->vl) && 67 (maxsz >= 8); 68 flags = FIELD_DP32(flags, TB_FLAGS, VILL, env->vill); 69 flags = FIELD_DP32(flags, TB_FLAGS, SEW, sew); 70 flags = FIELD_DP32(flags, TB_FLAGS, LMUL, 71 FIELD_EX64(env->vtype, VTYPE, VLMUL)); 72 flags = FIELD_DP32(flags, TB_FLAGS, VL_EQ_VLMAX, vl_eq_vlmax); 73 flags = FIELD_DP32(flags, TB_FLAGS, VTA, 74 FIELD_EX64(env->vtype, VTYPE, VTA)); 75 flags = FIELD_DP32(flags, TB_FLAGS, VMA, 76 FIELD_EX64(env->vtype, VTYPE, VMA)); 77 } else { 78 flags = FIELD_DP32(flags, TB_FLAGS, VILL, 1); 79 } 80 81 #ifdef CONFIG_USER_ONLY 82 flags |= TB_FLAGS_MSTATUS_FS; 83 flags |= TB_FLAGS_MSTATUS_VS; 84 #else 85 flags |= cpu_mmu_index(env, 0); 86 if (riscv_cpu_fp_enabled(env)) { 87 flags |= env->mstatus & MSTATUS_FS; 88 } 89 90 if (riscv_cpu_vector_enabled(env)) { 91 flags |= env->mstatus & MSTATUS_VS; 92 } 93 94 if (riscv_has_ext(env, RVH)) { 95 if (env->priv == PRV_M || 96 (env->priv == PRV_S && !riscv_cpu_virt_enabled(env)) || 97 (env->priv == PRV_U && !riscv_cpu_virt_enabled(env) && 98 get_field(env->hstatus, HSTATUS_HU))) { 99 flags = FIELD_DP32(flags, TB_FLAGS, HLSX, 1); 100 } 101 102 flags = FIELD_DP32(flags, TB_FLAGS, MSTATUS_HS_FS, 103 get_field(env->mstatus_hs, MSTATUS_FS)); 104 105 flags = FIELD_DP32(flags, TB_FLAGS, MSTATUS_HS_VS, 106 get_field(env->mstatus_hs, MSTATUS_VS)); 107 } 108 if (cpu->cfg.debug && !icount_enabled()) { 109 flags = FIELD_DP32(flags, TB_FLAGS, ITRIGGER, env->itrigger_enabled); 110 } 111 #endif 112 113 flags = FIELD_DP32(flags, TB_FLAGS, XL, env->xl); 114 if (env->cur_pmmask < (env->xl == MXL_RV32 ? UINT32_MAX : UINT64_MAX)) { 115 flags = FIELD_DP32(flags, TB_FLAGS, PM_MASK_ENABLED, 1); 116 } 117 if (env->cur_pmbase != 0) { 118 flags = FIELD_DP32(flags, TB_FLAGS, PM_BASE_ENABLED, 1); 119 } 120 121 *pflags = flags; 122 } 123 124 void riscv_cpu_update_mask(CPURISCVState *env) 125 { 126 target_ulong mask = -1, base = 0; 127 /* 128 * TODO: Current RVJ spec does not specify 129 * how the extension interacts with XLEN. 130 */ 131 #ifndef CONFIG_USER_ONLY 132 if (riscv_has_ext(env, RVJ)) { 133 switch (env->priv) { 134 case PRV_M: 135 if (env->mmte & M_PM_ENABLE) { 136 mask = env->mpmmask; 137 base = env->mpmbase; 138 } 139 break; 140 case PRV_S: 141 if (env->mmte & S_PM_ENABLE) { 142 mask = env->spmmask; 143 base = env->spmbase; 144 } 145 break; 146 case PRV_U: 147 if (env->mmte & U_PM_ENABLE) { 148 mask = env->upmmask; 149 base = env->upmbase; 150 } 151 break; 152 default: 153 g_assert_not_reached(); 154 } 155 } 156 #endif 157 if (env->xl == MXL_RV32) { 158 env->cur_pmmask = mask & UINT32_MAX; 159 env->cur_pmbase = base & UINT32_MAX; 160 } else { 161 env->cur_pmmask = mask; 162 env->cur_pmbase = base; 163 } 164 } 165 166 #ifndef CONFIG_USER_ONLY 167 168 /* 169 * The HS-mode is allowed to configure priority only for the 170 * following VS-mode local interrupts: 171 * 172 * 0 (Reserved interrupt, reads as zero) 173 * 1 Supervisor software interrupt 174 * 4 (Reserved interrupt, reads as zero) 175 * 5 Supervisor timer interrupt 176 * 8 (Reserved interrupt, reads as zero) 177 * 13 (Reserved interrupt) 178 * 14 " 179 * 15 " 180 * 16 " 181 * 17 " 182 * 18 " 183 * 19 " 184 * 20 " 185 * 21 " 186 * 22 " 187 * 23 " 188 */ 189 190 static const int hviprio_index2irq[] = { 191 0, 1, 4, 5, 8, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23 }; 192 static const int hviprio_index2rdzero[] = { 193 1, 0, 1, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0 }; 194 195 int riscv_cpu_hviprio_index2irq(int index, int *out_irq, int *out_rdzero) 196 { 197 if (index < 0 || ARRAY_SIZE(hviprio_index2irq) <= index) { 198 return -EINVAL; 199 } 200 201 if (out_irq) { 202 *out_irq = hviprio_index2irq[index]; 203 } 204 205 if (out_rdzero) { 206 *out_rdzero = hviprio_index2rdzero[index]; 207 } 208 209 return 0; 210 } 211 212 /* 213 * Default priorities of local interrupts are defined in the 214 * RISC-V Advanced Interrupt Architecture specification. 215 * 216 * ---------------------------------------------------------------- 217 * Default | 218 * Priority | Major Interrupt Numbers 219 * ---------------------------------------------------------------- 220 * Highest | 47, 23, 46, 45, 22, 44, 221 * | 43, 21, 42, 41, 20, 40 222 * | 223 * | 11 (0b), 3 (03), 7 (07) 224 * | 9 (09), 1 (01), 5 (05) 225 * | 12 (0c) 226 * | 10 (0a), 2 (02), 6 (06) 227 * | 228 * | 39, 19, 38, 37, 18, 36, 229 * Lowest | 35, 17, 34, 33, 16, 32 230 * ---------------------------------------------------------------- 231 */ 232 static const uint8_t default_iprio[64] = { 233 /* Custom interrupts 48 to 63 */ 234 [63] = IPRIO_MMAXIPRIO, 235 [62] = IPRIO_MMAXIPRIO, 236 [61] = IPRIO_MMAXIPRIO, 237 [60] = IPRIO_MMAXIPRIO, 238 [59] = IPRIO_MMAXIPRIO, 239 [58] = IPRIO_MMAXIPRIO, 240 [57] = IPRIO_MMAXIPRIO, 241 [56] = IPRIO_MMAXIPRIO, 242 [55] = IPRIO_MMAXIPRIO, 243 [54] = IPRIO_MMAXIPRIO, 244 [53] = IPRIO_MMAXIPRIO, 245 [52] = IPRIO_MMAXIPRIO, 246 [51] = IPRIO_MMAXIPRIO, 247 [50] = IPRIO_MMAXIPRIO, 248 [49] = IPRIO_MMAXIPRIO, 249 [48] = IPRIO_MMAXIPRIO, 250 251 /* Custom interrupts 24 to 31 */ 252 [31] = IPRIO_MMAXIPRIO, 253 [30] = IPRIO_MMAXIPRIO, 254 [29] = IPRIO_MMAXIPRIO, 255 [28] = IPRIO_MMAXIPRIO, 256 [27] = IPRIO_MMAXIPRIO, 257 [26] = IPRIO_MMAXIPRIO, 258 [25] = IPRIO_MMAXIPRIO, 259 [24] = IPRIO_MMAXIPRIO, 260 261 [47] = IPRIO_DEFAULT_UPPER, 262 [23] = IPRIO_DEFAULT_UPPER + 1, 263 [46] = IPRIO_DEFAULT_UPPER + 2, 264 [45] = IPRIO_DEFAULT_UPPER + 3, 265 [22] = IPRIO_DEFAULT_UPPER + 4, 266 [44] = IPRIO_DEFAULT_UPPER + 5, 267 268 [43] = IPRIO_DEFAULT_UPPER + 6, 269 [21] = IPRIO_DEFAULT_UPPER + 7, 270 [42] = IPRIO_DEFAULT_UPPER + 8, 271 [41] = IPRIO_DEFAULT_UPPER + 9, 272 [20] = IPRIO_DEFAULT_UPPER + 10, 273 [40] = IPRIO_DEFAULT_UPPER + 11, 274 275 [11] = IPRIO_DEFAULT_M, 276 [3] = IPRIO_DEFAULT_M + 1, 277 [7] = IPRIO_DEFAULT_M + 2, 278 279 [9] = IPRIO_DEFAULT_S, 280 [1] = IPRIO_DEFAULT_S + 1, 281 [5] = IPRIO_DEFAULT_S + 2, 282 283 [12] = IPRIO_DEFAULT_SGEXT, 284 285 [10] = IPRIO_DEFAULT_VS, 286 [2] = IPRIO_DEFAULT_VS + 1, 287 [6] = IPRIO_DEFAULT_VS + 2, 288 289 [39] = IPRIO_DEFAULT_LOWER, 290 [19] = IPRIO_DEFAULT_LOWER + 1, 291 [38] = IPRIO_DEFAULT_LOWER + 2, 292 [37] = IPRIO_DEFAULT_LOWER + 3, 293 [18] = IPRIO_DEFAULT_LOWER + 4, 294 [36] = IPRIO_DEFAULT_LOWER + 5, 295 296 [35] = IPRIO_DEFAULT_LOWER + 6, 297 [17] = IPRIO_DEFAULT_LOWER + 7, 298 [34] = IPRIO_DEFAULT_LOWER + 8, 299 [33] = IPRIO_DEFAULT_LOWER + 9, 300 [16] = IPRIO_DEFAULT_LOWER + 10, 301 [32] = IPRIO_DEFAULT_LOWER + 11, 302 }; 303 304 uint8_t riscv_cpu_default_priority(int irq) 305 { 306 if (irq < 0 || irq > 63) { 307 return IPRIO_MMAXIPRIO; 308 } 309 310 return default_iprio[irq] ? default_iprio[irq] : IPRIO_MMAXIPRIO; 311 }; 312 313 static int riscv_cpu_pending_to_irq(CPURISCVState *env, 314 int extirq, unsigned int extirq_def_prio, 315 uint64_t pending, uint8_t *iprio) 316 { 317 RISCVCPU *cpu = env_archcpu(env); 318 int irq, best_irq = RISCV_EXCP_NONE; 319 unsigned int prio, best_prio = UINT_MAX; 320 321 if (!pending) { 322 return RISCV_EXCP_NONE; 323 } 324 325 irq = ctz64(pending); 326 if (!((extirq == IRQ_M_EXT) ? cpu->cfg.ext_smaia : cpu->cfg.ext_ssaia)) { 327 return irq; 328 } 329 330 pending = pending >> irq; 331 while (pending) { 332 prio = iprio[irq]; 333 if (!prio) { 334 if (irq == extirq) { 335 prio = extirq_def_prio; 336 } else { 337 prio = (riscv_cpu_default_priority(irq) < extirq_def_prio) ? 338 1 : IPRIO_MMAXIPRIO; 339 } 340 } 341 if ((pending & 0x1) && (prio <= best_prio)) { 342 best_irq = irq; 343 best_prio = prio; 344 } 345 irq++; 346 pending = pending >> 1; 347 } 348 349 return best_irq; 350 } 351 352 uint64_t riscv_cpu_all_pending(CPURISCVState *env) 353 { 354 uint32_t gein = get_field(env->hstatus, HSTATUS_VGEIN); 355 uint64_t vsgein = (env->hgeip & (1ULL << gein)) ? MIP_VSEIP : 0; 356 uint64_t vstip = (env->vstime_irq) ? MIP_VSTIP : 0; 357 358 return (env->mip | vsgein | vstip) & env->mie; 359 } 360 361 int riscv_cpu_mirq_pending(CPURISCVState *env) 362 { 363 uint64_t irqs = riscv_cpu_all_pending(env) & ~env->mideleg & 364 ~(MIP_SGEIP | MIP_VSSIP | MIP_VSTIP | MIP_VSEIP); 365 366 return riscv_cpu_pending_to_irq(env, IRQ_M_EXT, IPRIO_DEFAULT_M, 367 irqs, env->miprio); 368 } 369 370 int riscv_cpu_sirq_pending(CPURISCVState *env) 371 { 372 uint64_t irqs = riscv_cpu_all_pending(env) & env->mideleg & 373 ~(MIP_VSSIP | MIP_VSTIP | MIP_VSEIP); 374 375 return riscv_cpu_pending_to_irq(env, IRQ_S_EXT, IPRIO_DEFAULT_S, 376 irqs, env->siprio); 377 } 378 379 int riscv_cpu_vsirq_pending(CPURISCVState *env) 380 { 381 uint64_t irqs = riscv_cpu_all_pending(env) & env->mideleg & 382 (MIP_VSSIP | MIP_VSTIP | MIP_VSEIP); 383 384 return riscv_cpu_pending_to_irq(env, IRQ_S_EXT, IPRIO_DEFAULT_S, 385 irqs >> 1, env->hviprio); 386 } 387 388 static int riscv_cpu_local_irq_pending(CPURISCVState *env) 389 { 390 int virq; 391 uint64_t irqs, pending, mie, hsie, vsie; 392 393 /* Determine interrupt enable state of all privilege modes */ 394 if (riscv_cpu_virt_enabled(env)) { 395 mie = 1; 396 hsie = 1; 397 vsie = (env->priv < PRV_S) || 398 (env->priv == PRV_S && get_field(env->mstatus, MSTATUS_SIE)); 399 } else { 400 mie = (env->priv < PRV_M) || 401 (env->priv == PRV_M && get_field(env->mstatus, MSTATUS_MIE)); 402 hsie = (env->priv < PRV_S) || 403 (env->priv == PRV_S && get_field(env->mstatus, MSTATUS_SIE)); 404 vsie = 0; 405 } 406 407 /* Determine all pending interrupts */ 408 pending = riscv_cpu_all_pending(env); 409 410 /* Check M-mode interrupts */ 411 irqs = pending & ~env->mideleg & -mie; 412 if (irqs) { 413 return riscv_cpu_pending_to_irq(env, IRQ_M_EXT, IPRIO_DEFAULT_M, 414 irqs, env->miprio); 415 } 416 417 /* Check HS-mode interrupts */ 418 irqs = pending & env->mideleg & ~env->hideleg & -hsie; 419 if (irqs) { 420 return riscv_cpu_pending_to_irq(env, IRQ_S_EXT, IPRIO_DEFAULT_S, 421 irqs, env->siprio); 422 } 423 424 /* Check VS-mode interrupts */ 425 irqs = pending & env->mideleg & env->hideleg & -vsie; 426 if (irqs) { 427 virq = riscv_cpu_pending_to_irq(env, IRQ_S_EXT, IPRIO_DEFAULT_S, 428 irqs >> 1, env->hviprio); 429 return (virq <= 0) ? virq : virq + 1; 430 } 431 432 /* Indicate no pending interrupt */ 433 return RISCV_EXCP_NONE; 434 } 435 436 bool riscv_cpu_exec_interrupt(CPUState *cs, int interrupt_request) 437 { 438 if (interrupt_request & CPU_INTERRUPT_HARD) { 439 RISCVCPU *cpu = RISCV_CPU(cs); 440 CPURISCVState *env = &cpu->env; 441 int interruptno = riscv_cpu_local_irq_pending(env); 442 if (interruptno >= 0) { 443 cs->exception_index = RISCV_EXCP_INT_FLAG | interruptno; 444 riscv_cpu_do_interrupt(cs); 445 return true; 446 } 447 } 448 return false; 449 } 450 451 /* Return true is floating point support is currently enabled */ 452 bool riscv_cpu_fp_enabled(CPURISCVState *env) 453 { 454 if (env->mstatus & MSTATUS_FS) { 455 if (riscv_cpu_virt_enabled(env) && !(env->mstatus_hs & MSTATUS_FS)) { 456 return false; 457 } 458 return true; 459 } 460 461 return false; 462 } 463 464 /* Return true is vector support is currently enabled */ 465 bool riscv_cpu_vector_enabled(CPURISCVState *env) 466 { 467 if (env->mstatus & MSTATUS_VS) { 468 if (riscv_cpu_virt_enabled(env) && !(env->mstatus_hs & MSTATUS_VS)) { 469 return false; 470 } 471 return true; 472 } 473 474 return false; 475 } 476 477 void riscv_cpu_swap_hypervisor_regs(CPURISCVState *env) 478 { 479 uint64_t mstatus_mask = MSTATUS_MXR | MSTATUS_SUM | 480 MSTATUS_SPP | MSTATUS_SPIE | MSTATUS_SIE | 481 MSTATUS64_UXL | MSTATUS_VS; 482 483 if (riscv_has_ext(env, RVF)) { 484 mstatus_mask |= MSTATUS_FS; 485 } 486 bool current_virt = riscv_cpu_virt_enabled(env); 487 488 g_assert(riscv_has_ext(env, RVH)); 489 490 if (current_virt) { 491 /* Current V=1 and we are about to change to V=0 */ 492 env->vsstatus = env->mstatus & mstatus_mask; 493 env->mstatus &= ~mstatus_mask; 494 env->mstatus |= env->mstatus_hs; 495 496 env->vstvec = env->stvec; 497 env->stvec = env->stvec_hs; 498 499 env->vsscratch = env->sscratch; 500 env->sscratch = env->sscratch_hs; 501 502 env->vsepc = env->sepc; 503 env->sepc = env->sepc_hs; 504 505 env->vscause = env->scause; 506 env->scause = env->scause_hs; 507 508 env->vstval = env->stval; 509 env->stval = env->stval_hs; 510 511 env->vsatp = env->satp; 512 env->satp = env->satp_hs; 513 } else { 514 /* Current V=0 and we are about to change to V=1 */ 515 env->mstatus_hs = env->mstatus & mstatus_mask; 516 env->mstatus &= ~mstatus_mask; 517 env->mstatus |= env->vsstatus; 518 519 env->stvec_hs = env->stvec; 520 env->stvec = env->vstvec; 521 522 env->sscratch_hs = env->sscratch; 523 env->sscratch = env->vsscratch; 524 525 env->sepc_hs = env->sepc; 526 env->sepc = env->vsepc; 527 528 env->scause_hs = env->scause; 529 env->scause = env->vscause; 530 531 env->stval_hs = env->stval; 532 env->stval = env->vstval; 533 534 env->satp_hs = env->satp; 535 env->satp = env->vsatp; 536 } 537 } 538 539 target_ulong riscv_cpu_get_geilen(CPURISCVState *env) 540 { 541 if (!riscv_has_ext(env, RVH)) { 542 return 0; 543 } 544 545 return env->geilen; 546 } 547 548 void riscv_cpu_set_geilen(CPURISCVState *env, target_ulong geilen) 549 { 550 if (!riscv_has_ext(env, RVH)) { 551 return; 552 } 553 554 if (geilen > (TARGET_LONG_BITS - 1)) { 555 return; 556 } 557 558 env->geilen = geilen; 559 } 560 561 bool riscv_cpu_virt_enabled(CPURISCVState *env) 562 { 563 if (!riscv_has_ext(env, RVH)) { 564 return false; 565 } 566 567 return get_field(env->virt, VIRT_ONOFF); 568 } 569 570 void riscv_cpu_set_virt_enabled(CPURISCVState *env, bool enable) 571 { 572 if (!riscv_has_ext(env, RVH)) { 573 return; 574 } 575 576 /* Flush the TLB on all virt mode changes. */ 577 if (get_field(env->virt, VIRT_ONOFF) != enable) { 578 tlb_flush(env_cpu(env)); 579 } 580 581 env->virt = set_field(env->virt, VIRT_ONOFF, enable); 582 583 if (enable) { 584 /* 585 * The guest external interrupts from an interrupt controller are 586 * delivered only when the Guest/VM is running (i.e. V=1). This means 587 * any guest external interrupt which is triggered while the Guest/VM 588 * is not running (i.e. V=0) will be missed on QEMU resulting in guest 589 * with sluggish response to serial console input and other I/O events. 590 * 591 * To solve this, we check and inject interrupt after setting V=1. 592 */ 593 riscv_cpu_update_mip(env_archcpu(env), 0, 0); 594 } 595 } 596 597 bool riscv_cpu_two_stage_lookup(int mmu_idx) 598 { 599 return mmu_idx & TB_FLAGS_PRIV_HYP_ACCESS_MASK; 600 } 601 602 int riscv_cpu_claim_interrupts(RISCVCPU *cpu, uint64_t interrupts) 603 { 604 CPURISCVState *env = &cpu->env; 605 if (env->miclaim & interrupts) { 606 return -1; 607 } else { 608 env->miclaim |= interrupts; 609 return 0; 610 } 611 } 612 613 uint64_t riscv_cpu_update_mip(RISCVCPU *cpu, uint64_t mask, uint64_t value) 614 { 615 CPURISCVState *env = &cpu->env; 616 CPUState *cs = CPU(cpu); 617 uint64_t gein, vsgein = 0, vstip = 0, old = env->mip; 618 619 if (riscv_cpu_virt_enabled(env)) { 620 gein = get_field(env->hstatus, HSTATUS_VGEIN); 621 vsgein = (env->hgeip & (1ULL << gein)) ? MIP_VSEIP : 0; 622 } 623 624 vstip = env->vstime_irq ? MIP_VSTIP : 0; 625 626 QEMU_IOTHREAD_LOCK_GUARD(); 627 628 env->mip = (env->mip & ~mask) | (value & mask); 629 630 if (env->mip | vsgein | vstip) { 631 cpu_interrupt(cs, CPU_INTERRUPT_HARD); 632 } else { 633 cpu_reset_interrupt(cs, CPU_INTERRUPT_HARD); 634 } 635 636 return old; 637 } 638 639 void riscv_cpu_set_rdtime_fn(CPURISCVState *env, uint64_t (*fn)(void *), 640 void *arg) 641 { 642 env->rdtime_fn = fn; 643 env->rdtime_fn_arg = arg; 644 } 645 646 void riscv_cpu_set_aia_ireg_rmw_fn(CPURISCVState *env, uint32_t priv, 647 int (*rmw_fn)(void *arg, 648 target_ulong reg, 649 target_ulong *val, 650 target_ulong new_val, 651 target_ulong write_mask), 652 void *rmw_fn_arg) 653 { 654 if (priv <= PRV_M) { 655 env->aia_ireg_rmw_fn[priv] = rmw_fn; 656 env->aia_ireg_rmw_fn_arg[priv] = rmw_fn_arg; 657 } 658 } 659 660 void riscv_cpu_set_mode(CPURISCVState *env, target_ulong newpriv) 661 { 662 if (newpriv > PRV_M) { 663 g_assert_not_reached(); 664 } 665 if (newpriv == PRV_H) { 666 newpriv = PRV_U; 667 } 668 if (icount_enabled() && newpriv != env->priv) { 669 riscv_itrigger_update_priv(env); 670 } 671 /* tlb_flush is unnecessary as mode is contained in mmu_idx */ 672 env->priv = newpriv; 673 env->xl = cpu_recompute_xl(env); 674 riscv_cpu_update_mask(env); 675 676 /* 677 * Clear the load reservation - otherwise a reservation placed in one 678 * context/process can be used by another, resulting in an SC succeeding 679 * incorrectly. Version 2.2 of the ISA specification explicitly requires 680 * this behaviour, while later revisions say that the kernel "should" use 681 * an SC instruction to force the yielding of a load reservation on a 682 * preemptive context switch. As a result, do both. 683 */ 684 env->load_res = -1; 685 } 686 687 /* 688 * get_physical_address_pmp - check PMP permission for this physical address 689 * 690 * Match the PMP region and check permission for this physical address and it's 691 * TLB page. Returns 0 if the permission checking was successful 692 * 693 * @env: CPURISCVState 694 * @prot: The returned protection attributes 695 * @tlb_size: TLB page size containing addr. It could be modified after PMP 696 * permission checking. NULL if not set TLB page for addr. 697 * @addr: The physical address to be checked permission 698 * @access_type: The type of MMU access 699 * @mode: Indicates current privilege level. 700 */ 701 static int get_physical_address_pmp(CPURISCVState *env, int *prot, 702 target_ulong *tlb_size, hwaddr addr, 703 int size, MMUAccessType access_type, 704 int mode) 705 { 706 pmp_priv_t pmp_priv; 707 int pmp_index = -1; 708 709 if (!riscv_cpu_cfg(env)->pmp) { 710 *prot = PAGE_READ | PAGE_WRITE | PAGE_EXEC; 711 return TRANSLATE_SUCCESS; 712 } 713 714 pmp_index = pmp_hart_has_privs(env, addr, size, 1 << access_type, 715 &pmp_priv, mode); 716 if (pmp_index < 0) { 717 *prot = 0; 718 return TRANSLATE_PMP_FAIL; 719 } 720 721 *prot = pmp_priv_to_page_prot(pmp_priv); 722 if ((tlb_size != NULL) && pmp_index != MAX_RISCV_PMPS) { 723 target_ulong tlb_sa = addr & ~(TARGET_PAGE_SIZE - 1); 724 target_ulong tlb_ea = tlb_sa + TARGET_PAGE_SIZE - 1; 725 726 *tlb_size = pmp_get_tlb_size(env, pmp_index, tlb_sa, tlb_ea); 727 } 728 729 return TRANSLATE_SUCCESS; 730 } 731 732 /* get_physical_address - get the physical address for this virtual address 733 * 734 * Do a page table walk to obtain the physical address corresponding to a 735 * virtual address. Returns 0 if the translation was successful 736 * 737 * Adapted from Spike's mmu_t::translate and mmu_t::walk 738 * 739 * @env: CPURISCVState 740 * @physical: This will be set to the calculated physical address 741 * @prot: The returned protection attributes 742 * @addr: The virtual address to be translated 743 * @fault_pte_addr: If not NULL, this will be set to fault pte address 744 * when a error occurs on pte address translation. 745 * This will already be shifted to match htval. 746 * @access_type: The type of MMU access 747 * @mmu_idx: Indicates current privilege level 748 * @first_stage: Are we in first stage translation? 749 * Second stage is used for hypervisor guest translation 750 * @two_stage: Are we going to perform two stage translation 751 * @is_debug: Is this access from a debugger or the monitor? 752 */ 753 static int get_physical_address(CPURISCVState *env, hwaddr *physical, 754 int *prot, target_ulong addr, 755 target_ulong *fault_pte_addr, 756 int access_type, int mmu_idx, 757 bool first_stage, bool two_stage, 758 bool is_debug) 759 { 760 /* NOTE: the env->pc value visible here will not be 761 * correct, but the value visible to the exception handler 762 * (riscv_cpu_do_interrupt) is correct */ 763 MemTxResult res; 764 MemTxAttrs attrs = MEMTXATTRS_UNSPECIFIED; 765 int mode = mmu_idx & TB_FLAGS_PRIV_MMU_MASK; 766 bool use_background = false; 767 hwaddr ppn; 768 RISCVCPU *cpu = env_archcpu(env); 769 int napot_bits = 0; 770 target_ulong napot_mask; 771 772 /* 773 * Check if we should use the background registers for the two 774 * stage translation. We don't need to check if we actually need 775 * two stage translation as that happened before this function 776 * was called. Background registers will be used if the guest has 777 * forced a two stage translation to be on (in HS or M mode). 778 */ 779 if (!riscv_cpu_virt_enabled(env) && two_stage) { 780 use_background = true; 781 } 782 783 /* MPRV does not affect the virtual-machine load/store 784 instructions, HLV, HLVX, and HSV. */ 785 if (riscv_cpu_two_stage_lookup(mmu_idx)) { 786 mode = get_field(env->hstatus, HSTATUS_SPVP); 787 } else if (mode == PRV_M && access_type != MMU_INST_FETCH) { 788 if (get_field(env->mstatus, MSTATUS_MPRV)) { 789 mode = get_field(env->mstatus, MSTATUS_MPP); 790 } 791 } 792 793 if (first_stage == false) { 794 /* We are in stage 2 translation, this is similar to stage 1. */ 795 /* Stage 2 is always taken as U-mode */ 796 mode = PRV_U; 797 } 798 799 if (mode == PRV_M || !riscv_cpu_cfg(env)->mmu) { 800 *physical = addr; 801 *prot = PAGE_READ | PAGE_WRITE | PAGE_EXEC; 802 return TRANSLATE_SUCCESS; 803 } 804 805 *prot = 0; 806 807 hwaddr base; 808 int levels, ptidxbits, ptesize, vm, sum, mxr, widened; 809 810 if (first_stage == true) { 811 mxr = get_field(env->mstatus, MSTATUS_MXR); 812 } else { 813 mxr = get_field(env->vsstatus, MSTATUS_MXR); 814 } 815 816 if (first_stage == true) { 817 if (use_background) { 818 if (riscv_cpu_mxl(env) == MXL_RV32) { 819 base = (hwaddr)get_field(env->vsatp, SATP32_PPN) << PGSHIFT; 820 vm = get_field(env->vsatp, SATP32_MODE); 821 } else { 822 base = (hwaddr)get_field(env->vsatp, SATP64_PPN) << PGSHIFT; 823 vm = get_field(env->vsatp, SATP64_MODE); 824 } 825 } else { 826 if (riscv_cpu_mxl(env) == MXL_RV32) { 827 base = (hwaddr)get_field(env->satp, SATP32_PPN) << PGSHIFT; 828 vm = get_field(env->satp, SATP32_MODE); 829 } else { 830 base = (hwaddr)get_field(env->satp, SATP64_PPN) << PGSHIFT; 831 vm = get_field(env->satp, SATP64_MODE); 832 } 833 } 834 widened = 0; 835 } else { 836 if (riscv_cpu_mxl(env) == MXL_RV32) { 837 base = (hwaddr)get_field(env->hgatp, SATP32_PPN) << PGSHIFT; 838 vm = get_field(env->hgatp, SATP32_MODE); 839 } else { 840 base = (hwaddr)get_field(env->hgatp, SATP64_PPN) << PGSHIFT; 841 vm = get_field(env->hgatp, SATP64_MODE); 842 } 843 widened = 2; 844 } 845 /* status.SUM will be ignored if execute on background */ 846 sum = get_field(env->mstatus, MSTATUS_SUM) || use_background || is_debug; 847 switch (vm) { 848 case VM_1_10_SV32: 849 levels = 2; ptidxbits = 10; ptesize = 4; break; 850 case VM_1_10_SV39: 851 levels = 3; ptidxbits = 9; ptesize = 8; break; 852 case VM_1_10_SV48: 853 levels = 4; ptidxbits = 9; ptesize = 8; break; 854 case VM_1_10_SV57: 855 levels = 5; ptidxbits = 9; ptesize = 8; break; 856 case VM_1_10_MBARE: 857 *physical = addr; 858 *prot = PAGE_READ | PAGE_WRITE | PAGE_EXEC; 859 return TRANSLATE_SUCCESS; 860 default: 861 g_assert_not_reached(); 862 } 863 864 CPUState *cs = env_cpu(env); 865 int va_bits = PGSHIFT + levels * ptidxbits + widened; 866 target_ulong mask, masked_msbs; 867 868 if (TARGET_LONG_BITS > (va_bits - 1)) { 869 mask = (1L << (TARGET_LONG_BITS - (va_bits - 1))) - 1; 870 } else { 871 mask = 0; 872 } 873 masked_msbs = (addr >> (va_bits - 1)) & mask; 874 875 if (masked_msbs != 0 && masked_msbs != mask) { 876 return TRANSLATE_FAIL; 877 } 878 879 int ptshift = (levels - 1) * ptidxbits; 880 int i; 881 882 #if !TCG_OVERSIZED_GUEST 883 restart: 884 #endif 885 for (i = 0; i < levels; i++, ptshift -= ptidxbits) { 886 target_ulong idx; 887 if (i == 0) { 888 idx = (addr >> (PGSHIFT + ptshift)) & 889 ((1 << (ptidxbits + widened)) - 1); 890 } else { 891 idx = (addr >> (PGSHIFT + ptshift)) & 892 ((1 << ptidxbits) - 1); 893 } 894 895 /* check that physical address of PTE is legal */ 896 hwaddr pte_addr; 897 898 if (two_stage && first_stage) { 899 int vbase_prot; 900 hwaddr vbase; 901 902 /* Do the second stage translation on the base PTE address. */ 903 int vbase_ret = get_physical_address(env, &vbase, &vbase_prot, 904 base, NULL, MMU_DATA_LOAD, 905 mmu_idx, false, true, 906 is_debug); 907 908 if (vbase_ret != TRANSLATE_SUCCESS) { 909 if (fault_pte_addr) { 910 *fault_pte_addr = (base + idx * ptesize) >> 2; 911 } 912 return TRANSLATE_G_STAGE_FAIL; 913 } 914 915 pte_addr = vbase + idx * ptesize; 916 } else { 917 pte_addr = base + idx * ptesize; 918 } 919 920 int pmp_prot; 921 int pmp_ret = get_physical_address_pmp(env, &pmp_prot, NULL, pte_addr, 922 sizeof(target_ulong), 923 MMU_DATA_LOAD, PRV_S); 924 if (pmp_ret != TRANSLATE_SUCCESS) { 925 return TRANSLATE_PMP_FAIL; 926 } 927 928 target_ulong pte; 929 if (riscv_cpu_mxl(env) == MXL_RV32) { 930 pte = address_space_ldl(cs->as, pte_addr, attrs, &res); 931 } else { 932 pte = address_space_ldq(cs->as, pte_addr, attrs, &res); 933 } 934 935 if (res != MEMTX_OK) { 936 return TRANSLATE_FAIL; 937 } 938 939 bool pbmte = env->menvcfg & MENVCFG_PBMTE; 940 bool hade = env->menvcfg & MENVCFG_HADE; 941 942 if (first_stage && two_stage && riscv_cpu_virt_enabled(env)) { 943 pbmte = pbmte && (env->henvcfg & HENVCFG_PBMTE); 944 hade = hade && (env->henvcfg & HENVCFG_HADE); 945 } 946 947 if (riscv_cpu_sxl(env) == MXL_RV32) { 948 ppn = pte >> PTE_PPN_SHIFT; 949 } else if (pbmte || cpu->cfg.ext_svnapot) { 950 ppn = (pte & (target_ulong)PTE_PPN_MASK) >> PTE_PPN_SHIFT; 951 } else { 952 ppn = pte >> PTE_PPN_SHIFT; 953 if ((pte & ~(target_ulong)PTE_PPN_MASK) >> PTE_PPN_SHIFT) { 954 return TRANSLATE_FAIL; 955 } 956 } 957 958 if (!(pte & PTE_V)) { 959 /* Invalid PTE */ 960 return TRANSLATE_FAIL; 961 } else if (!pbmte && (pte & PTE_PBMT)) { 962 return TRANSLATE_FAIL; 963 } else if (!(pte & (PTE_R | PTE_W | PTE_X))) { 964 /* Inner PTE, continue walking */ 965 if (pte & (PTE_D | PTE_A | PTE_U | PTE_ATTR)) { 966 return TRANSLATE_FAIL; 967 } 968 base = ppn << PGSHIFT; 969 } else if ((pte & (PTE_R | PTE_W | PTE_X)) == PTE_W) { 970 /* Reserved leaf PTE flags: PTE_W */ 971 return TRANSLATE_FAIL; 972 } else if ((pte & (PTE_R | PTE_W | PTE_X)) == (PTE_W | PTE_X)) { 973 /* Reserved leaf PTE flags: PTE_W + PTE_X */ 974 return TRANSLATE_FAIL; 975 } else if ((pte & PTE_U) && ((mode != PRV_U) && 976 (!sum || access_type == MMU_INST_FETCH))) { 977 /* User PTE flags when not U mode and mstatus.SUM is not set, 978 or the access type is an instruction fetch */ 979 return TRANSLATE_FAIL; 980 } else if (!(pte & PTE_U) && (mode != PRV_S)) { 981 /* Supervisor PTE flags when not S mode */ 982 return TRANSLATE_FAIL; 983 } else if (ppn & ((1ULL << ptshift) - 1)) { 984 /* Misaligned PPN */ 985 return TRANSLATE_FAIL; 986 } else if (access_type == MMU_DATA_LOAD && !((pte & PTE_R) || 987 ((pte & PTE_X) && mxr))) { 988 /* Read access check failed */ 989 return TRANSLATE_FAIL; 990 } else if (access_type == MMU_DATA_STORE && !(pte & PTE_W)) { 991 /* Write access check failed */ 992 return TRANSLATE_FAIL; 993 } else if (access_type == MMU_INST_FETCH && !(pte & PTE_X)) { 994 /* Fetch access check failed */ 995 return TRANSLATE_FAIL; 996 } else { 997 /* if necessary, set accessed and dirty bits. */ 998 target_ulong updated_pte = pte | PTE_A | 999 (access_type == MMU_DATA_STORE ? PTE_D : 0); 1000 1001 /* Page table updates need to be atomic with MTTCG enabled */ 1002 if (updated_pte != pte) { 1003 if (!hade) { 1004 return TRANSLATE_FAIL; 1005 } 1006 1007 /* 1008 * - if accessed or dirty bits need updating, and the PTE is 1009 * in RAM, then we do so atomically with a compare and swap. 1010 * - if the PTE is in IO space or ROM, then it can't be updated 1011 * and we return TRANSLATE_FAIL. 1012 * - if the PTE changed by the time we went to update it, then 1013 * it is no longer valid and we must re-walk the page table. 1014 */ 1015 MemoryRegion *mr; 1016 hwaddr l = sizeof(target_ulong), addr1; 1017 mr = address_space_translate(cs->as, pte_addr, 1018 &addr1, &l, false, MEMTXATTRS_UNSPECIFIED); 1019 if (memory_region_is_ram(mr)) { 1020 target_ulong *pte_pa = 1021 qemu_map_ram_ptr(mr->ram_block, addr1); 1022 #if TCG_OVERSIZED_GUEST 1023 /* MTTCG is not enabled on oversized TCG guests so 1024 * page table updates do not need to be atomic */ 1025 *pte_pa = pte = updated_pte; 1026 #else 1027 target_ulong old_pte = 1028 qatomic_cmpxchg(pte_pa, pte, updated_pte); 1029 if (old_pte != pte) { 1030 goto restart; 1031 } else { 1032 pte = updated_pte; 1033 } 1034 #endif 1035 } else { 1036 /* misconfigured PTE in ROM (AD bits are not preset) or 1037 * PTE is in IO space and can't be updated atomically */ 1038 return TRANSLATE_FAIL; 1039 } 1040 } 1041 1042 /* for superpage mappings, make a fake leaf PTE for the TLB's 1043 benefit. */ 1044 target_ulong vpn = addr >> PGSHIFT; 1045 1046 if (cpu->cfg.ext_svnapot && (pte & PTE_N)) { 1047 napot_bits = ctzl(ppn) + 1; 1048 if ((i != (levels - 1)) || (napot_bits != 4)) { 1049 return TRANSLATE_FAIL; 1050 } 1051 } 1052 1053 napot_mask = (1 << napot_bits) - 1; 1054 *physical = (((ppn & ~napot_mask) | (vpn & napot_mask) | 1055 (vpn & (((target_ulong)1 << ptshift) - 1)) 1056 ) << PGSHIFT) | (addr & ~TARGET_PAGE_MASK); 1057 1058 /* set permissions on the TLB entry */ 1059 if ((pte & PTE_R) || ((pte & PTE_X) && mxr)) { 1060 *prot |= PAGE_READ; 1061 } 1062 if ((pte & PTE_X)) { 1063 *prot |= PAGE_EXEC; 1064 } 1065 /* add write permission on stores or if the page is already dirty, 1066 so that we TLB miss on later writes to update the dirty bit */ 1067 if ((pte & PTE_W) && 1068 (access_type == MMU_DATA_STORE || (pte & PTE_D))) { 1069 *prot |= PAGE_WRITE; 1070 } 1071 return TRANSLATE_SUCCESS; 1072 } 1073 } 1074 return TRANSLATE_FAIL; 1075 } 1076 1077 static void raise_mmu_exception(CPURISCVState *env, target_ulong address, 1078 MMUAccessType access_type, bool pmp_violation, 1079 bool first_stage, bool two_stage, 1080 bool two_stage_indirect) 1081 { 1082 CPUState *cs = env_cpu(env); 1083 int page_fault_exceptions, vm; 1084 uint64_t stap_mode; 1085 1086 if (riscv_cpu_mxl(env) == MXL_RV32) { 1087 stap_mode = SATP32_MODE; 1088 } else { 1089 stap_mode = SATP64_MODE; 1090 } 1091 1092 if (first_stage) { 1093 vm = get_field(env->satp, stap_mode); 1094 } else { 1095 vm = get_field(env->hgatp, stap_mode); 1096 } 1097 1098 page_fault_exceptions = vm != VM_1_10_MBARE && !pmp_violation; 1099 1100 switch (access_type) { 1101 case MMU_INST_FETCH: 1102 if (riscv_cpu_virt_enabled(env) && !first_stage) { 1103 cs->exception_index = RISCV_EXCP_INST_GUEST_PAGE_FAULT; 1104 } else { 1105 cs->exception_index = page_fault_exceptions ? 1106 RISCV_EXCP_INST_PAGE_FAULT : RISCV_EXCP_INST_ACCESS_FAULT; 1107 } 1108 break; 1109 case MMU_DATA_LOAD: 1110 if (two_stage && !first_stage) { 1111 cs->exception_index = RISCV_EXCP_LOAD_GUEST_ACCESS_FAULT; 1112 } else { 1113 cs->exception_index = page_fault_exceptions ? 1114 RISCV_EXCP_LOAD_PAGE_FAULT : RISCV_EXCP_LOAD_ACCESS_FAULT; 1115 } 1116 break; 1117 case MMU_DATA_STORE: 1118 if (two_stage && !first_stage) { 1119 cs->exception_index = RISCV_EXCP_STORE_GUEST_AMO_ACCESS_FAULT; 1120 } else { 1121 cs->exception_index = page_fault_exceptions ? 1122 RISCV_EXCP_STORE_PAGE_FAULT : RISCV_EXCP_STORE_AMO_ACCESS_FAULT; 1123 } 1124 break; 1125 default: 1126 g_assert_not_reached(); 1127 } 1128 env->badaddr = address; 1129 env->two_stage_lookup = two_stage; 1130 env->two_stage_indirect_lookup = two_stage_indirect; 1131 } 1132 1133 hwaddr riscv_cpu_get_phys_page_debug(CPUState *cs, vaddr addr) 1134 { 1135 RISCVCPU *cpu = RISCV_CPU(cs); 1136 CPURISCVState *env = &cpu->env; 1137 hwaddr phys_addr; 1138 int prot; 1139 int mmu_idx = cpu_mmu_index(&cpu->env, false); 1140 1141 if (get_physical_address(env, &phys_addr, &prot, addr, NULL, 0, mmu_idx, 1142 true, riscv_cpu_virt_enabled(env), true)) { 1143 return -1; 1144 } 1145 1146 if (riscv_cpu_virt_enabled(env)) { 1147 if (get_physical_address(env, &phys_addr, &prot, phys_addr, NULL, 1148 0, mmu_idx, false, true, true)) { 1149 return -1; 1150 } 1151 } 1152 1153 return phys_addr & TARGET_PAGE_MASK; 1154 } 1155 1156 void riscv_cpu_do_transaction_failed(CPUState *cs, hwaddr physaddr, 1157 vaddr addr, unsigned size, 1158 MMUAccessType access_type, 1159 int mmu_idx, MemTxAttrs attrs, 1160 MemTxResult response, uintptr_t retaddr) 1161 { 1162 RISCVCPU *cpu = RISCV_CPU(cs); 1163 CPURISCVState *env = &cpu->env; 1164 1165 if (access_type == MMU_DATA_STORE) { 1166 cs->exception_index = RISCV_EXCP_STORE_AMO_ACCESS_FAULT; 1167 } else if (access_type == MMU_DATA_LOAD) { 1168 cs->exception_index = RISCV_EXCP_LOAD_ACCESS_FAULT; 1169 } else { 1170 cs->exception_index = RISCV_EXCP_INST_ACCESS_FAULT; 1171 } 1172 1173 env->badaddr = addr; 1174 env->two_stage_lookup = riscv_cpu_virt_enabled(env) || 1175 riscv_cpu_two_stage_lookup(mmu_idx); 1176 env->two_stage_indirect_lookup = false; 1177 cpu_loop_exit_restore(cs, retaddr); 1178 } 1179 1180 void riscv_cpu_do_unaligned_access(CPUState *cs, vaddr addr, 1181 MMUAccessType access_type, int mmu_idx, 1182 uintptr_t retaddr) 1183 { 1184 RISCVCPU *cpu = RISCV_CPU(cs); 1185 CPURISCVState *env = &cpu->env; 1186 switch (access_type) { 1187 case MMU_INST_FETCH: 1188 cs->exception_index = RISCV_EXCP_INST_ADDR_MIS; 1189 break; 1190 case MMU_DATA_LOAD: 1191 cs->exception_index = RISCV_EXCP_LOAD_ADDR_MIS; 1192 break; 1193 case MMU_DATA_STORE: 1194 cs->exception_index = RISCV_EXCP_STORE_AMO_ADDR_MIS; 1195 break; 1196 default: 1197 g_assert_not_reached(); 1198 } 1199 env->badaddr = addr; 1200 env->two_stage_lookup = riscv_cpu_virt_enabled(env) || 1201 riscv_cpu_two_stage_lookup(mmu_idx); 1202 env->two_stage_indirect_lookup = false; 1203 cpu_loop_exit_restore(cs, retaddr); 1204 } 1205 1206 1207 static void pmu_tlb_fill_incr_ctr(RISCVCPU *cpu, MMUAccessType access_type) 1208 { 1209 enum riscv_pmu_event_idx pmu_event_type; 1210 1211 switch (access_type) { 1212 case MMU_INST_FETCH: 1213 pmu_event_type = RISCV_PMU_EVENT_CACHE_ITLB_PREFETCH_MISS; 1214 break; 1215 case MMU_DATA_LOAD: 1216 pmu_event_type = RISCV_PMU_EVENT_CACHE_DTLB_READ_MISS; 1217 break; 1218 case MMU_DATA_STORE: 1219 pmu_event_type = RISCV_PMU_EVENT_CACHE_DTLB_WRITE_MISS; 1220 break; 1221 default: 1222 return; 1223 } 1224 1225 riscv_pmu_incr_ctr(cpu, pmu_event_type); 1226 } 1227 1228 bool riscv_cpu_tlb_fill(CPUState *cs, vaddr address, int size, 1229 MMUAccessType access_type, int mmu_idx, 1230 bool probe, uintptr_t retaddr) 1231 { 1232 RISCVCPU *cpu = RISCV_CPU(cs); 1233 CPURISCVState *env = &cpu->env; 1234 vaddr im_address; 1235 hwaddr pa = 0; 1236 int prot, prot2, prot_pmp; 1237 bool pmp_violation = false; 1238 bool first_stage_error = true; 1239 bool two_stage_lookup = false; 1240 bool two_stage_indirect_error = false; 1241 int ret = TRANSLATE_FAIL; 1242 int mode = mmu_idx; 1243 /* default TLB page size */ 1244 target_ulong tlb_size = TARGET_PAGE_SIZE; 1245 1246 env->guest_phys_fault_addr = 0; 1247 1248 qemu_log_mask(CPU_LOG_MMU, "%s ad %" VADDR_PRIx " rw %d mmu_idx %d\n", 1249 __func__, address, access_type, mmu_idx); 1250 1251 /* MPRV does not affect the virtual-machine load/store 1252 instructions, HLV, HLVX, and HSV. */ 1253 if (riscv_cpu_two_stage_lookup(mmu_idx)) { 1254 mode = get_field(env->hstatus, HSTATUS_SPVP); 1255 } else if (mode == PRV_M && access_type != MMU_INST_FETCH && 1256 get_field(env->mstatus, MSTATUS_MPRV)) { 1257 mode = get_field(env->mstatus, MSTATUS_MPP); 1258 if (riscv_has_ext(env, RVH) && get_field(env->mstatus, MSTATUS_MPV)) { 1259 two_stage_lookup = true; 1260 } 1261 } 1262 1263 pmu_tlb_fill_incr_ctr(cpu, access_type); 1264 if (riscv_cpu_virt_enabled(env) || 1265 ((riscv_cpu_two_stage_lookup(mmu_idx) || two_stage_lookup) && 1266 access_type != MMU_INST_FETCH)) { 1267 /* Two stage lookup */ 1268 ret = get_physical_address(env, &pa, &prot, address, 1269 &env->guest_phys_fault_addr, access_type, 1270 mmu_idx, true, true, false); 1271 1272 /* 1273 * A G-stage exception may be triggered during two state lookup. 1274 * And the env->guest_phys_fault_addr has already been set in 1275 * get_physical_address(). 1276 */ 1277 if (ret == TRANSLATE_G_STAGE_FAIL) { 1278 first_stage_error = false; 1279 two_stage_indirect_error = true; 1280 access_type = MMU_DATA_LOAD; 1281 } 1282 1283 qemu_log_mask(CPU_LOG_MMU, 1284 "%s 1st-stage address=%" VADDR_PRIx " ret %d physical " 1285 HWADDR_FMT_plx " prot %d\n", 1286 __func__, address, ret, pa, prot); 1287 1288 if (ret == TRANSLATE_SUCCESS) { 1289 /* Second stage lookup */ 1290 im_address = pa; 1291 1292 ret = get_physical_address(env, &pa, &prot2, im_address, NULL, 1293 access_type, mmu_idx, false, true, 1294 false); 1295 1296 qemu_log_mask(CPU_LOG_MMU, 1297 "%s 2nd-stage address=%" VADDR_PRIx " ret %d physical " 1298 HWADDR_FMT_plx " prot %d\n", 1299 __func__, im_address, ret, pa, prot2); 1300 1301 prot &= prot2; 1302 1303 if (ret == TRANSLATE_SUCCESS) { 1304 ret = get_physical_address_pmp(env, &prot_pmp, &tlb_size, pa, 1305 size, access_type, mode); 1306 1307 qemu_log_mask(CPU_LOG_MMU, 1308 "%s PMP address=" HWADDR_FMT_plx " ret %d prot" 1309 " %d tlb_size " TARGET_FMT_lu "\n", 1310 __func__, pa, ret, prot_pmp, tlb_size); 1311 1312 prot &= prot_pmp; 1313 } 1314 1315 if (ret != TRANSLATE_SUCCESS) { 1316 /* 1317 * Guest physical address translation failed, this is a HS 1318 * level exception 1319 */ 1320 first_stage_error = false; 1321 env->guest_phys_fault_addr = (im_address | 1322 (address & 1323 (TARGET_PAGE_SIZE - 1))) >> 2; 1324 } 1325 } 1326 } else { 1327 /* Single stage lookup */ 1328 ret = get_physical_address(env, &pa, &prot, address, NULL, 1329 access_type, mmu_idx, true, false, false); 1330 1331 qemu_log_mask(CPU_LOG_MMU, 1332 "%s address=%" VADDR_PRIx " ret %d physical " 1333 HWADDR_FMT_plx " prot %d\n", 1334 __func__, address, ret, pa, prot); 1335 1336 if (ret == TRANSLATE_SUCCESS) { 1337 ret = get_physical_address_pmp(env, &prot_pmp, &tlb_size, pa, 1338 size, access_type, mode); 1339 1340 qemu_log_mask(CPU_LOG_MMU, 1341 "%s PMP address=" HWADDR_FMT_plx " ret %d prot" 1342 " %d tlb_size " TARGET_FMT_lu "\n", 1343 __func__, pa, ret, prot_pmp, tlb_size); 1344 1345 prot &= prot_pmp; 1346 } 1347 } 1348 1349 if (ret == TRANSLATE_PMP_FAIL) { 1350 pmp_violation = true; 1351 } 1352 1353 if (ret == TRANSLATE_SUCCESS) { 1354 tlb_set_page(cs, address & ~(tlb_size - 1), pa & ~(tlb_size - 1), 1355 prot, mmu_idx, tlb_size); 1356 return true; 1357 } else if (probe) { 1358 return false; 1359 } else { 1360 raise_mmu_exception(env, address, access_type, pmp_violation, 1361 first_stage_error, 1362 riscv_cpu_virt_enabled(env) || 1363 riscv_cpu_two_stage_lookup(mmu_idx), 1364 two_stage_indirect_error); 1365 cpu_loop_exit_restore(cs, retaddr); 1366 } 1367 1368 return true; 1369 } 1370 1371 static target_ulong riscv_transformed_insn(CPURISCVState *env, 1372 target_ulong insn, 1373 target_ulong taddr) 1374 { 1375 target_ulong xinsn = 0; 1376 target_ulong access_rs1 = 0, access_imm = 0, access_size = 0; 1377 1378 /* 1379 * Only Quadrant 0 and Quadrant 2 of RVC instruction space need to 1380 * be uncompressed. The Quadrant 1 of RVC instruction space need 1381 * not be transformed because these instructions won't generate 1382 * any load/store trap. 1383 */ 1384 1385 if ((insn & 0x3) != 0x3) { 1386 /* Transform 16bit instruction into 32bit instruction */ 1387 switch (GET_C_OP(insn)) { 1388 case OPC_RISC_C_OP_QUAD0: /* Quadrant 0 */ 1389 switch (GET_C_FUNC(insn)) { 1390 case OPC_RISC_C_FUNC_FLD_LQ: 1391 if (riscv_cpu_xlen(env) != 128) { /* C.FLD (RV32/64) */ 1392 xinsn = OPC_RISC_FLD; 1393 xinsn = SET_RD(xinsn, GET_C_RS2S(insn)); 1394 access_rs1 = GET_C_RS1S(insn); 1395 access_imm = GET_C_LD_IMM(insn); 1396 access_size = 8; 1397 } 1398 break; 1399 case OPC_RISC_C_FUNC_LW: /* C.LW */ 1400 xinsn = OPC_RISC_LW; 1401 xinsn = SET_RD(xinsn, GET_C_RS2S(insn)); 1402 access_rs1 = GET_C_RS1S(insn); 1403 access_imm = GET_C_LW_IMM(insn); 1404 access_size = 4; 1405 break; 1406 case OPC_RISC_C_FUNC_FLW_LD: 1407 if (riscv_cpu_xlen(env) == 32) { /* C.FLW (RV32) */ 1408 xinsn = OPC_RISC_FLW; 1409 xinsn = SET_RD(xinsn, GET_C_RS2S(insn)); 1410 access_rs1 = GET_C_RS1S(insn); 1411 access_imm = GET_C_LW_IMM(insn); 1412 access_size = 4; 1413 } else { /* C.LD (RV64/RV128) */ 1414 xinsn = OPC_RISC_LD; 1415 xinsn = SET_RD(xinsn, GET_C_RS2S(insn)); 1416 access_rs1 = GET_C_RS1S(insn); 1417 access_imm = GET_C_LD_IMM(insn); 1418 access_size = 8; 1419 } 1420 break; 1421 case OPC_RISC_C_FUNC_FSD_SQ: 1422 if (riscv_cpu_xlen(env) != 128) { /* C.FSD (RV32/64) */ 1423 xinsn = OPC_RISC_FSD; 1424 xinsn = SET_RS2(xinsn, GET_C_RS2S(insn)); 1425 access_rs1 = GET_C_RS1S(insn); 1426 access_imm = GET_C_SD_IMM(insn); 1427 access_size = 8; 1428 } 1429 break; 1430 case OPC_RISC_C_FUNC_SW: /* C.SW */ 1431 xinsn = OPC_RISC_SW; 1432 xinsn = SET_RS2(xinsn, GET_C_RS2S(insn)); 1433 access_rs1 = GET_C_RS1S(insn); 1434 access_imm = GET_C_SW_IMM(insn); 1435 access_size = 4; 1436 break; 1437 case OPC_RISC_C_FUNC_FSW_SD: 1438 if (riscv_cpu_xlen(env) == 32) { /* C.FSW (RV32) */ 1439 xinsn = OPC_RISC_FSW; 1440 xinsn = SET_RS2(xinsn, GET_C_RS2S(insn)); 1441 access_rs1 = GET_C_RS1S(insn); 1442 access_imm = GET_C_SW_IMM(insn); 1443 access_size = 4; 1444 } else { /* C.SD (RV64/RV128) */ 1445 xinsn = OPC_RISC_SD; 1446 xinsn = SET_RS2(xinsn, GET_C_RS2S(insn)); 1447 access_rs1 = GET_C_RS1S(insn); 1448 access_imm = GET_C_SD_IMM(insn); 1449 access_size = 8; 1450 } 1451 break; 1452 default: 1453 break; 1454 } 1455 break; 1456 case OPC_RISC_C_OP_QUAD2: /* Quadrant 2 */ 1457 switch (GET_C_FUNC(insn)) { 1458 case OPC_RISC_C_FUNC_FLDSP_LQSP: 1459 if (riscv_cpu_xlen(env) != 128) { /* C.FLDSP (RV32/64) */ 1460 xinsn = OPC_RISC_FLD; 1461 xinsn = SET_RD(xinsn, GET_C_RD(insn)); 1462 access_rs1 = 2; 1463 access_imm = GET_C_LDSP_IMM(insn); 1464 access_size = 8; 1465 } 1466 break; 1467 case OPC_RISC_C_FUNC_LWSP: /* C.LWSP */ 1468 xinsn = OPC_RISC_LW; 1469 xinsn = SET_RD(xinsn, GET_C_RD(insn)); 1470 access_rs1 = 2; 1471 access_imm = GET_C_LWSP_IMM(insn); 1472 access_size = 4; 1473 break; 1474 case OPC_RISC_C_FUNC_FLWSP_LDSP: 1475 if (riscv_cpu_xlen(env) == 32) { /* C.FLWSP (RV32) */ 1476 xinsn = OPC_RISC_FLW; 1477 xinsn = SET_RD(xinsn, GET_C_RD(insn)); 1478 access_rs1 = 2; 1479 access_imm = GET_C_LWSP_IMM(insn); 1480 access_size = 4; 1481 } else { /* C.LDSP (RV64/RV128) */ 1482 xinsn = OPC_RISC_LD; 1483 xinsn = SET_RD(xinsn, GET_C_RD(insn)); 1484 access_rs1 = 2; 1485 access_imm = GET_C_LDSP_IMM(insn); 1486 access_size = 8; 1487 } 1488 break; 1489 case OPC_RISC_C_FUNC_FSDSP_SQSP: 1490 if (riscv_cpu_xlen(env) != 128) { /* C.FSDSP (RV32/64) */ 1491 xinsn = OPC_RISC_FSD; 1492 xinsn = SET_RS2(xinsn, GET_C_RS2(insn)); 1493 access_rs1 = 2; 1494 access_imm = GET_C_SDSP_IMM(insn); 1495 access_size = 8; 1496 } 1497 break; 1498 case OPC_RISC_C_FUNC_SWSP: /* C.SWSP */ 1499 xinsn = OPC_RISC_SW; 1500 xinsn = SET_RS2(xinsn, GET_C_RS2(insn)); 1501 access_rs1 = 2; 1502 access_imm = GET_C_SWSP_IMM(insn); 1503 access_size = 4; 1504 break; 1505 case 7: 1506 if (riscv_cpu_xlen(env) == 32) { /* C.FSWSP (RV32) */ 1507 xinsn = OPC_RISC_FSW; 1508 xinsn = SET_RS2(xinsn, GET_C_RS2(insn)); 1509 access_rs1 = 2; 1510 access_imm = GET_C_SWSP_IMM(insn); 1511 access_size = 4; 1512 } else { /* C.SDSP (RV64/RV128) */ 1513 xinsn = OPC_RISC_SD; 1514 xinsn = SET_RS2(xinsn, GET_C_RS2(insn)); 1515 access_rs1 = 2; 1516 access_imm = GET_C_SDSP_IMM(insn); 1517 access_size = 8; 1518 } 1519 break; 1520 default: 1521 break; 1522 } 1523 break; 1524 default: 1525 break; 1526 } 1527 1528 /* 1529 * Clear Bit1 of transformed instruction to indicate that 1530 * original insruction was a 16bit instruction 1531 */ 1532 xinsn &= ~((target_ulong)0x2); 1533 } else { 1534 /* Transform 32bit (or wider) instructions */ 1535 switch (MASK_OP_MAJOR(insn)) { 1536 case OPC_RISC_ATOMIC: 1537 xinsn = insn; 1538 access_rs1 = GET_RS1(insn); 1539 access_size = 1 << GET_FUNCT3(insn); 1540 break; 1541 case OPC_RISC_LOAD: 1542 case OPC_RISC_FP_LOAD: 1543 xinsn = SET_I_IMM(insn, 0); 1544 access_rs1 = GET_RS1(insn); 1545 access_imm = GET_IMM(insn); 1546 access_size = 1 << GET_FUNCT3(insn); 1547 break; 1548 case OPC_RISC_STORE: 1549 case OPC_RISC_FP_STORE: 1550 xinsn = SET_S_IMM(insn, 0); 1551 access_rs1 = GET_RS1(insn); 1552 access_imm = GET_STORE_IMM(insn); 1553 access_size = 1 << GET_FUNCT3(insn); 1554 break; 1555 case OPC_RISC_SYSTEM: 1556 if (MASK_OP_SYSTEM(insn) == OPC_RISC_HLVHSV) { 1557 xinsn = insn; 1558 access_rs1 = GET_RS1(insn); 1559 access_size = 1 << ((GET_FUNCT7(insn) >> 1) & 0x3); 1560 access_size = 1 << access_size; 1561 } 1562 break; 1563 default: 1564 break; 1565 } 1566 } 1567 1568 if (access_size) { 1569 xinsn = SET_RS1(xinsn, (taddr - (env->gpr[access_rs1] + access_imm)) & 1570 (access_size - 1)); 1571 } 1572 1573 return xinsn; 1574 } 1575 #endif /* !CONFIG_USER_ONLY */ 1576 1577 /* 1578 * Handle Traps 1579 * 1580 * Adapted from Spike's processor_t::take_trap. 1581 * 1582 */ 1583 void riscv_cpu_do_interrupt(CPUState *cs) 1584 { 1585 #if !defined(CONFIG_USER_ONLY) 1586 1587 RISCVCPU *cpu = RISCV_CPU(cs); 1588 CPURISCVState *env = &cpu->env; 1589 bool write_gva = false; 1590 uint64_t s; 1591 1592 /* cs->exception is 32-bits wide unlike mcause which is XLEN-bits wide 1593 * so we mask off the MSB and separate into trap type and cause. 1594 */ 1595 bool async = !!(cs->exception_index & RISCV_EXCP_INT_FLAG); 1596 target_ulong cause = cs->exception_index & RISCV_EXCP_INT_MASK; 1597 uint64_t deleg = async ? env->mideleg : env->medeleg; 1598 target_ulong tval = 0; 1599 target_ulong tinst = 0; 1600 target_ulong htval = 0; 1601 target_ulong mtval2 = 0; 1602 1603 if (cause == RISCV_EXCP_SEMIHOST) { 1604 do_common_semihosting(cs); 1605 env->pc += 4; 1606 return; 1607 } 1608 1609 if (!async) { 1610 /* set tval to badaddr for traps with address information */ 1611 switch (cause) { 1612 case RISCV_EXCP_LOAD_GUEST_ACCESS_FAULT: 1613 case RISCV_EXCP_STORE_GUEST_AMO_ACCESS_FAULT: 1614 case RISCV_EXCP_LOAD_ADDR_MIS: 1615 case RISCV_EXCP_STORE_AMO_ADDR_MIS: 1616 case RISCV_EXCP_LOAD_ACCESS_FAULT: 1617 case RISCV_EXCP_STORE_AMO_ACCESS_FAULT: 1618 case RISCV_EXCP_LOAD_PAGE_FAULT: 1619 case RISCV_EXCP_STORE_PAGE_FAULT: 1620 write_gva = env->two_stage_lookup; 1621 tval = env->badaddr; 1622 if (env->two_stage_indirect_lookup) { 1623 /* 1624 * special pseudoinstruction for G-stage fault taken while 1625 * doing VS-stage page table walk. 1626 */ 1627 tinst = (riscv_cpu_xlen(env) == 32) ? 0x00002000 : 0x00003000; 1628 } else { 1629 /* 1630 * The "Addr. Offset" field in transformed instruction is 1631 * non-zero only for misaligned access. 1632 */ 1633 tinst = riscv_transformed_insn(env, env->bins, tval); 1634 } 1635 break; 1636 case RISCV_EXCP_INST_GUEST_PAGE_FAULT: 1637 case RISCV_EXCP_INST_ADDR_MIS: 1638 case RISCV_EXCP_INST_ACCESS_FAULT: 1639 case RISCV_EXCP_INST_PAGE_FAULT: 1640 write_gva = env->two_stage_lookup; 1641 tval = env->badaddr; 1642 if (env->two_stage_indirect_lookup) { 1643 /* 1644 * special pseudoinstruction for G-stage fault taken while 1645 * doing VS-stage page table walk. 1646 */ 1647 tinst = (riscv_cpu_xlen(env) == 32) ? 0x00002000 : 0x00003000; 1648 } 1649 break; 1650 case RISCV_EXCP_ILLEGAL_INST: 1651 case RISCV_EXCP_VIRT_INSTRUCTION_FAULT: 1652 tval = env->bins; 1653 break; 1654 case RISCV_EXCP_BREAKPOINT: 1655 if (cs->watchpoint_hit) { 1656 tval = cs->watchpoint_hit->hitaddr; 1657 cs->watchpoint_hit = NULL; 1658 } 1659 break; 1660 default: 1661 break; 1662 } 1663 /* ecall is dispatched as one cause so translate based on mode */ 1664 if (cause == RISCV_EXCP_U_ECALL) { 1665 assert(env->priv <= 3); 1666 1667 if (env->priv == PRV_M) { 1668 cause = RISCV_EXCP_M_ECALL; 1669 } else if (env->priv == PRV_S && riscv_cpu_virt_enabled(env)) { 1670 cause = RISCV_EXCP_VS_ECALL; 1671 } else if (env->priv == PRV_S && !riscv_cpu_virt_enabled(env)) { 1672 cause = RISCV_EXCP_S_ECALL; 1673 } else if (env->priv == PRV_U) { 1674 cause = RISCV_EXCP_U_ECALL; 1675 } 1676 } 1677 } 1678 1679 trace_riscv_trap(env->mhartid, async, cause, env->pc, tval, 1680 riscv_cpu_get_trap_name(cause, async)); 1681 1682 qemu_log_mask(CPU_LOG_INT, 1683 "%s: hart:"TARGET_FMT_ld", async:%d, cause:"TARGET_FMT_lx", " 1684 "epc:0x"TARGET_FMT_lx", tval:0x"TARGET_FMT_lx", desc=%s\n", 1685 __func__, env->mhartid, async, cause, env->pc, tval, 1686 riscv_cpu_get_trap_name(cause, async)); 1687 1688 if (env->priv <= PRV_S && 1689 cause < TARGET_LONG_BITS && ((deleg >> cause) & 1)) { 1690 /* handle the trap in S-mode */ 1691 if (riscv_has_ext(env, RVH)) { 1692 uint64_t hdeleg = async ? env->hideleg : env->hedeleg; 1693 1694 if (riscv_cpu_virt_enabled(env) && ((hdeleg >> cause) & 1)) { 1695 /* Trap to VS mode */ 1696 /* 1697 * See if we need to adjust cause. Yes if its VS mode interrupt 1698 * no if hypervisor has delegated one of hs mode's interrupt 1699 */ 1700 if (cause == IRQ_VS_TIMER || cause == IRQ_VS_SOFT || 1701 cause == IRQ_VS_EXT) { 1702 cause = cause - 1; 1703 } 1704 write_gva = false; 1705 } else if (riscv_cpu_virt_enabled(env)) { 1706 /* Trap into HS mode, from virt */ 1707 riscv_cpu_swap_hypervisor_regs(env); 1708 env->hstatus = set_field(env->hstatus, HSTATUS_SPVP, 1709 env->priv); 1710 env->hstatus = set_field(env->hstatus, HSTATUS_SPV, 1711 riscv_cpu_virt_enabled(env)); 1712 1713 1714 htval = env->guest_phys_fault_addr; 1715 1716 riscv_cpu_set_virt_enabled(env, 0); 1717 } else { 1718 /* Trap into HS mode */ 1719 env->hstatus = set_field(env->hstatus, HSTATUS_SPV, false); 1720 htval = env->guest_phys_fault_addr; 1721 } 1722 env->hstatus = set_field(env->hstatus, HSTATUS_GVA, write_gva); 1723 } 1724 1725 s = env->mstatus; 1726 s = set_field(s, MSTATUS_SPIE, get_field(s, MSTATUS_SIE)); 1727 s = set_field(s, MSTATUS_SPP, env->priv); 1728 s = set_field(s, MSTATUS_SIE, 0); 1729 env->mstatus = s; 1730 env->scause = cause | ((target_ulong)async << (TARGET_LONG_BITS - 1)); 1731 env->sepc = env->pc; 1732 env->stval = tval; 1733 env->htval = htval; 1734 env->htinst = tinst; 1735 env->pc = (env->stvec >> 2 << 2) + 1736 ((async && (env->stvec & 3) == 1) ? cause * 4 : 0); 1737 riscv_cpu_set_mode(env, PRV_S); 1738 } else { 1739 /* handle the trap in M-mode */ 1740 if (riscv_has_ext(env, RVH)) { 1741 if (riscv_cpu_virt_enabled(env)) { 1742 riscv_cpu_swap_hypervisor_regs(env); 1743 } 1744 env->mstatus = set_field(env->mstatus, MSTATUS_MPV, 1745 riscv_cpu_virt_enabled(env)); 1746 if (riscv_cpu_virt_enabled(env) && tval) { 1747 env->mstatus = set_field(env->mstatus, MSTATUS_GVA, 1); 1748 } 1749 1750 mtval2 = env->guest_phys_fault_addr; 1751 1752 /* Trapping to M mode, virt is disabled */ 1753 riscv_cpu_set_virt_enabled(env, 0); 1754 } 1755 1756 s = env->mstatus; 1757 s = set_field(s, MSTATUS_MPIE, get_field(s, MSTATUS_MIE)); 1758 s = set_field(s, MSTATUS_MPP, env->priv); 1759 s = set_field(s, MSTATUS_MIE, 0); 1760 env->mstatus = s; 1761 env->mcause = cause | ~(((target_ulong)-1) >> async); 1762 env->mepc = env->pc; 1763 env->mtval = tval; 1764 env->mtval2 = mtval2; 1765 env->mtinst = tinst; 1766 env->pc = (env->mtvec >> 2 << 2) + 1767 ((async && (env->mtvec & 3) == 1) ? cause * 4 : 0); 1768 riscv_cpu_set_mode(env, PRV_M); 1769 } 1770 1771 /* NOTE: it is not necessary to yield load reservations here. It is only 1772 * necessary for an SC from "another hart" to cause a load reservation 1773 * to be yielded. Refer to the memory consistency model section of the 1774 * RISC-V ISA Specification. 1775 */ 1776 1777 env->two_stage_lookup = false; 1778 env->two_stage_indirect_lookup = false; 1779 #endif 1780 cs->exception_index = RISCV_EXCP_NONE; /* mark handled to qemu */ 1781 } 1782