1 /* 2 * RISC-V CPU helpers for qemu. 3 * 4 * Copyright (c) 2016-2017 Sagar Karandikar, sagark@eecs.berkeley.edu 5 * Copyright (c) 2017-2018 SiFive, Inc. 6 * 7 * This program is free software; you can redistribute it and/or modify it 8 * under the terms and conditions of the GNU General Public License, 9 * version 2 or later, as published by the Free Software Foundation. 10 * 11 * This program is distributed in the hope it will be useful, but WITHOUT 12 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or 13 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for 14 * more details. 15 * 16 * You should have received a copy of the GNU General Public License along with 17 * this program. If not, see <http://www.gnu.org/licenses/>. 18 */ 19 20 #include "qemu/osdep.h" 21 #include "qemu/log.h" 22 #include "qemu/main-loop.h" 23 #include "cpu.h" 24 #include "internals.h" 25 #include "pmu.h" 26 #include "exec/exec-all.h" 27 #include "instmap.h" 28 #include "tcg/tcg-op.h" 29 #include "trace.h" 30 #include "semihosting/common-semi.h" 31 #include "sysemu/cpu-timers.h" 32 #include "cpu_bits.h" 33 #include "debug.h" 34 #include "tcg/oversized-guest.h" 35 36 int riscv_cpu_mmu_index(CPURISCVState *env, bool ifetch) 37 { 38 #ifdef CONFIG_USER_ONLY 39 return 0; 40 #else 41 bool virt = env->virt_enabled; 42 int mode = env->priv; 43 44 /* All priv -> mmu_idx mapping are here */ 45 if (!ifetch) { 46 uint64_t status = env->mstatus; 47 48 if (mode == PRV_M && get_field(status, MSTATUS_MPRV)) { 49 mode = get_field(env->mstatus, MSTATUS_MPP); 50 virt = get_field(env->mstatus, MSTATUS_MPV); 51 if (virt) { 52 status = env->vsstatus; 53 } 54 } 55 if (mode == PRV_S && get_field(status, MSTATUS_SUM)) { 56 mode = MMUIdx_S_SUM; 57 } 58 } 59 60 return mode | (virt ? MMU_2STAGE_BIT : 0); 61 #endif 62 } 63 64 void cpu_get_tb_cpu_state(CPURISCVState *env, target_ulong *pc, 65 target_ulong *cs_base, uint32_t *pflags) 66 { 67 CPUState *cs = env_cpu(env); 68 RISCVCPU *cpu = RISCV_CPU(cs); 69 RISCVExtStatus fs, vs; 70 uint32_t flags = 0; 71 72 *pc = env->xl == MXL_RV32 ? env->pc & UINT32_MAX : env->pc; 73 *cs_base = 0; 74 75 if (cpu->cfg.ext_zve32f) { 76 /* 77 * If env->vl equals to VLMAX, we can use generic vector operation 78 * expanders (GVEC) to accerlate the vector operations. 79 * However, as LMUL could be a fractional number. The maximum 80 * vector size can be operated might be less than 8 bytes, 81 * which is not supported by GVEC. So we set vl_eq_vlmax flag to true 82 * only when maxsz >= 8 bytes. 83 */ 84 uint32_t vlmax = vext_get_vlmax(cpu, env->vtype); 85 uint32_t sew = FIELD_EX64(env->vtype, VTYPE, VSEW); 86 uint32_t maxsz = vlmax << sew; 87 bool vl_eq_vlmax = (env->vstart == 0) && (vlmax == env->vl) && 88 (maxsz >= 8); 89 flags = FIELD_DP32(flags, TB_FLAGS, VILL, env->vill); 90 flags = FIELD_DP32(flags, TB_FLAGS, SEW, sew); 91 flags = FIELD_DP32(flags, TB_FLAGS, LMUL, 92 FIELD_EX64(env->vtype, VTYPE, VLMUL)); 93 flags = FIELD_DP32(flags, TB_FLAGS, VL_EQ_VLMAX, vl_eq_vlmax); 94 flags = FIELD_DP32(flags, TB_FLAGS, VTA, 95 FIELD_EX64(env->vtype, VTYPE, VTA)); 96 flags = FIELD_DP32(flags, TB_FLAGS, VMA, 97 FIELD_EX64(env->vtype, VTYPE, VMA)); 98 flags = FIELD_DP32(flags, TB_FLAGS, VSTART_EQ_ZERO, env->vstart == 0); 99 } else { 100 flags = FIELD_DP32(flags, TB_FLAGS, VILL, 1); 101 } 102 103 #ifdef CONFIG_USER_ONLY 104 fs = EXT_STATUS_DIRTY; 105 vs = EXT_STATUS_DIRTY; 106 #else 107 flags = FIELD_DP32(flags, TB_FLAGS, PRIV, env->priv); 108 109 flags |= cpu_mmu_index(env, 0); 110 fs = get_field(env->mstatus, MSTATUS_FS); 111 vs = get_field(env->mstatus, MSTATUS_VS); 112 113 if (env->virt_enabled) { 114 flags = FIELD_DP32(flags, TB_FLAGS, VIRT_ENABLED, 1); 115 /* 116 * Merge DISABLED and !DIRTY states using MIN. 117 * We will set both fields when dirtying. 118 */ 119 fs = MIN(fs, get_field(env->mstatus_hs, MSTATUS_FS)); 120 vs = MIN(vs, get_field(env->mstatus_hs, MSTATUS_VS)); 121 } 122 123 if (cpu->cfg.debug && !icount_enabled()) { 124 flags = FIELD_DP32(flags, TB_FLAGS, ITRIGGER, env->itrigger_enabled); 125 } 126 #endif 127 128 flags = FIELD_DP32(flags, TB_FLAGS, FS, fs); 129 flags = FIELD_DP32(flags, TB_FLAGS, VS, vs); 130 flags = FIELD_DP32(flags, TB_FLAGS, XL, env->xl); 131 if (env->cur_pmmask < (env->xl == MXL_RV32 ? UINT32_MAX : UINT64_MAX)) { 132 flags = FIELD_DP32(flags, TB_FLAGS, PM_MASK_ENABLED, 1); 133 } 134 if (env->cur_pmbase != 0) { 135 flags = FIELD_DP32(flags, TB_FLAGS, PM_BASE_ENABLED, 1); 136 } 137 138 *pflags = flags; 139 } 140 141 void riscv_cpu_update_mask(CPURISCVState *env) 142 { 143 target_ulong mask = -1, base = 0; 144 /* 145 * TODO: Current RVJ spec does not specify 146 * how the extension interacts with XLEN. 147 */ 148 #ifndef CONFIG_USER_ONLY 149 if (riscv_has_ext(env, RVJ)) { 150 switch (env->priv) { 151 case PRV_M: 152 if (env->mmte & M_PM_ENABLE) { 153 mask = env->mpmmask; 154 base = env->mpmbase; 155 } 156 break; 157 case PRV_S: 158 if (env->mmte & S_PM_ENABLE) { 159 mask = env->spmmask; 160 base = env->spmbase; 161 } 162 break; 163 case PRV_U: 164 if (env->mmte & U_PM_ENABLE) { 165 mask = env->upmmask; 166 base = env->upmbase; 167 } 168 break; 169 default: 170 g_assert_not_reached(); 171 } 172 } 173 #endif 174 if (env->xl == MXL_RV32) { 175 env->cur_pmmask = mask & UINT32_MAX; 176 env->cur_pmbase = base & UINT32_MAX; 177 } else { 178 env->cur_pmmask = mask; 179 env->cur_pmbase = base; 180 } 181 } 182 183 #ifndef CONFIG_USER_ONLY 184 185 /* 186 * The HS-mode is allowed to configure priority only for the 187 * following VS-mode local interrupts: 188 * 189 * 0 (Reserved interrupt, reads as zero) 190 * 1 Supervisor software interrupt 191 * 4 (Reserved interrupt, reads as zero) 192 * 5 Supervisor timer interrupt 193 * 8 (Reserved interrupt, reads as zero) 194 * 13 (Reserved interrupt) 195 * 14 " 196 * 15 " 197 * 16 " 198 * 17 " 199 * 18 " 200 * 19 " 201 * 20 " 202 * 21 " 203 * 22 " 204 * 23 " 205 */ 206 207 static const int hviprio_index2irq[] = { 208 0, 1, 4, 5, 8, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23 }; 209 static const int hviprio_index2rdzero[] = { 210 1, 0, 1, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0 }; 211 212 int riscv_cpu_hviprio_index2irq(int index, int *out_irq, int *out_rdzero) 213 { 214 if (index < 0 || ARRAY_SIZE(hviprio_index2irq) <= index) { 215 return -EINVAL; 216 } 217 218 if (out_irq) { 219 *out_irq = hviprio_index2irq[index]; 220 } 221 222 if (out_rdzero) { 223 *out_rdzero = hviprio_index2rdzero[index]; 224 } 225 226 return 0; 227 } 228 229 /* 230 * Default priorities of local interrupts are defined in the 231 * RISC-V Advanced Interrupt Architecture specification. 232 * 233 * ---------------------------------------------------------------- 234 * Default | 235 * Priority | Major Interrupt Numbers 236 * ---------------------------------------------------------------- 237 * Highest | 47, 23, 46, 45, 22, 44, 238 * | 43, 21, 42, 41, 20, 40 239 * | 240 * | 11 (0b), 3 (03), 7 (07) 241 * | 9 (09), 1 (01), 5 (05) 242 * | 12 (0c) 243 * | 10 (0a), 2 (02), 6 (06) 244 * | 245 * | 39, 19, 38, 37, 18, 36, 246 * Lowest | 35, 17, 34, 33, 16, 32 247 * ---------------------------------------------------------------- 248 */ 249 static const uint8_t default_iprio[64] = { 250 /* Custom interrupts 48 to 63 */ 251 [63] = IPRIO_MMAXIPRIO, 252 [62] = IPRIO_MMAXIPRIO, 253 [61] = IPRIO_MMAXIPRIO, 254 [60] = IPRIO_MMAXIPRIO, 255 [59] = IPRIO_MMAXIPRIO, 256 [58] = IPRIO_MMAXIPRIO, 257 [57] = IPRIO_MMAXIPRIO, 258 [56] = IPRIO_MMAXIPRIO, 259 [55] = IPRIO_MMAXIPRIO, 260 [54] = IPRIO_MMAXIPRIO, 261 [53] = IPRIO_MMAXIPRIO, 262 [52] = IPRIO_MMAXIPRIO, 263 [51] = IPRIO_MMAXIPRIO, 264 [50] = IPRIO_MMAXIPRIO, 265 [49] = IPRIO_MMAXIPRIO, 266 [48] = IPRIO_MMAXIPRIO, 267 268 /* Custom interrupts 24 to 31 */ 269 [31] = IPRIO_MMAXIPRIO, 270 [30] = IPRIO_MMAXIPRIO, 271 [29] = IPRIO_MMAXIPRIO, 272 [28] = IPRIO_MMAXIPRIO, 273 [27] = IPRIO_MMAXIPRIO, 274 [26] = IPRIO_MMAXIPRIO, 275 [25] = IPRIO_MMAXIPRIO, 276 [24] = IPRIO_MMAXIPRIO, 277 278 [47] = IPRIO_DEFAULT_UPPER, 279 [23] = IPRIO_DEFAULT_UPPER + 1, 280 [46] = IPRIO_DEFAULT_UPPER + 2, 281 [45] = IPRIO_DEFAULT_UPPER + 3, 282 [22] = IPRIO_DEFAULT_UPPER + 4, 283 [44] = IPRIO_DEFAULT_UPPER + 5, 284 285 [43] = IPRIO_DEFAULT_UPPER + 6, 286 [21] = IPRIO_DEFAULT_UPPER + 7, 287 [42] = IPRIO_DEFAULT_UPPER + 8, 288 [41] = IPRIO_DEFAULT_UPPER + 9, 289 [20] = IPRIO_DEFAULT_UPPER + 10, 290 [40] = IPRIO_DEFAULT_UPPER + 11, 291 292 [11] = IPRIO_DEFAULT_M, 293 [3] = IPRIO_DEFAULT_M + 1, 294 [7] = IPRIO_DEFAULT_M + 2, 295 296 [9] = IPRIO_DEFAULT_S, 297 [1] = IPRIO_DEFAULT_S + 1, 298 [5] = IPRIO_DEFAULT_S + 2, 299 300 [12] = IPRIO_DEFAULT_SGEXT, 301 302 [10] = IPRIO_DEFAULT_VS, 303 [2] = IPRIO_DEFAULT_VS + 1, 304 [6] = IPRIO_DEFAULT_VS + 2, 305 306 [39] = IPRIO_DEFAULT_LOWER, 307 [19] = IPRIO_DEFAULT_LOWER + 1, 308 [38] = IPRIO_DEFAULT_LOWER + 2, 309 [37] = IPRIO_DEFAULT_LOWER + 3, 310 [18] = IPRIO_DEFAULT_LOWER + 4, 311 [36] = IPRIO_DEFAULT_LOWER + 5, 312 313 [35] = IPRIO_DEFAULT_LOWER + 6, 314 [17] = IPRIO_DEFAULT_LOWER + 7, 315 [34] = IPRIO_DEFAULT_LOWER + 8, 316 [33] = IPRIO_DEFAULT_LOWER + 9, 317 [16] = IPRIO_DEFAULT_LOWER + 10, 318 [32] = IPRIO_DEFAULT_LOWER + 11, 319 }; 320 321 uint8_t riscv_cpu_default_priority(int irq) 322 { 323 if (irq < 0 || irq > 63) { 324 return IPRIO_MMAXIPRIO; 325 } 326 327 return default_iprio[irq] ? default_iprio[irq] : IPRIO_MMAXIPRIO; 328 }; 329 330 static int riscv_cpu_pending_to_irq(CPURISCVState *env, 331 int extirq, unsigned int extirq_def_prio, 332 uint64_t pending, uint8_t *iprio) 333 { 334 int irq, best_irq = RISCV_EXCP_NONE; 335 unsigned int prio, best_prio = UINT_MAX; 336 337 if (!pending) { 338 return RISCV_EXCP_NONE; 339 } 340 341 irq = ctz64(pending); 342 if (!((extirq == IRQ_M_EXT) ? riscv_cpu_cfg(env)->ext_smaia : 343 riscv_cpu_cfg(env)->ext_ssaia)) { 344 return irq; 345 } 346 347 pending = pending >> irq; 348 while (pending) { 349 prio = iprio[irq]; 350 if (!prio) { 351 if (irq == extirq) { 352 prio = extirq_def_prio; 353 } else { 354 prio = (riscv_cpu_default_priority(irq) < extirq_def_prio) ? 355 1 : IPRIO_MMAXIPRIO; 356 } 357 } 358 if ((pending & 0x1) && (prio <= best_prio)) { 359 best_irq = irq; 360 best_prio = prio; 361 } 362 irq++; 363 pending = pending >> 1; 364 } 365 366 return best_irq; 367 } 368 369 uint64_t riscv_cpu_all_pending(CPURISCVState *env) 370 { 371 uint32_t gein = get_field(env->hstatus, HSTATUS_VGEIN); 372 uint64_t vsgein = (env->hgeip & (1ULL << gein)) ? MIP_VSEIP : 0; 373 uint64_t vstip = (env->vstime_irq) ? MIP_VSTIP : 0; 374 375 return (env->mip | vsgein | vstip) & env->mie; 376 } 377 378 int riscv_cpu_mirq_pending(CPURISCVState *env) 379 { 380 uint64_t irqs = riscv_cpu_all_pending(env) & ~env->mideleg & 381 ~(MIP_SGEIP | MIP_VSSIP | MIP_VSTIP | MIP_VSEIP); 382 383 return riscv_cpu_pending_to_irq(env, IRQ_M_EXT, IPRIO_DEFAULT_M, 384 irqs, env->miprio); 385 } 386 387 int riscv_cpu_sirq_pending(CPURISCVState *env) 388 { 389 uint64_t irqs = riscv_cpu_all_pending(env) & env->mideleg & 390 ~(MIP_VSSIP | MIP_VSTIP | MIP_VSEIP); 391 392 return riscv_cpu_pending_to_irq(env, IRQ_S_EXT, IPRIO_DEFAULT_S, 393 irqs, env->siprio); 394 } 395 396 int riscv_cpu_vsirq_pending(CPURISCVState *env) 397 { 398 uint64_t irqs = riscv_cpu_all_pending(env) & env->mideleg & 399 (MIP_VSSIP | MIP_VSTIP | MIP_VSEIP); 400 401 return riscv_cpu_pending_to_irq(env, IRQ_S_EXT, IPRIO_DEFAULT_S, 402 irqs >> 1, env->hviprio); 403 } 404 405 static int riscv_cpu_local_irq_pending(CPURISCVState *env) 406 { 407 int virq; 408 uint64_t irqs, pending, mie, hsie, vsie; 409 410 /* Determine interrupt enable state of all privilege modes */ 411 if (env->virt_enabled) { 412 mie = 1; 413 hsie = 1; 414 vsie = (env->priv < PRV_S) || 415 (env->priv == PRV_S && get_field(env->mstatus, MSTATUS_SIE)); 416 } else { 417 mie = (env->priv < PRV_M) || 418 (env->priv == PRV_M && get_field(env->mstatus, MSTATUS_MIE)); 419 hsie = (env->priv < PRV_S) || 420 (env->priv == PRV_S && get_field(env->mstatus, MSTATUS_SIE)); 421 vsie = 0; 422 } 423 424 /* Determine all pending interrupts */ 425 pending = riscv_cpu_all_pending(env); 426 427 /* Check M-mode interrupts */ 428 irqs = pending & ~env->mideleg & -mie; 429 if (irqs) { 430 return riscv_cpu_pending_to_irq(env, IRQ_M_EXT, IPRIO_DEFAULT_M, 431 irqs, env->miprio); 432 } 433 434 /* Check HS-mode interrupts */ 435 irqs = pending & env->mideleg & ~env->hideleg & -hsie; 436 if (irqs) { 437 return riscv_cpu_pending_to_irq(env, IRQ_S_EXT, IPRIO_DEFAULT_S, 438 irqs, env->siprio); 439 } 440 441 /* Check VS-mode interrupts */ 442 irqs = pending & env->mideleg & env->hideleg & -vsie; 443 if (irqs) { 444 virq = riscv_cpu_pending_to_irq(env, IRQ_S_EXT, IPRIO_DEFAULT_S, 445 irqs >> 1, env->hviprio); 446 return (virq <= 0) ? virq : virq + 1; 447 } 448 449 /* Indicate no pending interrupt */ 450 return RISCV_EXCP_NONE; 451 } 452 453 bool riscv_cpu_exec_interrupt(CPUState *cs, int interrupt_request) 454 { 455 if (interrupt_request & CPU_INTERRUPT_HARD) { 456 RISCVCPU *cpu = RISCV_CPU(cs); 457 CPURISCVState *env = &cpu->env; 458 int interruptno = riscv_cpu_local_irq_pending(env); 459 if (interruptno >= 0) { 460 cs->exception_index = RISCV_EXCP_INT_FLAG | interruptno; 461 riscv_cpu_do_interrupt(cs); 462 return true; 463 } 464 } 465 return false; 466 } 467 468 /* Return true is floating point support is currently enabled */ 469 bool riscv_cpu_fp_enabled(CPURISCVState *env) 470 { 471 if (env->mstatus & MSTATUS_FS) { 472 if (env->virt_enabled && !(env->mstatus_hs & MSTATUS_FS)) { 473 return false; 474 } 475 return true; 476 } 477 478 return false; 479 } 480 481 /* Return true is vector support is currently enabled */ 482 bool riscv_cpu_vector_enabled(CPURISCVState *env) 483 { 484 if (env->mstatus & MSTATUS_VS) { 485 if (env->virt_enabled && !(env->mstatus_hs & MSTATUS_VS)) { 486 return false; 487 } 488 return true; 489 } 490 491 return false; 492 } 493 494 void riscv_cpu_swap_hypervisor_regs(CPURISCVState *env) 495 { 496 uint64_t mstatus_mask = MSTATUS_MXR | MSTATUS_SUM | 497 MSTATUS_SPP | MSTATUS_SPIE | MSTATUS_SIE | 498 MSTATUS64_UXL | MSTATUS_VS; 499 500 if (riscv_has_ext(env, RVF)) { 501 mstatus_mask |= MSTATUS_FS; 502 } 503 bool current_virt = env->virt_enabled; 504 505 g_assert(riscv_has_ext(env, RVH)); 506 507 if (current_virt) { 508 /* Current V=1 and we are about to change to V=0 */ 509 env->vsstatus = env->mstatus & mstatus_mask; 510 env->mstatus &= ~mstatus_mask; 511 env->mstatus |= env->mstatus_hs; 512 513 env->vstvec = env->stvec; 514 env->stvec = env->stvec_hs; 515 516 env->vsscratch = env->sscratch; 517 env->sscratch = env->sscratch_hs; 518 519 env->vsepc = env->sepc; 520 env->sepc = env->sepc_hs; 521 522 env->vscause = env->scause; 523 env->scause = env->scause_hs; 524 525 env->vstval = env->stval; 526 env->stval = env->stval_hs; 527 528 env->vsatp = env->satp; 529 env->satp = env->satp_hs; 530 } else { 531 /* Current V=0 and we are about to change to V=1 */ 532 env->mstatus_hs = env->mstatus & mstatus_mask; 533 env->mstatus &= ~mstatus_mask; 534 env->mstatus |= env->vsstatus; 535 536 env->stvec_hs = env->stvec; 537 env->stvec = env->vstvec; 538 539 env->sscratch_hs = env->sscratch; 540 env->sscratch = env->vsscratch; 541 542 env->sepc_hs = env->sepc; 543 env->sepc = env->vsepc; 544 545 env->scause_hs = env->scause; 546 env->scause = env->vscause; 547 548 env->stval_hs = env->stval; 549 env->stval = env->vstval; 550 551 env->satp_hs = env->satp; 552 env->satp = env->vsatp; 553 } 554 } 555 556 target_ulong riscv_cpu_get_geilen(CPURISCVState *env) 557 { 558 if (!riscv_has_ext(env, RVH)) { 559 return 0; 560 } 561 562 return env->geilen; 563 } 564 565 void riscv_cpu_set_geilen(CPURISCVState *env, target_ulong geilen) 566 { 567 if (!riscv_has_ext(env, RVH)) { 568 return; 569 } 570 571 if (geilen > (TARGET_LONG_BITS - 1)) { 572 return; 573 } 574 575 env->geilen = geilen; 576 } 577 578 /* This function can only be called to set virt when RVH is enabled */ 579 void riscv_cpu_set_virt_enabled(CPURISCVState *env, bool enable) 580 { 581 /* Flush the TLB on all virt mode changes. */ 582 if (env->virt_enabled != enable) { 583 tlb_flush(env_cpu(env)); 584 } 585 586 env->virt_enabled = enable; 587 588 if (enable) { 589 /* 590 * The guest external interrupts from an interrupt controller are 591 * delivered only when the Guest/VM is running (i.e. V=1). This means 592 * any guest external interrupt which is triggered while the Guest/VM 593 * is not running (i.e. V=0) will be missed on QEMU resulting in guest 594 * with sluggish response to serial console input and other I/O events. 595 * 596 * To solve this, we check and inject interrupt after setting V=1. 597 */ 598 riscv_cpu_update_mip(env, 0, 0); 599 } 600 } 601 602 int riscv_cpu_claim_interrupts(RISCVCPU *cpu, uint64_t interrupts) 603 { 604 CPURISCVState *env = &cpu->env; 605 if (env->miclaim & interrupts) { 606 return -1; 607 } else { 608 env->miclaim |= interrupts; 609 return 0; 610 } 611 } 612 613 uint64_t riscv_cpu_update_mip(CPURISCVState *env, uint64_t mask, 614 uint64_t value) 615 { 616 CPUState *cs = env_cpu(env); 617 uint64_t gein, vsgein = 0, vstip = 0, old = env->mip; 618 619 if (env->virt_enabled) { 620 gein = get_field(env->hstatus, HSTATUS_VGEIN); 621 vsgein = (env->hgeip & (1ULL << gein)) ? MIP_VSEIP : 0; 622 } 623 624 vstip = env->vstime_irq ? MIP_VSTIP : 0; 625 626 QEMU_IOTHREAD_LOCK_GUARD(); 627 628 env->mip = (env->mip & ~mask) | (value & mask); 629 630 if (env->mip | vsgein | vstip) { 631 cpu_interrupt(cs, CPU_INTERRUPT_HARD); 632 } else { 633 cpu_reset_interrupt(cs, CPU_INTERRUPT_HARD); 634 } 635 636 return old; 637 } 638 639 void riscv_cpu_set_rdtime_fn(CPURISCVState *env, uint64_t (*fn)(void *), 640 void *arg) 641 { 642 env->rdtime_fn = fn; 643 env->rdtime_fn_arg = arg; 644 } 645 646 void riscv_cpu_set_aia_ireg_rmw_fn(CPURISCVState *env, uint32_t priv, 647 int (*rmw_fn)(void *arg, 648 target_ulong reg, 649 target_ulong *val, 650 target_ulong new_val, 651 target_ulong write_mask), 652 void *rmw_fn_arg) 653 { 654 if (priv <= PRV_M) { 655 env->aia_ireg_rmw_fn[priv] = rmw_fn; 656 env->aia_ireg_rmw_fn_arg[priv] = rmw_fn_arg; 657 } 658 } 659 660 void riscv_cpu_set_mode(CPURISCVState *env, target_ulong newpriv) 661 { 662 g_assert(newpriv <= PRV_M && newpriv != PRV_RESERVED); 663 664 if (icount_enabled() && newpriv != env->priv) { 665 riscv_itrigger_update_priv(env); 666 } 667 /* tlb_flush is unnecessary as mode is contained in mmu_idx */ 668 env->priv = newpriv; 669 env->xl = cpu_recompute_xl(env); 670 riscv_cpu_update_mask(env); 671 672 /* 673 * Clear the load reservation - otherwise a reservation placed in one 674 * context/process can be used by another, resulting in an SC succeeding 675 * incorrectly. Version 2.2 of the ISA specification explicitly requires 676 * this behaviour, while later revisions say that the kernel "should" use 677 * an SC instruction to force the yielding of a load reservation on a 678 * preemptive context switch. As a result, do both. 679 */ 680 env->load_res = -1; 681 } 682 683 /* 684 * get_physical_address_pmp - check PMP permission for this physical address 685 * 686 * Match the PMP region and check permission for this physical address and it's 687 * TLB page. Returns 0 if the permission checking was successful 688 * 689 * @env: CPURISCVState 690 * @prot: The returned protection attributes 691 * @addr: The physical address to be checked permission 692 * @access_type: The type of MMU access 693 * @mode: Indicates current privilege level. 694 */ 695 static int get_physical_address_pmp(CPURISCVState *env, int *prot, hwaddr addr, 696 int size, MMUAccessType access_type, 697 int mode) 698 { 699 pmp_priv_t pmp_priv; 700 int pmp_index = -1; 701 702 if (!riscv_cpu_cfg(env)->pmp) { 703 *prot = PAGE_READ | PAGE_WRITE | PAGE_EXEC; 704 return TRANSLATE_SUCCESS; 705 } 706 707 pmp_index = pmp_hart_has_privs(env, addr, size, 1 << access_type, 708 &pmp_priv, mode); 709 if (pmp_index < 0) { 710 *prot = 0; 711 return TRANSLATE_PMP_FAIL; 712 } 713 714 *prot = pmp_priv_to_page_prot(pmp_priv); 715 716 return TRANSLATE_SUCCESS; 717 } 718 719 /* 720 * get_physical_address - get the physical address for this virtual address 721 * 722 * Do a page table walk to obtain the physical address corresponding to a 723 * virtual address. Returns 0 if the translation was successful 724 * 725 * Adapted from Spike's mmu_t::translate and mmu_t::walk 726 * 727 * @env: CPURISCVState 728 * @physical: This will be set to the calculated physical address 729 * @prot: The returned protection attributes 730 * @addr: The virtual address or guest physical address to be translated 731 * @fault_pte_addr: If not NULL, this will be set to fault pte address 732 * when a error occurs on pte address translation. 733 * This will already be shifted to match htval. 734 * @access_type: The type of MMU access 735 * @mmu_idx: Indicates current privilege level 736 * @first_stage: Are we in first stage translation? 737 * Second stage is used for hypervisor guest translation 738 * @two_stage: Are we going to perform two stage translation 739 * @is_debug: Is this access from a debugger or the monitor? 740 */ 741 static int get_physical_address(CPURISCVState *env, hwaddr *physical, 742 int *ret_prot, vaddr addr, 743 target_ulong *fault_pte_addr, 744 int access_type, int mmu_idx, 745 bool first_stage, bool two_stage, 746 bool is_debug) 747 { 748 /* 749 * NOTE: the env->pc value visible here will not be 750 * correct, but the value visible to the exception handler 751 * (riscv_cpu_do_interrupt) is correct 752 */ 753 MemTxResult res; 754 MemTxAttrs attrs = MEMTXATTRS_UNSPECIFIED; 755 int mode = mmuidx_priv(mmu_idx); 756 bool use_background = false; 757 hwaddr ppn; 758 int napot_bits = 0; 759 target_ulong napot_mask; 760 761 /* 762 * Check if we should use the background registers for the two 763 * stage translation. We don't need to check if we actually need 764 * two stage translation as that happened before this function 765 * was called. Background registers will be used if the guest has 766 * forced a two stage translation to be on (in HS or M mode). 767 */ 768 if (!env->virt_enabled && two_stage) { 769 use_background = true; 770 } 771 772 if (mode == PRV_M || !riscv_cpu_cfg(env)->mmu) { 773 *physical = addr; 774 *ret_prot = PAGE_READ | PAGE_WRITE | PAGE_EXEC; 775 return TRANSLATE_SUCCESS; 776 } 777 778 *ret_prot = 0; 779 780 hwaddr base; 781 int levels, ptidxbits, ptesize, vm, widened; 782 783 if (first_stage == true) { 784 if (use_background) { 785 if (riscv_cpu_mxl(env) == MXL_RV32) { 786 base = (hwaddr)get_field(env->vsatp, SATP32_PPN) << PGSHIFT; 787 vm = get_field(env->vsatp, SATP32_MODE); 788 } else { 789 base = (hwaddr)get_field(env->vsatp, SATP64_PPN) << PGSHIFT; 790 vm = get_field(env->vsatp, SATP64_MODE); 791 } 792 } else { 793 if (riscv_cpu_mxl(env) == MXL_RV32) { 794 base = (hwaddr)get_field(env->satp, SATP32_PPN) << PGSHIFT; 795 vm = get_field(env->satp, SATP32_MODE); 796 } else { 797 base = (hwaddr)get_field(env->satp, SATP64_PPN) << PGSHIFT; 798 vm = get_field(env->satp, SATP64_MODE); 799 } 800 } 801 widened = 0; 802 } else { 803 if (riscv_cpu_mxl(env) == MXL_RV32) { 804 base = (hwaddr)get_field(env->hgatp, SATP32_PPN) << PGSHIFT; 805 vm = get_field(env->hgatp, SATP32_MODE); 806 } else { 807 base = (hwaddr)get_field(env->hgatp, SATP64_PPN) << PGSHIFT; 808 vm = get_field(env->hgatp, SATP64_MODE); 809 } 810 widened = 2; 811 } 812 813 switch (vm) { 814 case VM_1_10_SV32: 815 levels = 2; ptidxbits = 10; ptesize = 4; break; 816 case VM_1_10_SV39: 817 levels = 3; ptidxbits = 9; ptesize = 8; break; 818 case VM_1_10_SV48: 819 levels = 4; ptidxbits = 9; ptesize = 8; break; 820 case VM_1_10_SV57: 821 levels = 5; ptidxbits = 9; ptesize = 8; break; 822 case VM_1_10_MBARE: 823 *physical = addr; 824 *ret_prot = PAGE_READ | PAGE_WRITE | PAGE_EXEC; 825 return TRANSLATE_SUCCESS; 826 default: 827 g_assert_not_reached(); 828 } 829 830 CPUState *cs = env_cpu(env); 831 int va_bits = PGSHIFT + levels * ptidxbits + widened; 832 833 if (first_stage == true) { 834 target_ulong mask, masked_msbs; 835 836 if (TARGET_LONG_BITS > (va_bits - 1)) { 837 mask = (1L << (TARGET_LONG_BITS - (va_bits - 1))) - 1; 838 } else { 839 mask = 0; 840 } 841 masked_msbs = (addr >> (va_bits - 1)) & mask; 842 843 if (masked_msbs != 0 && masked_msbs != mask) { 844 return TRANSLATE_FAIL; 845 } 846 } else { 847 if (vm != VM_1_10_SV32 && addr >> va_bits != 0) { 848 return TRANSLATE_FAIL; 849 } 850 } 851 852 bool pbmte = env->menvcfg & MENVCFG_PBMTE; 853 bool hade = env->menvcfg & MENVCFG_HADE; 854 855 if (first_stage && two_stage && env->virt_enabled) { 856 pbmte = pbmte && (env->henvcfg & HENVCFG_PBMTE); 857 hade = hade && (env->henvcfg & HENVCFG_HADE); 858 } 859 860 int ptshift = (levels - 1) * ptidxbits; 861 target_ulong pte; 862 hwaddr pte_addr; 863 int i; 864 865 #if !TCG_OVERSIZED_GUEST 866 restart: 867 #endif 868 for (i = 0; i < levels; i++, ptshift -= ptidxbits) { 869 target_ulong idx; 870 if (i == 0) { 871 idx = (addr >> (PGSHIFT + ptshift)) & 872 ((1 << (ptidxbits + widened)) - 1); 873 } else { 874 idx = (addr >> (PGSHIFT + ptshift)) & 875 ((1 << ptidxbits) - 1); 876 } 877 878 /* check that physical address of PTE is legal */ 879 880 if (two_stage && first_stage) { 881 int vbase_prot; 882 hwaddr vbase; 883 884 /* Do the second stage translation on the base PTE address. */ 885 int vbase_ret = get_physical_address(env, &vbase, &vbase_prot, 886 base, NULL, MMU_DATA_LOAD, 887 MMUIdx_U, false, true, 888 is_debug); 889 890 if (vbase_ret != TRANSLATE_SUCCESS) { 891 if (fault_pte_addr) { 892 *fault_pte_addr = (base + idx * ptesize) >> 2; 893 } 894 return TRANSLATE_G_STAGE_FAIL; 895 } 896 897 pte_addr = vbase + idx * ptesize; 898 } else { 899 pte_addr = base + idx * ptesize; 900 } 901 902 int pmp_prot; 903 int pmp_ret = get_physical_address_pmp(env, &pmp_prot, pte_addr, 904 sizeof(target_ulong), 905 MMU_DATA_LOAD, PRV_S); 906 if (pmp_ret != TRANSLATE_SUCCESS) { 907 return TRANSLATE_PMP_FAIL; 908 } 909 910 if (riscv_cpu_mxl(env) == MXL_RV32) { 911 pte = address_space_ldl(cs->as, pte_addr, attrs, &res); 912 } else { 913 pte = address_space_ldq(cs->as, pte_addr, attrs, &res); 914 } 915 916 if (res != MEMTX_OK) { 917 return TRANSLATE_FAIL; 918 } 919 920 if (riscv_cpu_sxl(env) == MXL_RV32) { 921 ppn = pte >> PTE_PPN_SHIFT; 922 } else { 923 if (pte & PTE_RESERVED) { 924 return TRANSLATE_FAIL; 925 } 926 927 if (!pbmte && (pte & PTE_PBMT)) { 928 return TRANSLATE_FAIL; 929 } 930 931 if (!riscv_cpu_cfg(env)->ext_svnapot && (pte & PTE_N)) { 932 return TRANSLATE_FAIL; 933 } 934 935 ppn = (pte & (target_ulong)PTE_PPN_MASK) >> PTE_PPN_SHIFT; 936 } 937 938 if (!(pte & PTE_V)) { 939 /* Invalid PTE */ 940 return TRANSLATE_FAIL; 941 } 942 if (pte & (PTE_R | PTE_W | PTE_X)) { 943 goto leaf; 944 } 945 946 /* Inner PTE, continue walking */ 947 if (pte & (PTE_D | PTE_A | PTE_U | PTE_ATTR)) { 948 return TRANSLATE_FAIL; 949 } 950 base = ppn << PGSHIFT; 951 } 952 953 /* No leaf pte at any translation level. */ 954 return TRANSLATE_FAIL; 955 956 leaf: 957 if (ppn & ((1ULL << ptshift) - 1)) { 958 /* Misaligned PPN */ 959 return TRANSLATE_FAIL; 960 } 961 if (!pbmte && (pte & PTE_PBMT)) { 962 /* Reserved without Svpbmt. */ 963 return TRANSLATE_FAIL; 964 } 965 966 /* Check for reserved combinations of RWX flags. */ 967 switch (pte & (PTE_R | PTE_W | PTE_X)) { 968 case PTE_W: 969 case PTE_W | PTE_X: 970 return TRANSLATE_FAIL; 971 } 972 973 int prot = 0; 974 if (pte & PTE_R) { 975 prot |= PAGE_READ; 976 } 977 if (pte & PTE_W) { 978 prot |= PAGE_WRITE; 979 } 980 if (pte & PTE_X) { 981 bool mxr; 982 983 if (first_stage == true) { 984 mxr = get_field(env->mstatus, MSTATUS_MXR); 985 } else { 986 mxr = get_field(env->vsstatus, MSTATUS_MXR); 987 } 988 if (mxr) { 989 prot |= PAGE_READ; 990 } 991 prot |= PAGE_EXEC; 992 } 993 994 if (pte & PTE_U) { 995 if (mode != PRV_U) { 996 if (!mmuidx_sum(mmu_idx)) { 997 return TRANSLATE_FAIL; 998 } 999 /* SUM allows only read+write, not execute. */ 1000 prot &= PAGE_READ | PAGE_WRITE; 1001 } 1002 } else if (mode != PRV_S) { 1003 /* Supervisor PTE flags when not S mode */ 1004 return TRANSLATE_FAIL; 1005 } 1006 1007 if (!((prot >> access_type) & 1)) { 1008 /* Access check failed */ 1009 return TRANSLATE_FAIL; 1010 } 1011 1012 /* If necessary, set accessed and dirty bits. */ 1013 target_ulong updated_pte = pte | PTE_A | 1014 (access_type == MMU_DATA_STORE ? PTE_D : 0); 1015 1016 /* Page table updates need to be atomic with MTTCG enabled */ 1017 if (updated_pte != pte && !is_debug) { 1018 if (!hade) { 1019 return TRANSLATE_FAIL; 1020 } 1021 1022 /* 1023 * - if accessed or dirty bits need updating, and the PTE is 1024 * in RAM, then we do so atomically with a compare and swap. 1025 * - if the PTE is in IO space or ROM, then it can't be updated 1026 * and we return TRANSLATE_FAIL. 1027 * - if the PTE changed by the time we went to update it, then 1028 * it is no longer valid and we must re-walk the page table. 1029 */ 1030 MemoryRegion *mr; 1031 hwaddr l = sizeof(target_ulong), addr1; 1032 mr = address_space_translate(cs->as, pte_addr, &addr1, &l, 1033 false, MEMTXATTRS_UNSPECIFIED); 1034 if (memory_region_is_ram(mr)) { 1035 target_ulong *pte_pa = qemu_map_ram_ptr(mr->ram_block, addr1); 1036 #if TCG_OVERSIZED_GUEST 1037 /* 1038 * MTTCG is not enabled on oversized TCG guests so 1039 * page table updates do not need to be atomic 1040 */ 1041 *pte_pa = pte = updated_pte; 1042 #else 1043 target_ulong old_pte = qatomic_cmpxchg(pte_pa, pte, updated_pte); 1044 if (old_pte != pte) { 1045 goto restart; 1046 } 1047 pte = updated_pte; 1048 #endif 1049 } else { 1050 /* 1051 * Misconfigured PTE in ROM (AD bits are not preset) or 1052 * PTE is in IO space and can't be updated atomically. 1053 */ 1054 return TRANSLATE_FAIL; 1055 } 1056 } 1057 1058 /* For superpage mappings, make a fake leaf PTE for the TLB's benefit. */ 1059 target_ulong vpn = addr >> PGSHIFT; 1060 1061 if (riscv_cpu_cfg(env)->ext_svnapot && (pte & PTE_N)) { 1062 napot_bits = ctzl(ppn) + 1; 1063 if ((i != (levels - 1)) || (napot_bits != 4)) { 1064 return TRANSLATE_FAIL; 1065 } 1066 } 1067 1068 napot_mask = (1 << napot_bits) - 1; 1069 *physical = (((ppn & ~napot_mask) | (vpn & napot_mask) | 1070 (vpn & (((target_ulong)1 << ptshift) - 1)) 1071 ) << PGSHIFT) | (addr & ~TARGET_PAGE_MASK); 1072 1073 /* 1074 * Remove write permission unless this is a store, or the page is 1075 * already dirty, so that we TLB miss on later writes to update 1076 * the dirty bit. 1077 */ 1078 if (access_type != MMU_DATA_STORE && !(pte & PTE_D)) { 1079 prot &= ~PAGE_WRITE; 1080 } 1081 *ret_prot = prot; 1082 1083 return TRANSLATE_SUCCESS; 1084 } 1085 1086 static void raise_mmu_exception(CPURISCVState *env, target_ulong address, 1087 MMUAccessType access_type, bool pmp_violation, 1088 bool first_stage, bool two_stage, 1089 bool two_stage_indirect) 1090 { 1091 CPUState *cs = env_cpu(env); 1092 int page_fault_exceptions, vm; 1093 uint64_t stap_mode; 1094 1095 if (riscv_cpu_mxl(env) == MXL_RV32) { 1096 stap_mode = SATP32_MODE; 1097 } else { 1098 stap_mode = SATP64_MODE; 1099 } 1100 1101 if (first_stage) { 1102 vm = get_field(env->satp, stap_mode); 1103 } else { 1104 vm = get_field(env->hgatp, stap_mode); 1105 } 1106 1107 page_fault_exceptions = vm != VM_1_10_MBARE && !pmp_violation; 1108 1109 switch (access_type) { 1110 case MMU_INST_FETCH: 1111 if (env->virt_enabled && !first_stage) { 1112 cs->exception_index = RISCV_EXCP_INST_GUEST_PAGE_FAULT; 1113 } else { 1114 cs->exception_index = page_fault_exceptions ? 1115 RISCV_EXCP_INST_PAGE_FAULT : RISCV_EXCP_INST_ACCESS_FAULT; 1116 } 1117 break; 1118 case MMU_DATA_LOAD: 1119 if (two_stage && !first_stage) { 1120 cs->exception_index = RISCV_EXCP_LOAD_GUEST_ACCESS_FAULT; 1121 } else { 1122 cs->exception_index = page_fault_exceptions ? 1123 RISCV_EXCP_LOAD_PAGE_FAULT : RISCV_EXCP_LOAD_ACCESS_FAULT; 1124 } 1125 break; 1126 case MMU_DATA_STORE: 1127 if (two_stage && !first_stage) { 1128 cs->exception_index = RISCV_EXCP_STORE_GUEST_AMO_ACCESS_FAULT; 1129 } else { 1130 cs->exception_index = page_fault_exceptions ? 1131 RISCV_EXCP_STORE_PAGE_FAULT : 1132 RISCV_EXCP_STORE_AMO_ACCESS_FAULT; 1133 } 1134 break; 1135 default: 1136 g_assert_not_reached(); 1137 } 1138 env->badaddr = address; 1139 env->two_stage_lookup = two_stage; 1140 env->two_stage_indirect_lookup = two_stage_indirect; 1141 } 1142 1143 hwaddr riscv_cpu_get_phys_page_debug(CPUState *cs, vaddr addr) 1144 { 1145 RISCVCPU *cpu = RISCV_CPU(cs); 1146 CPURISCVState *env = &cpu->env; 1147 hwaddr phys_addr; 1148 int prot; 1149 int mmu_idx = cpu_mmu_index(&cpu->env, false); 1150 1151 if (get_physical_address(env, &phys_addr, &prot, addr, NULL, 0, mmu_idx, 1152 true, env->virt_enabled, true)) { 1153 return -1; 1154 } 1155 1156 if (env->virt_enabled) { 1157 if (get_physical_address(env, &phys_addr, &prot, phys_addr, NULL, 1158 0, mmu_idx, false, true, true)) { 1159 return -1; 1160 } 1161 } 1162 1163 return phys_addr & TARGET_PAGE_MASK; 1164 } 1165 1166 void riscv_cpu_do_transaction_failed(CPUState *cs, hwaddr physaddr, 1167 vaddr addr, unsigned size, 1168 MMUAccessType access_type, 1169 int mmu_idx, MemTxAttrs attrs, 1170 MemTxResult response, uintptr_t retaddr) 1171 { 1172 RISCVCPU *cpu = RISCV_CPU(cs); 1173 CPURISCVState *env = &cpu->env; 1174 1175 if (access_type == MMU_DATA_STORE) { 1176 cs->exception_index = RISCV_EXCP_STORE_AMO_ACCESS_FAULT; 1177 } else if (access_type == MMU_DATA_LOAD) { 1178 cs->exception_index = RISCV_EXCP_LOAD_ACCESS_FAULT; 1179 } else { 1180 cs->exception_index = RISCV_EXCP_INST_ACCESS_FAULT; 1181 } 1182 1183 env->badaddr = addr; 1184 env->two_stage_lookup = mmuidx_2stage(mmu_idx); 1185 env->two_stage_indirect_lookup = false; 1186 cpu_loop_exit_restore(cs, retaddr); 1187 } 1188 1189 void riscv_cpu_do_unaligned_access(CPUState *cs, vaddr addr, 1190 MMUAccessType access_type, int mmu_idx, 1191 uintptr_t retaddr) 1192 { 1193 RISCVCPU *cpu = RISCV_CPU(cs); 1194 CPURISCVState *env = &cpu->env; 1195 switch (access_type) { 1196 case MMU_INST_FETCH: 1197 cs->exception_index = RISCV_EXCP_INST_ADDR_MIS; 1198 break; 1199 case MMU_DATA_LOAD: 1200 cs->exception_index = RISCV_EXCP_LOAD_ADDR_MIS; 1201 break; 1202 case MMU_DATA_STORE: 1203 cs->exception_index = RISCV_EXCP_STORE_AMO_ADDR_MIS; 1204 break; 1205 default: 1206 g_assert_not_reached(); 1207 } 1208 env->badaddr = addr; 1209 env->two_stage_lookup = mmuidx_2stage(mmu_idx); 1210 env->two_stage_indirect_lookup = false; 1211 cpu_loop_exit_restore(cs, retaddr); 1212 } 1213 1214 1215 static void pmu_tlb_fill_incr_ctr(RISCVCPU *cpu, MMUAccessType access_type) 1216 { 1217 enum riscv_pmu_event_idx pmu_event_type; 1218 1219 switch (access_type) { 1220 case MMU_INST_FETCH: 1221 pmu_event_type = RISCV_PMU_EVENT_CACHE_ITLB_PREFETCH_MISS; 1222 break; 1223 case MMU_DATA_LOAD: 1224 pmu_event_type = RISCV_PMU_EVENT_CACHE_DTLB_READ_MISS; 1225 break; 1226 case MMU_DATA_STORE: 1227 pmu_event_type = RISCV_PMU_EVENT_CACHE_DTLB_WRITE_MISS; 1228 break; 1229 default: 1230 return; 1231 } 1232 1233 riscv_pmu_incr_ctr(cpu, pmu_event_type); 1234 } 1235 1236 bool riscv_cpu_tlb_fill(CPUState *cs, vaddr address, int size, 1237 MMUAccessType access_type, int mmu_idx, 1238 bool probe, uintptr_t retaddr) 1239 { 1240 RISCVCPU *cpu = RISCV_CPU(cs); 1241 CPURISCVState *env = &cpu->env; 1242 vaddr im_address; 1243 hwaddr pa = 0; 1244 int prot, prot2, prot_pmp; 1245 bool pmp_violation = false; 1246 bool first_stage_error = true; 1247 bool two_stage_lookup = mmuidx_2stage(mmu_idx); 1248 bool two_stage_indirect_error = false; 1249 int ret = TRANSLATE_FAIL; 1250 int mode = mmu_idx; 1251 /* default TLB page size */ 1252 target_ulong tlb_size = TARGET_PAGE_SIZE; 1253 1254 env->guest_phys_fault_addr = 0; 1255 1256 qemu_log_mask(CPU_LOG_MMU, "%s ad %" VADDR_PRIx " rw %d mmu_idx %d\n", 1257 __func__, address, access_type, mmu_idx); 1258 1259 pmu_tlb_fill_incr_ctr(cpu, access_type); 1260 if (two_stage_lookup) { 1261 /* Two stage lookup */ 1262 ret = get_physical_address(env, &pa, &prot, address, 1263 &env->guest_phys_fault_addr, access_type, 1264 mmu_idx, true, true, false); 1265 1266 /* 1267 * A G-stage exception may be triggered during two state lookup. 1268 * And the env->guest_phys_fault_addr has already been set in 1269 * get_physical_address(). 1270 */ 1271 if (ret == TRANSLATE_G_STAGE_FAIL) { 1272 first_stage_error = false; 1273 two_stage_indirect_error = true; 1274 access_type = MMU_DATA_LOAD; 1275 } 1276 1277 qemu_log_mask(CPU_LOG_MMU, 1278 "%s 1st-stage address=%" VADDR_PRIx " ret %d physical " 1279 HWADDR_FMT_plx " prot %d\n", 1280 __func__, address, ret, pa, prot); 1281 1282 if (ret == TRANSLATE_SUCCESS) { 1283 /* Second stage lookup */ 1284 im_address = pa; 1285 1286 ret = get_physical_address(env, &pa, &prot2, im_address, NULL, 1287 access_type, MMUIdx_U, false, true, 1288 false); 1289 1290 qemu_log_mask(CPU_LOG_MMU, 1291 "%s 2nd-stage address=%" VADDR_PRIx 1292 " ret %d physical " 1293 HWADDR_FMT_plx " prot %d\n", 1294 __func__, im_address, ret, pa, prot2); 1295 1296 prot &= prot2; 1297 1298 if (ret == TRANSLATE_SUCCESS) { 1299 ret = get_physical_address_pmp(env, &prot_pmp, pa, 1300 size, access_type, mode); 1301 tlb_size = pmp_get_tlb_size(env, pa); 1302 1303 qemu_log_mask(CPU_LOG_MMU, 1304 "%s PMP address=" HWADDR_FMT_plx " ret %d prot" 1305 " %d tlb_size " TARGET_FMT_lu "\n", 1306 __func__, pa, ret, prot_pmp, tlb_size); 1307 1308 prot &= prot_pmp; 1309 } 1310 1311 if (ret != TRANSLATE_SUCCESS) { 1312 /* 1313 * Guest physical address translation failed, this is a HS 1314 * level exception 1315 */ 1316 first_stage_error = false; 1317 env->guest_phys_fault_addr = (im_address | 1318 (address & 1319 (TARGET_PAGE_SIZE - 1))) >> 2; 1320 } 1321 } 1322 } else { 1323 /* Single stage lookup */ 1324 ret = get_physical_address(env, &pa, &prot, address, NULL, 1325 access_type, mmu_idx, true, false, false); 1326 1327 qemu_log_mask(CPU_LOG_MMU, 1328 "%s address=%" VADDR_PRIx " ret %d physical " 1329 HWADDR_FMT_plx " prot %d\n", 1330 __func__, address, ret, pa, prot); 1331 1332 if (ret == TRANSLATE_SUCCESS) { 1333 ret = get_physical_address_pmp(env, &prot_pmp, pa, 1334 size, access_type, mode); 1335 tlb_size = pmp_get_tlb_size(env, pa); 1336 1337 qemu_log_mask(CPU_LOG_MMU, 1338 "%s PMP address=" HWADDR_FMT_plx " ret %d prot" 1339 " %d tlb_size " TARGET_FMT_lu "\n", 1340 __func__, pa, ret, prot_pmp, tlb_size); 1341 1342 prot &= prot_pmp; 1343 } 1344 } 1345 1346 if (ret == TRANSLATE_PMP_FAIL) { 1347 pmp_violation = true; 1348 } 1349 1350 if (ret == TRANSLATE_SUCCESS) { 1351 tlb_set_page(cs, address & ~(tlb_size - 1), pa & ~(tlb_size - 1), 1352 prot, mmu_idx, tlb_size); 1353 return true; 1354 } else if (probe) { 1355 return false; 1356 } else { 1357 raise_mmu_exception(env, address, access_type, pmp_violation, 1358 first_stage_error, two_stage_lookup, 1359 two_stage_indirect_error); 1360 cpu_loop_exit_restore(cs, retaddr); 1361 } 1362 1363 return true; 1364 } 1365 1366 static target_ulong riscv_transformed_insn(CPURISCVState *env, 1367 target_ulong insn, 1368 target_ulong taddr) 1369 { 1370 target_ulong xinsn = 0; 1371 target_ulong access_rs1 = 0, access_imm = 0, access_size = 0; 1372 1373 /* 1374 * Only Quadrant 0 and Quadrant 2 of RVC instruction space need to 1375 * be uncompressed. The Quadrant 1 of RVC instruction space need 1376 * not be transformed because these instructions won't generate 1377 * any load/store trap. 1378 */ 1379 1380 if ((insn & 0x3) != 0x3) { 1381 /* Transform 16bit instruction into 32bit instruction */ 1382 switch (GET_C_OP(insn)) { 1383 case OPC_RISC_C_OP_QUAD0: /* Quadrant 0 */ 1384 switch (GET_C_FUNC(insn)) { 1385 case OPC_RISC_C_FUNC_FLD_LQ: 1386 if (riscv_cpu_xlen(env) != 128) { /* C.FLD (RV32/64) */ 1387 xinsn = OPC_RISC_FLD; 1388 xinsn = SET_RD(xinsn, GET_C_RS2S(insn)); 1389 access_rs1 = GET_C_RS1S(insn); 1390 access_imm = GET_C_LD_IMM(insn); 1391 access_size = 8; 1392 } 1393 break; 1394 case OPC_RISC_C_FUNC_LW: /* C.LW */ 1395 xinsn = OPC_RISC_LW; 1396 xinsn = SET_RD(xinsn, GET_C_RS2S(insn)); 1397 access_rs1 = GET_C_RS1S(insn); 1398 access_imm = GET_C_LW_IMM(insn); 1399 access_size = 4; 1400 break; 1401 case OPC_RISC_C_FUNC_FLW_LD: 1402 if (riscv_cpu_xlen(env) == 32) { /* C.FLW (RV32) */ 1403 xinsn = OPC_RISC_FLW; 1404 xinsn = SET_RD(xinsn, GET_C_RS2S(insn)); 1405 access_rs1 = GET_C_RS1S(insn); 1406 access_imm = GET_C_LW_IMM(insn); 1407 access_size = 4; 1408 } else { /* C.LD (RV64/RV128) */ 1409 xinsn = OPC_RISC_LD; 1410 xinsn = SET_RD(xinsn, GET_C_RS2S(insn)); 1411 access_rs1 = GET_C_RS1S(insn); 1412 access_imm = GET_C_LD_IMM(insn); 1413 access_size = 8; 1414 } 1415 break; 1416 case OPC_RISC_C_FUNC_FSD_SQ: 1417 if (riscv_cpu_xlen(env) != 128) { /* C.FSD (RV32/64) */ 1418 xinsn = OPC_RISC_FSD; 1419 xinsn = SET_RS2(xinsn, GET_C_RS2S(insn)); 1420 access_rs1 = GET_C_RS1S(insn); 1421 access_imm = GET_C_SD_IMM(insn); 1422 access_size = 8; 1423 } 1424 break; 1425 case OPC_RISC_C_FUNC_SW: /* C.SW */ 1426 xinsn = OPC_RISC_SW; 1427 xinsn = SET_RS2(xinsn, GET_C_RS2S(insn)); 1428 access_rs1 = GET_C_RS1S(insn); 1429 access_imm = GET_C_SW_IMM(insn); 1430 access_size = 4; 1431 break; 1432 case OPC_RISC_C_FUNC_FSW_SD: 1433 if (riscv_cpu_xlen(env) == 32) { /* C.FSW (RV32) */ 1434 xinsn = OPC_RISC_FSW; 1435 xinsn = SET_RS2(xinsn, GET_C_RS2S(insn)); 1436 access_rs1 = GET_C_RS1S(insn); 1437 access_imm = GET_C_SW_IMM(insn); 1438 access_size = 4; 1439 } else { /* C.SD (RV64/RV128) */ 1440 xinsn = OPC_RISC_SD; 1441 xinsn = SET_RS2(xinsn, GET_C_RS2S(insn)); 1442 access_rs1 = GET_C_RS1S(insn); 1443 access_imm = GET_C_SD_IMM(insn); 1444 access_size = 8; 1445 } 1446 break; 1447 default: 1448 break; 1449 } 1450 break; 1451 case OPC_RISC_C_OP_QUAD2: /* Quadrant 2 */ 1452 switch (GET_C_FUNC(insn)) { 1453 case OPC_RISC_C_FUNC_FLDSP_LQSP: 1454 if (riscv_cpu_xlen(env) != 128) { /* C.FLDSP (RV32/64) */ 1455 xinsn = OPC_RISC_FLD; 1456 xinsn = SET_RD(xinsn, GET_C_RD(insn)); 1457 access_rs1 = 2; 1458 access_imm = GET_C_LDSP_IMM(insn); 1459 access_size = 8; 1460 } 1461 break; 1462 case OPC_RISC_C_FUNC_LWSP: /* C.LWSP */ 1463 xinsn = OPC_RISC_LW; 1464 xinsn = SET_RD(xinsn, GET_C_RD(insn)); 1465 access_rs1 = 2; 1466 access_imm = GET_C_LWSP_IMM(insn); 1467 access_size = 4; 1468 break; 1469 case OPC_RISC_C_FUNC_FLWSP_LDSP: 1470 if (riscv_cpu_xlen(env) == 32) { /* C.FLWSP (RV32) */ 1471 xinsn = OPC_RISC_FLW; 1472 xinsn = SET_RD(xinsn, GET_C_RD(insn)); 1473 access_rs1 = 2; 1474 access_imm = GET_C_LWSP_IMM(insn); 1475 access_size = 4; 1476 } else { /* C.LDSP (RV64/RV128) */ 1477 xinsn = OPC_RISC_LD; 1478 xinsn = SET_RD(xinsn, GET_C_RD(insn)); 1479 access_rs1 = 2; 1480 access_imm = GET_C_LDSP_IMM(insn); 1481 access_size = 8; 1482 } 1483 break; 1484 case OPC_RISC_C_FUNC_FSDSP_SQSP: 1485 if (riscv_cpu_xlen(env) != 128) { /* C.FSDSP (RV32/64) */ 1486 xinsn = OPC_RISC_FSD; 1487 xinsn = SET_RS2(xinsn, GET_C_RS2(insn)); 1488 access_rs1 = 2; 1489 access_imm = GET_C_SDSP_IMM(insn); 1490 access_size = 8; 1491 } 1492 break; 1493 case OPC_RISC_C_FUNC_SWSP: /* C.SWSP */ 1494 xinsn = OPC_RISC_SW; 1495 xinsn = SET_RS2(xinsn, GET_C_RS2(insn)); 1496 access_rs1 = 2; 1497 access_imm = GET_C_SWSP_IMM(insn); 1498 access_size = 4; 1499 break; 1500 case 7: 1501 if (riscv_cpu_xlen(env) == 32) { /* C.FSWSP (RV32) */ 1502 xinsn = OPC_RISC_FSW; 1503 xinsn = SET_RS2(xinsn, GET_C_RS2(insn)); 1504 access_rs1 = 2; 1505 access_imm = GET_C_SWSP_IMM(insn); 1506 access_size = 4; 1507 } else { /* C.SDSP (RV64/RV128) */ 1508 xinsn = OPC_RISC_SD; 1509 xinsn = SET_RS2(xinsn, GET_C_RS2(insn)); 1510 access_rs1 = 2; 1511 access_imm = GET_C_SDSP_IMM(insn); 1512 access_size = 8; 1513 } 1514 break; 1515 default: 1516 break; 1517 } 1518 break; 1519 default: 1520 break; 1521 } 1522 1523 /* 1524 * Clear Bit1 of transformed instruction to indicate that 1525 * original insruction was a 16bit instruction 1526 */ 1527 xinsn &= ~((target_ulong)0x2); 1528 } else { 1529 /* Transform 32bit (or wider) instructions */ 1530 switch (MASK_OP_MAJOR(insn)) { 1531 case OPC_RISC_ATOMIC: 1532 xinsn = insn; 1533 access_rs1 = GET_RS1(insn); 1534 access_size = 1 << GET_FUNCT3(insn); 1535 break; 1536 case OPC_RISC_LOAD: 1537 case OPC_RISC_FP_LOAD: 1538 xinsn = SET_I_IMM(insn, 0); 1539 access_rs1 = GET_RS1(insn); 1540 access_imm = GET_IMM(insn); 1541 access_size = 1 << GET_FUNCT3(insn); 1542 break; 1543 case OPC_RISC_STORE: 1544 case OPC_RISC_FP_STORE: 1545 xinsn = SET_S_IMM(insn, 0); 1546 access_rs1 = GET_RS1(insn); 1547 access_imm = GET_STORE_IMM(insn); 1548 access_size = 1 << GET_FUNCT3(insn); 1549 break; 1550 case OPC_RISC_SYSTEM: 1551 if (MASK_OP_SYSTEM(insn) == OPC_RISC_HLVHSV) { 1552 xinsn = insn; 1553 access_rs1 = GET_RS1(insn); 1554 access_size = 1 << ((GET_FUNCT7(insn) >> 1) & 0x3); 1555 access_size = 1 << access_size; 1556 } 1557 break; 1558 default: 1559 break; 1560 } 1561 } 1562 1563 if (access_size) { 1564 xinsn = SET_RS1(xinsn, (taddr - (env->gpr[access_rs1] + access_imm)) & 1565 (access_size - 1)); 1566 } 1567 1568 return xinsn; 1569 } 1570 #endif /* !CONFIG_USER_ONLY */ 1571 1572 /* 1573 * Handle Traps 1574 * 1575 * Adapted from Spike's processor_t::take_trap. 1576 * 1577 */ 1578 void riscv_cpu_do_interrupt(CPUState *cs) 1579 { 1580 #if !defined(CONFIG_USER_ONLY) 1581 1582 RISCVCPU *cpu = RISCV_CPU(cs); 1583 CPURISCVState *env = &cpu->env; 1584 bool write_gva = false; 1585 uint64_t s; 1586 1587 /* 1588 * cs->exception is 32-bits wide unlike mcause which is XLEN-bits wide 1589 * so we mask off the MSB and separate into trap type and cause. 1590 */ 1591 bool async = !!(cs->exception_index & RISCV_EXCP_INT_FLAG); 1592 target_ulong cause = cs->exception_index & RISCV_EXCP_INT_MASK; 1593 uint64_t deleg = async ? env->mideleg : env->medeleg; 1594 target_ulong tval = 0; 1595 target_ulong tinst = 0; 1596 target_ulong htval = 0; 1597 target_ulong mtval2 = 0; 1598 1599 if (cause == RISCV_EXCP_SEMIHOST) { 1600 do_common_semihosting(cs); 1601 env->pc += 4; 1602 return; 1603 } 1604 1605 if (!async) { 1606 /* set tval to badaddr for traps with address information */ 1607 switch (cause) { 1608 case RISCV_EXCP_LOAD_GUEST_ACCESS_FAULT: 1609 case RISCV_EXCP_STORE_GUEST_AMO_ACCESS_FAULT: 1610 case RISCV_EXCP_LOAD_ADDR_MIS: 1611 case RISCV_EXCP_STORE_AMO_ADDR_MIS: 1612 case RISCV_EXCP_LOAD_ACCESS_FAULT: 1613 case RISCV_EXCP_STORE_AMO_ACCESS_FAULT: 1614 case RISCV_EXCP_LOAD_PAGE_FAULT: 1615 case RISCV_EXCP_STORE_PAGE_FAULT: 1616 write_gva = env->two_stage_lookup; 1617 tval = env->badaddr; 1618 if (env->two_stage_indirect_lookup) { 1619 /* 1620 * special pseudoinstruction for G-stage fault taken while 1621 * doing VS-stage page table walk. 1622 */ 1623 tinst = (riscv_cpu_xlen(env) == 32) ? 0x00002000 : 0x00003000; 1624 } else { 1625 /* 1626 * The "Addr. Offset" field in transformed instruction is 1627 * non-zero only for misaligned access. 1628 */ 1629 tinst = riscv_transformed_insn(env, env->bins, tval); 1630 } 1631 break; 1632 case RISCV_EXCP_INST_GUEST_PAGE_FAULT: 1633 case RISCV_EXCP_INST_ADDR_MIS: 1634 case RISCV_EXCP_INST_ACCESS_FAULT: 1635 case RISCV_EXCP_INST_PAGE_FAULT: 1636 write_gva = env->two_stage_lookup; 1637 tval = env->badaddr; 1638 if (env->two_stage_indirect_lookup) { 1639 /* 1640 * special pseudoinstruction for G-stage fault taken while 1641 * doing VS-stage page table walk. 1642 */ 1643 tinst = (riscv_cpu_xlen(env) == 32) ? 0x00002000 : 0x00003000; 1644 } 1645 break; 1646 case RISCV_EXCP_ILLEGAL_INST: 1647 case RISCV_EXCP_VIRT_INSTRUCTION_FAULT: 1648 tval = env->bins; 1649 break; 1650 case RISCV_EXCP_BREAKPOINT: 1651 if (cs->watchpoint_hit) { 1652 tval = cs->watchpoint_hit->hitaddr; 1653 cs->watchpoint_hit = NULL; 1654 } 1655 break; 1656 default: 1657 break; 1658 } 1659 /* ecall is dispatched as one cause so translate based on mode */ 1660 if (cause == RISCV_EXCP_U_ECALL) { 1661 assert(env->priv <= 3); 1662 1663 if (env->priv == PRV_M) { 1664 cause = RISCV_EXCP_M_ECALL; 1665 } else if (env->priv == PRV_S && env->virt_enabled) { 1666 cause = RISCV_EXCP_VS_ECALL; 1667 } else if (env->priv == PRV_S && !env->virt_enabled) { 1668 cause = RISCV_EXCP_S_ECALL; 1669 } else if (env->priv == PRV_U) { 1670 cause = RISCV_EXCP_U_ECALL; 1671 } 1672 } 1673 } 1674 1675 trace_riscv_trap(env->mhartid, async, cause, env->pc, tval, 1676 riscv_cpu_get_trap_name(cause, async)); 1677 1678 qemu_log_mask(CPU_LOG_INT, 1679 "%s: hart:"TARGET_FMT_ld", async:%d, cause:"TARGET_FMT_lx", " 1680 "epc:0x"TARGET_FMT_lx", tval:0x"TARGET_FMT_lx", desc=%s\n", 1681 __func__, env->mhartid, async, cause, env->pc, tval, 1682 riscv_cpu_get_trap_name(cause, async)); 1683 1684 if (env->priv <= PRV_S && 1685 cause < TARGET_LONG_BITS && ((deleg >> cause) & 1)) { 1686 /* handle the trap in S-mode */ 1687 if (riscv_has_ext(env, RVH)) { 1688 uint64_t hdeleg = async ? env->hideleg : env->hedeleg; 1689 1690 if (env->virt_enabled && ((hdeleg >> cause) & 1)) { 1691 /* Trap to VS mode */ 1692 /* 1693 * See if we need to adjust cause. Yes if its VS mode interrupt 1694 * no if hypervisor has delegated one of hs mode's interrupt 1695 */ 1696 if (cause == IRQ_VS_TIMER || cause == IRQ_VS_SOFT || 1697 cause == IRQ_VS_EXT) { 1698 cause = cause - 1; 1699 } 1700 write_gva = false; 1701 } else if (env->virt_enabled) { 1702 /* Trap into HS mode, from virt */ 1703 riscv_cpu_swap_hypervisor_regs(env); 1704 env->hstatus = set_field(env->hstatus, HSTATUS_SPVP, 1705 env->priv); 1706 env->hstatus = set_field(env->hstatus, HSTATUS_SPV, true); 1707 1708 htval = env->guest_phys_fault_addr; 1709 1710 riscv_cpu_set_virt_enabled(env, 0); 1711 } else { 1712 /* Trap into HS mode */ 1713 env->hstatus = set_field(env->hstatus, HSTATUS_SPV, false); 1714 htval = env->guest_phys_fault_addr; 1715 } 1716 env->hstatus = set_field(env->hstatus, HSTATUS_GVA, write_gva); 1717 } 1718 1719 s = env->mstatus; 1720 s = set_field(s, MSTATUS_SPIE, get_field(s, MSTATUS_SIE)); 1721 s = set_field(s, MSTATUS_SPP, env->priv); 1722 s = set_field(s, MSTATUS_SIE, 0); 1723 env->mstatus = s; 1724 env->scause = cause | ((target_ulong)async << (TARGET_LONG_BITS - 1)); 1725 env->sepc = env->pc; 1726 env->stval = tval; 1727 env->htval = htval; 1728 env->htinst = tinst; 1729 env->pc = (env->stvec >> 2 << 2) + 1730 ((async && (env->stvec & 3) == 1) ? cause * 4 : 0); 1731 riscv_cpu_set_mode(env, PRV_S); 1732 } else { 1733 /* handle the trap in M-mode */ 1734 if (riscv_has_ext(env, RVH)) { 1735 if (env->virt_enabled) { 1736 riscv_cpu_swap_hypervisor_regs(env); 1737 } 1738 env->mstatus = set_field(env->mstatus, MSTATUS_MPV, 1739 env->virt_enabled); 1740 if (env->virt_enabled && tval) { 1741 env->mstatus = set_field(env->mstatus, MSTATUS_GVA, 1); 1742 } 1743 1744 mtval2 = env->guest_phys_fault_addr; 1745 1746 /* Trapping to M mode, virt is disabled */ 1747 riscv_cpu_set_virt_enabled(env, 0); 1748 } 1749 1750 s = env->mstatus; 1751 s = set_field(s, MSTATUS_MPIE, get_field(s, MSTATUS_MIE)); 1752 s = set_field(s, MSTATUS_MPP, env->priv); 1753 s = set_field(s, MSTATUS_MIE, 0); 1754 env->mstatus = s; 1755 env->mcause = cause | ~(((target_ulong)-1) >> async); 1756 env->mepc = env->pc; 1757 env->mtval = tval; 1758 env->mtval2 = mtval2; 1759 env->mtinst = tinst; 1760 env->pc = (env->mtvec >> 2 << 2) + 1761 ((async && (env->mtvec & 3) == 1) ? cause * 4 : 0); 1762 riscv_cpu_set_mode(env, PRV_M); 1763 } 1764 1765 /* 1766 * NOTE: it is not necessary to yield load reservations here. It is only 1767 * necessary for an SC from "another hart" to cause a load reservation 1768 * to be yielded. Refer to the memory consistency model section of the 1769 * RISC-V ISA Specification. 1770 */ 1771 1772 env->two_stage_lookup = false; 1773 env->two_stage_indirect_lookup = false; 1774 #endif 1775 cs->exception_index = RISCV_EXCP_NONE; /* mark handled to qemu */ 1776 } 1777