/*
 * RISC-V CPU helpers for qemu.
 *
 * Copyright (c) 2016-2017 Sagar Karandikar, sagark@eecs.berkeley.edu
 * Copyright (c) 2017-2018 SiFive, Inc.
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms and conditions of the GNU General Public License,
 * version 2 or later, as published by the Free Software Foundation.
 *
 * This program is distributed in the hope it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
 * more details.
 *
 * You should have received a copy of the GNU General Public License along with
 * this program. If not, see <http://www.gnu.org/licenses/>.
 */

#include "qemu/osdep.h"
#include "qemu/log.h"
#include "qemu/main-loop.h"
#include "cpu.h"
#include "internals.h"
#include "pmu.h"
#include "exec/exec-all.h"
#include "instmap.h"
#include "tcg/tcg-op.h"
#include "trace.h"
#include "semihosting/common-semi.h"
#include "sysemu/cpu-timers.h"
#include "cpu_bits.h"
#include "debug.h"

int riscv_cpu_mmu_index(CPURISCVState *env, bool ifetch)
{
#ifdef CONFIG_USER_ONLY
    return 0;
#else
    if (ifetch) {
        return env->priv;
    }

    /* All priv -> mmu_idx mappings are here */
    int mode = env->priv;
    if (mode == PRV_M && get_field(env->mstatus, MSTATUS_MPRV)) {
        mode = get_field(env->mstatus, MSTATUS_MPP);
    }
    if (mode == PRV_S && get_field(env->mstatus, MSTATUS_SUM)) {
        return MMUIdx_S_SUM;
    }
    return mode;
#endif
}

void cpu_get_tb_cpu_state(CPURISCVState *env, target_ulong *pc,
                          target_ulong *cs_base, uint32_t *pflags)
{
    CPUState *cs = env_cpu(env);
    RISCVCPU *cpu = RISCV_CPU(cs);
    RISCVExtStatus fs, vs;
    uint32_t flags = 0;

    *pc = env->xl == MXL_RV32 ? env->pc & UINT32_MAX : env->pc;
    *cs_base = 0;

    if (cpu->cfg.ext_zve32f) {
        /*
         * If env->vl equals VLMAX, we can use generic vector operation
         * expanders (GVEC) to accelerate the vector operations.
         * However, as LMUL could be a fractional number, the maximum
         * vector size that can be operated on might be less than 8 bytes,
         * which is not supported by GVEC. So we set the vl_eq_vlmax flag
         * to true only when maxsz >= 8 bytes.
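         *
         * As a worked example (illustrative numbers, not from the
         * original comment): with VLEN = 128, vtype selecting SEW = 8
         * (vsew field 0) and LMUL = 1/8, vlmax = 128 / 8 / 8 = 2
         * elements, so maxsz = 2 << 0 = 2 bytes and GVEC cannot be
         * used. The same vtype with LMUL = 1 gives maxsz = 16 bytes,
         * so GVEC applies whenever vstart == 0 and vl == vlmax.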
         */
        uint32_t vlmax = vext_get_vlmax(cpu, env->vtype);
        uint32_t sew = FIELD_EX64(env->vtype, VTYPE, VSEW);
        uint32_t maxsz = vlmax << sew;
        bool vl_eq_vlmax = (env->vstart == 0) && (vlmax == env->vl) &&
                           (maxsz >= 8);
        flags = FIELD_DP32(flags, TB_FLAGS, VILL, env->vill);
        flags = FIELD_DP32(flags, TB_FLAGS, SEW, sew);
        flags = FIELD_DP32(flags, TB_FLAGS, LMUL,
                           FIELD_EX64(env->vtype, VTYPE, VLMUL));
        flags = FIELD_DP32(flags, TB_FLAGS, VL_EQ_VLMAX, vl_eq_vlmax);
        flags = FIELD_DP32(flags, TB_FLAGS, VTA,
                           FIELD_EX64(env->vtype, VTYPE, VTA));
        flags = FIELD_DP32(flags, TB_FLAGS, VMA,
                           FIELD_EX64(env->vtype, VTYPE, VMA));
        flags = FIELD_DP32(flags, TB_FLAGS, VSTART_EQ_ZERO, env->vstart == 0);
    } else {
        flags = FIELD_DP32(flags, TB_FLAGS, VILL, 1);
    }

#ifdef CONFIG_USER_ONLY
    fs = EXT_STATUS_DIRTY;
    vs = EXT_STATUS_DIRTY;
#else
    flags = FIELD_DP32(flags, TB_FLAGS, PRIV, env->priv);

    flags |= cpu_mmu_index(env, 0);
    fs = get_field(env->mstatus, MSTATUS_FS);
    vs = get_field(env->mstatus, MSTATUS_VS);

    if (env->virt_enabled) {
        flags = FIELD_DP32(flags, TB_FLAGS, VIRT_ENABLED, 1);
        /*
         * Merge DISABLED and !DIRTY states using MIN.
         * We will set both fields when dirtying.
         */
        fs = MIN(fs, get_field(env->mstatus_hs, MSTATUS_FS));
        vs = MIN(vs, get_field(env->mstatus_hs, MSTATUS_VS));
    }

    if (cpu->cfg.debug && !icount_enabled()) {
        flags = FIELD_DP32(flags, TB_FLAGS, ITRIGGER, env->itrigger_enabled);
    }
#endif

    flags = FIELD_DP32(flags, TB_FLAGS, FS, fs);
    flags = FIELD_DP32(flags, TB_FLAGS, VS, vs);
    flags = FIELD_DP32(flags, TB_FLAGS, XL, env->xl);
    if (env->cur_pmmask < (env->xl == MXL_RV32 ? UINT32_MAX : UINT64_MAX)) {
        flags = FIELD_DP32(flags, TB_FLAGS, PM_MASK_ENABLED, 1);
    }
    if (env->cur_pmbase != 0) {
        flags = FIELD_DP32(flags, TB_FLAGS, PM_BASE_ENABLED, 1);
    }

    *pflags = flags;
}

void riscv_cpu_update_mask(CPURISCVState *env)
{
    target_ulong mask = -1, base = 0;
    /*
     * TODO: Current RVJ spec does not specify
     * how the extension interacts with XLEN.
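     *
     * The translator is expected to apply these values to data
     * addresses roughly as addr = (addr & cur_pmmask) | cur_pmbase,
     * so, for example, a mask of 0x00ffffffffffffff with base 0 would
     * make loads and stores ignore the top byte of the pointer.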
     */
#ifndef CONFIG_USER_ONLY
    if (riscv_has_ext(env, RVJ)) {
        switch (env->priv) {
        case PRV_M:
            if (env->mmte & M_PM_ENABLE) {
                mask = env->mpmmask;
                base = env->mpmbase;
            }
            break;
        case PRV_S:
            if (env->mmte & S_PM_ENABLE) {
                mask = env->spmmask;
                base = env->spmbase;
            }
            break;
        case PRV_U:
            if (env->mmte & U_PM_ENABLE) {
                mask = env->upmmask;
                base = env->upmbase;
            }
            break;
        default:
            g_assert_not_reached();
        }
    }
#endif
    if (env->xl == MXL_RV32) {
        env->cur_pmmask = mask & UINT32_MAX;
        env->cur_pmbase = base & UINT32_MAX;
    } else {
        env->cur_pmmask = mask;
        env->cur_pmbase = base;
    }
}

#ifndef CONFIG_USER_ONLY

/*
 * The HS-mode is allowed to configure priority only for the
 * following VS-mode local interrupts:
 *
 * 0  (Reserved interrupt, reads as zero)
 * 1  Supervisor software interrupt
 * 4  (Reserved interrupt, reads as zero)
 * 5  Supervisor timer interrupt
 * 8  (Reserved interrupt, reads as zero)
 * 13 (Reserved interrupt)
 * 14 "
 * 15 "
 * 16 "
 * 17 "
 * 18 "
 * 19 "
 * 20 "
 * 21 "
 * 22 "
 * 23 "
 */

static const int hviprio_index2irq[] = {
    0, 1, 4, 5, 8, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23 };
static const int hviprio_index2rdzero[] = {
    1, 0, 1, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0 };

int riscv_cpu_hviprio_index2irq(int index, int *out_irq, int *out_rdzero)
{
    if (index < 0 || ARRAY_SIZE(hviprio_index2irq) <= index) {
        return -EINVAL;
    }

    if (out_irq) {
        *out_irq = hviprio_index2irq[index];
    }

    if (out_rdzero) {
        *out_rdzero = hviprio_index2rdzero[index];
    }

    return 0;
}

/*
 * Default priorities of local interrupts are defined in the
 * RISC-V Advanced Interrupt Architecture specification.
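 * (In the table below, values in parentheses are the same interrupt
 * numbers in hex.)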
 *
 * ----------------------------------------------------------------
 * Default  |
 * Priority | Major Interrupt Numbers
 * ----------------------------------------------------------------
 *  Highest | 47, 23, 46, 45, 22, 44,
 *          | 43, 21, 42, 41, 20, 40
 *          |
 *          | 11 (0b), 3 (03), 7 (07)
 *          | 9 (09), 1 (01), 5 (05)
 *          | 12 (0c)
 *          | 10 (0a), 2 (02), 6 (06)
 *          |
 *          | 39, 19, 38, 37, 18, 36,
 *  Lowest  | 35, 17, 34, 33, 16, 32
 * ----------------------------------------------------------------
 */
static const uint8_t default_iprio[64] = {
    /* Custom interrupts 48 to 63 */
    [63] = IPRIO_MMAXIPRIO,
    [62] = IPRIO_MMAXIPRIO,
    [61] = IPRIO_MMAXIPRIO,
    [60] = IPRIO_MMAXIPRIO,
    [59] = IPRIO_MMAXIPRIO,
    [58] = IPRIO_MMAXIPRIO,
    [57] = IPRIO_MMAXIPRIO,
    [56] = IPRIO_MMAXIPRIO,
    [55] = IPRIO_MMAXIPRIO,
    [54] = IPRIO_MMAXIPRIO,
    [53] = IPRIO_MMAXIPRIO,
    [52] = IPRIO_MMAXIPRIO,
    [51] = IPRIO_MMAXIPRIO,
    [50] = IPRIO_MMAXIPRIO,
    [49] = IPRIO_MMAXIPRIO,
    [48] = IPRIO_MMAXIPRIO,

    /* Custom interrupts 24 to 31 */
    [31] = IPRIO_MMAXIPRIO,
    [30] = IPRIO_MMAXIPRIO,
    [29] = IPRIO_MMAXIPRIO,
    [28] = IPRIO_MMAXIPRIO,
    [27] = IPRIO_MMAXIPRIO,
    [26] = IPRIO_MMAXIPRIO,
    [25] = IPRIO_MMAXIPRIO,
    [24] = IPRIO_MMAXIPRIO,

    [47] = IPRIO_DEFAULT_UPPER,
    [23] = IPRIO_DEFAULT_UPPER + 1,
    [46] = IPRIO_DEFAULT_UPPER + 2,
    [45] = IPRIO_DEFAULT_UPPER + 3,
    [22] = IPRIO_DEFAULT_UPPER + 4,
    [44] = IPRIO_DEFAULT_UPPER + 5,

    [43] = IPRIO_DEFAULT_UPPER + 6,
    [21] = IPRIO_DEFAULT_UPPER + 7,
    [42] = IPRIO_DEFAULT_UPPER + 8,
    [41] = IPRIO_DEFAULT_UPPER + 9,
    [20] = IPRIO_DEFAULT_UPPER + 10,
    [40] = IPRIO_DEFAULT_UPPER + 11,

    [11] = IPRIO_DEFAULT_M,
    [3]  = IPRIO_DEFAULT_M + 1,
    [7]  = IPRIO_DEFAULT_M + 2,

    [9]  = IPRIO_DEFAULT_S,
    [1]  = IPRIO_DEFAULT_S + 1,
    [5]  = IPRIO_DEFAULT_S + 2,

    [12] = IPRIO_DEFAULT_SGEXT,

    [10] = IPRIO_DEFAULT_VS,
    [2]  = IPRIO_DEFAULT_VS + 1,
    [6]  = IPRIO_DEFAULT_VS + 2,

    [39] = IPRIO_DEFAULT_LOWER,
    [19] = IPRIO_DEFAULT_LOWER + 1,
    [38] = IPRIO_DEFAULT_LOWER + 2,
    [37] = IPRIO_DEFAULT_LOWER + 3,
    [18] = IPRIO_DEFAULT_LOWER + 4,
    [36] = IPRIO_DEFAULT_LOWER + 5,

    [35] = IPRIO_DEFAULT_LOWER + 6,
    [17] = IPRIO_DEFAULT_LOWER + 7,
    [34] = IPRIO_DEFAULT_LOWER + 8,
    [33] = IPRIO_DEFAULT_LOWER + 9,
    [16] = IPRIO_DEFAULT_LOWER + 10,
    [32] = IPRIO_DEFAULT_LOWER + 11,
};

uint8_t riscv_cpu_default_priority(int irq)
{
    if (irq < 0 || irq > 63) {
        return IPRIO_MMAXIPRIO;
    }

    return default_iprio[irq] ? default_iprio[irq] : IPRIO_MMAXIPRIO;
}

static int riscv_cpu_pending_to_irq(CPURISCVState *env,
                                    int extirq, unsigned int extirq_def_prio,
                                    uint64_t pending, uint8_t *iprio)
{
    int irq, best_irq = RISCV_EXCP_NONE;
    unsigned int prio, best_prio = UINT_MAX;

    if (!pending) {
        return RISCV_EXCP_NONE;
    }

    irq = ctz64(pending);
    if (!((extirq == IRQ_M_EXT) ? riscv_cpu_cfg(env)->ext_smaia :
                                  riscv_cpu_cfg(env)->ext_ssaia)) {
        return irq;
    }

    pending = pending >> irq;
    while (pending) {
        prio = iprio[irq];
        if (!prio) {
            if (irq == extirq) {
                prio = extirq_def_prio;
            } else {
                prio = (riscv_cpu_default_priority(irq) < extirq_def_prio) ?
                       1 : IPRIO_MMAXIPRIO;
            }
        }
        if ((pending & 0x1) && (prio <= best_prio)) {
            best_irq = irq;
            best_prio = prio;
        }
        irq++;
        pending = pending >> 1;
    }

    return best_irq;
}

uint64_t riscv_cpu_all_pending(CPURISCVState *env)
{
    uint32_t gein = get_field(env->hstatus, HSTATUS_VGEIN);
    uint64_t vsgein = (env->hgeip & (1ULL << gein)) ? MIP_VSEIP : 0;
    uint64_t vstip = (env->vstime_irq) ? MIP_VSTIP : 0;

    return (env->mip | vsgein | vstip) & env->mie;
}

int riscv_cpu_mirq_pending(CPURISCVState *env)
{
    uint64_t irqs = riscv_cpu_all_pending(env) & ~env->mideleg &
                    ~(MIP_SGEIP | MIP_VSSIP | MIP_VSTIP | MIP_VSEIP);

    return riscv_cpu_pending_to_irq(env, IRQ_M_EXT, IPRIO_DEFAULT_M,
                                    irqs, env->miprio);
}

int riscv_cpu_sirq_pending(CPURISCVState *env)
{
    uint64_t irqs = riscv_cpu_all_pending(env) & env->mideleg &
                    ~(MIP_VSSIP | MIP_VSTIP | MIP_VSEIP);

    return riscv_cpu_pending_to_irq(env, IRQ_S_EXT, IPRIO_DEFAULT_S,
                                    irqs, env->siprio);
}

int riscv_cpu_vsirq_pending(CPURISCVState *env)
{
    uint64_t irqs = riscv_cpu_all_pending(env) & env->mideleg &
                    (MIP_VSSIP | MIP_VSTIP | MIP_VSEIP);

    return riscv_cpu_pending_to_irq(env, IRQ_S_EXT, IPRIO_DEFAULT_S,
                                    irqs >> 1, env->hviprio);
}

static int riscv_cpu_local_irq_pending(CPURISCVState *env)
{
    int virq;
    uint64_t irqs, pending, mie, hsie, vsie;

    /* Determine interrupt enable state of all privilege modes */
    if (env->virt_enabled) {
        mie = 1;
        hsie = 1;
        vsie = (env->priv < PRV_S) ||
               (env->priv == PRV_S && get_field(env->mstatus, MSTATUS_SIE));
    } else {
        mie = (env->priv < PRV_M) ||
              (env->priv == PRV_M && get_field(env->mstatus, MSTATUS_MIE));
        hsie = (env->priv < PRV_S) ||
               (env->priv == PRV_S && get_field(env->mstatus, MSTATUS_SIE));
        vsie = 0;
    }

    /* Determine all pending interrupts */
    pending = riscv_cpu_all_pending(env);

    /* Check M-mode interrupts */
    irqs = pending & ~env->mideleg & -mie;
    if (irqs) {
        return riscv_cpu_pending_to_irq(env, IRQ_M_EXT, IPRIO_DEFAULT_M,
                                        irqs, env->miprio);
    }

    /* Check HS-mode interrupts */
    irqs = pending & env->mideleg & ~env->hideleg & -hsie;
    if (irqs) {
        return riscv_cpu_pending_to_irq(env, IRQ_S_EXT, IPRIO_DEFAULT_S,
                                        irqs, env->siprio);
    }

    /* Check VS-mode interrupts */
    irqs = pending & env->mideleg & env->hideleg & -vsie;
    if (irqs) {
        virq = riscv_cpu_pending_to_irq(env, IRQ_S_EXT, IPRIO_DEFAULT_S,
                                        irqs >> 1, env->hviprio);
        return (virq <= 0) ? virq : virq + 1;
    }

    /* Indicate no pending interrupt */
    return RISCV_EXCP_NONE;
}

bool riscv_cpu_exec_interrupt(CPUState *cs, int interrupt_request)
{
    if (interrupt_request & CPU_INTERRUPT_HARD) {
        RISCVCPU *cpu = RISCV_CPU(cs);
        CPURISCVState *env = &cpu->env;
        int interruptno = riscv_cpu_local_irq_pending(env);
        if (interruptno >= 0) {
            cs->exception_index = RISCV_EXCP_INT_FLAG | interruptno;
            riscv_cpu_do_interrupt(cs);
            return true;
        }
    }
    return false;
}

/* Return true if floating point support is currently enabled */
bool riscv_cpu_fp_enabled(CPURISCVState *env)
{
    if (env->mstatus & MSTATUS_FS) {
        if (env->virt_enabled && !(env->mstatus_hs & MSTATUS_FS)) {
            return false;
        }
        return true;
    }

    return false;
}

/* Return true if vector support is currently enabled */
bool riscv_cpu_vector_enabled(CPURISCVState *env)
{
    if (env->mstatus & MSTATUS_VS) {
        if (env->virt_enabled && !(env->mstatus_hs & MSTATUS_VS)) {
            return false;
        }
        return true;
    }

    return false;
}

void riscv_cpu_swap_hypervisor_regs(CPURISCVState *env)
{
    uint64_t mstatus_mask = MSTATUS_MXR | MSTATUS_SUM |
                            MSTATUS_SPP | MSTATUS_SPIE | MSTATUS_SIE |
                            MSTATUS64_UXL | MSTATUS_VS;

    if (riscv_has_ext(env, RVF)) {
        mstatus_mask |= MSTATUS_FS;
    }
    bool current_virt = env->virt_enabled;

    g_assert(riscv_has_ext(env, RVH));

    if (current_virt) {
        /* Current V=1 and we are about to change to V=0 */
        env->vsstatus = env->mstatus & mstatus_mask;
        env->mstatus &= ~mstatus_mask;
        env->mstatus |= env->mstatus_hs;

        env->vstvec = env->stvec;
        env->stvec = env->stvec_hs;

        env->vsscratch = env->sscratch;
        env->sscratch = env->sscratch_hs;

        env->vsepc = env->sepc;
        env->sepc = env->sepc_hs;

        env->vscause = env->scause;
        env->scause = env->scause_hs;

        env->vstval = env->stval;
        env->stval = env->stval_hs;

        env->vsatp = env->satp;
        env->satp = env->satp_hs;
    } else {
        /* Current V=0 and we are about to change to V=1 */
        env->mstatus_hs = env->mstatus & mstatus_mask;
        env->mstatus &= ~mstatus_mask;
        env->mstatus |= env->vsstatus;

        env->stvec_hs = env->stvec;
        env->stvec = env->vstvec;

        env->sscratch_hs = env->sscratch;
        env->sscratch = env->vsscratch;

        env->sepc_hs = env->sepc;
        env->sepc = env->vsepc;

        env->scause_hs = env->scause;
        env->scause = env->vscause;

        env->stval_hs = env->stval;
        env->stval = env->vstval;

        env->satp_hs = env->satp;
        env->satp = env->vsatp;
    }
}

target_ulong riscv_cpu_get_geilen(CPURISCVState *env)
{
    if (!riscv_has_ext(env, RVH)) {
        return 0;
    }

    return env->geilen;
}

void riscv_cpu_set_geilen(CPURISCVState *env, target_ulong geilen)
{
    if (!riscv_has_ext(env, RVH)) {
        return;
    }

    if (geilen > (TARGET_LONG_BITS - 1)) {
        return;
    }

    env->geilen = geilen;
}

/* This function can only be called to set virt when RVH is enabled */
void riscv_cpu_set_virt_enabled(CPURISCVState *env, bool enable)
{
    /* Flush the TLB on all virt mode changes. */
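    /*
     * Unlike the privilege mode, which is encoded in mmu_idx (compare
     * the comment in riscv_cpu_set_mode()), the virtualization mode is
     * not, so translations cached while V=0 could otherwise be reused
     * after entering V=1, and vice versa.
     */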
    if (env->virt_enabled != enable) {
        tlb_flush(env_cpu(env));
    }

    env->virt_enabled = enable;

    if (enable) {
        /*
         * The guest external interrupts from an interrupt controller are
         * delivered only when the Guest/VM is running (i.e. V=1). This means
         * any guest external interrupt which is triggered while the Guest/VM
         * is not running (i.e. V=0) will be missed on QEMU, resulting in a
         * guest with sluggish response to serial console input and other
         * I/O events.
         *
         * To solve this, we check and inject interrupts after setting V=1.
         */
        riscv_cpu_update_mip(env, 0, 0);
    }
}

int riscv_cpu_claim_interrupts(RISCVCPU *cpu, uint64_t interrupts)
{
    CPURISCVState *env = &cpu->env;
    if (env->miclaim & interrupts) {
        return -1;
    } else {
        env->miclaim |= interrupts;
        return 0;
    }
}

uint64_t riscv_cpu_update_mip(CPURISCVState *env, uint64_t mask,
                              uint64_t value)
{
    CPUState *cs = env_cpu(env);
    uint64_t gein, vsgein = 0, vstip = 0, old = env->mip;

    if (env->virt_enabled) {
        gein = get_field(env->hstatus, HSTATUS_VGEIN);
        vsgein = (env->hgeip & (1ULL << gein)) ? MIP_VSEIP : 0;
    }

    vstip = env->vstime_irq ? MIP_VSTIP : 0;

    QEMU_IOTHREAD_LOCK_GUARD();

    env->mip = (env->mip & ~mask) | (value & mask);

    if (env->mip | vsgein | vstip) {
        cpu_interrupt(cs, CPU_INTERRUPT_HARD);
    } else {
        cpu_reset_interrupt(cs, CPU_INTERRUPT_HARD);
    }

    return old;
}

void riscv_cpu_set_rdtime_fn(CPURISCVState *env, uint64_t (*fn)(void *),
                             void *arg)
{
    env->rdtime_fn = fn;
    env->rdtime_fn_arg = arg;
}

void riscv_cpu_set_aia_ireg_rmw_fn(CPURISCVState *env, uint32_t priv,
                                   int (*rmw_fn)(void *arg,
                                                 target_ulong reg,
                                                 target_ulong *val,
                                                 target_ulong new_val,
                                                 target_ulong write_mask),
                                   void *rmw_fn_arg)
{
    if (priv <= PRV_M) {
        env->aia_ireg_rmw_fn[priv] = rmw_fn;
        env->aia_ireg_rmw_fn_arg[priv] = rmw_fn_arg;
    }
}

void riscv_cpu_set_mode(CPURISCVState *env, target_ulong newpriv)
{
    g_assert(newpriv <= PRV_M && newpriv != PRV_RESERVED);

    if (icount_enabled() && newpriv != env->priv) {
        riscv_itrigger_update_priv(env);
    }
    /* tlb_flush is unnecessary as mode is contained in mmu_idx */
    env->priv = newpriv;
    env->xl = cpu_recompute_xl(env);
    riscv_cpu_update_mask(env);

    /*
     * Clear the load reservation - otherwise a reservation placed in one
     * context/process can be used by another, resulting in an SC succeeding
     * incorrectly. Version 2.2 of the ISA specification explicitly requires
     * this behaviour, while later revisions say that the kernel "should" use
     * an SC instruction to force the yielding of a load reservation on a
     * preemptive context switch. As a result, do both.
     */
    env->load_res = -1;
}

/*
 * get_physical_address_pmp - check PMP permission for this physical address
 *
 * Match the PMP region and check permission for this physical address and
 * its TLB page. Returns 0 if the permission checking was successful
 *
 * @env: CPURISCVState
 * @prot: The returned protection attributes
 * @tlb_size: TLB page size containing addr. It could be modified after PMP
 *            permission checking. NULL if no TLB page should be set up
 *            for addr.
 * @addr: The physical address whose permissions are to be checked
 * @access_type: The type of MMU access
 * @mode: Indicates current privilege level.
 */
static int get_physical_address_pmp(CPURISCVState *env, int *prot,
                                    target_ulong *tlb_size, hwaddr addr,
                                    int size, MMUAccessType access_type,
                                    int mode)
{
    pmp_priv_t pmp_priv;
    int pmp_index = -1;

    if (!riscv_cpu_cfg(env)->pmp) {
        *prot = PAGE_READ | PAGE_WRITE | PAGE_EXEC;
        return TRANSLATE_SUCCESS;
    }

    pmp_index = pmp_hart_has_privs(env, addr, size, 1 << access_type,
                                   &pmp_priv, mode);
    if (pmp_index < 0) {
        *prot = 0;
        return TRANSLATE_PMP_FAIL;
    }

    *prot = pmp_priv_to_page_prot(pmp_priv);
    if ((tlb_size != NULL) && pmp_index != MAX_RISCV_PMPS) {
        target_ulong tlb_sa = addr & ~(TARGET_PAGE_SIZE - 1);
        target_ulong tlb_ea = tlb_sa + TARGET_PAGE_SIZE - 1;

        *tlb_size = pmp_get_tlb_size(env, pmp_index, tlb_sa, tlb_ea);
    }

    return TRANSLATE_SUCCESS;
}

/*
 * get_physical_address - get the physical address for this virtual address
 *
 * Do a page table walk to obtain the physical address corresponding to a
 * virtual address. Returns 0 if the translation was successful
 *
 * Adapted from Spike's mmu_t::translate and mmu_t::walk
 *
 * @env: CPURISCVState
 * @physical: This will be set to the calculated physical address
 * @prot: The returned protection attributes
 * @addr: The virtual address or guest physical address to be translated
 * @fault_pte_addr: If not NULL, this will be set to the fault pte address
 *                  when an error occurs on pte address translation.
 *                  This will already be shifted to match htval.
 * @access_type: The type of MMU access
 * @mmu_idx: Indicates current privilege level
 * @first_stage: Are we in first stage translation?
 *               Second stage is used for hypervisor guest translation
 * @two_stage: Are we going to perform two stage translation
 * @is_debug: Is this access from a debugger or the monitor?
 */
static int get_physical_address(CPURISCVState *env, hwaddr *physical,
                                int *prot, vaddr addr,
                                target_ulong *fault_pte_addr,
                                int access_type, int mmu_idx,
                                bool first_stage, bool two_stage,
                                bool is_debug)
{
    /*
     * NOTE: the env->pc value visible here will not be
     * correct, but the value visible to the exception handler
     * (riscv_cpu_do_interrupt) is correct
     */
    MemTxResult res;
    MemTxAttrs attrs = MEMTXATTRS_UNSPECIFIED;
    int mode = mmuidx_priv(mmu_idx);
    bool use_background = false;
    hwaddr ppn;
    int napot_bits = 0;
    target_ulong napot_mask;

    /*
     * Check if we should use the background registers for the two
     * stage translation. We don't need to check if we actually need
     * two stage translation as that happened before this function
     * was called. Background registers will be used if the guest has
     * forced a two stage translation to be on (in HS or M mode).
     */
    if (!env->virt_enabled && two_stage) {
        use_background = true;
    }

    /*
     * MPRV does not affect the virtual-machine load/store
     * instructions, HLV, HLVX, and HSV.
     */
    if (mmuidx_2stage(mmu_idx)) {
        mode = get_field(env->hstatus, HSTATUS_SPVP);
    }

    if (first_stage == false) {
        /*
         * We are in stage 2 translation, this is similar to stage 1.
         * Stage 2 is always taken as U-mode
         */
        mode = PRV_U;
    }

    if (mode == PRV_M || !riscv_cpu_cfg(env)->mmu) {
        *physical = addr;
        *prot = PAGE_READ | PAGE_WRITE | PAGE_EXEC;
        return TRANSLATE_SUCCESS;
    }

    *prot = 0;

    hwaddr base;
    int levels, ptidxbits, ptesize, vm, sum, mxr, widened;

    if (first_stage == true) {
        mxr = get_field(env->mstatus, MSTATUS_MXR);
    } else {
        mxr = get_field(env->vsstatus, MSTATUS_MXR);
    }

    if (first_stage == true) {
        if (use_background) {
            if (riscv_cpu_mxl(env) == MXL_RV32) {
                base = (hwaddr)get_field(env->vsatp, SATP32_PPN) << PGSHIFT;
                vm = get_field(env->vsatp, SATP32_MODE);
            } else {
                base = (hwaddr)get_field(env->vsatp, SATP64_PPN) << PGSHIFT;
                vm = get_field(env->vsatp, SATP64_MODE);
            }
        } else {
            if (riscv_cpu_mxl(env) == MXL_RV32) {
                base = (hwaddr)get_field(env->satp, SATP32_PPN) << PGSHIFT;
                vm = get_field(env->satp, SATP32_MODE);
            } else {
                base = (hwaddr)get_field(env->satp, SATP64_PPN) << PGSHIFT;
                vm = get_field(env->satp, SATP64_MODE);
            }
        }
        widened = 0;
    } else {
        if (riscv_cpu_mxl(env) == MXL_RV32) {
            base = (hwaddr)get_field(env->hgatp, SATP32_PPN) << PGSHIFT;
            vm = get_field(env->hgatp, SATP32_MODE);
        } else {
            base = (hwaddr)get_field(env->hgatp, SATP64_PPN) << PGSHIFT;
            vm = get_field(env->hgatp, SATP64_MODE);
        }
        widened = 2;
    }
    /* status.SUM will be ignored if executing on the background registers */
    sum = mmuidx_sum(mmu_idx) || use_background || is_debug;
    switch (vm) {
    case VM_1_10_SV32:
        levels = 2; ptidxbits = 10; ptesize = 4; break;
    case VM_1_10_SV39:
        levels = 3; ptidxbits = 9; ptesize = 8; break;
    case VM_1_10_SV48:
        levels = 4; ptidxbits = 9; ptesize = 8; break;
    case VM_1_10_SV57:
        levels = 5; ptidxbits = 9; ptesize = 8; break;
    case VM_1_10_MBARE:
        *physical = addr;
        *prot = PAGE_READ | PAGE_WRITE | PAGE_EXEC;
        return TRANSLATE_SUCCESS;
    default:
        g_assert_not_reached();
    }

    CPUState *cs = env_cpu(env);
    int va_bits = PGSHIFT + levels * ptidxbits + widened;
    target_ulong mask, masked_msbs;

    if (TARGET_LONG_BITS > (va_bits - 1)) {
        mask = (1L << (TARGET_LONG_BITS - (va_bits - 1))) - 1;
    } else {
        mask = 0;
    }
    masked_msbs = (addr >> (va_bits - 1)) & mask;

    if (masked_msbs != 0 && masked_msbs != mask) {
        return TRANSLATE_FAIL;
    }

    int ptshift = (levels - 1) * ptidxbits;
    int i;

#if !TCG_OVERSIZED_GUEST
restart:
#endif
    for (i = 0; i < levels; i++, ptshift -= ptidxbits) {
        target_ulong idx;
        if (i == 0) {
            idx = (addr >> (PGSHIFT + ptshift)) &
                  ((1 << (ptidxbits + widened)) - 1);
        } else {
            idx = (addr >> (PGSHIFT + ptshift)) &
                  ((1 << ptidxbits) - 1);
        }

        /* check that physical address of PTE is legal */
        hwaddr pte_addr;

        if (two_stage && first_stage) {
            int vbase_prot;
            hwaddr vbase;

            /* Do the second stage translation on the base PTE address. */
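            /*
             * At this point base is a guest-physical address: the
             * VS-stage page table itself lives in guest-physical
             * memory, so every PTE address must first be run through
             * the G-stage (first_stage = false) before it can be
             * loaded. A failure below is reported as
             * TRANSLATE_G_STAGE_FAIL, with the faulting PTE address
             * shifted right by 2 to match htval.
             */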
            int vbase_ret = get_physical_address(env, &vbase, &vbase_prot,
                                                 base, NULL, MMU_DATA_LOAD,
                                                 mmu_idx, false, true,
                                                 is_debug);

            if (vbase_ret != TRANSLATE_SUCCESS) {
                if (fault_pte_addr) {
                    *fault_pte_addr = (base + idx * ptesize) >> 2;
                }
                return TRANSLATE_G_STAGE_FAIL;
            }

            pte_addr = vbase + idx * ptesize;
        } else {
            pte_addr = base + idx * ptesize;
        }

        int pmp_prot;
        int pmp_ret = get_physical_address_pmp(env, &pmp_prot, NULL, pte_addr,
                                               sizeof(target_ulong),
                                               MMU_DATA_LOAD, PRV_S);
        if (pmp_ret != TRANSLATE_SUCCESS) {
            return TRANSLATE_PMP_FAIL;
        }

        target_ulong pte;
        if (riscv_cpu_mxl(env) == MXL_RV32) {
            pte = address_space_ldl(cs->as, pte_addr, attrs, &res);
        } else {
            pte = address_space_ldq(cs->as, pte_addr, attrs, &res);
        }

        if (res != MEMTX_OK) {
            return TRANSLATE_FAIL;
        }

        bool pbmte = env->menvcfg & MENVCFG_PBMTE;
        bool hade = env->menvcfg & MENVCFG_HADE;

        if (first_stage && two_stage && env->virt_enabled) {
            pbmte = pbmte && (env->henvcfg & HENVCFG_PBMTE);
            hade = hade && (env->henvcfg & HENVCFG_HADE);
        }

        if (riscv_cpu_sxl(env) == MXL_RV32) {
            ppn = pte >> PTE_PPN_SHIFT;
        } else if (pbmte || riscv_cpu_cfg(env)->ext_svnapot) {
            ppn = (pte & (target_ulong)PTE_PPN_MASK) >> PTE_PPN_SHIFT;
        } else {
            ppn = pte >> PTE_PPN_SHIFT;
            if ((pte & ~(target_ulong)PTE_PPN_MASK) >> PTE_PPN_SHIFT) {
                return TRANSLATE_FAIL;
            }
        }

        if (!(pte & PTE_V)) {
            /* Invalid PTE */
            return TRANSLATE_FAIL;
        } else if (!pbmte && (pte & PTE_PBMT)) {
            return TRANSLATE_FAIL;
        } else if (!(pte & (PTE_R | PTE_W | PTE_X))) {
            /* Inner PTE, continue walking */
            if (pte & (PTE_D | PTE_A | PTE_U | PTE_ATTR)) {
                return TRANSLATE_FAIL;
            }
            base = ppn << PGSHIFT;
        } else if ((pte & (PTE_R | PTE_W | PTE_X)) == PTE_W) {
            /* Reserved leaf PTE flags: PTE_W */
            return TRANSLATE_FAIL;
        } else if ((pte & (PTE_R | PTE_W | PTE_X)) == (PTE_W | PTE_X)) {
            /* Reserved leaf PTE flags: PTE_W + PTE_X */
            return TRANSLATE_FAIL;
        } else if ((pte & PTE_U) && ((mode != PRV_U) &&
                   (!sum || access_type == MMU_INST_FETCH))) {
            /*
             * User PTE flags when not U mode and mstatus.SUM is not set,
             * or the access type is an instruction fetch
             */
            return TRANSLATE_FAIL;
        } else if (!(pte & PTE_U) && (mode != PRV_S)) {
            /* Supervisor PTE flags when not S mode */
            return TRANSLATE_FAIL;
        } else if (ppn & ((1ULL << ptshift) - 1)) {
            /* Misaligned PPN */
            return TRANSLATE_FAIL;
        } else if (access_type == MMU_DATA_LOAD && !((pte & PTE_R) ||
                   ((pte & PTE_X) && mxr))) {
            /* Read access check failed */
            return TRANSLATE_FAIL;
        } else if (access_type == MMU_DATA_STORE && !(pte & PTE_W)) {
            /* Write access check failed */
            return TRANSLATE_FAIL;
        } else if (access_type == MMU_INST_FETCH && !(pte & PTE_X)) {
            /* Fetch access check failed */
            return TRANSLATE_FAIL;
        } else {
            /* if necessary, set accessed and dirty bits. */
            target_ulong updated_pte = pte | PTE_A |
                (access_type == MMU_DATA_STORE ? PTE_D : 0);

            /* Page table updates need to be atomic with MTTCG enabled */
            if (updated_pte != pte) {
                if (!hade) {
                    return TRANSLATE_FAIL;
                }

                /*
                 * - if accessed or dirty bits need updating, and the PTE is
                 *   in RAM, then we do so atomically with a compare and swap.
                 * - if the PTE is in IO space or ROM, then it can't be
                 *   updated and we return TRANSLATE_FAIL.
                 * - if the PTE changed by the time we went to update it, then
                 *   it is no longer valid and we must re-walk the page table.
                 */
                MemoryRegion *mr;
                hwaddr l = sizeof(target_ulong), addr1;
                mr = address_space_translate(cs->as, pte_addr, &addr1, &l,
                                             false, MEMTXATTRS_UNSPECIFIED);
                if (memory_region_is_ram(mr)) {
                    target_ulong *pte_pa =
                        qemu_map_ram_ptr(mr->ram_block, addr1);
#if TCG_OVERSIZED_GUEST
                    /*
                     * MTTCG is not enabled on oversized TCG guests so
                     * page table updates do not need to be atomic
                     */
                    *pte_pa = pte = updated_pte;
#else
                    target_ulong old_pte =
                        qatomic_cmpxchg(pte_pa, pte, updated_pte);
                    if (old_pte != pte) {
                        goto restart;
                    } else {
                        pte = updated_pte;
                    }
#endif
                } else {
                    /*
                     * Misconfigured PTE in ROM (AD bits are not pre-set) or
                     * PTE is in IO space and can't be updated atomically.
                     */
                    return TRANSLATE_FAIL;
                }
            }

            /*
             * for superpage mappings, make a fake leaf PTE for the TLB's
             * benefit.
             */
            target_ulong vpn = addr >> PGSHIFT;

            if (riscv_cpu_cfg(env)->ext_svnapot && (pte & PTE_N)) {
                napot_bits = ctzl(ppn) + 1;
                if ((i != (levels - 1)) || (napot_bits != 4)) {
                    return TRANSLATE_FAIL;
                }
            }

            napot_mask = (1 << napot_bits) - 1;
            *physical = (((ppn & ~napot_mask) | (vpn & napot_mask) |
                          (vpn & (((target_ulong)1 << ptshift) - 1))
                        ) << PGSHIFT) | (addr & ~TARGET_PAGE_MASK);

            /* set permissions on the TLB entry */
            if ((pte & PTE_R) || ((pte & PTE_X) && mxr)) {
                *prot |= PAGE_READ;
            }
            if (pte & PTE_X) {
                *prot |= PAGE_EXEC;
            }
            /*
             * add write permission on stores or if the page is already
             * dirty, so that we TLB miss on later writes to update the
             * dirty bit
             */
            if ((pte & PTE_W) &&
                (access_type == MMU_DATA_STORE || (pte & PTE_D))) {
                *prot |= PAGE_WRITE;
            }
            return TRANSLATE_SUCCESS;
        }
    }
    return TRANSLATE_FAIL;
}

static void raise_mmu_exception(CPURISCVState *env, target_ulong address,
                                MMUAccessType access_type, bool pmp_violation,
                                bool first_stage, bool two_stage,
                                bool two_stage_indirect)
{
    CPUState *cs = env_cpu(env);
    int page_fault_exceptions, vm;
    uint64_t satp_mode;

    if (riscv_cpu_mxl(env) == MXL_RV32) {
        satp_mode = SATP32_MODE;
    } else {
        satp_mode = SATP64_MODE;
    }

    if (first_stage) {
        vm = get_field(env->satp, satp_mode);
    } else {
        vm = get_field(env->hgatp, satp_mode);
    }

    page_fault_exceptions = vm != VM_1_10_MBARE && !pmp_violation;

    switch (access_type) {
    case MMU_INST_FETCH:
        if (env->virt_enabled && !first_stage) {
            cs->exception_index = RISCV_EXCP_INST_GUEST_PAGE_FAULT;
        } else {
            cs->exception_index = page_fault_exceptions ?
                                  RISCV_EXCP_INST_PAGE_FAULT :
                                  RISCV_EXCP_INST_ACCESS_FAULT;
        }
        break;
    case MMU_DATA_LOAD:
        if (two_stage && !first_stage) {
            cs->exception_index = RISCV_EXCP_LOAD_GUEST_ACCESS_FAULT;
        } else {
            cs->exception_index = page_fault_exceptions ?
                                  RISCV_EXCP_LOAD_PAGE_FAULT :
                                  RISCV_EXCP_LOAD_ACCESS_FAULT;
        }
        break;
    case MMU_DATA_STORE:
        if (two_stage && !first_stage) {
            cs->exception_index = RISCV_EXCP_STORE_GUEST_AMO_ACCESS_FAULT;
        } else {
            cs->exception_index = page_fault_exceptions ?
                                  RISCV_EXCP_STORE_PAGE_FAULT :
                                  RISCV_EXCP_STORE_AMO_ACCESS_FAULT;
        }
        break;
    default:
        g_assert_not_reached();
    }
    env->badaddr = address;
    env->two_stage_lookup = two_stage;
    env->two_stage_indirect_lookup = two_stage_indirect;
}

hwaddr riscv_cpu_get_phys_page_debug(CPUState *cs, vaddr addr)
{
    RISCVCPU *cpu = RISCV_CPU(cs);
    CPURISCVState *env = &cpu->env;
    hwaddr phys_addr;
    int prot;
    int mmu_idx = cpu_mmu_index(&cpu->env, false);

    if (get_physical_address(env, &phys_addr, &prot, addr, NULL, 0, mmu_idx,
                             true, env->virt_enabled, true)) {
        return -1;
    }

    if (env->virt_enabled) {
        if (get_physical_address(env, &phys_addr, &prot, phys_addr, NULL,
                                 0, mmu_idx, false, true, true)) {
            return -1;
        }
    }

    return phys_addr & TARGET_PAGE_MASK;
}

void riscv_cpu_do_transaction_failed(CPUState *cs, hwaddr physaddr,
                                     vaddr addr, unsigned size,
                                     MMUAccessType access_type,
                                     int mmu_idx, MemTxAttrs attrs,
                                     MemTxResult response, uintptr_t retaddr)
{
    RISCVCPU *cpu = RISCV_CPU(cs);
    CPURISCVState *env = &cpu->env;

    if (access_type == MMU_DATA_STORE) {
        cs->exception_index = RISCV_EXCP_STORE_AMO_ACCESS_FAULT;
    } else if (access_type == MMU_DATA_LOAD) {
        cs->exception_index = RISCV_EXCP_LOAD_ACCESS_FAULT;
    } else {
        cs->exception_index = RISCV_EXCP_INST_ACCESS_FAULT;
    }

    env->badaddr = addr;
    env->two_stage_lookup = env->virt_enabled || mmuidx_2stage(mmu_idx);
    env->two_stage_indirect_lookup = false;
    cpu_loop_exit_restore(cs, retaddr);
}

void riscv_cpu_do_unaligned_access(CPUState *cs, vaddr addr,
                                   MMUAccessType access_type, int mmu_idx,
                                   uintptr_t retaddr)
{
    RISCVCPU *cpu = RISCV_CPU(cs);
    CPURISCVState *env = &cpu->env;
    switch (access_type) {
    case MMU_INST_FETCH:
        cs->exception_index = RISCV_EXCP_INST_ADDR_MIS;
        break;
    case MMU_DATA_LOAD:
        cs->exception_index = RISCV_EXCP_LOAD_ADDR_MIS;
        break;
    case MMU_DATA_STORE:
        cs->exception_index = RISCV_EXCP_STORE_AMO_ADDR_MIS;
        break;
    default:
        g_assert_not_reached();
    }
    env->badaddr = addr;
    env->two_stage_lookup = env->virt_enabled || mmuidx_2stage(mmu_idx);
    env->two_stage_indirect_lookup = false;
    cpu_loop_exit_restore(cs, retaddr);
}

static void pmu_tlb_fill_incr_ctr(RISCVCPU *cpu, MMUAccessType access_type)
{
    enum riscv_pmu_event_idx pmu_event_type;

    switch (access_type) {
    case MMU_INST_FETCH:
        pmu_event_type = RISCV_PMU_EVENT_CACHE_ITLB_PREFETCH_MISS;
        break;
    case MMU_DATA_LOAD:
        pmu_event_type = RISCV_PMU_EVENT_CACHE_DTLB_READ_MISS;
        break;
    case MMU_DATA_STORE:
        pmu_event_type = RISCV_PMU_EVENT_CACHE_DTLB_WRITE_MISS;
        break;
    default:
        return;
    }

    riscv_pmu_incr_ctr(cpu, pmu_event_type);
}

bool riscv_cpu_tlb_fill(CPUState *cs, vaddr address, int size,
                        MMUAccessType access_type, int mmu_idx,
                        bool probe, uintptr_t retaddr)
{
    RISCVCPU *cpu = RISCV_CPU(cs);
    CPURISCVState *env = &cpu->env;
    vaddr im_address;
    hwaddr pa = 0;
    int prot, prot2, prot_pmp;
    bool pmp_violation = false;
    bool first_stage_error = true;
    bool two_stage_lookup = false;
    bool two_stage_indirect_error = false;
    int ret = TRANSLATE_FAIL;
    int mode = mmu_idx;
    /* default TLB page size */
    target_ulong tlb_size = TARGET_PAGE_SIZE;

    env->guest_phys_fault_addr = 0;

    qemu_log_mask(CPU_LOG_MMU, "%s ad %" VADDR_PRIx " rw %d mmu_idx %d\n",
                  __func__, address, access_type, mmu_idx);

    /*
     * MPRV does not affect the virtual-machine load/store
     * instructions, HLV, HLVX, and HSV.
     */
    if (mmuidx_2stage(mmu_idx)) {
        mode = get_field(env->hstatus, HSTATUS_SPVP);
    } else if (mode == PRV_M && access_type != MMU_INST_FETCH &&
               get_field(env->mstatus, MSTATUS_MPRV)) {
        mode = get_field(env->mstatus, MSTATUS_MPP);
        if (riscv_has_ext(env, RVH) && get_field(env->mstatus, MSTATUS_MPV)) {
            two_stage_lookup = true;
        }
    }

    pmu_tlb_fill_incr_ctr(cpu, access_type);
    if (env->virt_enabled ||
        ((mmuidx_2stage(mmu_idx) || two_stage_lookup) &&
         access_type != MMU_INST_FETCH)) {
        /* Two stage lookup */
        ret = get_physical_address(env, &pa, &prot, address,
                                   &env->guest_phys_fault_addr, access_type,
                                   mmu_idx, true, true, false);

        /*
         * A G-stage exception may be triggered during two-stage lookup.
         * env->guest_phys_fault_addr has already been set in
         * get_physical_address().
         */
        if (ret == TRANSLATE_G_STAGE_FAIL) {
            first_stage_error = false;
            two_stage_indirect_error = true;
            access_type = MMU_DATA_LOAD;
        }

        qemu_log_mask(CPU_LOG_MMU,
                      "%s 1st-stage address=%" VADDR_PRIx " ret %d physical "
                      HWADDR_FMT_plx " prot %d\n",
                      __func__, address, ret, pa, prot);

        if (ret == TRANSLATE_SUCCESS) {
            /* Second stage lookup */
            im_address = pa;

            ret = get_physical_address(env, &pa, &prot2, im_address, NULL,
                                       access_type, mmu_idx, false, true,
                                       false);

            qemu_log_mask(CPU_LOG_MMU,
                          "%s 2nd-stage address=%" VADDR_PRIx
                          " ret %d physical "
                          HWADDR_FMT_plx " prot %d\n",
                          __func__, im_address, ret, pa, prot2);

            prot &= prot2;

            if (ret == TRANSLATE_SUCCESS) {
                ret = get_physical_address_pmp(env, &prot_pmp, &tlb_size, pa,
                                               size, access_type, mode);

                qemu_log_mask(CPU_LOG_MMU,
                              "%s PMP address=" HWADDR_FMT_plx " ret %d prot"
                              " %d tlb_size " TARGET_FMT_lu "\n",
                              __func__, pa, ret, prot_pmp, tlb_size);

                prot &= prot_pmp;
            }

            if (ret != TRANSLATE_SUCCESS) {
                /*
                 * Guest physical address translation failed, this is a HS
                 * level exception
                 */
                first_stage_error = false;
                env->guest_phys_fault_addr = (im_address |
                                              (address &
                                               (TARGET_PAGE_SIZE - 1))) >> 2;
            }
        }
    } else {
        /* Single stage lookup */
        ret = get_physical_address(env, &pa, &prot, address, NULL,
                                   access_type, mmu_idx, true, false, false);

        qemu_log_mask(CPU_LOG_MMU,
                      "%s address=%" VADDR_PRIx " ret %d physical "
                      HWADDR_FMT_plx " prot %d\n",
                      __func__, address, ret, pa, prot);

        if (ret == TRANSLATE_SUCCESS) {
            ret = get_physical_address_pmp(env, &prot_pmp, &tlb_size, pa,
                                           size, access_type, mode);

            qemu_log_mask(CPU_LOG_MMU,
                          "%s PMP address=" HWADDR_FMT_plx " ret %d prot"
                          " %d tlb_size " TARGET_FMT_lu "\n",
                          __func__, pa, ret, prot_pmp, tlb_size);

            prot &= prot_pmp;
        }
    }

    if (ret == TRANSLATE_PMP_FAIL) {
        pmp_violation = true;
    }

    if (ret == TRANSLATE_SUCCESS) {
        tlb_set_page(cs, address & ~(tlb_size - 1), pa & ~(tlb_size - 1),
                     prot, mmu_idx, tlb_size);
        return true;
    } else if (probe) {
        return false;
    } else {
        raise_mmu_exception(env, address, access_type, pmp_violation,
                            first_stage_error,
                            env->virt_enabled || mmuidx_2stage(mmu_idx),
                            two_stage_indirect_error);
        cpu_loop_exit_restore(cs, retaddr);
    }

    return true;
}

static target_ulong riscv_transformed_insn(CPURISCVState *env,
                                           target_ulong insn,
                                           target_ulong taddr)
{
    target_ulong xinsn = 0;
    target_ulong access_rs1 = 0, access_imm = 0, access_size = 0;

    /*
     * Only Quadrant 0 and Quadrant 2 of the RVC instruction space need to
     * be uncompressed. Quadrant 1 of the RVC instruction space does not
     * need to be transformed because those instructions won't generate
     * any load/store trap.
     */

    if ((insn & 0x3) != 0x3) {
        /* Transform 16bit instruction into 32bit instruction */
        switch (GET_C_OP(insn)) {
        case OPC_RISC_C_OP_QUAD0: /* Quadrant 0 */
            switch (GET_C_FUNC(insn)) {
            case OPC_RISC_C_FUNC_FLD_LQ:
                if (riscv_cpu_xlen(env) != 128) { /* C.FLD (RV32/64) */
                    xinsn = OPC_RISC_FLD;
                    xinsn = SET_RD(xinsn, GET_C_RS2S(insn));
                    access_rs1 = GET_C_RS1S(insn);
                    access_imm = GET_C_LD_IMM(insn);
                    access_size = 8;
                }
                break;
            case OPC_RISC_C_FUNC_LW: /* C.LW */
                xinsn = OPC_RISC_LW;
                xinsn = SET_RD(xinsn, GET_C_RS2S(insn));
                access_rs1 = GET_C_RS1S(insn);
                access_imm = GET_C_LW_IMM(insn);
                access_size = 4;
                break;
            case OPC_RISC_C_FUNC_FLW_LD:
                if (riscv_cpu_xlen(env) == 32) { /* C.FLW (RV32) */
                    xinsn = OPC_RISC_FLW;
                    xinsn = SET_RD(xinsn, GET_C_RS2S(insn));
                    access_rs1 = GET_C_RS1S(insn);
                    access_imm = GET_C_LW_IMM(insn);
                    access_size = 4;
                } else { /* C.LD (RV64/RV128) */
                    xinsn = OPC_RISC_LD;
                    xinsn = SET_RD(xinsn, GET_C_RS2S(insn));
                    access_rs1 = GET_C_RS1S(insn);
                    access_imm = GET_C_LD_IMM(insn);
                    access_size = 8;
                }
                break;
            case OPC_RISC_C_FUNC_FSD_SQ:
                if (riscv_cpu_xlen(env) != 128) { /* C.FSD (RV32/64) */
                    xinsn = OPC_RISC_FSD;
                    xinsn = SET_RS2(xinsn, GET_C_RS2S(insn));
                    access_rs1 = GET_C_RS1S(insn);
                    access_imm = GET_C_SD_IMM(insn);
                    access_size = 8;
                }
                break;
            case OPC_RISC_C_FUNC_SW: /* C.SW */
                xinsn = OPC_RISC_SW;
                xinsn = SET_RS2(xinsn, GET_C_RS2S(insn));
                access_rs1 = GET_C_RS1S(insn);
                access_imm = GET_C_SW_IMM(insn);
                access_size = 4;
                break;
            case OPC_RISC_C_FUNC_FSW_SD:
                if (riscv_cpu_xlen(env) == 32) { /* C.FSW (RV32) */
                    xinsn = OPC_RISC_FSW;
                    xinsn = SET_RS2(xinsn, GET_C_RS2S(insn));
                    access_rs1 = GET_C_RS1S(insn);
                    access_imm = GET_C_SW_IMM(insn);
                    access_size = 4;
                } else { /* C.SD (RV64/RV128) */
                    xinsn = OPC_RISC_SD;
                    xinsn = SET_RS2(xinsn, GET_C_RS2S(insn));
                    access_rs1 = GET_C_RS1S(insn);
                    access_imm = GET_C_SD_IMM(insn);
                    access_size = 8;
                }
                break;
            default:
                break;
            }
            break;
        case OPC_RISC_C_OP_QUAD2: /* Quadrant 2 */
            switch (GET_C_FUNC(insn)) {
            case OPC_RISC_C_FUNC_FLDSP_LQSP:
                if (riscv_cpu_xlen(env) != 128) { /* C.FLDSP (RV32/64) */
                    xinsn = OPC_RISC_FLD;
                    xinsn = SET_RD(xinsn, GET_C_RD(insn));
                    access_rs1 = 2;
                    access_imm = GET_C_LDSP_IMM(insn);
                    access_size = 8;
                }
                break;
            case OPC_RISC_C_FUNC_LWSP: /* C.LWSP */
                xinsn = OPC_RISC_LW;
                xinsn = SET_RD(xinsn, GET_C_RD(insn));
                access_rs1 = 2;
                access_imm = GET_C_LWSP_IMM(insn);
                access_size = 4;
                break;
            case OPC_RISC_C_FUNC_FLWSP_LDSP:
                if (riscv_cpu_xlen(env) == 32) { /* C.FLWSP (RV32) */
                    xinsn = OPC_RISC_FLW;
                    xinsn = SET_RD(xinsn, GET_C_RD(insn));
                    access_rs1 = 2;
                    access_imm = GET_C_LWSP_IMM(insn);
                    access_size = 4;
                } else { /* C.LDSP (RV64/RV128) */
                    xinsn = OPC_RISC_LD;
                    xinsn = SET_RD(xinsn, GET_C_RD(insn));
                    access_rs1 = 2;
                    access_imm = GET_C_LDSP_IMM(insn);
                    access_size = 8;
                }
                break;
            case OPC_RISC_C_FUNC_FSDSP_SQSP:
                if (riscv_cpu_xlen(env) != 128) { /* C.FSDSP (RV32/64) */
                    xinsn = OPC_RISC_FSD;
                    xinsn = SET_RS2(xinsn, GET_C_RS2(insn));
                    access_rs1 = 2;
                    access_imm = GET_C_SDSP_IMM(insn);
                    access_size = 8;
                }
                break;
            case OPC_RISC_C_FUNC_SWSP: /* C.SWSP */
                xinsn = OPC_RISC_SW;
                xinsn = SET_RS2(xinsn, GET_C_RS2(insn));
                access_rs1 = 2;
                access_imm = GET_C_SWSP_IMM(insn);
                access_size = 4;
                break;
            case 7:
                if (riscv_cpu_xlen(env) == 32) { /* C.FSWSP (RV32) */
                    xinsn = OPC_RISC_FSW;
                    xinsn = SET_RS2(xinsn, GET_C_RS2(insn));
                    access_rs1 = 2;
                    access_imm = GET_C_SWSP_IMM(insn);
                    access_size = 4;
                } else { /* C.SDSP (RV64/RV128) */
                    xinsn = OPC_RISC_SD;
                    xinsn = SET_RS2(xinsn, GET_C_RS2(insn));
                    access_rs1 = 2;
                    access_imm = GET_C_SDSP_IMM(insn);
                    access_size = 8;
                }
                break;
            default:
                break;
            }
            break;
        default:
            break;
        }

        /*
         * Clear bit 1 of the transformed instruction to indicate that
         * the original instruction was a 16bit instruction.
         */
        xinsn &= ~((target_ulong)0x2);
    } else {
        /* Transform 32bit (or wider) instructions */
        switch (MASK_OP_MAJOR(insn)) {
        case OPC_RISC_ATOMIC:
            xinsn = insn;
            access_rs1 = GET_RS1(insn);
            access_size = 1 << GET_FUNCT3(insn);
            break;
        case OPC_RISC_LOAD:
        case OPC_RISC_FP_LOAD:
            xinsn = SET_I_IMM(insn, 0);
            access_rs1 = GET_RS1(insn);
            access_imm = GET_IMM(insn);
            access_size = 1 << GET_FUNCT3(insn);
            break;
        case OPC_RISC_STORE:
        case OPC_RISC_FP_STORE:
            xinsn = SET_S_IMM(insn, 0);
            access_rs1 = GET_RS1(insn);
            access_imm = GET_STORE_IMM(insn);
            access_size = 1 << GET_FUNCT3(insn);
            break;
        case OPC_RISC_SYSTEM:
            if (MASK_OP_SYSTEM(insn) == OPC_RISC_HLVHSV) {
                xinsn = insn;
                access_rs1 = GET_RS1(insn);
                /*
                 * The width field in funct7 already encodes log2 of the
                 * access size, so a single shift yields 1/2/4/8 bytes;
                 * the original second "access_size = 1 << access_size"
                 * would have shifted twice.
                 */
                access_size = 1 << ((GET_FUNCT7(insn) >> 1) & 0x3);
            }
            break;
        default:
            break;
        }
    }

    if (access_size) {
        xinsn = SET_RS1(xinsn, (taddr - (env->gpr[access_rs1] + access_imm)) &
                               (access_size - 1));
    }

    return xinsn;
}
#endif /* !CONFIG_USER_ONLY */

/*
 * Handle Traps
 *
 * Adapted from Spike's processor_t::take_trap.
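 *
 * In outline, matching the code below: a trap taken with
 * env->priv <= PRV_S whose medeleg/mideleg bit is set is handled in
 * S-mode, and is further redirected to VS-mode when the hypervisor
 * has also delegated it via hedeleg/hideleg; everything else is
 * handled in M-mode.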
 */
void riscv_cpu_do_interrupt(CPUState *cs)
{
#if !defined(CONFIG_USER_ONLY)

    RISCVCPU *cpu = RISCV_CPU(cs);
    CPURISCVState *env = &cpu->env;
    bool write_gva = false;
    uint64_t s;

    /*
     * cs->exception_index is 32-bits wide unlike mcause which is XLEN-bits
     * wide, so we mask off the MSB and separate into trap type and cause.
     */
    bool async = !!(cs->exception_index & RISCV_EXCP_INT_FLAG);
    target_ulong cause = cs->exception_index & RISCV_EXCP_INT_MASK;
    uint64_t deleg = async ? env->mideleg : env->medeleg;
    target_ulong tval = 0;
    target_ulong tinst = 0;
    target_ulong htval = 0;
    target_ulong mtval2 = 0;

    if (cause == RISCV_EXCP_SEMIHOST) {
        do_common_semihosting(cs);
        env->pc += 4;
        return;
    }

    if (!async) {
        /* set tval to badaddr for traps with address information */
        switch (cause) {
        case RISCV_EXCP_LOAD_GUEST_ACCESS_FAULT:
        case RISCV_EXCP_STORE_GUEST_AMO_ACCESS_FAULT:
        case RISCV_EXCP_LOAD_ADDR_MIS:
        case RISCV_EXCP_STORE_AMO_ADDR_MIS:
        case RISCV_EXCP_LOAD_ACCESS_FAULT:
        case RISCV_EXCP_STORE_AMO_ACCESS_FAULT:
        case RISCV_EXCP_LOAD_PAGE_FAULT:
        case RISCV_EXCP_STORE_PAGE_FAULT:
            write_gva = env->two_stage_lookup;
            tval = env->badaddr;
            if (env->two_stage_indirect_lookup) {
                /*
                 * special pseudoinstruction for G-stage fault taken while
                 * doing VS-stage page table walk.
                 */
                tinst = (riscv_cpu_xlen(env) == 32) ?
                        0x00002000 : 0x00003000;
            } else {
                /*
                 * The "Addr. Offset" field in the transformed instruction
                 * is non-zero only for misaligned accesses.
                 */
                tinst = riscv_transformed_insn(env, env->bins, tval);
            }
            break;
        case RISCV_EXCP_INST_GUEST_PAGE_FAULT:
        case RISCV_EXCP_INST_ADDR_MIS:
        case RISCV_EXCP_INST_ACCESS_FAULT:
        case RISCV_EXCP_INST_PAGE_FAULT:
            write_gva = env->two_stage_lookup;
            tval = env->badaddr;
            if (env->two_stage_indirect_lookup) {
                /*
                 * special pseudoinstruction for G-stage fault taken while
                 * doing VS-stage page table walk.
                 */
                tinst = (riscv_cpu_xlen(env) == 32) ?
                        0x00002000 : 0x00003000;
            }
            break;
        case RISCV_EXCP_ILLEGAL_INST:
        case RISCV_EXCP_VIRT_INSTRUCTION_FAULT:
            tval = env->bins;
            break;
        case RISCV_EXCP_BREAKPOINT:
            if (cs->watchpoint_hit) {
                tval = cs->watchpoint_hit->hitaddr;
                cs->watchpoint_hit = NULL;
            }
            break;
        default:
            break;
        }
        /* ecall is dispatched as one cause so translate based on mode */
        if (cause == RISCV_EXCP_U_ECALL) {
            assert(env->priv <= 3);

            if (env->priv == PRV_M) {
                cause = RISCV_EXCP_M_ECALL;
            } else if (env->priv == PRV_S && env->virt_enabled) {
                cause = RISCV_EXCP_VS_ECALL;
            } else if (env->priv == PRV_S && !env->virt_enabled) {
                cause = RISCV_EXCP_S_ECALL;
            } else if (env->priv == PRV_U) {
                cause = RISCV_EXCP_U_ECALL;
            }
        }
    }

    trace_riscv_trap(env->mhartid, async, cause, env->pc, tval,
                     riscv_cpu_get_trap_name(cause, async));

    qemu_log_mask(CPU_LOG_INT,
                  "%s: hart:"TARGET_FMT_ld", async:%d, cause:"TARGET_FMT_lx", "
                  "epc:0x"TARGET_FMT_lx", tval:0x"TARGET_FMT_lx", desc=%s\n",
                  __func__, env->mhartid, async, cause, env->pc, tval,
                  riscv_cpu_get_trap_name(cause, async));

    if (env->priv <= PRV_S &&
        cause < TARGET_LONG_BITS && ((deleg >> cause) & 1)) {
        /* handle the trap in S-mode */
        if (riscv_has_ext(env, RVH)) {
            uint64_t hdeleg = async ? env->hideleg : env->hedeleg;

            if (env->virt_enabled && ((hdeleg >> cause) & 1)) {
                /* Trap to VS mode */
                /*
                 * See if we need to adjust the cause: yes if it is a
                 * VS-mode interrupt, no if the hypervisor has delegated
                 * one of the HS-mode interrupts.
                 */
                if (cause == IRQ_VS_TIMER || cause == IRQ_VS_SOFT ||
                    cause == IRQ_VS_EXT) {
                    cause = cause - 1;
                }
                write_gva = false;
            } else if (env->virt_enabled) {
                /* Trap into HS mode, from virt */
                riscv_cpu_swap_hypervisor_regs(env);
                env->hstatus = set_field(env->hstatus, HSTATUS_SPVP,
                                         env->priv);
                env->hstatus = set_field(env->hstatus, HSTATUS_SPV, true);

                htval = env->guest_phys_fault_addr;

                riscv_cpu_set_virt_enabled(env, 0);
            } else {
                /* Trap into HS mode */
                env->hstatus = set_field(env->hstatus, HSTATUS_SPV, false);
                htval = env->guest_phys_fault_addr;
            }
            env->hstatus = set_field(env->hstatus, HSTATUS_GVA, write_gva);
        }

        s = env->mstatus;
        s = set_field(s, MSTATUS_SPIE, get_field(s, MSTATUS_SIE));
        s = set_field(s, MSTATUS_SPP, env->priv);
        s = set_field(s, MSTATUS_SIE, 0);
        env->mstatus = s;
        env->scause = cause | ((target_ulong)async << (TARGET_LONG_BITS - 1));
        env->sepc = env->pc;
        env->stval = tval;
        env->htval = htval;
        env->htinst = tinst;
        env->pc = (env->stvec >> 2 << 2) +
                  ((async && (env->stvec & 3) == 1) ? cause * 4 : 0);
        riscv_cpu_set_mode(env, PRV_S);
    } else {
        /* handle the trap in M-mode */
        if (riscv_has_ext(env, RVH)) {
            if (env->virt_enabled) {
                riscv_cpu_swap_hypervisor_regs(env);
            }
            env->mstatus = set_field(env->mstatus, MSTATUS_MPV,
                                     env->virt_enabled);
            if (env->virt_enabled && tval) {
                env->mstatus = set_field(env->mstatus, MSTATUS_GVA, 1);
            }

            mtval2 = env->guest_phys_fault_addr;

            /* Trapping to M mode, virt is disabled */
            riscv_cpu_set_virt_enabled(env, 0);
        }

        s = env->mstatus;
        s = set_field(s, MSTATUS_MPIE, get_field(s, MSTATUS_MIE));
        s = set_field(s, MSTATUS_MPP, env->priv);
        s = set_field(s, MSTATUS_MIE, 0);
        env->mstatus = s;
        env->mcause = cause | ~(((target_ulong)-1) >> async);
        env->mepc = env->pc;
        env->mtval = tval;
        env->mtval2 = mtval2;
        env->mtinst = tinst;
        env->pc = (env->mtvec >> 2 << 2) +
                  ((async && (env->mtvec & 3) == 1) ? cause * 4 : 0);
        riscv_cpu_set_mode(env, PRV_M);
    }

    /*
     * NOTE: it is not necessary to yield load reservations here. It is only
     * necessary for an SC from "another hart" to cause a load reservation
     * to be yielded. Refer to the memory consistency model section of the
     * RISC-V ISA Specification.
     */

    env->two_stage_lookup = false;
    env->two_stage_indirect_lookup = false;
#endif
    cs->exception_index = RISCV_EXCP_NONE; /* mark handled to qemu */
}