/*
 * RISC-V CPU helpers for qemu.
 *
 * Copyright (c) 2016-2017 Sagar Karandikar, sagark@eecs.berkeley.edu
 * Copyright (c) 2017-2018 SiFive, Inc.
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms and conditions of the GNU General Public License,
 * version 2 or later, as published by the Free Software Foundation.
 *
 * This program is distributed in the hope it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
 * more details.
 *
 * You should have received a copy of the GNU General Public License along with
 * this program. If not, see <http://www.gnu.org/licenses/>.
 */

#include "qemu/osdep.h"
#include "qemu/log.h"
#include "qemu/main-loop.h"
#include "cpu.h"
#include "pmu.h"
#include "exec/exec-all.h"
#include "instmap.h"
#include "tcg/tcg-op.h"
#include "trace.h"
#include "semihosting/common-semi.h"
#include "sysemu/cpu-timers.h"
#include "cpu_bits.h"
#include "debug.h"

int riscv_cpu_mmu_index(CPURISCVState *env, bool ifetch)
{
#ifdef CONFIG_USER_ONLY
    return 0;
#else
    return env->priv;
#endif
}
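/*
 * Note: the mmu_idx returned above encodes only the privilege level;
 * hypervisor virtual-machine accesses (HLV/HLVX/HSV) are expected to
 * OR in TB_FLAGS_PRIV_HYP_ACCESS_MASK on top of it, which
 * riscv_cpu_two_stage_lookup() below tests for.
 */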
void cpu_get_tb_cpu_state(CPURISCVState *env, target_ulong *pc,
                          target_ulong *cs_base, uint32_t *pflags)
{
    CPUState *cs = env_cpu(env);
    RISCVCPU *cpu = RISCV_CPU(cs);

    uint32_t flags = 0;

    *pc = env->xl == MXL_RV32 ? env->pc & UINT32_MAX : env->pc;
    *cs_base = 0;

    if (riscv_has_ext(env, RVV) || cpu->cfg.ext_zve32f || cpu->cfg.ext_zve64f) {
        /*
         * If env->vl equals VLMAX, we can use generic vector operation
         * expanders (GVEC) to accelerate the vector operations.
         * However, as LMUL could be a fractional number, the maximum
         * vector size that can be operated on might be less than
         * 8 bytes, which is not supported by GVEC. So we only set the
         * vl_eq_vlmax flag to true when maxsz >= 8 bytes.
         */
        uint32_t vlmax = vext_get_vlmax(env_archcpu(env), env->vtype);
        uint32_t sew = FIELD_EX64(env->vtype, VTYPE, VSEW);
        uint32_t maxsz = vlmax << sew;
        bool vl_eq_vlmax = (env->vstart == 0) && (vlmax == env->vl) &&
                           (maxsz >= 8);
        flags = FIELD_DP32(flags, TB_FLAGS, VILL, env->vill);
        flags = FIELD_DP32(flags, TB_FLAGS, SEW, sew);
        flags = FIELD_DP32(flags, TB_FLAGS, LMUL,
                           FIELD_EX64(env->vtype, VTYPE, VLMUL));
        flags = FIELD_DP32(flags, TB_FLAGS, VL_EQ_VLMAX, vl_eq_vlmax);
        flags = FIELD_DP32(flags, TB_FLAGS, VTA,
                           FIELD_EX64(env->vtype, VTYPE, VTA));
        flags = FIELD_DP32(flags, TB_FLAGS, VMA,
                           FIELD_EX64(env->vtype, VTYPE, VMA));
    } else {
        flags = FIELD_DP32(flags, TB_FLAGS, VILL, 1);
    }

#ifdef CONFIG_USER_ONLY
    flags |= TB_FLAGS_MSTATUS_FS;
    flags |= TB_FLAGS_MSTATUS_VS;
#else
    flags |= cpu_mmu_index(env, 0);
    if (riscv_cpu_fp_enabled(env)) {
        flags |= env->mstatus & MSTATUS_FS;
    }

    if (riscv_cpu_vector_enabled(env)) {
        flags |= env->mstatus & MSTATUS_VS;
    }

    if (riscv_has_ext(env, RVH)) {
        if (env->priv == PRV_M ||
            (env->priv == PRV_S && !riscv_cpu_virt_enabled(env)) ||
            (env->priv == PRV_U && !riscv_cpu_virt_enabled(env) &&
                get_field(env->hstatus, HSTATUS_HU))) {
            flags = FIELD_DP32(flags, TB_FLAGS, HLSX, 1);
        }

        flags = FIELD_DP32(flags, TB_FLAGS, MSTATUS_HS_FS,
                           get_field(env->mstatus_hs, MSTATUS_FS));

        flags = FIELD_DP32(flags, TB_FLAGS, MSTATUS_HS_VS,
                           get_field(env->mstatus_hs, MSTATUS_VS));
    }
    if (riscv_feature(env, RISCV_FEATURE_DEBUG) && !icount_enabled()) {
        flags = FIELD_DP32(flags, TB_FLAGS, ITRIGGER, env->itrigger_enabled);
    }
#endif

    flags = FIELD_DP32(flags, TB_FLAGS, XL, env->xl);
    if (env->cur_pmmask < (env->xl == MXL_RV32 ? UINT32_MAX : UINT64_MAX)) {
        flags = FIELD_DP32(flags, TB_FLAGS, PM_MASK_ENABLED, 1);
    }
    if (env->cur_pmbase != 0) {
        flags = FIELD_DP32(flags, TB_FLAGS, PM_BASE_ENABLED, 1);
    }

    *pflags = flags;
}
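/*
 * cur_pmmask and cur_pmbase feed the pointer-masking transformation
 * applied to data addresses, conceptually:
 *
 *     addr = (addr & env->cur_pmmask) | env->cur_pmbase;
 *
 * so an all-ones mask with a zero base (the default below) leaves
 * addresses unchanged.
 */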
void riscv_cpu_update_mask(CPURISCVState *env)
{
    target_ulong mask = -1, base = 0;
    /*
     * TODO: Current RVJ spec does not specify
     * how the extension interacts with XLEN.
     */
#ifndef CONFIG_USER_ONLY
    if (riscv_has_ext(env, RVJ)) {
        switch (env->priv) {
        case PRV_M:
            if (env->mmte & M_PM_ENABLE) {
                mask = env->mpmmask;
                base = env->mpmbase;
            }
            break;
        case PRV_S:
            if (env->mmte & S_PM_ENABLE) {
                mask = env->spmmask;
                base = env->spmbase;
            }
            break;
        case PRV_U:
            if (env->mmte & U_PM_ENABLE) {
                mask = env->upmmask;
                base = env->upmbase;
            }
            break;
        default:
            g_assert_not_reached();
        }
    }
#endif
    if (env->xl == MXL_RV32) {
        env->cur_pmmask = mask & UINT32_MAX;
        env->cur_pmbase = base & UINT32_MAX;
    } else {
        env->cur_pmmask = mask;
        env->cur_pmbase = base;
    }
}

#ifndef CONFIG_USER_ONLY

/*
 * The HS-mode is allowed to configure priority only for the
 * following VS-mode local interrupts:
 *
 * 0  (Reserved interrupt, reads as zero)
 * 1  Supervisor software interrupt
 * 4  (Reserved interrupt, reads as zero)
 * 5  Supervisor timer interrupt
 * 8  (Reserved interrupt, reads as zero)
 * 13 (Reserved interrupt)
 * 14 "
 * 15 "
 * 16 "
 * 17 "
 * 18 "
 * 19 "
 * 20 "
 * 21 "
 * 22 "
 * 23 "
 */

static const int hviprio_index2irq[] = {
    0, 1, 4, 5, 8, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23 };
static const int hviprio_index2rdzero[] = {
    1, 0, 1, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0 };

int riscv_cpu_hviprio_index2irq(int index, int *out_irq, int *out_rdzero)
{
    if (index < 0 || ARRAY_SIZE(hviprio_index2irq) <= index) {
        return -EINVAL;
    }

    if (out_irq) {
        *out_irq = hviprio_index2irq[index];
    }

    if (out_rdzero) {
        *out_rdzero = hviprio_index2rdzero[index];
    }

    return 0;
}
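/*
 * For example, index 1 maps to IRQ 1 (the supervisor software
 * interrupt) with rdzero = 0, while index 0 maps to IRQ 0, whose
 * priority always reads as zero.
 */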
/*
 * Default priorities of local interrupts are defined in the
 * RISC-V Advanced Interrupt Architecture specification.
 *
 * ----------------------------------------------------------------
 *  Default  |
 *  Priority | Major Interrupt Numbers
 * ----------------------------------------------------------------
 *  Highest  | 47, 23, 46, 45, 22, 44,
 *           | 43, 21, 42, 41, 20, 40
 *           |
 *           | 11 (0b), 3 (03), 7 (07)
 *           | 9 (09), 1 (01), 5 (05)
 *           | 12 (0c)
 *           | 10 (0a), 2 (02), 6 (06)
 *           |
 *           | 39, 19, 38, 37, 18, 36,
 *  Lowest   | 35, 17, 34, 33, 16, 32
 * ----------------------------------------------------------------
 */
static const uint8_t default_iprio[64] = {
    /* Custom interrupts 48 to 63 */
    [63] = IPRIO_MMAXIPRIO,
    [62] = IPRIO_MMAXIPRIO,
    [61] = IPRIO_MMAXIPRIO,
    [60] = IPRIO_MMAXIPRIO,
    [59] = IPRIO_MMAXIPRIO,
    [58] = IPRIO_MMAXIPRIO,
    [57] = IPRIO_MMAXIPRIO,
    [56] = IPRIO_MMAXIPRIO,
    [55] = IPRIO_MMAXIPRIO,
    [54] = IPRIO_MMAXIPRIO,
    [53] = IPRIO_MMAXIPRIO,
    [52] = IPRIO_MMAXIPRIO,
    [51] = IPRIO_MMAXIPRIO,
    [50] = IPRIO_MMAXIPRIO,
    [49] = IPRIO_MMAXIPRIO,
    [48] = IPRIO_MMAXIPRIO,

    /* Custom interrupts 24 to 31 */
    [31] = IPRIO_MMAXIPRIO,
    [30] = IPRIO_MMAXIPRIO,
    [29] = IPRIO_MMAXIPRIO,
    [28] = IPRIO_MMAXIPRIO,
    [27] = IPRIO_MMAXIPRIO,
    [26] = IPRIO_MMAXIPRIO,
    [25] = IPRIO_MMAXIPRIO,
    [24] = IPRIO_MMAXIPRIO,

    [47] = IPRIO_DEFAULT_UPPER,
    [23] = IPRIO_DEFAULT_UPPER + 1,
    [46] = IPRIO_DEFAULT_UPPER + 2,
    [45] = IPRIO_DEFAULT_UPPER + 3,
    [22] = IPRIO_DEFAULT_UPPER + 4,
    [44] = IPRIO_DEFAULT_UPPER + 5,

    [43] = IPRIO_DEFAULT_UPPER + 6,
    [21] = IPRIO_DEFAULT_UPPER + 7,
    [42] = IPRIO_DEFAULT_UPPER + 8,
    [41] = IPRIO_DEFAULT_UPPER + 9,
    [20] = IPRIO_DEFAULT_UPPER + 10,
    [40] = IPRIO_DEFAULT_UPPER + 11,

    [11] = IPRIO_DEFAULT_M,
    [3]  = IPRIO_DEFAULT_M + 1,
    [7]  = IPRIO_DEFAULT_M + 2,

    [9]  = IPRIO_DEFAULT_S,
    [1]  = IPRIO_DEFAULT_S + 1,
    [5]  = IPRIO_DEFAULT_S + 2,

    [12] = IPRIO_DEFAULT_SGEXT,

    [10] = IPRIO_DEFAULT_VS,
    [2]  = IPRIO_DEFAULT_VS + 1,
    [6]  = IPRIO_DEFAULT_VS + 2,

    [39] = IPRIO_DEFAULT_LOWER,
    [19] = IPRIO_DEFAULT_LOWER + 1,
    [38] = IPRIO_DEFAULT_LOWER + 2,
    [37] = IPRIO_DEFAULT_LOWER + 3,
    [18] = IPRIO_DEFAULT_LOWER + 4,
    [36] = IPRIO_DEFAULT_LOWER + 5,

    [35] = IPRIO_DEFAULT_LOWER + 6,
    [17] = IPRIO_DEFAULT_LOWER + 7,
    [34] = IPRIO_DEFAULT_LOWER + 8,
    [33] = IPRIO_DEFAULT_LOWER + 9,
    [16] = IPRIO_DEFAULT_LOWER + 10,
    [32] = IPRIO_DEFAULT_LOWER + 11,
};

uint8_t riscv_cpu_default_priority(int irq)
{
    if (irq < 0 || irq > 63) {
        return IPRIO_MMAXIPRIO;
    }

    return default_iprio[irq] ? default_iprio[irq] : IPRIO_MMAXIPRIO;
}
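/*
 * Pick the highest-priority pending interrupt: without AIA this is
 * simply the lowest set bit in the pending mask; with AIA, explicit
 * iprio values are compared (a lower value means a higher priority)
 * and IRQs with no explicit priority fall back to the default order
 * above, relative to the external interrupt's default priority.
 */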
static int riscv_cpu_pending_to_irq(CPURISCVState *env,
                                    int extirq, unsigned int extirq_def_prio,
                                    uint64_t pending, uint8_t *iprio)
{
    RISCVCPU *cpu = env_archcpu(env);
    int irq, best_irq = RISCV_EXCP_NONE;
    unsigned int prio, best_prio = UINT_MAX;

    if (!pending) {
        return RISCV_EXCP_NONE;
    }

    irq = ctz64(pending);
    if (!((extirq == IRQ_M_EXT) ? cpu->cfg.ext_smaia : cpu->cfg.ext_ssaia)) {
        return irq;
    }

    pending = pending >> irq;
    while (pending) {
        prio = iprio[irq];
        if (!prio) {
            if (irq == extirq) {
                prio = extirq_def_prio;
            } else {
                prio = (riscv_cpu_default_priority(irq) < extirq_def_prio) ?
                       1 : IPRIO_MMAXIPRIO;
            }
        }
        if ((pending & 0x1) && (prio <= best_prio)) {
            best_irq = irq;
            best_prio = prio;
        }
        irq++;
        pending = pending >> 1;
    }

    return best_irq;
}

uint64_t riscv_cpu_all_pending(CPURISCVState *env)
{
    uint32_t gein = get_field(env->hstatus, HSTATUS_VGEIN);
    uint64_t vsgein = (env->hgeip & (1ULL << gein)) ? MIP_VSEIP : 0;
    uint64_t vstip = (env->vstime_irq) ? MIP_VSTIP : 0;

    return (env->mip | vsgein | vstip) & env->mie;
}

int riscv_cpu_mirq_pending(CPURISCVState *env)
{
    uint64_t irqs = riscv_cpu_all_pending(env) & ~env->mideleg &
                    ~(MIP_SGEIP | MIP_VSSIP | MIP_VSTIP | MIP_VSEIP);

    return riscv_cpu_pending_to_irq(env, IRQ_M_EXT, IPRIO_DEFAULT_M,
                                    irqs, env->miprio);
}

int riscv_cpu_sirq_pending(CPURISCVState *env)
{
    uint64_t irqs = riscv_cpu_all_pending(env) & env->mideleg &
                    ~(MIP_VSSIP | MIP_VSTIP | MIP_VSEIP);

    return riscv_cpu_pending_to_irq(env, IRQ_S_EXT, IPRIO_DEFAULT_S,
                                    irqs, env->siprio);
}

int riscv_cpu_vsirq_pending(CPURISCVState *env)
{
    uint64_t irqs = riscv_cpu_all_pending(env) & env->mideleg &
                    (MIP_VSSIP | MIP_VSTIP | MIP_VSEIP);

    return riscv_cpu_pending_to_irq(env, IRQ_S_EXT, IPRIO_DEFAULT_S,
                                    irqs >> 1, env->hviprio);
}
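/*
 * MIP_VSSIP, MIP_VSTIP and MIP_VSEIP sit exactly one bit above their
 * S-mode counterparts, so "irqs >> 1" above and below presents the
 * VS-mode interrupts to the selector as if they were S-mode ones;
 * riscv_cpu_local_irq_pending() adds 1 back to the returned number.
 */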
static int riscv_cpu_local_irq_pending(CPURISCVState *env)
{
    int virq;
    uint64_t irqs, pending, mie, hsie, vsie;

    /* Determine interrupt enable state of all privilege modes */
    if (riscv_cpu_virt_enabled(env)) {
        mie = 1;
        hsie = 1;
        vsie = (env->priv < PRV_S) ||
               (env->priv == PRV_S && get_field(env->mstatus, MSTATUS_SIE));
    } else {
        mie = (env->priv < PRV_M) ||
              (env->priv == PRV_M && get_field(env->mstatus, MSTATUS_MIE));
        hsie = (env->priv < PRV_S) ||
               (env->priv == PRV_S && get_field(env->mstatus, MSTATUS_SIE));
        vsie = 0;
    }

    /* Determine all pending interrupts */
    pending = riscv_cpu_all_pending(env);

    /* Check M-mode interrupts */
    irqs = pending & ~env->mideleg & -mie;
    if (irqs) {
        return riscv_cpu_pending_to_irq(env, IRQ_M_EXT, IPRIO_DEFAULT_M,
                                        irqs, env->miprio);
    }

    /* Check HS-mode interrupts */
    irqs = pending & env->mideleg & ~env->hideleg & -hsie;
    if (irqs) {
        return riscv_cpu_pending_to_irq(env, IRQ_S_EXT, IPRIO_DEFAULT_S,
                                        irqs, env->siprio);
    }

    /* Check VS-mode interrupts */
    irqs = pending & env->mideleg & env->hideleg & -vsie;
    if (irqs) {
        virq = riscv_cpu_pending_to_irq(env, IRQ_S_EXT, IPRIO_DEFAULT_S,
                                        irqs >> 1, env->hviprio);
        return (virq <= 0) ? virq : virq + 1;
    }

    /* Indicate no pending interrupt */
    return RISCV_EXCP_NONE;
}

bool riscv_cpu_exec_interrupt(CPUState *cs, int interrupt_request)
{
    if (interrupt_request & CPU_INTERRUPT_HARD) {
        RISCVCPU *cpu = RISCV_CPU(cs);
        CPURISCVState *env = &cpu->env;
        int interruptno = riscv_cpu_local_irq_pending(env);
        if (interruptno >= 0) {
            cs->exception_index = RISCV_EXCP_INT_FLAG | interruptno;
            riscv_cpu_do_interrupt(cs);
            return true;
        }
    }
    return false;
}

/* Return true if floating point support is currently enabled */
bool riscv_cpu_fp_enabled(CPURISCVState *env)
{
    if (env->mstatus & MSTATUS_FS) {
        if (riscv_cpu_virt_enabled(env) && !(env->mstatus_hs & MSTATUS_FS)) {
            return false;
        }
        return true;
    }

    return false;
}

/* Return true if vector support is currently enabled */
bool riscv_cpu_vector_enabled(CPURISCVState *env)
{
    if (env->mstatus & MSTATUS_VS) {
        if (riscv_cpu_virt_enabled(env) && !(env->mstatus_hs & MSTATUS_VS)) {
            return false;
        }
        return true;
    }

    return false;
}

void riscv_cpu_swap_hypervisor_regs(CPURISCVState *env)
{
    uint64_t mstatus_mask = MSTATUS_MXR | MSTATUS_SUM |
                            MSTATUS_SPP | MSTATUS_SPIE | MSTATUS_SIE |
                            MSTATUS64_UXL | MSTATUS_VS;

    if (riscv_has_ext(env, RVF)) {
        mstatus_mask |= MSTATUS_FS;
    }
    bool current_virt = riscv_cpu_virt_enabled(env);

    g_assert(riscv_has_ext(env, RVH));

    if (current_virt) {
        /* Current V=1 and we are about to change to V=0 */
        env->vsstatus = env->mstatus & mstatus_mask;
        env->mstatus &= ~mstatus_mask;
        env->mstatus |= env->mstatus_hs;

        env->vstvec = env->stvec;
        env->stvec = env->stvec_hs;

        env->vsscratch = env->sscratch;
        env->sscratch = env->sscratch_hs;

        env->vsepc = env->sepc;
        env->sepc = env->sepc_hs;

        env->vscause = env->scause;
        env->scause = env->scause_hs;

        env->vstval = env->stval;
        env->stval = env->stval_hs;

        env->vsatp = env->satp;
        env->satp = env->satp_hs;
    } else {
        /* Current V=0 and we are about to change to V=1 */
        env->mstatus_hs = env->mstatus & mstatus_mask;
        env->mstatus &= ~mstatus_mask;
        env->mstatus |= env->vsstatus;

        env->stvec_hs = env->stvec;
        env->stvec = env->vstvec;

        env->sscratch_hs = env->sscratch;
        env->sscratch = env->vsscratch;

        env->sepc_hs = env->sepc;
        env->sepc = env->vsepc;

        env->scause_hs = env->scause;
        env->scause = env->vscause;

        env->stval_hs = env->stval;
        env->stval = env->vstval;

        env->satp_hs = env->satp;
        env->satp = env->vsatp;
    }
}

target_ulong riscv_cpu_get_geilen(CPURISCVState *env)
{
    if (!riscv_has_ext(env, RVH)) {
        return 0;
    }

    return env->geilen;
}

void riscv_cpu_set_geilen(CPURISCVState *env, target_ulong geilen)
{
    if (!riscv_has_ext(env, RVH)) {
        return;
    }

    if (geilen > (TARGET_LONG_BITS - 1)) {
        return;
    }

    env->geilen = geilen;
}

bool riscv_cpu_virt_enabled(CPURISCVState *env)
{
    if (!riscv_has_ext(env, RVH)) {
        return false;
    }

    return get_field(env->virt, VIRT_ONOFF);
}
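/*
 * Virtualization mode (V) is not encoded in mmu_idx, so translations
 * cached under V=0 and V=1 would alias in the TLB; hence the flush
 * below on every V change.
 */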
void riscv_cpu_set_virt_enabled(CPURISCVState *env, bool enable)
{
    if (!riscv_has_ext(env, RVH)) {
        return;
    }

    /* Flush the TLB on all virt mode changes. */
    if (get_field(env->virt, VIRT_ONOFF) != enable) {
        tlb_flush(env_cpu(env));
    }

    env->virt = set_field(env->virt, VIRT_ONOFF, enable);

    if (enable) {
        /*
         * The guest external interrupts from an interrupt controller are
         * delivered only when the Guest/VM is running (i.e. V=1). This
         * means any guest external interrupt which is triggered while the
         * Guest/VM is not running (i.e. V=0) will be missed on QEMU,
         * resulting in a guest with sluggish response to serial console
         * input and other I/O events.
         *
         * To solve this, we check and inject interrupt after setting V=1.
         */
        riscv_cpu_update_mip(env_archcpu(env), 0, 0);
    }
}

bool riscv_cpu_two_stage_lookup(int mmu_idx)
{
    return mmu_idx & TB_FLAGS_PRIV_HYP_ACCESS_MASK;
}

int riscv_cpu_claim_interrupts(RISCVCPU *cpu, uint64_t interrupts)
{
    CPURISCVState *env = &cpu->env;
    if (env->miclaim & interrupts) {
        return -1;
    } else {
        env->miclaim |= interrupts;
        return 0;
    }
}

uint64_t riscv_cpu_update_mip(RISCVCPU *cpu, uint64_t mask, uint64_t value)
{
    CPURISCVState *env = &cpu->env;
    CPUState *cs = CPU(cpu);
    uint64_t gein, vsgein = 0, vstip = 0, old = env->mip;

    if (riscv_cpu_virt_enabled(env)) {
        gein = get_field(env->hstatus, HSTATUS_VGEIN);
        vsgein = (env->hgeip & (1ULL << gein)) ? MIP_VSEIP : 0;
    }

    /* No need to update mip for VSTIP */
    mask = ((mask == MIP_VSTIP) && env->vstime_irq) ? 0 : mask;
    vstip = env->vstime_irq ? MIP_VSTIP : 0;

    QEMU_IOTHREAD_LOCK_GUARD();

    env->mip = (env->mip & ~mask) | (value & mask);

    if (env->mip | vsgein | vstip) {
        cpu_interrupt(cs, CPU_INTERRUPT_HARD);
    } else {
        cpu_reset_interrupt(cs, CPU_INTERRUPT_HARD);
    }

    return old;
}

void riscv_cpu_set_rdtime_fn(CPURISCVState *env, uint64_t (*fn)(void *),
                             void *arg)
{
    env->rdtime_fn = fn;
    env->rdtime_fn_arg = arg;
}

void riscv_cpu_set_aia_ireg_rmw_fn(CPURISCVState *env, uint32_t priv,
                                   int (*rmw_fn)(void *arg,
                                                 target_ulong reg,
                                                 target_ulong *val,
                                                 target_ulong new_val,
                                                 target_ulong write_mask),
                                   void *rmw_fn_arg)
{
    if (priv <= PRV_M) {
        env->aia_ireg_rmw_fn[priv] = rmw_fn;
        env->aia_ireg_rmw_fn_arg[priv] = rmw_fn_arg;
    }
}

void riscv_cpu_set_mode(CPURISCVState *env, target_ulong newpriv)
{
    if (newpriv > PRV_M) {
        g_assert_not_reached();
    }
    if (newpriv == PRV_H) {
        newpriv = PRV_U;
    }
    if (icount_enabled() && newpriv != env->priv) {
        riscv_itrigger_update_priv(env);
    }
    /* tlb_flush is unnecessary as mode is contained in mmu_idx */
    env->priv = newpriv;
    env->xl = cpu_recompute_xl(env);
    riscv_cpu_update_mask(env);

    /*
     * Clear the load reservation - otherwise a reservation placed in one
     * context/process can be used by another, resulting in an SC succeeding
     * incorrectly. Version 2.2 of the ISA specification explicitly requires
     * this behaviour, while later revisions say that the kernel "should" use
     * an SC instruction to force the yielding of a load reservation on a
     * preemptive context switch. As a result, do both.
     */
    env->load_res = -1;
}
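/*
 * A PMP region need not be page-aligned: when a matching region only
 * partly covers the TARGET_PAGE_SIZE page containing addr,
 * pmp_get_tlb_size() below shrinks the TLB entry so that accesses
 * within that page keep being re-checked against the PMP rules.
 */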
/*
 * get_physical_address_pmp - check PMP permission for this physical address
 *
 * Match the PMP region and check permission for this physical address and
 * its TLB page. Returns 0 if the permission check was successful.
 *
 * @env: CPURISCVState
 * @prot: The returned protection attributes
 * @tlb_size: TLB page size containing addr. It could be modified after PMP
 *            permission checking. NULL if no TLB page is to be set for addr.
 * @addr: The physical address to be checked for permission
 * @access_type: The type of MMU access
 * @mode: Indicates current privilege level.
 */
static int get_physical_address_pmp(CPURISCVState *env, int *prot,
                                    target_ulong *tlb_size, hwaddr addr,
                                    int size, MMUAccessType access_type,
                                    int mode)
{
    pmp_priv_t pmp_priv;
    int pmp_index = -1;

    if (!riscv_feature(env, RISCV_FEATURE_PMP)) {
        *prot = PAGE_READ | PAGE_WRITE | PAGE_EXEC;
        return TRANSLATE_SUCCESS;
    }

    pmp_index = pmp_hart_has_privs(env, addr, size, 1 << access_type,
                                   &pmp_priv, mode);
    if (pmp_index < 0) {
        *prot = 0;
        return TRANSLATE_PMP_FAIL;
    }

    *prot = pmp_priv_to_page_prot(pmp_priv);
    if ((tlb_size != NULL) && pmp_index != MAX_RISCV_PMPS) {
        target_ulong tlb_sa = addr & ~(TARGET_PAGE_SIZE - 1);
        target_ulong tlb_ea = tlb_sa + TARGET_PAGE_SIZE - 1;

        *tlb_size = pmp_get_tlb_size(env, pmp_index, tlb_sa, tlb_ea);
    }

    return TRANSLATE_SUCCESS;
}

/*
 * get_physical_address - get the physical address for this virtual address
 *
 * Do a page table walk to obtain the physical address corresponding to a
 * virtual address. Returns 0 if the translation was successful.
 *
 * Adapted from Spike's mmu_t::translate and mmu_t::walk
 *
 * @env: CPURISCVState
 * @physical: This will be set to the calculated physical address
 * @prot: The returned protection attributes
 * @addr: The virtual address to be translated
 * @fault_pte_addr: If not NULL, this will be set to the fault pte address
 *                  when an error occurs on pte address translation.
 *                  This will already be shifted to match htval.
 * @access_type: The type of MMU access
 * @mmu_idx: Indicates current privilege level
 * @first_stage: Are we in first stage translation?
 *               Second stage is used for hypervisor guest translation
 * @two_stage: Are we going to perform two stage translation
 * @is_debug: Is this access from a debugger or the monitor?
 */
static int get_physical_address(CPURISCVState *env, hwaddr *physical,
                                int *prot, target_ulong addr,
                                target_ulong *fault_pte_addr,
                                int access_type, int mmu_idx,
                                bool first_stage, bool two_stage,
                                bool is_debug)
{
    /*
     * NOTE: the env->pc value visible here will not be
     * correct, but the value visible to the exception handler
     * (riscv_cpu_do_interrupt) is correct
     */
    MemTxResult res;
    MemTxAttrs attrs = MEMTXATTRS_UNSPECIFIED;
    int mode = mmu_idx & TB_FLAGS_PRIV_MMU_MASK;
    bool use_background = false;
    hwaddr ppn;
    RISCVCPU *cpu = env_archcpu(env);
    int napot_bits = 0;
    target_ulong napot_mask;

    /*
     * Check if we should use the background registers for the two
     * stage translation. We don't need to check if we actually need
     * two stage translation as that happened before this function
     * was called. Background registers will be used if the guest has
     * forced a two stage translation to be on (in HS or M mode).
     */
    if (!riscv_cpu_virt_enabled(env) && two_stage) {
        use_background = true;
    }
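    /*
     * When MSTATUS_MPRV is set, M-mode data accesses are translated and
     * checked as if performed at the privilege level held in MSTATUS_MPP;
     * instruction fetches are never affected by MPRV.
     */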
    /*
     * MPRV does not affect the virtual-machine load/store
     * instructions, HLV, HLVX, and HSV.
     */
    if (riscv_cpu_two_stage_lookup(mmu_idx)) {
        mode = get_field(env->hstatus, HSTATUS_SPVP);
    } else if (mode == PRV_M && access_type != MMU_INST_FETCH) {
        if (get_field(env->mstatus, MSTATUS_MPRV)) {
            mode = get_field(env->mstatus, MSTATUS_MPP);
        }
    }

    if (first_stage == false) {
        /* We are in stage 2 translation, this is similar to stage 1. */
        /* Stage 2 is always taken as U-mode */
        mode = PRV_U;
    }

    if (mode == PRV_M || !riscv_feature(env, RISCV_FEATURE_MMU)) {
        *physical = addr;
        *prot = PAGE_READ | PAGE_WRITE | PAGE_EXEC;
        return TRANSLATE_SUCCESS;
    }

    *prot = 0;

    hwaddr base;
    int levels, ptidxbits, ptesize, vm, sum, mxr, widened;

    if (first_stage == true) {
        mxr = get_field(env->mstatus, MSTATUS_MXR);
    } else {
        mxr = get_field(env->vsstatus, MSTATUS_MXR);
    }

    if (first_stage == true) {
        if (use_background) {
            if (riscv_cpu_mxl(env) == MXL_RV32) {
                base = (hwaddr)get_field(env->vsatp, SATP32_PPN) << PGSHIFT;
                vm = get_field(env->vsatp, SATP32_MODE);
            } else {
                base = (hwaddr)get_field(env->vsatp, SATP64_PPN) << PGSHIFT;
                vm = get_field(env->vsatp, SATP64_MODE);
            }
        } else {
            if (riscv_cpu_mxl(env) == MXL_RV32) {
                base = (hwaddr)get_field(env->satp, SATP32_PPN) << PGSHIFT;
                vm = get_field(env->satp, SATP32_MODE);
            } else {
                base = (hwaddr)get_field(env->satp, SATP64_PPN) << PGSHIFT;
                vm = get_field(env->satp, SATP64_MODE);
            }
        }
        widened = 0;
    } else {
        if (riscv_cpu_mxl(env) == MXL_RV32) {
            base = (hwaddr)get_field(env->hgatp, SATP32_PPN) << PGSHIFT;
            vm = get_field(env->hgatp, SATP32_MODE);
        } else {
            base = (hwaddr)get_field(env->hgatp, SATP64_PPN) << PGSHIFT;
            vm = get_field(env->hgatp, SATP64_MODE);
        }
        widened = 2;
    }
    /* status.SUM is ignored when executing using the background registers */
    sum = get_field(env->mstatus, MSTATUS_SUM) || use_background || is_debug;
    switch (vm) {
    case VM_1_10_SV32:
        levels = 2; ptidxbits = 10; ptesize = 4; break;
    case VM_1_10_SV39:
        levels = 3; ptidxbits = 9; ptesize = 8; break;
    case VM_1_10_SV48:
        levels = 4; ptidxbits = 9; ptesize = 8; break;
    case VM_1_10_SV57:
        levels = 5; ptidxbits = 9; ptesize = 8; break;
    case VM_1_10_MBARE:
        *physical = addr;
        *prot = PAGE_READ | PAGE_WRITE | PAGE_EXEC;
        return TRANSLATE_SUCCESS;
    default:
        g_assert_not_reached();
    }

    CPUState *cs = env_cpu(env);
    int va_bits = PGSHIFT + levels * ptidxbits + widened;
    target_ulong mask, masked_msbs;

    if (TARGET_LONG_BITS > (va_bits - 1)) {
        mask = (1L << (TARGET_LONG_BITS - (va_bits - 1))) - 1;
    } else {
        mask = 0;
    }
    masked_msbs = (addr >> (va_bits - 1)) & mask;

    if (masked_msbs != 0 && masked_msbs != mask) {
        return TRANSLATE_FAIL;
    }

    int ptshift = (levels - 1) * ptidxbits;
    int i;

#if !TCG_OVERSIZED_GUEST
restart:
#endif
    for (i = 0; i < levels; i++, ptshift -= ptidxbits) {
        target_ulong idx;
        if (i == 0) {
            idx = (addr >> (PGSHIFT + ptshift)) &
                  ((1 << (ptidxbits + widened)) - 1);
        } else {
            idx = (addr >> (PGSHIFT + ptshift)) &
                  ((1 << ptidxbits) - 1);
        }

        /* check that physical address of PTE is legal */
        hwaddr pte_addr;
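        /*
         * During a VS-stage walk each PTE address is itself a guest
         * physical address and must first pass through G-stage
         * translation; on failure the guest physical PTE address,
         * shifted right by 2 to match the htval encoding, is reported
         * back through fault_pte_addr.
         */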
        if (two_stage && first_stage) {
            int vbase_prot;
            hwaddr vbase;

            /* Do the second stage translation on the base PTE address. */
            int vbase_ret = get_physical_address(env, &vbase, &vbase_prot,
                                                 base, NULL, MMU_DATA_LOAD,
                                                 mmu_idx, false, true,
                                                 is_debug);

            if (vbase_ret != TRANSLATE_SUCCESS) {
                if (fault_pte_addr) {
                    *fault_pte_addr = (base + idx * ptesize) >> 2;
                }
                return TRANSLATE_G_STAGE_FAIL;
            }

            pte_addr = vbase + idx * ptesize;
        } else {
            pte_addr = base + idx * ptesize;
        }

        int pmp_prot;
        int pmp_ret = get_physical_address_pmp(env, &pmp_prot, NULL, pte_addr,
                                               sizeof(target_ulong),
                                               MMU_DATA_LOAD, PRV_S);
        if (pmp_ret != TRANSLATE_SUCCESS) {
            return TRANSLATE_PMP_FAIL;
        }

        target_ulong pte;
        if (riscv_cpu_mxl(env) == MXL_RV32) {
            pte = address_space_ldl(cs->as, pte_addr, attrs, &res);
        } else {
            pte = address_space_ldq(cs->as, pte_addr, attrs, &res);
        }

        if (res != MEMTX_OK) {
            return TRANSLATE_FAIL;
        }
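        /*
         * For 64-bit PTEs the Svpbmt (bits 62:61) and Svnapot (bit 63)
         * fields are attributes rather than part of the PPN, so mask
         * them off; on CPUs without these extensions, any nonzero
         * reserved high bit makes the PTE invalid.
         */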
        if (riscv_cpu_sxl(env) == MXL_RV32) {
            ppn = pte >> PTE_PPN_SHIFT;
        } else if (cpu->cfg.ext_svpbmt || cpu->cfg.ext_svnapot) {
            ppn = (pte & (target_ulong)PTE_PPN_MASK) >> PTE_PPN_SHIFT;
        } else {
            ppn = pte >> PTE_PPN_SHIFT;
            if ((pte & ~(target_ulong)PTE_PPN_MASK) >> PTE_PPN_SHIFT) {
                return TRANSLATE_FAIL;
            }
        }

        if (!(pte & PTE_V)) {
            /* Invalid PTE */
            return TRANSLATE_FAIL;
        } else if (!cpu->cfg.ext_svpbmt && (pte & PTE_PBMT)) {
            return TRANSLATE_FAIL;
        } else if (!(pte & (PTE_R | PTE_W | PTE_X))) {
            /* Inner PTE, continue walking */
            if (pte & (PTE_D | PTE_A | PTE_U | PTE_ATTR)) {
                return TRANSLATE_FAIL;
            }
            base = ppn << PGSHIFT;
        } else if ((pte & (PTE_R | PTE_W | PTE_X)) == PTE_W) {
            /* Reserved leaf PTE flags: PTE_W */
            return TRANSLATE_FAIL;
        } else if ((pte & (PTE_R | PTE_W | PTE_X)) == (PTE_W | PTE_X)) {
            /* Reserved leaf PTE flags: PTE_W + PTE_X */
            return TRANSLATE_FAIL;
        } else if ((pte & PTE_U) && ((mode != PRV_U) &&
                   (!sum || access_type == MMU_INST_FETCH))) {
            /*
             * User PTE flags when not U mode and mstatus.SUM is not set,
             * or the access type is an instruction fetch
             */
            return TRANSLATE_FAIL;
        } else if (!(pte & PTE_U) && (mode != PRV_S)) {
            /* Supervisor PTE flags when not S mode */
            return TRANSLATE_FAIL;
        } else if (ppn & ((1ULL << ptshift) - 1)) {
            /* Misaligned PPN */
            return TRANSLATE_FAIL;
        } else if (access_type == MMU_DATA_LOAD && !((pte & PTE_R) ||
                   ((pte & PTE_X) && mxr))) {
            /* Read access check failed */
            return TRANSLATE_FAIL;
        } else if (access_type == MMU_DATA_STORE && !(pte & PTE_W)) {
            /* Write access check failed */
            return TRANSLATE_FAIL;
        } else if (access_type == MMU_INST_FETCH && !(pte & PTE_X)) {
            /* Fetch access check failed */
            return TRANSLATE_FAIL;
        } else {
            /* if necessary, set accessed and dirty bits. */
            target_ulong updated_pte = pte | PTE_A |
                (access_type == MMU_DATA_STORE ? PTE_D : 0);

            /* Page table updates need to be atomic with MTTCG enabled */
            if (updated_pte != pte) {
                /*
                 * - if accessed or dirty bits need updating, and the PTE is
                 *   in RAM, then we do so atomically with a compare and swap.
                 * - if the PTE is in IO space or ROM, then it can't be updated
                 *   and we return TRANSLATE_FAIL.
                 * - if the PTE changed by the time we went to update it, then
                 *   it is no longer valid and we must re-walk the page table.
                 */
                MemoryRegion *mr;
                hwaddr l = sizeof(target_ulong), addr1;
                mr = address_space_translate(cs->as, pte_addr, &addr1, &l,
                                             false, MEMTXATTRS_UNSPECIFIED);
                if (memory_region_is_ram(mr)) {
                    target_ulong *pte_pa =
                        qemu_map_ram_ptr(mr->ram_block, addr1);
#if TCG_OVERSIZED_GUEST
                    /*
                     * MTTCG is not enabled on oversized TCG guests so
                     * page table updates do not need to be atomic
                     */
                    *pte_pa = pte = updated_pte;
#else
                    target_ulong old_pte =
                        qatomic_cmpxchg(pte_pa, pte, updated_pte);
                    if (old_pte != pte) {
                        goto restart;
                    } else {
                        pte = updated_pte;
                    }
#endif
                } else {
                    /*
                     * misconfigured PTE in ROM (AD bits are not preset) or
                     * PTE is in IO space and can't be updated atomically
                     */
                    return TRANSLATE_FAIL;
                }
            }

            /*
             * for superpage mappings, make a fake leaf PTE for the TLB's
             * benefit.
             */
            target_ulong vpn = addr >> PGSHIFT;

            if (cpu->cfg.ext_svnapot && (pte & PTE_N)) {
                napot_bits = ctzl(ppn) + 1;
                if ((i != (levels - 1)) || (napot_bits != 4)) {
                    return TRANSLATE_FAIL;
                }
            }

            napot_mask = (1 << napot_bits) - 1;
            *physical = (((ppn & ~napot_mask) | (vpn & napot_mask) |
                          (vpn & (((target_ulong)1 << ptshift) - 1))
                        ) << PGSHIFT) | (addr & ~TARGET_PAGE_MASK);

            /* set permissions on the TLB entry */
            if ((pte & PTE_R) || ((pte & PTE_X) && mxr)) {
                *prot |= PAGE_READ;
            }
            if ((pte & PTE_X)) {
                *prot |= PAGE_EXEC;
            }
            /*
             * add write permission on stores or if the page is already
             * dirty, so that we TLB miss on later writes to update the
             * dirty bit
             */
            if ((pte & PTE_W) &&
                (access_type == MMU_DATA_STORE || (pte & PTE_D))) {
                *prot |= PAGE_WRITE;
            }
            return TRANSLATE_SUCCESS;
        }
    }
    return TRANSLATE_FAIL;
}
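/*
 * The translation results above (TRANSLATE_SUCCESS, TRANSLATE_FAIL,
 * TRANSLATE_PMP_FAIL and TRANSLATE_G_STAGE_FAIL) are mapped onto
 * architectural exception causes below: PMP failures and bare-mode
 * faults become access faults, failures of an enabled MMU become page
 * faults, and second-stage failures while V=1 become guest-page faults.
 */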
static void raise_mmu_exception(CPURISCVState *env, target_ulong address,
                                MMUAccessType access_type, bool pmp_violation,
                                bool first_stage, bool two_stage,
                                bool two_stage_indirect)
{
    CPUState *cs = env_cpu(env);
    int page_fault_exceptions, vm;
    uint64_t satp_mode;

    if (riscv_cpu_mxl(env) == MXL_RV32) {
        satp_mode = SATP32_MODE;
    } else {
        satp_mode = SATP64_MODE;
    }

    if (first_stage) {
        vm = get_field(env->satp, satp_mode);
    } else {
        vm = get_field(env->hgatp, satp_mode);
    }

    page_fault_exceptions = vm != VM_1_10_MBARE && !pmp_violation;

    switch (access_type) {
    case MMU_INST_FETCH:
        if (riscv_cpu_virt_enabled(env) && !first_stage) {
            cs->exception_index = RISCV_EXCP_INST_GUEST_PAGE_FAULT;
        } else {
            cs->exception_index = page_fault_exceptions ?
                RISCV_EXCP_INST_PAGE_FAULT : RISCV_EXCP_INST_ACCESS_FAULT;
        }
        break;
    case MMU_DATA_LOAD:
        if (two_stage && !first_stage) {
            cs->exception_index = RISCV_EXCP_LOAD_GUEST_ACCESS_FAULT;
        } else {
            cs->exception_index = page_fault_exceptions ?
                RISCV_EXCP_LOAD_PAGE_FAULT : RISCV_EXCP_LOAD_ACCESS_FAULT;
        }
        break;
    case MMU_DATA_STORE:
        if (two_stage && !first_stage) {
            cs->exception_index = RISCV_EXCP_STORE_GUEST_AMO_ACCESS_FAULT;
        } else {
            cs->exception_index = page_fault_exceptions ?
                RISCV_EXCP_STORE_PAGE_FAULT : RISCV_EXCP_STORE_AMO_ACCESS_FAULT;
        }
        break;
    default:
        g_assert_not_reached();
    }
    env->badaddr = address;
    env->two_stage_lookup = two_stage;
    env->two_stage_indirect_lookup = two_stage_indirect;
}

hwaddr riscv_cpu_get_phys_page_debug(CPUState *cs, vaddr addr)
{
    RISCVCPU *cpu = RISCV_CPU(cs);
    CPURISCVState *env = &cpu->env;
    hwaddr phys_addr;
    int prot;
    int mmu_idx = cpu_mmu_index(&cpu->env, false);

    if (get_physical_address(env, &phys_addr, &prot, addr, NULL, 0, mmu_idx,
                             true, riscv_cpu_virt_enabled(env), true)) {
        return -1;
    }

    if (riscv_cpu_virt_enabled(env)) {
        if (get_physical_address(env, &phys_addr, &prot, phys_addr, NULL,
                                 0, mmu_idx, false, true, true)) {
            return -1;
        }
    }

    return phys_addr & TARGET_PAGE_MASK;
}

void riscv_cpu_do_transaction_failed(CPUState *cs, hwaddr physaddr,
                                     vaddr addr, unsigned size,
                                     MMUAccessType access_type,
                                     int mmu_idx, MemTxAttrs attrs,
                                     MemTxResult response, uintptr_t retaddr)
{
    RISCVCPU *cpu = RISCV_CPU(cs);
    CPURISCVState *env = &cpu->env;

    if (access_type == MMU_DATA_STORE) {
        cs->exception_index = RISCV_EXCP_STORE_AMO_ACCESS_FAULT;
    } else if (access_type == MMU_DATA_LOAD) {
        cs->exception_index = RISCV_EXCP_LOAD_ACCESS_FAULT;
    } else {
        cs->exception_index = RISCV_EXCP_INST_ACCESS_FAULT;
    }

    env->badaddr = addr;
    env->two_stage_lookup = riscv_cpu_virt_enabled(env) ||
                            riscv_cpu_two_stage_lookup(mmu_idx);
    env->two_stage_indirect_lookup = false;
    cpu_loop_exit_restore(cs, retaddr);
}

void riscv_cpu_do_unaligned_access(CPUState *cs, vaddr addr,
                                   MMUAccessType access_type, int mmu_idx,
                                   uintptr_t retaddr)
{
    RISCVCPU *cpu = RISCV_CPU(cs);
    CPURISCVState *env = &cpu->env;
    switch (access_type) {
    case MMU_INST_FETCH:
        cs->exception_index = RISCV_EXCP_INST_ADDR_MIS;
        break;
    case MMU_DATA_LOAD:
        cs->exception_index = RISCV_EXCP_LOAD_ADDR_MIS;
        break;
    case MMU_DATA_STORE:
        cs->exception_index = RISCV_EXCP_STORE_AMO_ADDR_MIS;
        break;
    default:
        g_assert_not_reached();
    }
    env->badaddr = addr;
    env->two_stage_lookup = riscv_cpu_virt_enabled(env) ||
                            riscv_cpu_two_stage_lookup(mmu_idx);
    env->two_stage_indirect_lookup = false;
    cpu_loop_exit_restore(cs, retaddr);
}

static void pmu_tlb_fill_incr_ctr(RISCVCPU *cpu, MMUAccessType access_type)
{
    enum riscv_pmu_event_idx pmu_event_type;

    switch (access_type) {
    case MMU_INST_FETCH:
        pmu_event_type = RISCV_PMU_EVENT_CACHE_ITLB_PREFETCH_MISS;
        break;
    case MMU_DATA_LOAD:
        pmu_event_type = RISCV_PMU_EVENT_CACHE_DTLB_READ_MISS;
        break;
    case MMU_DATA_STORE:
        pmu_event_type = RISCV_PMU_EVENT_CACHE_DTLB_WRITE_MISS;
        break;
    default:
        return;
    }

    riscv_pmu_incr_ctr(cpu, pmu_event_type);
}
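/*
 * Called by the core TCG MMU code on a TLB miss. Returns true after
 * installing a TLB entry, returns false when probing and the
 * translation failed, and otherwise raises the MMU exception without
 * returning.
 */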
bool riscv_cpu_tlb_fill(CPUState *cs, vaddr address, int size,
                        MMUAccessType access_type, int mmu_idx,
                        bool probe, uintptr_t retaddr)
{
    RISCVCPU *cpu = RISCV_CPU(cs);
    CPURISCVState *env = &cpu->env;
    vaddr im_address;
    hwaddr pa = 0;
    int prot, prot2, prot_pmp;
    bool pmp_violation = false;
    bool first_stage_error = true;
    bool two_stage_lookup = false;
    bool two_stage_indirect_error = false;
    int ret = TRANSLATE_FAIL;
    int mode = mmu_idx;
    /* default TLB page size */
    target_ulong tlb_size = TARGET_PAGE_SIZE;

    env->guest_phys_fault_addr = 0;

    qemu_log_mask(CPU_LOG_MMU, "%s ad %" VADDR_PRIx " rw %d mmu_idx %d\n",
                  __func__, address, access_type, mmu_idx);

    /*
     * MPRV does not affect the virtual-machine load/store
     * instructions, HLV, HLVX, and HSV.
     */
    if (riscv_cpu_two_stage_lookup(mmu_idx)) {
        mode = get_field(env->hstatus, HSTATUS_SPVP);
    } else if (mode == PRV_M && access_type != MMU_INST_FETCH &&
               get_field(env->mstatus, MSTATUS_MPRV)) {
        mode = get_field(env->mstatus, MSTATUS_MPP);
        if (riscv_has_ext(env, RVH) && get_field(env->mstatus, MSTATUS_MPV)) {
            two_stage_lookup = true;
        }
    }

    pmu_tlb_fill_incr_ctr(cpu, access_type);
    if (riscv_cpu_virt_enabled(env) ||
        ((riscv_cpu_two_stage_lookup(mmu_idx) || two_stage_lookup) &&
         access_type != MMU_INST_FETCH)) {
        /* Two stage lookup */
        ret = get_physical_address(env, &pa, &prot, address,
                                   &env->guest_phys_fault_addr, access_type,
                                   mmu_idx, true, true, false);

        /*
         * A G-stage exception may be triggered during two-stage lookup.
         * And the env->guest_phys_fault_addr has already been set in
         * get_physical_address().
         */
        if (ret == TRANSLATE_G_STAGE_FAIL) {
            first_stage_error = false;
            two_stage_indirect_error = true;
            access_type = MMU_DATA_LOAD;
        }

        qemu_log_mask(CPU_LOG_MMU,
                      "%s 1st-stage address=%" VADDR_PRIx " ret %d physical "
                      TARGET_FMT_plx " prot %d\n",
                      __func__, address, ret, pa, prot);

        if (ret == TRANSLATE_SUCCESS) {
            /* Second stage lookup */
            im_address = pa;

            ret = get_physical_address(env, &pa, &prot2, im_address, NULL,
                                       access_type, mmu_idx, false, true,
                                       false);

            qemu_log_mask(CPU_LOG_MMU,
                          "%s 2nd-stage address=%" VADDR_PRIx " ret %d physical "
                          TARGET_FMT_plx " prot %d\n",
                          __func__, im_address, ret, pa, prot2);

            prot &= prot2;

            if (ret == TRANSLATE_SUCCESS) {
                ret = get_physical_address_pmp(env, &prot_pmp, &tlb_size, pa,
                                               size, access_type, mode);

                qemu_log_mask(CPU_LOG_MMU,
                              "%s PMP address=" TARGET_FMT_plx " ret %d prot"
                              " %d tlb_size " TARGET_FMT_lu "\n",
                              __func__, pa, ret, prot_pmp, tlb_size);

                prot &= prot_pmp;
            }

            if (ret != TRANSLATE_SUCCESS) {
                /*
                 * Guest physical address translation failed, this is a HS
                 * level exception
                 */
                first_stage_error = false;
                env->guest_phys_fault_addr = (im_address |
                                              (address &
                                               (TARGET_PAGE_SIZE - 1))) >> 2;
            }
        }
    } else {
        /* Single stage lookup */
        ret = get_physical_address(env, &pa, &prot, address, NULL,
                                   access_type, mmu_idx, true, false, false);

        qemu_log_mask(CPU_LOG_MMU,
                      "%s address=%" VADDR_PRIx " ret %d physical "
                      TARGET_FMT_plx " prot %d\n",
                      __func__, address, ret, pa, prot);

        if (ret == TRANSLATE_SUCCESS) {
            ret = get_physical_address_pmp(env, &prot_pmp, &tlb_size, pa,
                                           size, access_type, mode);

            qemu_log_mask(CPU_LOG_MMU,
                          "%s PMP address=" TARGET_FMT_plx " ret %d prot"
                          " %d tlb_size " TARGET_FMT_lu "\n",
                          __func__, pa, ret, prot_pmp, tlb_size);

            prot &= prot_pmp;
        }
    }

    if (ret == TRANSLATE_PMP_FAIL) {
        pmp_violation = true;
    }
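    /*
     * At this point prot holds the intersection of the VS-stage,
     * G-stage (where applicable) and PMP permissions, and tlb_size may
     * have been shrunk by the PMP check above.
     */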
    if (ret == TRANSLATE_SUCCESS) {
        tlb_set_page(cs, address & ~(tlb_size - 1), pa & ~(tlb_size - 1),
                     prot, mmu_idx, tlb_size);
        return true;
    } else if (probe) {
        return false;
    } else {
        raise_mmu_exception(env, address, access_type, pmp_violation,
                            first_stage_error,
                            riscv_cpu_virt_enabled(env) ||
                                riscv_cpu_two_stage_lookup(mmu_idx),
                            two_stage_indirect_error);
        cpu_loop_exit_restore(cs, retaddr);
    }

    return true;
}

static target_ulong riscv_transformed_insn(CPURISCVState *env,
                                           target_ulong insn,
                                           target_ulong taddr)
{
    target_ulong xinsn = 0;
    target_ulong access_rs1 = 0, access_imm = 0, access_size = 0;

    /*
     * Only Quadrant 0 and Quadrant 2 of the RVC instruction space need
     * to be uncompressed. Quadrant 1 of the RVC instruction space does
     * not need to be transformed because those instructions won't
     * generate any load/store trap.
     */

    if ((insn & 0x3) != 0x3) {
        /* Transform 16bit instruction into 32bit instruction */
        switch (GET_C_OP(insn)) {
        case OPC_RISC_C_OP_QUAD0: /* Quadrant 0 */
            switch (GET_C_FUNC(insn)) {
            case OPC_RISC_C_FUNC_FLD_LQ:
                if (riscv_cpu_xlen(env) != 128) { /* C.FLD (RV32/64) */
                    xinsn = OPC_RISC_FLD;
                    xinsn = SET_RD(xinsn, GET_C_RS2S(insn));
                    access_rs1 = GET_C_RS1S(insn);
                    access_imm = GET_C_LD_IMM(insn);
                    access_size = 8;
                }
                break;
            case OPC_RISC_C_FUNC_LW: /* C.LW */
                xinsn = OPC_RISC_LW;
                xinsn = SET_RD(xinsn, GET_C_RS2S(insn));
                access_rs1 = GET_C_RS1S(insn);
                access_imm = GET_C_LW_IMM(insn);
                access_size = 4;
                break;
            case OPC_RISC_C_FUNC_FLW_LD:
                if (riscv_cpu_xlen(env) == 32) { /* C.FLW (RV32) */
                    xinsn = OPC_RISC_FLW;
                    xinsn = SET_RD(xinsn, GET_C_RS2S(insn));
                    access_rs1 = GET_C_RS1S(insn);
                    access_imm = GET_C_LW_IMM(insn);
                    access_size = 4;
                } else { /* C.LD (RV64/RV128) */
                    xinsn = OPC_RISC_LD;
                    xinsn = SET_RD(xinsn, GET_C_RS2S(insn));
                    access_rs1 = GET_C_RS1S(insn);
                    access_imm = GET_C_LD_IMM(insn);
                    access_size = 8;
                }
                break;
            case OPC_RISC_C_FUNC_FSD_SQ:
                if (riscv_cpu_xlen(env) != 128) { /* C.FSD (RV32/64) */
                    xinsn = OPC_RISC_FSD;
                    xinsn = SET_RS2(xinsn, GET_C_RS2S(insn));
                    access_rs1 = GET_C_RS1S(insn);
                    access_imm = GET_C_SD_IMM(insn);
                    access_size = 8;
                }
                break;
            case OPC_RISC_C_FUNC_SW: /* C.SW */
                xinsn = OPC_RISC_SW;
                xinsn = SET_RS2(xinsn, GET_C_RS2S(insn));
                access_rs1 = GET_C_RS1S(insn);
                access_imm = GET_C_SW_IMM(insn);
                access_size = 4;
                break;
            case OPC_RISC_C_FUNC_FSW_SD:
                if (riscv_cpu_xlen(env) == 32) { /* C.FSW (RV32) */
                    xinsn = OPC_RISC_FSW;
                    xinsn = SET_RS2(xinsn, GET_C_RS2S(insn));
                    access_rs1 = GET_C_RS1S(insn);
                    access_imm = GET_C_SW_IMM(insn);
                    access_size = 4;
                } else { /* C.SD (RV64/RV128) */
                    xinsn = OPC_RISC_SD;
                    xinsn = SET_RS2(xinsn, GET_C_RS2S(insn));
                    access_rs1 = GET_C_RS1S(insn);
                    access_imm = GET_C_SD_IMM(insn);
                    access_size = 8;
                }
                break;
            default:
                break;
            }
            break;
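        /*
         * Quadrant 2 encodings below are the stack-pointer-relative
         * loads and stores; they implicitly use x2 (sp) as the base
         * register, hence access_rs1 = 2.
         */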
        case OPC_RISC_C_OP_QUAD2: /* Quadrant 2 */
            switch (GET_C_FUNC(insn)) {
            case OPC_RISC_C_FUNC_FLDSP_LQSP:
                if (riscv_cpu_xlen(env) != 128) { /* C.FLDSP (RV32/64) */
                    xinsn = OPC_RISC_FLD;
                    xinsn = SET_RD(xinsn, GET_C_RD(insn));
                    access_rs1 = 2;
                    access_imm = GET_C_LDSP_IMM(insn);
                    access_size = 8;
                }
                break;
            case OPC_RISC_C_FUNC_LWSP: /* C.LWSP */
                xinsn = OPC_RISC_LW;
                xinsn = SET_RD(xinsn, GET_C_RD(insn));
                access_rs1 = 2;
                access_imm = GET_C_LWSP_IMM(insn);
                access_size = 4;
                break;
            case OPC_RISC_C_FUNC_FLWSP_LDSP:
                if (riscv_cpu_xlen(env) == 32) { /* C.FLWSP (RV32) */
                    xinsn = OPC_RISC_FLW;
                    xinsn = SET_RD(xinsn, GET_C_RD(insn));
                    access_rs1 = 2;
                    access_imm = GET_C_LWSP_IMM(insn);
                    access_size = 4;
                } else { /* C.LDSP (RV64/RV128) */
                    xinsn = OPC_RISC_LD;
                    xinsn = SET_RD(xinsn, GET_C_RD(insn));
                    access_rs1 = 2;
                    access_imm = GET_C_LDSP_IMM(insn);
                    access_size = 8;
                }
                break;
            case OPC_RISC_C_FUNC_FSDSP_SQSP:
                if (riscv_cpu_xlen(env) != 128) { /* C.FSDSP (RV32/64) */
                    xinsn = OPC_RISC_FSD;
                    xinsn = SET_RS2(xinsn, GET_C_RS2(insn));
                    access_rs1 = 2;
                    access_imm = GET_C_SDSP_IMM(insn);
                    access_size = 8;
                }
                break;
            case OPC_RISC_C_FUNC_SWSP: /* C.SWSP */
                xinsn = OPC_RISC_SW;
                xinsn = SET_RS2(xinsn, GET_C_RS2(insn));
                access_rs1 = 2;
                access_imm = GET_C_SWSP_IMM(insn);
                access_size = 4;
                break;
            case OPC_RISC_C_FUNC_FSWSP_SDSP:
                if (riscv_cpu_xlen(env) == 32) { /* C.FSWSP (RV32) */
                    xinsn = OPC_RISC_FSW;
                    xinsn = SET_RS2(xinsn, GET_C_RS2(insn));
                    access_rs1 = 2;
                    access_imm = GET_C_SWSP_IMM(insn);
                    access_size = 4;
                } else { /* C.SDSP (RV64/RV128) */
                    xinsn = OPC_RISC_SD;
                    xinsn = SET_RS2(xinsn, GET_C_RS2(insn));
                    access_rs1 = 2;
                    access_imm = GET_C_SDSP_IMM(insn);
                    access_size = 8;
                }
                break;
            default:
                break;
            }
            break;
        default:
            break;
        }

        /*
         * Clear Bit1 of transformed instruction to indicate that
         * original instruction was a 16bit instruction
         */
        xinsn &= ~((target_ulong)0x2);
    } else {
        /* Transform 32bit (or wider) instructions */
        switch (MASK_OP_MAJOR(insn)) {
        case OPC_RISC_ATOMIC:
            xinsn = insn;
            access_rs1 = GET_RS1(insn);
            access_size = 1 << GET_FUNCT3(insn);
            break;
        case OPC_RISC_LOAD:
        case OPC_RISC_FP_LOAD:
            xinsn = SET_I_IMM(insn, 0);
            access_rs1 = GET_RS1(insn);
            access_imm = GET_IMM(insn);
            access_size = 1 << GET_FUNCT3(insn);
            break;
        case OPC_RISC_STORE:
        case OPC_RISC_FP_STORE:
            xinsn = SET_S_IMM(insn, 0);
            access_rs1 = GET_RS1(insn);
            access_imm = GET_STORE_IMM(insn);
            access_size = 1 << GET_FUNCT3(insn);
            break;
        case OPC_RISC_SYSTEM:
            if (MASK_OP_SYSTEM(insn) == OPC_RISC_HLVHSV) {
                xinsn = insn;
                access_rs1 = GET_RS1(insn);
                access_size = (GET_FUNCT7(insn) >> 1) & 0x3;
                access_size = 1 << access_size;
            }
            break;
        default:
            break;
        }
    }
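    /*
     * Fill the "Addr. Offset" field: the low bits of the difference
     * between the trapped address and the computed effective address.
     * This is nonzero only for misaligned accesses; e.g. a 4-byte load
     * with effective address 0xffe that faults on its second page at
     * taddr 0x1000 records (0x1000 - 0xffe) & 0x3 = 2.
     */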
    if (access_size) {
        xinsn = SET_RS1(xinsn, (taddr - (env->gpr[access_rs1] + access_imm)) &
                               (access_size - 1));
    }

    return xinsn;
}
#endif /* !CONFIG_USER_ONLY */

/*
 * Handle Traps
 *
 * Adapted from Spike's processor_t::take_trap.
 */
void riscv_cpu_do_interrupt(CPUState *cs)
{
#if !defined(CONFIG_USER_ONLY)

    RISCVCPU *cpu = RISCV_CPU(cs);
    CPURISCVState *env = &cpu->env;
    bool write_gva = false;
    uint64_t s;

    /*
     * cs->exception_index is 32 bits wide unlike mcause which is XLEN
     * bits wide, so we mask off the MSB and separate into trap type
     * and cause.
     */
    bool async = !!(cs->exception_index & RISCV_EXCP_INT_FLAG);
    target_ulong cause = cs->exception_index & RISCV_EXCP_INT_MASK;
    uint64_t deleg = async ? env->mideleg : env->medeleg;
    target_ulong tval = 0;
    target_ulong tinst = 0;
    target_ulong htval = 0;
    target_ulong mtval2 = 0;

    if (cause == RISCV_EXCP_SEMIHOST) {
        do_common_semihosting(cs);
        env->pc += 4;
        return;
    }

    if (!async) {
        /* set tval to badaddr for traps with address information */
        switch (cause) {
        case RISCV_EXCP_LOAD_GUEST_ACCESS_FAULT:
        case RISCV_EXCP_STORE_GUEST_AMO_ACCESS_FAULT:
        case RISCV_EXCP_LOAD_ADDR_MIS:
        case RISCV_EXCP_STORE_AMO_ADDR_MIS:
        case RISCV_EXCP_LOAD_ACCESS_FAULT:
        case RISCV_EXCP_STORE_AMO_ACCESS_FAULT:
        case RISCV_EXCP_LOAD_PAGE_FAULT:
        case RISCV_EXCP_STORE_PAGE_FAULT:
            write_gva = env->two_stage_lookup;
            tval = env->badaddr;
            if (env->two_stage_indirect_lookup) {
                /*
                 * special pseudoinstruction for G-stage fault taken while
                 * doing VS-stage page table walk.
                 */
                tinst = (riscv_cpu_xlen(env) == 32) ? 0x00002000 : 0x00003000;
            } else {
                /*
                 * The "Addr. Offset" field in transformed instruction is
                 * non-zero only for misaligned access.
                 */
                tinst = riscv_transformed_insn(env, env->bins, tval);
            }
            break;
        case RISCV_EXCP_INST_GUEST_PAGE_FAULT:
        case RISCV_EXCP_INST_ADDR_MIS:
        case RISCV_EXCP_INST_ACCESS_FAULT:
        case RISCV_EXCP_INST_PAGE_FAULT:
            write_gva = env->two_stage_lookup;
            tval = env->badaddr;
            if (env->two_stage_indirect_lookup) {
                /*
                 * special pseudoinstruction for G-stage fault taken while
                 * doing VS-stage page table walk.
                 */
                tinst = (riscv_cpu_xlen(env) == 32) ? 0x00002000 : 0x00003000;
            }
            break;
        case RISCV_EXCP_ILLEGAL_INST:
        case RISCV_EXCP_VIRT_INSTRUCTION_FAULT:
            tval = env->bins;
            break;
        default:
            break;
        }
        /* ecall is dispatched as one cause so translate based on mode */
        if (cause == RISCV_EXCP_U_ECALL) {
            assert(env->priv <= 3);

            if (env->priv == PRV_M) {
                cause = RISCV_EXCP_M_ECALL;
            } else if (env->priv == PRV_S && riscv_cpu_virt_enabled(env)) {
                cause = RISCV_EXCP_VS_ECALL;
            } else if (env->priv == PRV_S && !riscv_cpu_virt_enabled(env)) {
                cause = RISCV_EXCP_S_ECALL;
            } else if (env->priv == PRV_U) {
                cause = RISCV_EXCP_U_ECALL;
            }
        }
    }

    trace_riscv_trap(env->mhartid, async, cause, env->pc, tval,
                     riscv_cpu_get_trap_name(cause, async));

    qemu_log_mask(CPU_LOG_INT,
                  "%s: hart:"TARGET_FMT_ld", async:%d, cause:"TARGET_FMT_lx", "
                  "epc:0x"TARGET_FMT_lx", tval:0x"TARGET_FMT_lx", desc=%s\n",
                  __func__, env->mhartid, async, cause, env->pc, tval,
                  riscv_cpu_get_trap_name(cause, async));
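    /*
     * Delegation: the trap is handled in S-mode only when the current
     * privilege is at most S and the bit matching its cause is set in
     * mideleg (interrupts) or medeleg (exceptions); with RVH, hideleg
     * and hedeleg can forward an already-delegated trap on to VS-mode.
     */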
    if (env->priv <= PRV_S &&
            cause < TARGET_LONG_BITS && ((deleg >> cause) & 1)) {
        /* handle the trap in S-mode */
        if (riscv_has_ext(env, RVH)) {
            uint64_t hdeleg = async ? env->hideleg : env->hedeleg;

            if (riscv_cpu_virt_enabled(env) && ((hdeleg >> cause) & 1)) {
                /* Trap to VS mode */
                /*
                 * See if we need to adjust cause: yes if it's a VS-mode
                 * interrupt, no if the hypervisor has delegated one of
                 * HS mode's interrupts.
                 */
                if (cause == IRQ_VS_TIMER || cause == IRQ_VS_SOFT ||
                    cause == IRQ_VS_EXT) {
                    cause = cause - 1;
                }
                write_gva = false;
            } else if (riscv_cpu_virt_enabled(env)) {
                /* Trap into HS mode, from virt */
                riscv_cpu_swap_hypervisor_regs(env);
                env->hstatus = set_field(env->hstatus, HSTATUS_SPVP,
                                         env->priv);
                env->hstatus = set_field(env->hstatus, HSTATUS_SPV,
                                         riscv_cpu_virt_enabled(env));

                htval = env->guest_phys_fault_addr;

                riscv_cpu_set_virt_enabled(env, 0);
            } else {
                /* Trap into HS mode */
                env->hstatus = set_field(env->hstatus, HSTATUS_SPV, false);
                htval = env->guest_phys_fault_addr;
            }
            env->hstatus = set_field(env->hstatus, HSTATUS_GVA, write_gva);
        }

        s = env->mstatus;
        s = set_field(s, MSTATUS_SPIE, get_field(s, MSTATUS_SIE));
        s = set_field(s, MSTATUS_SPP, env->priv);
        s = set_field(s, MSTATUS_SIE, 0);
        env->mstatus = s;
        env->scause = cause | ((target_ulong)async << (TARGET_LONG_BITS - 1));
        env->sepc = env->pc;
        env->stval = tval;
        env->htval = htval;
        env->htinst = tinst;
        /* Vectored interrupt entry: base + 4 * cause when stvec MODE=1 */
        env->pc = (env->stvec >> 2 << 2) +
                  ((async && (env->stvec & 3) == 1) ? cause * 4 : 0);
        riscv_cpu_set_mode(env, PRV_S);
    } else {
        /* handle the trap in M-mode */
        if (riscv_has_ext(env, RVH)) {
            if (riscv_cpu_virt_enabled(env)) {
                riscv_cpu_swap_hypervisor_regs(env);
            }
            env->mstatus = set_field(env->mstatus, MSTATUS_MPV,
                                     riscv_cpu_virt_enabled(env));
            if (riscv_cpu_virt_enabled(env) && tval) {
                env->mstatus = set_field(env->mstatus, MSTATUS_GVA, 1);
            }

            mtval2 = env->guest_phys_fault_addr;

            /* Trapping to M mode, virt is disabled */
            riscv_cpu_set_virt_enabled(env, 0);
        }

        s = env->mstatus;
        s = set_field(s, MSTATUS_MPIE, get_field(s, MSTATUS_MIE));
        s = set_field(s, MSTATUS_MPP, env->priv);
        s = set_field(s, MSTATUS_MIE, 0);
        env->mstatus = s;
        env->mcause = cause | ~(((target_ulong)-1) >> async);
        env->mepc = env->pc;
        env->mtval = tval;
        env->mtval2 = mtval2;
        env->mtinst = tinst;
        env->pc = (env->mtvec >> 2 << 2) +
                  ((async && (env->mtvec & 3) == 1) ? cause * 4 : 0);
        riscv_cpu_set_mode(env, PRV_M);
    }

    /*
     * NOTE: it is not necessary to yield load reservations here. It is only
     * necessary for an SC from "another hart" to cause a load reservation
     * to be yielded. Refer to the memory consistency model section of the
     * RISC-V ISA Specification.
     */

    env->two_stage_lookup = false;
    env->two_stage_indirect_lookup = false;
#endif
    cs->exception_index = RISCV_EXCP_NONE; /* mark handled to qemu */
}