/*
 * RISC-V CPU helpers for qemu.
 *
 * Copyright (c) 2016-2017 Sagar Karandikar, sagark@eecs.berkeley.edu
 * Copyright (c) 2017-2018 SiFive, Inc.
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms and conditions of the GNU General Public License,
 * version 2 or later, as published by the Free Software Foundation.
 *
 * This program is distributed in the hope it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
 * more details.
 *
 * You should have received a copy of the GNU General Public License along with
 * this program.  If not, see <http://www.gnu.org/licenses/>.
 */

#include "qemu/osdep.h"
#include "qemu/log.h"
#include "qemu/main-loop.h"
#include "cpu.h"
#include "exec/exec-all.h"
#include "tcg/tcg-op.h"
#include "trace.h"
#include "semihosting/common-semi.h"

int riscv_cpu_mmu_index(CPURISCVState *env, bool ifetch)
{
#ifdef CONFIG_USER_ONLY
    return 0;
#else
    return env->priv;
#endif
}
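
/*
 * Note (added summary): gather the dynamic CPU state that translated code
 * depends on (pc, effective XLEN, FP/vector enable state, current
 * privilege, pointer-masking enable, ...) into the TB flags, so that a
 * cached translation block is only reused when that state matches.
 */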
void cpu_get_tb_cpu_state(CPURISCVState *env, target_ulong *pc,
                          target_ulong *cs_base, uint32_t *pflags)
{
    CPUState *cs = env_cpu(env);
    RISCVCPU *cpu = RISCV_CPU(cs);

    uint32_t flags = 0;

    *pc = env->xl == MXL_RV32 ? env->pc & UINT32_MAX : env->pc;
    *cs_base = 0;

    if (riscv_has_ext(env, RVV) || cpu->cfg.ext_zve32f || cpu->cfg.ext_zve64f) {
        /*
         * If env->vl equals VLMAX, we can use the generic vector operation
         * expanders (GVEC) to accelerate the vector operations.
         * However, as LMUL could be a fractional number, the maximum
         * vector size that can be operated on might be less than 8 bytes,
         * which is not supported by GVEC. So we only set the vl_eq_vlmax
         * flag to true when maxsz >= 8 bytes.
         */
        uint32_t vlmax = vext_get_vlmax(env_archcpu(env), env->vtype);
        uint32_t sew = FIELD_EX64(env->vtype, VTYPE, VSEW);
        uint32_t maxsz = vlmax << sew;
        bool vl_eq_vlmax = (env->vstart == 0) && (vlmax == env->vl) &&
                           (maxsz >= 8);
        flags = FIELD_DP32(flags, TB_FLAGS, VILL,
                           FIELD_EX64(env->vtype, VTYPE, VILL));
        flags = FIELD_DP32(flags, TB_FLAGS, SEW, sew);
        flags = FIELD_DP32(flags, TB_FLAGS, LMUL,
                           FIELD_EX64(env->vtype, VTYPE, VLMUL));
        flags = FIELD_DP32(flags, TB_FLAGS, VL_EQ_VLMAX, vl_eq_vlmax);
    } else {
        flags = FIELD_DP32(flags, TB_FLAGS, VILL, 1);
    }

#ifdef CONFIG_USER_ONLY
    flags |= TB_FLAGS_MSTATUS_FS;
    flags |= TB_FLAGS_MSTATUS_VS;
#else
    flags |= cpu_mmu_index(env, 0);
    if (riscv_cpu_fp_enabled(env)) {
        flags |= env->mstatus & MSTATUS_FS;
    }

    if (riscv_cpu_vector_enabled(env)) {
        flags |= env->mstatus & MSTATUS_VS;
    }

    if (riscv_has_ext(env, RVH)) {
        if (env->priv == PRV_M ||
            (env->priv == PRV_S && !riscv_cpu_virt_enabled(env)) ||
            (env->priv == PRV_U && !riscv_cpu_virt_enabled(env) &&
                get_field(env->hstatus, HSTATUS_HU))) {
            flags = FIELD_DP32(flags, TB_FLAGS, HLSX, 1);
        }

        flags = FIELD_DP32(flags, TB_FLAGS, MSTATUS_HS_FS,
                           get_field(env->mstatus_hs, MSTATUS_FS));

        flags = FIELD_DP32(flags, TB_FLAGS, MSTATUS_HS_VS,
                           get_field(env->mstatus_hs, MSTATUS_VS));
    }
    if (riscv_has_ext(env, RVJ)) {
        int priv = flags & TB_FLAGS_PRIV_MMU_MASK;
        bool pm_enabled = false;
        switch (priv) {
        case PRV_U:
            pm_enabled = env->mmte & U_PM_ENABLE;
            break;
        case PRV_S:
            pm_enabled = env->mmte & S_PM_ENABLE;
            break;
        case PRV_M:
            pm_enabled = env->mmte & M_PM_ENABLE;
            break;
        default:
            g_assert_not_reached();
        }
        flags = FIELD_DP32(flags, TB_FLAGS, PM_ENABLED, pm_enabled);
    }
#endif

    flags = FIELD_DP32(flags, TB_FLAGS, XL, env->xl);

    *pflags = flags;
}
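
/*
 * Note (added summary): recompute the effective pointer-masking mask and
 * base for the current privilege mode.  When the experimental J extension's
 * pointer masking is enabled, memory accesses elsewhere in the target apply
 * roughly addr = (addr & env->cur_pmmask) | env->cur_pmbase before
 * translation.
 */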
void riscv_cpu_update_mask(CPURISCVState *env)
{
    target_ulong mask = -1, base = 0;
    /*
     * TODO: Current RVJ spec does not specify
     * how the extension interacts with XLEN.
     */
#ifndef CONFIG_USER_ONLY
    if (riscv_has_ext(env, RVJ)) {
        switch (env->priv) {
        case PRV_M:
            if (env->mmte & M_PM_ENABLE) {
                mask = env->mpmmask;
                base = env->mpmbase;
            }
            break;
        case PRV_S:
            if (env->mmte & S_PM_ENABLE) {
                mask = env->spmmask;
                base = env->spmbase;
            }
            break;
        case PRV_U:
            if (env->mmte & U_PM_ENABLE) {
                mask = env->upmmask;
                base = env->upmbase;
            }
            break;
        default:
            g_assert_not_reached();
        }
    }
#endif
    if (env->xl == MXL_RV32) {
        env->cur_pmmask = mask & UINT32_MAX;
        env->cur_pmbase = base & UINT32_MAX;
    } else {
        env->cur_pmmask = mask;
        env->cur_pmbase = base;
    }
}

#ifndef CONFIG_USER_ONLY
static int riscv_cpu_local_irq_pending(CPURISCVState *env)
{
    target_ulong virt_enabled = riscv_cpu_virt_enabled(env);

    target_ulong mstatus_mie = get_field(env->mstatus, MSTATUS_MIE);
    target_ulong mstatus_sie = get_field(env->mstatus, MSTATUS_SIE);

    target_ulong pending = env->mip & env->mie;

    target_ulong mie = env->priv < PRV_M ||
                       (env->priv == PRV_M && mstatus_mie);
    target_ulong sie = env->priv < PRV_S ||
                       (env->priv == PRV_S && mstatus_sie);
    target_ulong hsie = virt_enabled || sie;
    target_ulong vsie = virt_enabled && sie;

    target_ulong irqs =
            (pending & ~env->mideleg & -mie) |
            (pending & env->mideleg & ~env->hideleg & -hsie) |
            (pending & env->mideleg & env->hideleg & -vsie);

    if (irqs) {
        return ctz64(irqs); /* since non-zero */
    } else {
        return RISCV_EXCP_NONE; /* indicates no pending interrupt */
    }
}

bool riscv_cpu_exec_interrupt(CPUState *cs, int interrupt_request)
{
    if (interrupt_request & CPU_INTERRUPT_HARD) {
        RISCVCPU *cpu = RISCV_CPU(cs);
        CPURISCVState *env = &cpu->env;
        int interruptno = riscv_cpu_local_irq_pending(env);
        if (interruptno >= 0) {
            cs->exception_index = RISCV_EXCP_INT_FLAG | interruptno;
            riscv_cpu_do_interrupt(cs);
            return true;
        }
    }
    return false;
}

/* Return true if floating point support is currently enabled */
bool riscv_cpu_fp_enabled(CPURISCVState *env)
{
    if (env->mstatus & MSTATUS_FS) {
        if (riscv_cpu_virt_enabled(env) && !(env->mstatus_hs & MSTATUS_FS)) {
            return false;
        }
        return true;
    }

    return false;
}

/* Return true if vector support is currently enabled */
bool riscv_cpu_vector_enabled(CPURISCVState *env)
{
    if (env->mstatus & MSTATUS_VS) {
        if (riscv_cpu_virt_enabled(env) && !(env->mstatus_hs & MSTATUS_VS)) {
            return false;
        }
        return true;
    }

    return false;
}
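
/*
 * Note (added summary): swap the S-mode view of mstatus, stvec, sscratch,
 * sepc, scause, stval and satp with the background (HS <-> VS) copies when
 * the virtualisation mode V is about to change, so that the active CSRs
 * always reflect the mode that is currently executing.
 */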
void riscv_cpu_swap_hypervisor_regs(CPURISCVState *env)
{
    uint64_t mstatus_mask = MSTATUS_MXR | MSTATUS_SUM | MSTATUS_FS |
                            MSTATUS_SPP | MSTATUS_SPIE | MSTATUS_SIE |
                            MSTATUS64_UXL | MSTATUS_VS;
    bool current_virt = riscv_cpu_virt_enabled(env);

    g_assert(riscv_has_ext(env, RVH));

    if (current_virt) {
        /* Current V=1 and we are about to change to V=0 */
        env->vsstatus = env->mstatus & mstatus_mask;
        env->mstatus &= ~mstatus_mask;
        env->mstatus |= env->mstatus_hs;

        env->vstvec = env->stvec;
        env->stvec = env->stvec_hs;

        env->vsscratch = env->sscratch;
        env->sscratch = env->sscratch_hs;

        env->vsepc = env->sepc;
        env->sepc = env->sepc_hs;

        env->vscause = env->scause;
        env->scause = env->scause_hs;

        env->vstval = env->stval;
        env->stval = env->stval_hs;

        env->vsatp = env->satp;
        env->satp = env->satp_hs;
    } else {
        /* Current V=0 and we are about to change to V=1 */
        env->mstatus_hs = env->mstatus & mstatus_mask;
        env->mstatus &= ~mstatus_mask;
        env->mstatus |= env->vsstatus;

        env->stvec_hs = env->stvec;
        env->stvec = env->vstvec;

        env->sscratch_hs = env->sscratch;
        env->sscratch = env->vsscratch;

        env->sepc_hs = env->sepc;
        env->sepc = env->vsepc;

        env->scause_hs = env->scause;
        env->scause = env->vscause;

        env->stval_hs = env->stval;
        env->stval = env->vstval;

        env->satp_hs = env->satp;
        env->satp = env->vsatp;
    }
}

bool riscv_cpu_virt_enabled(CPURISCVState *env)
{
    if (!riscv_has_ext(env, RVH)) {
        return false;
    }

    return get_field(env->virt, VIRT_ONOFF);
}

void riscv_cpu_set_virt_enabled(CPURISCVState *env, bool enable)
{
    if (!riscv_has_ext(env, RVH)) {
        return;
    }

    /* Flush the TLB on all virt mode changes. */
    if (get_field(env->virt, VIRT_ONOFF) != enable) {
        tlb_flush(env_cpu(env));
    }

    env->virt = set_field(env->virt, VIRT_ONOFF, enable);
}

bool riscv_cpu_two_stage_lookup(int mmu_idx)
{
    return mmu_idx & TB_FLAGS_PRIV_HYP_ACCESS_MASK;
}

int riscv_cpu_claim_interrupts(RISCVCPU *cpu, uint32_t interrupts)
{
    CPURISCVState *env = &cpu->env;
    if (env->miclaim & interrupts) {
        return -1;
    } else {
        env->miclaim |= interrupts;
        return 0;
    }
}

uint32_t riscv_cpu_update_mip(RISCVCPU *cpu, uint32_t mask, uint32_t value)
{
    CPURISCVState *env = &cpu->env;
    CPUState *cs = CPU(cpu);
    uint32_t old = env->mip;
    bool locked = false;

    if (!qemu_mutex_iothread_locked()) {
        locked = true;
        qemu_mutex_lock_iothread();
    }

    env->mip = (env->mip & ~mask) | (value & mask);

    if (env->mip) {
        cpu_interrupt(cs, CPU_INTERRUPT_HARD);
    } else {
        cpu_reset_interrupt(cs, CPU_INTERRUPT_HARD);
    }

    if (locked) {
        qemu_mutex_unlock_iothread();
    }

    return old;
}

void riscv_cpu_set_rdtime_fn(CPURISCVState *env, uint64_t (*fn)(uint32_t),
                             uint32_t arg)
{
    env->rdtime_fn = fn;
    env->rdtime_fn_arg = arg;
}

void riscv_cpu_set_mode(CPURISCVState *env, target_ulong newpriv)
{
    if (newpriv > PRV_M) {
        g_assert_not_reached();
    }
    if (newpriv == PRV_H) {
        newpriv = PRV_U;
    }
    /* tlb_flush is unnecessary as mode is contained in mmu_idx */
    env->priv = newpriv;
    env->xl = cpu_recompute_xl(env);
    riscv_cpu_update_mask(env);

    /*
     * Clear the load reservation - otherwise a reservation placed in one
     * context/process can be used by another, resulting in an SC succeeding
     * incorrectly. Version 2.2 of the ISA specification explicitly requires
     * this behaviour, while later revisions say that the kernel "should" use
     * an SC instruction to force the yielding of a load reservation on a
     * preemptive context switch. As a result, do both.
     */
    env->load_res = -1;
}
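
/*
 * Note (added summary): the address translation helpers below return the
 * TRANSLATE_* codes: TRANSLATE_SUCCESS, TRANSLATE_FAIL, TRANSLATE_PMP_FAIL,
 * and (for a failed second stage of a two-stage walk)
 * TRANSLATE_G_STAGE_FAIL.  riscv_cpu_tlb_fill() and raise_mmu_exception()
 * turn these results into the corresponding RISC-V exception causes.
 */
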
/*
 * get_physical_address_pmp - check PMP permission for this physical address
 *
 * Match the PMP region and check permission for this physical address and
 * its TLB page. Returns 0 if the permission checking was successful
 *
 * @env: CPURISCVState
 * @prot: The returned protection attributes
 * @tlb_size: TLB page size containing addr. It could be modified after PMP
 *            permission checking. NULL if we do not set a TLB page for addr.
 * @addr: The physical address to be checked for permission
 * @access_type: The type of MMU access
 * @mode: Indicates current privilege level.
 */
static int get_physical_address_pmp(CPURISCVState *env, int *prot,
                                    target_ulong *tlb_size, hwaddr addr,
                                    int size, MMUAccessType access_type,
                                    int mode)
{
    pmp_priv_t pmp_priv;
    target_ulong tlb_size_pmp = 0;

    if (!riscv_feature(env, RISCV_FEATURE_PMP)) {
        *prot = PAGE_READ | PAGE_WRITE | PAGE_EXEC;
        return TRANSLATE_SUCCESS;
    }

    if (!pmp_hart_has_privs(env, addr, size, 1 << access_type, &pmp_priv,
                            mode)) {
        *prot = 0;
        return TRANSLATE_PMP_FAIL;
    }

    *prot = pmp_priv_to_page_prot(pmp_priv);
    if (tlb_size != NULL) {
        if (pmp_is_range_in_tlb(env, addr & ~(*tlb_size - 1), &tlb_size_pmp)) {
            *tlb_size = tlb_size_pmp;
        }
    }

    return TRANSLATE_SUCCESS;
}

/*
 * get_physical_address - get the physical address for this virtual address
 *
 * Do a page table walk to obtain the physical address corresponding to a
 * virtual address. Returns 0 if the translation was successful
 *
 * Adapted from Spike's mmu_t::translate and mmu_t::walk
 *
 * @env: CPURISCVState
 * @physical: This will be set to the calculated physical address
 * @prot: The returned protection attributes
 * @addr: The virtual address to be translated
 * @fault_pte_addr: If not NULL, this will be set to the fault pte address
 *                  when an error occurs on pte address translation.
 *                  This will already be shifted to match htval.
 * @access_type: The type of MMU access
 * @mmu_idx: Indicates current privilege level
 * @first_stage: Are we in first stage translation?
 *               Second stage is used for hypervisor guest translation
 * @two_stage: Are we going to perform two stage translation
 * @is_debug: Is this access from a debugger or the monitor?
 */
static int get_physical_address(CPURISCVState *env, hwaddr *physical,
                                int *prot, target_ulong addr,
                                target_ulong *fault_pte_addr,
                                int access_type, int mmu_idx,
                                bool first_stage, bool two_stage,
                                bool is_debug)
{
    /* NOTE: the env->pc value visible here will not be
     * correct, but the value visible to the exception handler
     * (riscv_cpu_do_interrupt) is correct */
    MemTxResult res;
    MemTxAttrs attrs = MEMTXATTRS_UNSPECIFIED;
    int mode = mmu_idx & TB_FLAGS_PRIV_MMU_MASK;
    bool use_background = false;

    /*
     * Check if we should use the background registers for the two
     * stage translation. We don't need to check if we actually need
     * two stage translation as that happened before this function
     * was called. Background registers will be used if the guest has
     * forced a two stage translation to be on (in HS or M mode).
     */
    if (!riscv_cpu_virt_enabled(env) && two_stage) {
        use_background = true;
    }

    /* MPRV does not affect the virtual-machine load/store
       instructions, HLV, HLVX, and HSV. */
    if (riscv_cpu_two_stage_lookup(mmu_idx)) {
        mode = get_field(env->hstatus, HSTATUS_SPVP);
    } else if (mode == PRV_M && access_type != MMU_INST_FETCH) {
        if (get_field(env->mstatus, MSTATUS_MPRV)) {
            mode = get_field(env->mstatus, MSTATUS_MPP);
        }
    }

    if (first_stage == false) {
        /* We are in stage 2 translation, this is similar to stage 1. */
        /* Stage 2 is always taken as U-mode */
        mode = PRV_U;
    }

    if (mode == PRV_M || !riscv_feature(env, RISCV_FEATURE_MMU)) {
        *physical = addr;
        *prot = PAGE_READ | PAGE_WRITE | PAGE_EXEC;
        return TRANSLATE_SUCCESS;
    }

    *prot = 0;

    hwaddr base;
    int levels, ptidxbits, ptesize, vm, sum, mxr, widened;

    if (first_stage == true) {
        mxr = get_field(env->mstatus, MSTATUS_MXR);
    } else {
        mxr = get_field(env->vsstatus, MSTATUS_MXR);
    }

    if (first_stage == true) {
        if (use_background) {
            if (riscv_cpu_mxl(env) == MXL_RV32) {
                base = (hwaddr)get_field(env->vsatp, SATP32_PPN) << PGSHIFT;
                vm = get_field(env->vsatp, SATP32_MODE);
            } else {
                base = (hwaddr)get_field(env->vsatp, SATP64_PPN) << PGSHIFT;
                vm = get_field(env->vsatp, SATP64_MODE);
            }
        } else {
            if (riscv_cpu_mxl(env) == MXL_RV32) {
                base = (hwaddr)get_field(env->satp, SATP32_PPN) << PGSHIFT;
                vm = get_field(env->satp, SATP32_MODE);
            } else {
                base = (hwaddr)get_field(env->satp, SATP64_PPN) << PGSHIFT;
                vm = get_field(env->satp, SATP64_MODE);
            }
        }
        widened = 0;
    } else {
        if (riscv_cpu_mxl(env) == MXL_RV32) {
            base = (hwaddr)get_field(env->hgatp, SATP32_PPN) << PGSHIFT;
            vm = get_field(env->hgatp, SATP32_MODE);
        } else {
            base = (hwaddr)get_field(env->hgatp, SATP64_PPN) << PGSHIFT;
            vm = get_field(env->hgatp, SATP64_MODE);
        }
        widened = 2;
    }
    /* status.SUM is ignored if executing on the background registers */
    sum = get_field(env->mstatus, MSTATUS_SUM) || use_background || is_debug;
    switch (vm) {
    case VM_1_10_SV32:
        levels = 2; ptidxbits = 10; ptesize = 4; break;
    case VM_1_10_SV39:
        levels = 3; ptidxbits = 9; ptesize = 8; break;
    case VM_1_10_SV48:
        levels = 4; ptidxbits = 9; ptesize = 8; break;
    case VM_1_10_SV57:
        levels = 5; ptidxbits = 9; ptesize = 8; break;
    case VM_1_10_MBARE:
        *physical = addr;
        *prot = PAGE_READ | PAGE_WRITE | PAGE_EXEC;
        return TRANSLATE_SUCCESS;
    default:
        g_assert_not_reached();
    }

    CPUState *cs = env_cpu(env);
    int va_bits = PGSHIFT + levels * ptidxbits + widened;
    target_ulong mask, masked_msbs;

    if (TARGET_LONG_BITS > (va_bits - 1)) {
        mask = (1L << (TARGET_LONG_BITS - (va_bits - 1))) - 1;
    } else {
        mask = 0;
    }
    masked_msbs = (addr >> (va_bits - 1)) & mask;

    if (masked_msbs != 0 && masked_msbs != mask) {
        return TRANSLATE_FAIL;
    }

    int ptshift = (levels - 1) * ptidxbits;
    int i;

#if !TCG_OVERSIZED_GUEST
 restart:
#endif
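    /*
     * Note (added summary): walk the page table from the root.  Each
     * iteration consumes ptidxbits of the virtual address (plus `widened`
     * extra bits at the top level for the larger G-stage root table) and
     * either descends to the next level or terminates on a leaf or
     * invalid PTE.
     */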
    for (i = 0; i < levels; i++, ptshift -= ptidxbits) {
        target_ulong idx;
        if (i == 0) {
            idx = (addr >> (PGSHIFT + ptshift)) &
                           ((1 << (ptidxbits + widened)) - 1);
        } else {
            idx = (addr >> (PGSHIFT + ptshift)) &
                           ((1 << ptidxbits) - 1);
        }

        /* check that physical address of PTE is legal */
        hwaddr pte_addr;

        if (two_stage && first_stage) {
            int vbase_prot;
            hwaddr vbase;

            /* Do the second stage translation on the base PTE address. */
            int vbase_ret = get_physical_address(env, &vbase, &vbase_prot,
                                                 base, NULL, MMU_DATA_LOAD,
                                                 mmu_idx, false, true,
                                                 is_debug);

            if (vbase_ret != TRANSLATE_SUCCESS) {
                if (fault_pte_addr) {
                    *fault_pte_addr = (base + idx * ptesize) >> 2;
                }
                return TRANSLATE_G_STAGE_FAIL;
            }

            pte_addr = vbase + idx * ptesize;
        } else {
            pte_addr = base + idx * ptesize;
        }

        int pmp_prot;
        int pmp_ret = get_physical_address_pmp(env, &pmp_prot, NULL, pte_addr,
                                               sizeof(target_ulong),
                                               MMU_DATA_LOAD, PRV_S);
        if (pmp_ret != TRANSLATE_SUCCESS) {
            return TRANSLATE_PMP_FAIL;
        }

        target_ulong pte;
        if (riscv_cpu_mxl(env) == MXL_RV32) {
            pte = address_space_ldl(cs->as, pte_addr, attrs, &res);
        } else {
            pte = address_space_ldq(cs->as, pte_addr, attrs, &res);
        }

        if (res != MEMTX_OK) {
            return TRANSLATE_FAIL;
        }

        hwaddr ppn = pte >> PTE_PPN_SHIFT;

        if (!(pte & PTE_V)) {
            /* Invalid PTE */
            return TRANSLATE_FAIL;
        } else if (!(pte & (PTE_R | PTE_W | PTE_X))) {
            /* Inner PTE, continue walking */
            base = ppn << PGSHIFT;
        } else if ((pte & (PTE_R | PTE_W | PTE_X)) == PTE_W) {
            /* Reserved leaf PTE flags: PTE_W */
            return TRANSLATE_FAIL;
        } else if ((pte & (PTE_R | PTE_W | PTE_X)) == (PTE_W | PTE_X)) {
            /* Reserved leaf PTE flags: PTE_W + PTE_X */
            return TRANSLATE_FAIL;
        } else if ((pte & PTE_U) && ((mode != PRV_U) &&
                   (!sum || access_type == MMU_INST_FETCH))) {
            /* User PTE flags when not U mode and mstatus.SUM is not set,
               or the access type is an instruction fetch */
            return TRANSLATE_FAIL;
        } else if (!(pte & PTE_U) && (mode != PRV_S)) {
            /* Supervisor PTE flags when not S mode */
            return TRANSLATE_FAIL;
        } else if (ppn & ((1ULL << ptshift) - 1)) {
            /* Misaligned PPN */
            return TRANSLATE_FAIL;
        } else if (access_type == MMU_DATA_LOAD && !((pte & PTE_R) ||
                   ((pte & PTE_X) && mxr))) {
            /* Read access check failed */
            return TRANSLATE_FAIL;
        } else if (access_type == MMU_DATA_STORE && !(pte & PTE_W)) {
            /* Write access check failed */
            return TRANSLATE_FAIL;
        } else if (access_type == MMU_INST_FETCH && !(pte & PTE_X)) {
            /* Fetch access check failed */
            return TRANSLATE_FAIL;
        } else {
            /* if necessary, set accessed and dirty bits. */
            target_ulong updated_pte = pte | PTE_A |
                (access_type == MMU_DATA_STORE ? PTE_D : 0);

            /* Page table updates need to be atomic with MTTCG enabled */
            if (updated_pte != pte) {
                /*
                 * - if accessed or dirty bits need updating, and the PTE is
                 *   in RAM, then we do so atomically with a compare and swap.
                 * - if the PTE is in IO space or ROM, then it can't be updated
                 *   and we return TRANSLATE_FAIL.
                 * - if the PTE changed by the time we went to update it, then
                 *   it is no longer valid and we must re-walk the page table.
                 */
                MemoryRegion *mr;
                hwaddr l = sizeof(target_ulong), addr1;
                mr = address_space_translate(cs->as, pte_addr,
                    &addr1, &l, false, MEMTXATTRS_UNSPECIFIED);
                if (memory_region_is_ram(mr)) {
                    target_ulong *pte_pa =
                        qemu_map_ram_ptr(mr->ram_block, addr1);
#if TCG_OVERSIZED_GUEST
                    /* MTTCG is not enabled on oversized TCG guests so
                     * page table updates do not need to be atomic */
                    *pte_pa = pte = updated_pte;
#else
                    target_ulong old_pte =
                        qatomic_cmpxchg(pte_pa, pte, updated_pte);
                    if (old_pte != pte) {
                        goto restart;
                    } else {
                        pte = updated_pte;
                    }
#endif
                } else {
                    /* misconfigured PTE in ROM (AD bits are not preset) or
                     * PTE is in IO space and can't be updated atomically */
                    return TRANSLATE_FAIL;
                }
            }

            /* for superpage mappings, make a fake leaf PTE for the TLB's
               benefit. */
            target_ulong vpn = addr >> PGSHIFT;
            *physical = ((ppn | (vpn & ((1L << ptshift) - 1))) << PGSHIFT) |
                        (addr & ~TARGET_PAGE_MASK);

            /* set permissions on the TLB entry */
            if ((pte & PTE_R) || ((pte & PTE_X) && mxr)) {
                *prot |= PAGE_READ;
            }
            if ((pte & PTE_X)) {
                *prot |= PAGE_EXEC;
            }
            /* add write permission on stores or if the page is already dirty,
               so that we TLB miss on later writes to update the dirty bit */
            if ((pte & PTE_W) &&
                    (access_type == MMU_DATA_STORE || (pte & PTE_D))) {
                *prot |= PAGE_WRITE;
            }
            return TRANSLATE_SUCCESS;
        }
    }
    return TRANSLATE_FAIL;
}
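
/*
 * Note (added summary): convert a failed translation into the corresponding
 * RISC-V exception: a page fault when the page-table walk failed, an access
 * fault on a PMP violation or bare mode, or a guest-page fault when the
 * G stage of a two-stage lookup failed.  The faulting address is recorded
 * in env->badaddr.
 */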
static void raise_mmu_exception(CPURISCVState *env, target_ulong address,
                                MMUAccessType access_type, bool pmp_violation,
                                bool first_stage, bool two_stage)
{
    CPUState *cs = env_cpu(env);
    int page_fault_exceptions, vm;
    uint64_t stap_mode;

    if (riscv_cpu_mxl(env) == MXL_RV32) {
        stap_mode = SATP32_MODE;
    } else {
        stap_mode = SATP64_MODE;
    }

    if (first_stage) {
        vm = get_field(env->satp, stap_mode);
    } else {
        vm = get_field(env->hgatp, stap_mode);
    }

    page_fault_exceptions = vm != VM_1_10_MBARE && !pmp_violation;

    switch (access_type) {
    case MMU_INST_FETCH:
        if (riscv_cpu_virt_enabled(env) && !first_stage) {
            cs->exception_index = RISCV_EXCP_INST_GUEST_PAGE_FAULT;
        } else {
            cs->exception_index = page_fault_exceptions ?
                RISCV_EXCP_INST_PAGE_FAULT : RISCV_EXCP_INST_ACCESS_FAULT;
        }
        break;
    case MMU_DATA_LOAD:
        if (two_stage && !first_stage) {
            cs->exception_index = RISCV_EXCP_LOAD_GUEST_ACCESS_FAULT;
        } else {
            cs->exception_index = page_fault_exceptions ?
                RISCV_EXCP_LOAD_PAGE_FAULT : RISCV_EXCP_LOAD_ACCESS_FAULT;
        }
        break;
    case MMU_DATA_STORE:
        if (two_stage && !first_stage) {
            cs->exception_index = RISCV_EXCP_STORE_GUEST_AMO_ACCESS_FAULT;
        } else {
            cs->exception_index = page_fault_exceptions ?
                RISCV_EXCP_STORE_PAGE_FAULT : RISCV_EXCP_STORE_AMO_ACCESS_FAULT;
        }
        break;
    default:
        g_assert_not_reached();
    }
    env->badaddr = address;
    env->two_stage_lookup = two_stage;
}

hwaddr riscv_cpu_get_phys_page_debug(CPUState *cs, vaddr addr)
{
    RISCVCPU *cpu = RISCV_CPU(cs);
    CPURISCVState *env = &cpu->env;
    hwaddr phys_addr;
    int prot;
    int mmu_idx = cpu_mmu_index(&cpu->env, false);

    if (get_physical_address(env, &phys_addr, &prot, addr, NULL, 0, mmu_idx,
                             true, riscv_cpu_virt_enabled(env), true)) {
        return -1;
    }

    if (riscv_cpu_virt_enabled(env)) {
        if (get_physical_address(env, &phys_addr, &prot, phys_addr, NULL,
                                 0, mmu_idx, false, true, true)) {
            return -1;
        }
    }

    return phys_addr & TARGET_PAGE_MASK;
}

void riscv_cpu_do_transaction_failed(CPUState *cs, hwaddr physaddr,
                                     vaddr addr, unsigned size,
                                     MMUAccessType access_type,
                                     int mmu_idx, MemTxAttrs attrs,
                                     MemTxResult response, uintptr_t retaddr)
{
    RISCVCPU *cpu = RISCV_CPU(cs);
    CPURISCVState *env = &cpu->env;

    if (access_type == MMU_DATA_STORE) {
        cs->exception_index = RISCV_EXCP_STORE_AMO_ACCESS_FAULT;
    } else if (access_type == MMU_DATA_LOAD) {
        cs->exception_index = RISCV_EXCP_LOAD_ACCESS_FAULT;
    } else {
        cs->exception_index = RISCV_EXCP_INST_ACCESS_FAULT;
    }

    env->badaddr = addr;
    env->two_stage_lookup = riscv_cpu_virt_enabled(env) ||
                            riscv_cpu_two_stage_lookup(mmu_idx);
    riscv_raise_exception(&cpu->env, cs->exception_index, retaddr);
}

void riscv_cpu_do_unaligned_access(CPUState *cs, vaddr addr,
                                   MMUAccessType access_type, int mmu_idx,
                                   uintptr_t retaddr)
{
    RISCVCPU *cpu = RISCV_CPU(cs);
    CPURISCVState *env = &cpu->env;
    switch (access_type) {
    case MMU_INST_FETCH:
        cs->exception_index = RISCV_EXCP_INST_ADDR_MIS;
        break;
    case MMU_DATA_LOAD:
        cs->exception_index = RISCV_EXCP_LOAD_ADDR_MIS;
        break;
    case MMU_DATA_STORE:
        cs->exception_index = RISCV_EXCP_STORE_AMO_ADDR_MIS;
        break;
    default:
        g_assert_not_reached();
    }
    env->badaddr = addr;
    env->two_stage_lookup = riscv_cpu_virt_enabled(env) ||
                            riscv_cpu_two_stage_lookup(mmu_idx);
    riscv_raise_exception(env, cs->exception_index, retaddr);
}
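
/*
 * Note (added summary): TLB fill handler.  Translate @address for
 * @access_type (performing a two-stage lookup when virtualisation is on or
 * a hypervisor load/store instruction is in use), apply the PMP checks
 * (which may shrink the covered TLB page size), and either install a TLB
 * entry or raise the appropriate exception.  When @probe is set, a failed
 * translation simply returns false instead of raising an exception.
 */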
bool riscv_cpu_tlb_fill(CPUState *cs, vaddr address, int size,
                        MMUAccessType access_type, int mmu_idx,
                        bool probe, uintptr_t retaddr)
{
    RISCVCPU *cpu = RISCV_CPU(cs);
    CPURISCVState *env = &cpu->env;
    vaddr im_address;
    hwaddr pa = 0;
    int prot, prot2, prot_pmp;
    bool pmp_violation = false;
    bool first_stage_error = true;
    bool two_stage_lookup = false;
    int ret = TRANSLATE_FAIL;
    int mode = mmu_idx;
    /* default TLB page size */
    target_ulong tlb_size = TARGET_PAGE_SIZE;

    env->guest_phys_fault_addr = 0;

    qemu_log_mask(CPU_LOG_MMU, "%s ad %" VADDR_PRIx " rw %d mmu_idx %d\n",
                  __func__, address, access_type, mmu_idx);

    /* MPRV does not affect the virtual-machine load/store
       instructions, HLV, HLVX, and HSV. */
    if (riscv_cpu_two_stage_lookup(mmu_idx)) {
        mode = get_field(env->hstatus, HSTATUS_SPVP);
    } else if (mode == PRV_M && access_type != MMU_INST_FETCH &&
               get_field(env->mstatus, MSTATUS_MPRV)) {
        mode = get_field(env->mstatus, MSTATUS_MPP);
        if (riscv_has_ext(env, RVH) && get_field(env->mstatus, MSTATUS_MPV)) {
            two_stage_lookup = true;
        }
    }

    if (riscv_cpu_virt_enabled(env) ||
        ((riscv_cpu_two_stage_lookup(mmu_idx) || two_stage_lookup) &&
         access_type != MMU_INST_FETCH)) {
        /* Two stage lookup */
        ret = get_physical_address(env, &pa, &prot, address,
                                   &env->guest_phys_fault_addr, access_type,
                                   mmu_idx, true, true, false);

        /*
         * A G-stage exception may be triggered during two-stage lookup.
         * env->guest_phys_fault_addr has already been set in
         * get_physical_address().
         */
        if (ret == TRANSLATE_G_STAGE_FAIL) {
            first_stage_error = false;
            access_type = MMU_DATA_LOAD;
        }

        qemu_log_mask(CPU_LOG_MMU,
                      "%s 1st-stage address=%" VADDR_PRIx " ret %d physical "
                      TARGET_FMT_plx " prot %d\n",
                      __func__, address, ret, pa, prot);

        if (ret == TRANSLATE_SUCCESS) {
            /* Second stage lookup */
            im_address = pa;

            ret = get_physical_address(env, &pa, &prot2, im_address, NULL,
                                       access_type, mmu_idx, false, true,
                                       false);

            qemu_log_mask(CPU_LOG_MMU,
                          "%s 2nd-stage address=%" VADDR_PRIx " ret %d physical "
                          TARGET_FMT_plx " prot %d\n",
                          __func__, im_address, ret, pa, prot2);

            prot &= prot2;

            if (ret == TRANSLATE_SUCCESS) {
                ret = get_physical_address_pmp(env, &prot_pmp, &tlb_size, pa,
                                               size, access_type, mode);

                qemu_log_mask(CPU_LOG_MMU,
                              "%s PMP address=" TARGET_FMT_plx " ret %d prot"
                              " %d tlb_size " TARGET_FMT_lu "\n",
                              __func__, pa, ret, prot_pmp, tlb_size);

                prot &= prot_pmp;
            }

            if (ret != TRANSLATE_SUCCESS) {
                /*
                 * Guest physical address translation failed, this is a HS
                 * level exception
                 */
                first_stage_error = false;
                env->guest_phys_fault_addr = (im_address |
                                              (address &
                                               (TARGET_PAGE_SIZE - 1))) >> 2;
            }
        }
    } else {
        /* Single stage lookup */
        ret = get_physical_address(env, &pa, &prot, address, NULL,
                                   access_type, mmu_idx, true, false, false);

        qemu_log_mask(CPU_LOG_MMU,
                      "%s address=%" VADDR_PRIx " ret %d physical "
                      TARGET_FMT_plx " prot %d\n",
                      __func__, address, ret, pa, prot);

        if (ret == TRANSLATE_SUCCESS) {
            ret = get_physical_address_pmp(env, &prot_pmp, &tlb_size, pa,
                                           size, access_type, mode);

            qemu_log_mask(CPU_LOG_MMU,
                          "%s PMP address=" TARGET_FMT_plx " ret %d prot"
                          " %d tlb_size " TARGET_FMT_lu "\n",
                          __func__, pa, ret, prot_pmp, tlb_size);

            prot &= prot_pmp;
        }
    }

    if (ret == TRANSLATE_PMP_FAIL) {
        pmp_violation = true;
    }

    if (ret == TRANSLATE_SUCCESS) {
        tlb_set_page(cs, address & ~(tlb_size - 1), pa & ~(tlb_size - 1),
                     prot, mmu_idx, tlb_size);
        return true;
    } else if (probe) {
        return false;
    } else {
        raise_mmu_exception(env, address, access_type, pmp_violation,
                            first_stage_error,
                            riscv_cpu_virt_enabled(env) ||
                                riscv_cpu_two_stage_lookup(mmu_idx));
        riscv_raise_exception(env, cs->exception_index, retaddr);
    }

    return true;
}
#endif /* !CONFIG_USER_ONLY */

/*
 * Handle Traps
 *
 * Adapted from Spike's processor_t::take_trap.
 *
 */
void riscv_cpu_do_interrupt(CPUState *cs)
{
#if !defined(CONFIG_USER_ONLY)

    RISCVCPU *cpu = RISCV_CPU(cs);
    CPURISCVState *env = &cpu->env;
    bool write_gva = false;
    uint64_t s;

    /*
     * cs->exception_index is 32 bits wide unlike mcause which is XLEN bits
     * wide, so we mask off the MSB and separate into trap type and cause.
     */
    bool async = !!(cs->exception_index & RISCV_EXCP_INT_FLAG);
    target_ulong cause = cs->exception_index & RISCV_EXCP_INT_MASK;
    target_ulong deleg = async ? env->mideleg : env->medeleg;
    target_ulong tval = 0;
    target_ulong htval = 0;
    target_ulong mtval2 = 0;

    if (cause == RISCV_EXCP_SEMIHOST) {
        if (env->priv >= PRV_S) {
            env->gpr[xA0] = do_common_semihosting(cs);
            env->pc += 4;
            return;
        }
        cause = RISCV_EXCP_BREAKPOINT;
    }

    if (!async) {
        /* set tval to badaddr for traps with address information */
        switch (cause) {
        case RISCV_EXCP_INST_GUEST_PAGE_FAULT:
        case RISCV_EXCP_LOAD_GUEST_ACCESS_FAULT:
        case RISCV_EXCP_STORE_GUEST_AMO_ACCESS_FAULT:
        case RISCV_EXCP_INST_ADDR_MIS:
        case RISCV_EXCP_INST_ACCESS_FAULT:
        case RISCV_EXCP_LOAD_ADDR_MIS:
        case RISCV_EXCP_STORE_AMO_ADDR_MIS:
        case RISCV_EXCP_LOAD_ACCESS_FAULT:
        case RISCV_EXCP_STORE_AMO_ACCESS_FAULT:
        case RISCV_EXCP_INST_PAGE_FAULT:
        case RISCV_EXCP_LOAD_PAGE_FAULT:
        case RISCV_EXCP_STORE_PAGE_FAULT:
            write_gva = true;
            tval = env->badaddr;
            break;
        case RISCV_EXCP_ILLEGAL_INST:
            tval = env->bins;
            break;
        default:
            break;
        }
        /* ecall is dispatched as one cause so translate based on mode */
        if (cause == RISCV_EXCP_U_ECALL) {
            assert(env->priv <= 3);

            if (env->priv == PRV_M) {
                cause = RISCV_EXCP_M_ECALL;
            } else if (env->priv == PRV_S && riscv_cpu_virt_enabled(env)) {
                cause = RISCV_EXCP_VS_ECALL;
            } else if (env->priv == PRV_S && !riscv_cpu_virt_enabled(env)) {
                cause = RISCV_EXCP_S_ECALL;
            } else if (env->priv == PRV_U) {
                cause = RISCV_EXCP_U_ECALL;
            }
        }
    }

    trace_riscv_trap(env->mhartid, async, cause, env->pc, tval,
                     riscv_cpu_get_trap_name(cause, async));

    qemu_log_mask(CPU_LOG_INT,
                  "%s: hart:"TARGET_FMT_ld", async:%d, cause:"TARGET_FMT_lx", "
                  "epc:0x"TARGET_FMT_lx", tval:0x"TARGET_FMT_lx", desc=%s\n",
                  __func__, env->mhartid, async, cause, env->pc, tval,
                  riscv_cpu_get_trap_name(cause, async));
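
    /*
     * Note (added summary): decide where the trap is handled.  Traps raised
     * in U/S/VS mode are taken in S mode (or, with the hypervisor extension,
     * further delegated to VS mode) when the corresponding medeleg/mideleg
     * (and hedeleg/hideleg) bit is set; otherwise they are taken in M mode.
     */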
    if (env->priv <= PRV_S &&
        cause < TARGET_LONG_BITS && ((deleg >> cause) & 1)) {
        /* handle the trap in S-mode */
        if (riscv_has_ext(env, RVH)) {
            target_ulong hdeleg = async ? env->hideleg : env->hedeleg;

            if (riscv_cpu_virt_enabled(env) && ((hdeleg >> cause) & 1)) {
                /* Trap to VS mode */
                /*
                 * See if we need to adjust cause. Yes if it's a VS mode
                 * interrupt, no if the hypervisor has delegated one of HS
                 * mode's interrupts.
                 */
                if (cause == IRQ_VS_TIMER || cause == IRQ_VS_SOFT ||
                    cause == IRQ_VS_EXT) {
                    cause = cause - 1;
                }
                write_gva = false;
            } else if (riscv_cpu_virt_enabled(env)) {
                /* Trap into HS mode, from virt */
                riscv_cpu_swap_hypervisor_regs(env);
                env->hstatus = set_field(env->hstatus, HSTATUS_SPVP,
                                         env->priv);
                env->hstatus = set_field(env->hstatus, HSTATUS_SPV,
                                         riscv_cpu_virt_enabled(env));

                htval = env->guest_phys_fault_addr;

                riscv_cpu_set_virt_enabled(env, 0);
            } else {
                /* Trap into HS mode */
                env->hstatus = set_field(env->hstatus, HSTATUS_SPV, false);
                htval = env->guest_phys_fault_addr;
                write_gva = false;
            }
            env->hstatus = set_field(env->hstatus, HSTATUS_GVA, write_gva);
        }

        s = env->mstatus;
        s = set_field(s, MSTATUS_SPIE, get_field(s, MSTATUS_SIE));
        s = set_field(s, MSTATUS_SPP, env->priv);
        s = set_field(s, MSTATUS_SIE, 0);
        env->mstatus = s;
        env->scause = cause | ((target_ulong)async << (TARGET_LONG_BITS - 1));
        env->sepc = env->pc;
        env->stval = tval;
        env->htval = htval;
        env->pc = (env->stvec >> 2 << 2) +
                  ((async && (env->stvec & 3) == 1) ? cause * 4 : 0);
        riscv_cpu_set_mode(env, PRV_S);
    } else {
        /* handle the trap in M-mode */
        if (riscv_has_ext(env, RVH)) {
            if (riscv_cpu_virt_enabled(env)) {
                riscv_cpu_swap_hypervisor_regs(env);
            }
            env->mstatus = set_field(env->mstatus, MSTATUS_MPV,
                                     riscv_cpu_virt_enabled(env));
            if (riscv_cpu_virt_enabled(env) && tval) {
                env->mstatus = set_field(env->mstatus, MSTATUS_GVA, 1);
            }

            mtval2 = env->guest_phys_fault_addr;

            /* Trapping to M mode, virt is disabled */
            riscv_cpu_set_virt_enabled(env, 0);
        }

        s = env->mstatus;
        s = set_field(s, MSTATUS_MPIE, get_field(s, MSTATUS_MIE));
        s = set_field(s, MSTATUS_MPP, env->priv);
        s = set_field(s, MSTATUS_MIE, 0);
        env->mstatus = s;
        env->mcause = cause | ~(((target_ulong)-1) >> async);
        env->mepc = env->pc;
        env->mtval = tval;
        env->mtval2 = mtval2;
        env->pc = (env->mtvec >> 2 << 2) +
                  ((async && (env->mtvec & 3) == 1) ? cause * 4 : 0);
        riscv_cpu_set_mode(env, PRV_M);
    }

    /*
     * NOTE: it is not necessary to yield load reservations here. It is only
     * necessary for an SC from "another hart" to cause a load reservation
     * to be yielded. Refer to the memory consistency model section of the
     * RISC-V ISA Specification.
     */

    env->two_stage_lookup = false;
#endif
    cs->exception_index = RISCV_EXCP_NONE; /* mark handled to qemu */
}