1 /* 2 * RISC-V CPU helpers for qemu. 3 * 4 * Copyright (c) 2016-2017 Sagar Karandikar, sagark@eecs.berkeley.edu 5 * Copyright (c) 2017-2018 SiFive, Inc. 6 * 7 * This program is free software; you can redistribute it and/or modify it 8 * under the terms and conditions of the GNU General Public License, 9 * version 2 or later, as published by the Free Software Foundation. 10 * 11 * This program is distributed in the hope it will be useful, but WITHOUT 12 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or 13 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for 14 * more details. 15 * 16 * You should have received a copy of the GNU General Public License along with 17 * this program. If not, see <http://www.gnu.org/licenses/>. 18 */ 19 20 #include "qemu/osdep.h" 21 #include "qemu/log.h" 22 #include "qemu/main-loop.h" 23 #include "cpu.h" 24 #include "exec/exec-all.h" 25 #include "tcg/tcg-op.h" 26 #include "trace.h" 27 28 int riscv_cpu_mmu_index(CPURISCVState *env, bool ifetch) 29 { 30 #ifdef CONFIG_USER_ONLY 31 return 0; 32 #else 33 return env->priv; 34 #endif 35 } 36 37 #ifndef CONFIG_USER_ONLY 38 static int riscv_cpu_local_irq_pending(CPURISCVState *env) 39 { 40 target_ulong irqs; 41 42 target_ulong mstatus_mie = get_field(env->mstatus, MSTATUS_MIE); 43 target_ulong mstatus_sie = get_field(env->mstatus, MSTATUS_SIE); 44 target_ulong hs_mstatus_sie = get_field(env->mstatus_hs, MSTATUS_SIE); 45 46 target_ulong pending = env->mip & env->mie & 47 ~(MIP_VSSIP | MIP_VSTIP | MIP_VSEIP); 48 target_ulong vspending = (env->mip & env->mie & 49 (MIP_VSSIP | MIP_VSTIP | MIP_VSEIP)) >> 1; 50 51 target_ulong mie = env->priv < PRV_M || 52 (env->priv == PRV_M && mstatus_mie); 53 target_ulong sie = env->priv < PRV_S || 54 (env->priv == PRV_S && mstatus_sie); 55 target_ulong hs_sie = env->priv < PRV_S || 56 (env->priv == PRV_S && hs_mstatus_sie); 57 58 if (riscv_cpu_virt_enabled(env)) { 59 target_ulong pending_hs_irq = pending & -hs_sie; 60 61 if (pending_hs_irq) { 62 riscv_cpu_set_force_hs_excep(env, FORCE_HS_EXCEP); 63 return ctz64(pending_hs_irq); 64 } 65 66 pending = vspending; 67 } 68 69 irqs = (pending & ~env->mideleg & -mie) | (pending & env->mideleg & -sie); 70 71 if (irqs) { 72 return ctz64(irqs); /* since non-zero */ 73 } else { 74 return EXCP_NONE; /* indicates no pending interrupt */ 75 } 76 } 77 #endif 78 79 bool riscv_cpu_exec_interrupt(CPUState *cs, int interrupt_request) 80 { 81 #if !defined(CONFIG_USER_ONLY) 82 if (interrupt_request & CPU_INTERRUPT_HARD) { 83 RISCVCPU *cpu = RISCV_CPU(cs); 84 CPURISCVState *env = &cpu->env; 85 int interruptno = riscv_cpu_local_irq_pending(env); 86 if (interruptno >= 0) { 87 cs->exception_index = RISCV_EXCP_INT_FLAG | interruptno; 88 riscv_cpu_do_interrupt(cs); 89 return true; 90 } 91 } 92 #endif 93 return false; 94 } 95 96 #if !defined(CONFIG_USER_ONLY) 97 98 /* Return true is floating point support is currently enabled */ 99 bool riscv_cpu_fp_enabled(CPURISCVState *env) 100 { 101 if (env->mstatus & MSTATUS_FS) { 102 if (riscv_cpu_virt_enabled(env) && !(env->mstatus_hs & MSTATUS_FS)) { 103 return false; 104 } 105 return true; 106 } 107 108 return false; 109 } 110 111 void riscv_cpu_swap_hypervisor_regs(CPURISCVState *env) 112 { 113 target_ulong mstatus_mask = MSTATUS_MXR | MSTATUS_SUM | MSTATUS_FS | 114 MSTATUS_SPP | MSTATUS_SPIE | MSTATUS_SIE; 115 bool current_virt = riscv_cpu_virt_enabled(env); 116 117 g_assert(riscv_has_ext(env, RVH)); 118 119 #if defined(TARGET_RISCV64) 120 mstatus_mask |= MSTATUS64_UXL; 121 #endif 122 123 if (current_virt) { 124 /* Current V=1 and we are about to change to V=0 */ 125 env->vsstatus = env->mstatus & mstatus_mask; 126 env->mstatus &= ~mstatus_mask; 127 env->mstatus |= env->mstatus_hs; 128 129 env->vstvec = env->stvec; 130 env->stvec = env->stvec_hs; 131 132 env->vsscratch = env->sscratch; 133 env->sscratch = env->sscratch_hs; 134 135 env->vsepc = env->sepc; 136 env->sepc = env->sepc_hs; 137 138 env->vscause = env->scause; 139 env->scause = env->scause_hs; 140 141 env->vstval = env->sbadaddr; 142 env->sbadaddr = env->stval_hs; 143 144 env->vsatp = env->satp; 145 env->satp = env->satp_hs; 146 } else { 147 /* Current V=0 and we are about to change to V=1 */ 148 env->mstatus_hs = env->mstatus & mstatus_mask; 149 env->mstatus &= ~mstatus_mask; 150 env->mstatus |= env->vsstatus; 151 152 env->stvec_hs = env->stvec; 153 env->stvec = env->vstvec; 154 155 env->sscratch_hs = env->sscratch; 156 env->sscratch = env->vsscratch; 157 158 env->sepc_hs = env->sepc; 159 env->sepc = env->vsepc; 160 161 env->scause_hs = env->scause; 162 env->scause = env->vscause; 163 164 env->stval_hs = env->sbadaddr; 165 env->sbadaddr = env->vstval; 166 167 env->satp_hs = env->satp; 168 env->satp = env->vsatp; 169 } 170 } 171 172 bool riscv_cpu_virt_enabled(CPURISCVState *env) 173 { 174 if (!riscv_has_ext(env, RVH)) { 175 return false; 176 } 177 178 return get_field(env->virt, VIRT_ONOFF); 179 } 180 181 void riscv_cpu_set_virt_enabled(CPURISCVState *env, bool enable) 182 { 183 if (!riscv_has_ext(env, RVH)) { 184 return; 185 } 186 187 /* Flush the TLB on all virt mode changes. */ 188 if (get_field(env->virt, VIRT_ONOFF) != enable) { 189 tlb_flush(env_cpu(env)); 190 } 191 192 env->virt = set_field(env->virt, VIRT_ONOFF, enable); 193 } 194 195 bool riscv_cpu_force_hs_excep_enabled(CPURISCVState *env) 196 { 197 if (!riscv_has_ext(env, RVH)) { 198 return false; 199 } 200 201 return get_field(env->virt, FORCE_HS_EXCEP); 202 } 203 204 void riscv_cpu_set_force_hs_excep(CPURISCVState *env, bool enable) 205 { 206 if (!riscv_has_ext(env, RVH)) { 207 return; 208 } 209 210 env->virt = set_field(env->virt, FORCE_HS_EXCEP, enable); 211 } 212 213 int riscv_cpu_claim_interrupts(RISCVCPU *cpu, uint32_t interrupts) 214 { 215 CPURISCVState *env = &cpu->env; 216 if (env->miclaim & interrupts) { 217 return -1; 218 } else { 219 env->miclaim |= interrupts; 220 return 0; 221 } 222 } 223 224 uint32_t riscv_cpu_update_mip(RISCVCPU *cpu, uint32_t mask, uint32_t value) 225 { 226 CPURISCVState *env = &cpu->env; 227 CPUState *cs = CPU(cpu); 228 uint32_t old = env->mip; 229 bool locked = false; 230 231 if (!qemu_mutex_iothread_locked()) { 232 locked = true; 233 qemu_mutex_lock_iothread(); 234 } 235 236 env->mip = (env->mip & ~mask) | (value & mask); 237 238 if (env->mip) { 239 cpu_interrupt(cs, CPU_INTERRUPT_HARD); 240 } else { 241 cpu_reset_interrupt(cs, CPU_INTERRUPT_HARD); 242 } 243 244 if (locked) { 245 qemu_mutex_unlock_iothread(); 246 } 247 248 return old; 249 } 250 251 void riscv_cpu_set_mode(CPURISCVState *env, target_ulong newpriv) 252 { 253 if (newpriv > PRV_M) { 254 g_assert_not_reached(); 255 } 256 if (newpriv == PRV_H) { 257 newpriv = PRV_U; 258 } 259 /* tlb_flush is unnecessary as mode is contained in mmu_idx */ 260 env->priv = newpriv; 261 262 /* 263 * Clear the load reservation - otherwise a reservation placed in one 264 * context/process can be used by another, resulting in an SC succeeding 265 * incorrectly. Version 2.2 of the ISA specification explicitly requires 266 * this behaviour, while later revisions say that the kernel "should" use 267 * an SC instruction to force the yielding of a load reservation on a 268 * preemptive context switch. As a result, do both. 269 */ 270 env->load_res = -1; 271 } 272 273 /* get_physical_address - get the physical address for this virtual address 274 * 275 * Do a page table walk to obtain the physical address corresponding to a 276 * virtual address. Returns 0 if the translation was successful 277 * 278 * Adapted from Spike's mmu_t::translate and mmu_t::walk 279 * 280 * @env: CPURISCVState 281 * @physical: This will be set to the calculated physical address 282 * @prot: The returned protection attributes 283 * @addr: The virtual address to be translated 284 * @access_type: The type of MMU access 285 * @mmu_idx: Indicates current privilege level 286 * @first_stage: Are we in first stage translation? 287 * Second stage is used for hypervisor guest translation 288 * @two_stage: Are we going to perform two stage translation 289 */ 290 static int get_physical_address(CPURISCVState *env, hwaddr *physical, 291 int *prot, target_ulong addr, 292 int access_type, int mmu_idx, 293 bool first_stage, bool two_stage) 294 { 295 /* NOTE: the env->pc value visible here will not be 296 * correct, but the value visible to the exception handler 297 * (riscv_cpu_do_interrupt) is correct */ 298 MemTxResult res; 299 MemTxAttrs attrs = MEMTXATTRS_UNSPECIFIED; 300 int mode = mmu_idx; 301 bool use_background = false; 302 303 /* 304 * Check if we should use the background registers for the two 305 * stage translation. We don't need to check if we actually need 306 * two stage translation as that happened before this function 307 * was called. Background registers will be used if the guest has 308 * forced a two stage translation to be on (in HS or M mode). 309 */ 310 if (mode == PRV_M && access_type != MMU_INST_FETCH) { 311 if (get_field(env->mstatus, MSTATUS_MPRV)) { 312 mode = get_field(env->mstatus, MSTATUS_MPP); 313 314 if (riscv_has_ext(env, RVH) && 315 get_field(env->mstatus, MSTATUS_MPV)) { 316 use_background = true; 317 } 318 } 319 } 320 321 if (mode == PRV_S && access_type != MMU_INST_FETCH && 322 riscv_has_ext(env, RVH) && !riscv_cpu_virt_enabled(env)) { 323 if (get_field(env->hstatus, HSTATUS_SPRV)) { 324 mode = get_field(env->mstatus, SSTATUS_SPP); 325 use_background = true; 326 } 327 } 328 329 if (first_stage == false) { 330 /* We are in stage 2 translation, this is similar to stage 1. */ 331 /* Stage 2 is always taken as U-mode */ 332 mode = PRV_U; 333 } 334 335 if (mode == PRV_M || !riscv_feature(env, RISCV_FEATURE_MMU)) { 336 *physical = addr; 337 *prot = PAGE_READ | PAGE_WRITE | PAGE_EXEC; 338 return TRANSLATE_SUCCESS; 339 } 340 341 *prot = 0; 342 343 hwaddr base; 344 int levels, ptidxbits, ptesize, vm, sum, mxr, widened; 345 346 if (first_stage == true) { 347 mxr = get_field(env->mstatus, MSTATUS_MXR); 348 } else { 349 mxr = get_field(env->vsstatus, MSTATUS_MXR); 350 } 351 352 if (env->priv_ver >= PRIV_VERSION_1_10_0) { 353 if (first_stage == true) { 354 if (use_background) { 355 base = (hwaddr)get_field(env->vsatp, SATP_PPN) << PGSHIFT; 356 vm = get_field(env->vsatp, SATP_MODE); 357 } else { 358 base = (hwaddr)get_field(env->satp, SATP_PPN) << PGSHIFT; 359 vm = get_field(env->satp, SATP_MODE); 360 } 361 widened = 0; 362 } else { 363 base = (hwaddr)get_field(env->hgatp, HGATP_PPN) << PGSHIFT; 364 vm = get_field(env->hgatp, HGATP_MODE); 365 widened = 2; 366 } 367 sum = get_field(env->mstatus, MSTATUS_SUM); 368 switch (vm) { 369 case VM_1_10_SV32: 370 levels = 2; ptidxbits = 10; ptesize = 4; break; 371 case VM_1_10_SV39: 372 levels = 3; ptidxbits = 9; ptesize = 8; break; 373 case VM_1_10_SV48: 374 levels = 4; ptidxbits = 9; ptesize = 8; break; 375 case VM_1_10_SV57: 376 levels = 5; ptidxbits = 9; ptesize = 8; break; 377 case VM_1_10_MBARE: 378 *physical = addr; 379 *prot = PAGE_READ | PAGE_WRITE | PAGE_EXEC; 380 return TRANSLATE_SUCCESS; 381 default: 382 g_assert_not_reached(); 383 } 384 } else { 385 widened = 0; 386 base = (hwaddr)(env->sptbr) << PGSHIFT; 387 sum = !get_field(env->mstatus, MSTATUS_PUM); 388 vm = get_field(env->mstatus, MSTATUS_VM); 389 switch (vm) { 390 case VM_1_09_SV32: 391 levels = 2; ptidxbits = 10; ptesize = 4; break; 392 case VM_1_09_SV39: 393 levels = 3; ptidxbits = 9; ptesize = 8; break; 394 case VM_1_09_SV48: 395 levels = 4; ptidxbits = 9; ptesize = 8; break; 396 case VM_1_09_MBARE: 397 *physical = addr; 398 *prot = PAGE_READ | PAGE_WRITE | PAGE_EXEC; 399 return TRANSLATE_SUCCESS; 400 default: 401 g_assert_not_reached(); 402 } 403 } 404 405 CPUState *cs = env_cpu(env); 406 int va_bits = PGSHIFT + levels * ptidxbits + widened; 407 target_ulong mask, masked_msbs; 408 409 if (TARGET_LONG_BITS > (va_bits - 1)) { 410 mask = (1L << (TARGET_LONG_BITS - (va_bits - 1))) - 1; 411 } else { 412 mask = 0; 413 } 414 masked_msbs = (addr >> (va_bits - 1)) & mask; 415 416 if (masked_msbs != 0 && masked_msbs != mask) { 417 return TRANSLATE_FAIL; 418 } 419 420 int ptshift = (levels - 1) * ptidxbits; 421 int i; 422 423 #if !TCG_OVERSIZED_GUEST 424 restart: 425 #endif 426 for (i = 0; i < levels; i++, ptshift -= ptidxbits) { 427 target_ulong idx; 428 if (i == 0) { 429 idx = (addr >> (PGSHIFT + ptshift)) & 430 ((1 << (ptidxbits + widened)) - 1); 431 } else { 432 idx = (addr >> (PGSHIFT + ptshift)) & 433 ((1 << ptidxbits) - 1); 434 } 435 436 /* check that physical address of PTE is legal */ 437 hwaddr pte_addr; 438 439 if (two_stage && first_stage) { 440 hwaddr vbase; 441 442 /* Do the second stage translation on the base PTE address. */ 443 get_physical_address(env, &vbase, prot, base, access_type, 444 mmu_idx, false, true); 445 446 pte_addr = vbase + idx * ptesize; 447 } else { 448 pte_addr = base + idx * ptesize; 449 } 450 451 if (riscv_feature(env, RISCV_FEATURE_PMP) && 452 !pmp_hart_has_privs(env, pte_addr, sizeof(target_ulong), 453 1 << MMU_DATA_LOAD, PRV_S)) { 454 return TRANSLATE_PMP_FAIL; 455 } 456 457 #if defined(TARGET_RISCV32) 458 target_ulong pte = address_space_ldl(cs->as, pte_addr, attrs, &res); 459 #elif defined(TARGET_RISCV64) 460 target_ulong pte = address_space_ldq(cs->as, pte_addr, attrs, &res); 461 #endif 462 if (res != MEMTX_OK) { 463 return TRANSLATE_FAIL; 464 } 465 466 hwaddr ppn = pte >> PTE_PPN_SHIFT; 467 468 if (!(pte & PTE_V)) { 469 /* Invalid PTE */ 470 return TRANSLATE_FAIL; 471 } else if (!(pte & (PTE_R | PTE_W | PTE_X))) { 472 /* Inner PTE, continue walking */ 473 base = ppn << PGSHIFT; 474 } else if ((pte & (PTE_R | PTE_W | PTE_X)) == PTE_W) { 475 /* Reserved leaf PTE flags: PTE_W */ 476 return TRANSLATE_FAIL; 477 } else if ((pte & (PTE_R | PTE_W | PTE_X)) == (PTE_W | PTE_X)) { 478 /* Reserved leaf PTE flags: PTE_W + PTE_X */ 479 return TRANSLATE_FAIL; 480 } else if ((pte & PTE_U) && ((mode != PRV_U) && 481 (!sum || access_type == MMU_INST_FETCH))) { 482 /* User PTE flags when not U mode and mstatus.SUM is not set, 483 or the access type is an instruction fetch */ 484 return TRANSLATE_FAIL; 485 } else if (!(pte & PTE_U) && (mode != PRV_S)) { 486 /* Supervisor PTE flags when not S mode */ 487 return TRANSLATE_FAIL; 488 } else if (ppn & ((1ULL << ptshift) - 1)) { 489 /* Misaligned PPN */ 490 return TRANSLATE_FAIL; 491 } else if (access_type == MMU_DATA_LOAD && !((pte & PTE_R) || 492 ((pte & PTE_X) && mxr))) { 493 /* Read access check failed */ 494 return TRANSLATE_FAIL; 495 } else if (access_type == MMU_DATA_STORE && !(pte & PTE_W)) { 496 /* Write access check failed */ 497 return TRANSLATE_FAIL; 498 } else if (access_type == MMU_INST_FETCH && !(pte & PTE_X)) { 499 /* Fetch access check failed */ 500 return TRANSLATE_FAIL; 501 } else { 502 /* if necessary, set accessed and dirty bits. */ 503 target_ulong updated_pte = pte | PTE_A | 504 (access_type == MMU_DATA_STORE ? PTE_D : 0); 505 506 /* Page table updates need to be atomic with MTTCG enabled */ 507 if (updated_pte != pte) { 508 /* 509 * - if accessed or dirty bits need updating, and the PTE is 510 * in RAM, then we do so atomically with a compare and swap. 511 * - if the PTE is in IO space or ROM, then it can't be updated 512 * and we return TRANSLATE_FAIL. 513 * - if the PTE changed by the time we went to update it, then 514 * it is no longer valid and we must re-walk the page table. 515 */ 516 MemoryRegion *mr; 517 hwaddr l = sizeof(target_ulong), addr1; 518 mr = address_space_translate(cs->as, pte_addr, 519 &addr1, &l, false, MEMTXATTRS_UNSPECIFIED); 520 if (memory_region_is_ram(mr)) { 521 target_ulong *pte_pa = 522 qemu_map_ram_ptr(mr->ram_block, addr1); 523 #if TCG_OVERSIZED_GUEST 524 /* MTTCG is not enabled on oversized TCG guests so 525 * page table updates do not need to be atomic */ 526 *pte_pa = pte = updated_pte; 527 #else 528 target_ulong old_pte = 529 atomic_cmpxchg(pte_pa, pte, updated_pte); 530 if (old_pte != pte) { 531 goto restart; 532 } else { 533 pte = updated_pte; 534 } 535 #endif 536 } else { 537 /* misconfigured PTE in ROM (AD bits are not preset) or 538 * PTE is in IO space and can't be updated atomically */ 539 return TRANSLATE_FAIL; 540 } 541 } 542 543 /* for superpage mappings, make a fake leaf PTE for the TLB's 544 benefit. */ 545 target_ulong vpn = addr >> PGSHIFT; 546 if (i == 0) { 547 *physical = (ppn | (vpn & ((1L << (ptshift + widened)) - 1))) << 548 PGSHIFT; 549 } else { 550 *physical = (ppn | (vpn & ((1L << ptshift) - 1))) << PGSHIFT; 551 } 552 553 /* set permissions on the TLB entry */ 554 if ((pte & PTE_R) || ((pte & PTE_X) && mxr)) { 555 *prot |= PAGE_READ; 556 } 557 if ((pte & PTE_X)) { 558 *prot |= PAGE_EXEC; 559 } 560 /* add write permission on stores or if the page is already dirty, 561 so that we TLB miss on later writes to update the dirty bit */ 562 if ((pte & PTE_W) && 563 (access_type == MMU_DATA_STORE || (pte & PTE_D))) { 564 *prot |= PAGE_WRITE; 565 } 566 return TRANSLATE_SUCCESS; 567 } 568 } 569 return TRANSLATE_FAIL; 570 } 571 572 static void raise_mmu_exception(CPURISCVState *env, target_ulong address, 573 MMUAccessType access_type, bool pmp_violation, 574 bool first_stage) 575 { 576 CPUState *cs = env_cpu(env); 577 int page_fault_exceptions; 578 if (first_stage) { 579 page_fault_exceptions = 580 (env->priv_ver >= PRIV_VERSION_1_10_0) && 581 get_field(env->satp, SATP_MODE) != VM_1_10_MBARE && 582 !pmp_violation; 583 } else { 584 page_fault_exceptions = 585 get_field(env->hgatp, HGATP_MODE) != VM_1_10_MBARE && 586 !pmp_violation; 587 } 588 switch (access_type) { 589 case MMU_INST_FETCH: 590 if (riscv_cpu_virt_enabled(env) && !first_stage) { 591 cs->exception_index = RISCV_EXCP_INST_GUEST_PAGE_FAULT; 592 } else { 593 cs->exception_index = page_fault_exceptions ? 594 RISCV_EXCP_INST_PAGE_FAULT : RISCV_EXCP_INST_ACCESS_FAULT; 595 } 596 break; 597 case MMU_DATA_LOAD: 598 if (riscv_cpu_virt_enabled(env) && !first_stage) { 599 cs->exception_index = RISCV_EXCP_LOAD_GUEST_ACCESS_FAULT; 600 } else { 601 cs->exception_index = page_fault_exceptions ? 602 RISCV_EXCP_LOAD_PAGE_FAULT : RISCV_EXCP_LOAD_ACCESS_FAULT; 603 } 604 break; 605 case MMU_DATA_STORE: 606 if (riscv_cpu_virt_enabled(env) && !first_stage) { 607 cs->exception_index = RISCV_EXCP_STORE_GUEST_AMO_ACCESS_FAULT; 608 } else { 609 cs->exception_index = page_fault_exceptions ? 610 RISCV_EXCP_STORE_PAGE_FAULT : RISCV_EXCP_STORE_AMO_ACCESS_FAULT; 611 } 612 break; 613 default: 614 g_assert_not_reached(); 615 } 616 env->badaddr = address; 617 } 618 619 hwaddr riscv_cpu_get_phys_page_debug(CPUState *cs, vaddr addr) 620 { 621 RISCVCPU *cpu = RISCV_CPU(cs); 622 CPURISCVState *env = &cpu->env; 623 hwaddr phys_addr; 624 int prot; 625 int mmu_idx = cpu_mmu_index(&cpu->env, false); 626 627 if (get_physical_address(env, &phys_addr, &prot, addr, 0, mmu_idx, 628 true, riscv_cpu_virt_enabled(env))) { 629 return -1; 630 } 631 632 if (riscv_cpu_virt_enabled(env)) { 633 if (get_physical_address(env, &phys_addr, &prot, phys_addr, 634 0, mmu_idx, false, true)) { 635 return -1; 636 } 637 } 638 639 return phys_addr; 640 } 641 642 void riscv_cpu_do_transaction_failed(CPUState *cs, hwaddr physaddr, 643 vaddr addr, unsigned size, 644 MMUAccessType access_type, 645 int mmu_idx, MemTxAttrs attrs, 646 MemTxResult response, uintptr_t retaddr) 647 { 648 RISCVCPU *cpu = RISCV_CPU(cs); 649 CPURISCVState *env = &cpu->env; 650 651 if (access_type == MMU_DATA_STORE) { 652 cs->exception_index = RISCV_EXCP_STORE_AMO_ACCESS_FAULT; 653 } else { 654 cs->exception_index = RISCV_EXCP_LOAD_ACCESS_FAULT; 655 } 656 657 env->badaddr = addr; 658 riscv_raise_exception(&cpu->env, cs->exception_index, retaddr); 659 } 660 661 void riscv_cpu_do_unaligned_access(CPUState *cs, vaddr addr, 662 MMUAccessType access_type, int mmu_idx, 663 uintptr_t retaddr) 664 { 665 RISCVCPU *cpu = RISCV_CPU(cs); 666 CPURISCVState *env = &cpu->env; 667 switch (access_type) { 668 case MMU_INST_FETCH: 669 cs->exception_index = RISCV_EXCP_INST_ADDR_MIS; 670 break; 671 case MMU_DATA_LOAD: 672 cs->exception_index = RISCV_EXCP_LOAD_ADDR_MIS; 673 break; 674 case MMU_DATA_STORE: 675 cs->exception_index = RISCV_EXCP_STORE_AMO_ADDR_MIS; 676 break; 677 default: 678 g_assert_not_reached(); 679 } 680 env->badaddr = addr; 681 riscv_raise_exception(env, cs->exception_index, retaddr); 682 } 683 #endif 684 685 bool riscv_cpu_tlb_fill(CPUState *cs, vaddr address, int size, 686 MMUAccessType access_type, int mmu_idx, 687 bool probe, uintptr_t retaddr) 688 { 689 RISCVCPU *cpu = RISCV_CPU(cs); 690 CPURISCVState *env = &cpu->env; 691 #ifndef CONFIG_USER_ONLY 692 vaddr im_address; 693 hwaddr pa = 0; 694 int prot; 695 bool pmp_violation = false; 696 bool m_mode_two_stage = false; 697 bool hs_mode_two_stage = false; 698 bool first_stage_error = true; 699 int ret = TRANSLATE_FAIL; 700 int mode = mmu_idx; 701 702 env->guest_phys_fault_addr = 0; 703 704 qemu_log_mask(CPU_LOG_MMU, "%s ad %" VADDR_PRIx " rw %d mmu_idx %d\n", 705 __func__, address, access_type, mmu_idx); 706 707 /* 708 * Determine if we are in M mode and MPRV is set or in HS mode and SPRV is 709 * set and we want to access a virtulisation address. 710 */ 711 if (riscv_has_ext(env, RVH)) { 712 m_mode_two_stage = env->priv == PRV_M && 713 access_type != MMU_INST_FETCH && 714 get_field(env->mstatus, MSTATUS_MPRV) && 715 get_field(env->mstatus, MSTATUS_MPV); 716 717 hs_mode_two_stage = env->priv == PRV_S && 718 !riscv_cpu_virt_enabled(env) && 719 access_type != MMU_INST_FETCH && 720 get_field(env->hstatus, HSTATUS_SPRV) && 721 get_field(env->hstatus, HSTATUS_SPV); 722 } 723 724 if (mode == PRV_M && access_type != MMU_INST_FETCH) { 725 if (get_field(env->mstatus, MSTATUS_MPRV)) { 726 mode = get_field(env->mstatus, MSTATUS_MPP); 727 } 728 } 729 730 if (riscv_cpu_virt_enabled(env) || m_mode_two_stage || hs_mode_two_stage) { 731 /* Two stage lookup */ 732 ret = get_physical_address(env, &pa, &prot, address, access_type, 733 mmu_idx, true, true); 734 735 qemu_log_mask(CPU_LOG_MMU, 736 "%s 1st-stage address=%" VADDR_PRIx " ret %d physical " 737 TARGET_FMT_plx " prot %d\n", 738 __func__, address, ret, pa, prot); 739 740 if (ret != TRANSLATE_FAIL) { 741 /* Second stage lookup */ 742 im_address = pa; 743 744 ret = get_physical_address(env, &pa, &prot, im_address, 745 access_type, mmu_idx, false, true); 746 747 qemu_log_mask(CPU_LOG_MMU, 748 "%s 2nd-stage address=%" VADDR_PRIx " ret %d physical " 749 TARGET_FMT_plx " prot %d\n", 750 __func__, im_address, ret, pa, prot); 751 752 if (riscv_feature(env, RISCV_FEATURE_PMP) && 753 (ret == TRANSLATE_SUCCESS) && 754 !pmp_hart_has_privs(env, pa, size, 1 << access_type, mode)) { 755 ret = TRANSLATE_PMP_FAIL; 756 } 757 758 if (ret != TRANSLATE_SUCCESS) { 759 /* 760 * Guest physical address translation failed, this is a HS 761 * level exception 762 */ 763 first_stage_error = false; 764 env->guest_phys_fault_addr = (im_address | 765 (address & 766 (TARGET_PAGE_SIZE - 1))) >> 2; 767 } 768 } 769 } else { 770 /* Single stage lookup */ 771 ret = get_physical_address(env, &pa, &prot, address, access_type, 772 mmu_idx, true, false); 773 774 qemu_log_mask(CPU_LOG_MMU, 775 "%s address=%" VADDR_PRIx " ret %d physical " 776 TARGET_FMT_plx " prot %d\n", 777 __func__, address, ret, pa, prot); 778 } 779 780 if (riscv_feature(env, RISCV_FEATURE_PMP) && 781 (ret == TRANSLATE_SUCCESS) && 782 !pmp_hart_has_privs(env, pa, size, 1 << access_type, mode)) { 783 ret = TRANSLATE_PMP_FAIL; 784 } 785 if (ret == TRANSLATE_PMP_FAIL) { 786 pmp_violation = true; 787 } 788 789 if (ret == TRANSLATE_SUCCESS) { 790 tlb_set_page(cs, address & TARGET_PAGE_MASK, pa & TARGET_PAGE_MASK, 791 prot, mmu_idx, TARGET_PAGE_SIZE); 792 return true; 793 } else if (probe) { 794 return false; 795 } else { 796 raise_mmu_exception(env, address, access_type, pmp_violation, first_stage_error); 797 riscv_raise_exception(env, cs->exception_index, retaddr); 798 } 799 800 return true; 801 802 #else 803 switch (access_type) { 804 case MMU_INST_FETCH: 805 cs->exception_index = RISCV_EXCP_INST_PAGE_FAULT; 806 break; 807 case MMU_DATA_LOAD: 808 cs->exception_index = RISCV_EXCP_LOAD_PAGE_FAULT; 809 break; 810 case MMU_DATA_STORE: 811 cs->exception_index = RISCV_EXCP_STORE_PAGE_FAULT; 812 break; 813 default: 814 g_assert_not_reached(); 815 } 816 env->badaddr = address; 817 cpu_loop_exit_restore(cs, retaddr); 818 #endif 819 } 820 821 /* 822 * Handle Traps 823 * 824 * Adapted from Spike's processor_t::take_trap. 825 * 826 */ 827 void riscv_cpu_do_interrupt(CPUState *cs) 828 { 829 #if !defined(CONFIG_USER_ONLY) 830 831 RISCVCPU *cpu = RISCV_CPU(cs); 832 CPURISCVState *env = &cpu->env; 833 bool force_hs_execp = riscv_cpu_force_hs_excep_enabled(env); 834 target_ulong s; 835 836 /* cs->exception is 32-bits wide unlike mcause which is XLEN-bits wide 837 * so we mask off the MSB and separate into trap type and cause. 838 */ 839 bool async = !!(cs->exception_index & RISCV_EXCP_INT_FLAG); 840 target_ulong cause = cs->exception_index & RISCV_EXCP_INT_MASK; 841 target_ulong deleg = async ? env->mideleg : env->medeleg; 842 target_ulong tval = 0; 843 target_ulong htval = 0; 844 target_ulong mtval2 = 0; 845 846 if (!async) { 847 /* set tval to badaddr for traps with address information */ 848 switch (cause) { 849 case RISCV_EXCP_INST_GUEST_PAGE_FAULT: 850 case RISCV_EXCP_LOAD_GUEST_ACCESS_FAULT: 851 case RISCV_EXCP_STORE_GUEST_AMO_ACCESS_FAULT: 852 force_hs_execp = true; 853 /* fallthrough */ 854 case RISCV_EXCP_INST_ADDR_MIS: 855 case RISCV_EXCP_INST_ACCESS_FAULT: 856 case RISCV_EXCP_LOAD_ADDR_MIS: 857 case RISCV_EXCP_STORE_AMO_ADDR_MIS: 858 case RISCV_EXCP_LOAD_ACCESS_FAULT: 859 case RISCV_EXCP_STORE_AMO_ACCESS_FAULT: 860 case RISCV_EXCP_INST_PAGE_FAULT: 861 case RISCV_EXCP_LOAD_PAGE_FAULT: 862 case RISCV_EXCP_STORE_PAGE_FAULT: 863 tval = env->badaddr; 864 break; 865 default: 866 break; 867 } 868 /* ecall is dispatched as one cause so translate based on mode */ 869 if (cause == RISCV_EXCP_U_ECALL) { 870 assert(env->priv <= 3); 871 872 if (env->priv == PRV_M) { 873 cause = RISCV_EXCP_M_ECALL; 874 } else if (env->priv == PRV_S && riscv_cpu_virt_enabled(env)) { 875 cause = RISCV_EXCP_VS_ECALL; 876 } else if (env->priv == PRV_S && !riscv_cpu_virt_enabled(env)) { 877 cause = RISCV_EXCP_S_ECALL; 878 } else if (env->priv == PRV_U) { 879 cause = RISCV_EXCP_U_ECALL; 880 } 881 } 882 } 883 884 trace_riscv_trap(env->mhartid, async, cause, env->pc, tval, cause < 23 ? 885 (async ? riscv_intr_names : riscv_excp_names)[cause] : "(unknown)"); 886 887 if (env->priv <= PRV_S && 888 cause < TARGET_LONG_BITS && ((deleg >> cause) & 1)) { 889 /* handle the trap in S-mode */ 890 if (riscv_has_ext(env, RVH)) { 891 target_ulong hdeleg = async ? env->hideleg : env->hedeleg; 892 893 if (riscv_cpu_virt_enabled(env) && ((hdeleg >> cause) & 1) && 894 !force_hs_execp) { 895 /* Trap to VS mode */ 896 } else if (riscv_cpu_virt_enabled(env)) { 897 /* Trap into HS mode, from virt */ 898 riscv_cpu_swap_hypervisor_regs(env); 899 env->hstatus = set_field(env->hstatus, HSTATUS_SP2V, 900 get_field(env->hstatus, HSTATUS_SPV)); 901 env->hstatus = set_field(env->hstatus, HSTATUS_SP2P, 902 get_field(env->mstatus, SSTATUS_SPP)); 903 env->hstatus = set_field(env->hstatus, HSTATUS_SPV, 904 riscv_cpu_virt_enabled(env)); 905 906 htval = env->guest_phys_fault_addr; 907 908 riscv_cpu_set_virt_enabled(env, 0); 909 riscv_cpu_set_force_hs_excep(env, 0); 910 } else { 911 /* Trap into HS mode */ 912 env->hstatus = set_field(env->hstatus, HSTATUS_SP2V, 913 get_field(env->hstatus, HSTATUS_SPV)); 914 env->hstatus = set_field(env->hstatus, HSTATUS_SP2P, 915 get_field(env->mstatus, SSTATUS_SPP)); 916 env->hstatus = set_field(env->hstatus, HSTATUS_SPV, 917 riscv_cpu_virt_enabled(env)); 918 919 htval = env->guest_phys_fault_addr; 920 } 921 } 922 923 s = env->mstatus; 924 s = set_field(s, MSTATUS_SPIE, env->priv_ver >= PRIV_VERSION_1_10_0 ? 925 get_field(s, MSTATUS_SIE) : get_field(s, MSTATUS_UIE << env->priv)); 926 s = set_field(s, MSTATUS_SPP, env->priv); 927 s = set_field(s, MSTATUS_SIE, 0); 928 env->mstatus = s; 929 env->scause = cause | ((target_ulong)async << (TARGET_LONG_BITS - 1)); 930 env->sepc = env->pc; 931 env->sbadaddr = tval; 932 env->htval = htval; 933 env->pc = (env->stvec >> 2 << 2) + 934 ((async && (env->stvec & 3) == 1) ? cause * 4 : 0); 935 riscv_cpu_set_mode(env, PRV_S); 936 } else { 937 /* handle the trap in M-mode */ 938 if (riscv_has_ext(env, RVH)) { 939 if (riscv_cpu_virt_enabled(env)) { 940 riscv_cpu_swap_hypervisor_regs(env); 941 } 942 env->mstatus = set_field(env->mstatus, MSTATUS_MPV, 943 riscv_cpu_virt_enabled(env)); 944 env->mstatus = set_field(env->mstatus, MSTATUS_MTL, 945 riscv_cpu_force_hs_excep_enabled(env)); 946 947 mtval2 = env->guest_phys_fault_addr; 948 949 /* Trapping to M mode, virt is disabled */ 950 riscv_cpu_set_virt_enabled(env, 0); 951 riscv_cpu_set_force_hs_excep(env, 0); 952 } 953 954 s = env->mstatus; 955 s = set_field(s, MSTATUS_MPIE, env->priv_ver >= PRIV_VERSION_1_10_0 ? 956 get_field(s, MSTATUS_MIE) : get_field(s, MSTATUS_UIE << env->priv)); 957 s = set_field(s, MSTATUS_MPP, env->priv); 958 s = set_field(s, MSTATUS_MIE, 0); 959 env->mstatus = s; 960 env->mcause = cause | ~(((target_ulong)-1) >> async); 961 env->mepc = env->pc; 962 env->mbadaddr = tval; 963 env->mtval2 = mtval2; 964 env->pc = (env->mtvec >> 2 << 2) + 965 ((async && (env->mtvec & 3) == 1) ? cause * 4 : 0); 966 riscv_cpu_set_mode(env, PRV_M); 967 } 968 969 /* NOTE: it is not necessary to yield load reservations here. It is only 970 * necessary for an SC from "another hart" to cause a load reservation 971 * to be yielded. Refer to the memory consistency model section of the 972 * RISC-V ISA Specification. 973 */ 974 975 #endif 976 cs->exception_index = EXCP_NONE; /* mark handled to qemu */ 977 } 978