/*
 * RISC-V CPU helpers for qemu.
 *
 * Copyright (c) 2016-2017 Sagar Karandikar, sagark@eecs.berkeley.edu
 * Copyright (c) 2017-2018 SiFive, Inc.
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms and conditions of the GNU General Public License,
 * version 2 or later, as published by the Free Software Foundation.
 *
 * This program is distributed in the hope it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
 * more details.
 *
 * You should have received a copy of the GNU General Public License along with
 * this program. If not, see <http://www.gnu.org/licenses/>.
 */

#include "qemu/osdep.h"
#include "qemu/log.h"
#include "qemu/main-loop.h"
#include "cpu.h"
#include "exec/exec-all.h"
#include "tcg/tcg-op.h"
#include "trace.h"

int riscv_cpu_mmu_index(CPURISCVState *env, bool ifetch)
{
#ifdef CONFIG_USER_ONLY
    return 0;
#else
    return env->priv;
#endif
}

#ifndef CONFIG_USER_ONLY
static int riscv_cpu_local_irq_pending(CPURISCVState *env)
{
    target_ulong irqs;

    target_ulong mstatus_mie = get_field(env->mstatus, MSTATUS_MIE);
    target_ulong mstatus_sie = get_field(env->mstatus, MSTATUS_SIE);
    target_ulong hs_mstatus_sie = get_field(env->mstatus_hs, MSTATUS_SIE);

    target_ulong pending = env->mip & env->mie &
                               ~(MIP_VSSIP | MIP_VSTIP | MIP_VSEIP);
    target_ulong vspending = (env->mip & env->mie &
                              (MIP_VSSIP | MIP_VSTIP | MIP_VSEIP));

    target_ulong mie = env->priv < PRV_M ||
                       (env->priv == PRV_M && mstatus_mie);
    target_ulong sie = env->priv < PRV_S ||
                       (env->priv == PRV_S && mstatus_sie);
    target_ulong hs_sie = env->priv < PRV_S ||
                          (env->priv == PRV_S && hs_mstatus_sie);

    if (riscv_cpu_virt_enabled(env)) {
        target_ulong pending_hs_irq = pending & -hs_sie;

        if (pending_hs_irq) {
            riscv_cpu_set_force_hs_excep(env, FORCE_HS_EXCEP);
            return ctz64(pending_hs_irq);
        }

        pending = vspending;
    }

    irqs = (pending & ~env->mideleg & -mie) | (pending & env->mideleg & -sie);

    if (irqs) {
        return ctz64(irqs); /* since non-zero */
    } else {
        return EXCP_NONE; /* indicates no pending interrupt */
    }
}
#endif
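
/*
 * Note on the masks in riscv_cpu_local_irq_pending() above (a reader's
 * aid, not new behaviour): mie, sie and hs_sie are each 0 or 1, so
 * negating them yields an all-zeros or an all-ones mask. For example, at
 * PRV_S with mstatus.SIE clear, -sie == 0 hides every delegated
 * interrupt while -mie == ~0 keeps the non-delegated ones visible.
 * ctz64() then selects the lowest pending cause number, e.g.
 * ctz64(MIP_MTIP) == IRQ_M_TIMER == 7.
 */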

bool riscv_cpu_exec_interrupt(CPUState *cs, int interrupt_request)
{
#if !defined(CONFIG_USER_ONLY)
    if (interrupt_request & CPU_INTERRUPT_HARD) {
        RISCVCPU *cpu = RISCV_CPU(cs);
        CPURISCVState *env = &cpu->env;
        int interruptno = riscv_cpu_local_irq_pending(env);
        if (interruptno >= 0) {
            cs->exception_index = RISCV_EXCP_INT_FLAG | interruptno;
            riscv_cpu_do_interrupt(cs);
            return true;
        }
    }
#endif
    return false;
}

#if !defined(CONFIG_USER_ONLY)

/* Return true if floating point support is currently enabled */
bool riscv_cpu_fp_enabled(CPURISCVState *env)
{
    if (env->mstatus & MSTATUS_FS) {
        if (riscv_cpu_virt_enabled(env) && !(env->mstatus_hs & MSTATUS_FS)) {
            return false;
        }
        return true;
    }

    return false;
}

void riscv_cpu_swap_hypervisor_regs(CPURISCVState *env)
{
    uint64_t mstatus_mask = MSTATUS_MXR | MSTATUS_SUM | MSTATUS_FS |
                            MSTATUS_SPP | MSTATUS_SPIE | MSTATUS_SIE |
                            MSTATUS64_UXL;
    bool current_virt = riscv_cpu_virt_enabled(env);

    g_assert(riscv_has_ext(env, RVH));

    if (current_virt) {
        /* Current V=1 and we are about to change to V=0 */
        env->vsstatus = env->mstatus & mstatus_mask;
        env->mstatus &= ~mstatus_mask;
        env->mstatus |= env->mstatus_hs;

        env->vstvec = env->stvec;
        env->stvec = env->stvec_hs;

        env->vsscratch = env->sscratch;
        env->sscratch = env->sscratch_hs;

        env->vsepc = env->sepc;
        env->sepc = env->sepc_hs;

        env->vscause = env->scause;
        env->scause = env->scause_hs;

        env->vstval = env->sbadaddr;
        env->sbadaddr = env->stval_hs;

        env->vsatp = env->satp;
        env->satp = env->satp_hs;
    } else {
        /* Current V=0 and we are about to change to V=1 */
        env->mstatus_hs = env->mstatus & mstatus_mask;
        env->mstatus &= ~mstatus_mask;
        env->mstatus |= env->vsstatus;

        env->stvec_hs = env->stvec;
        env->stvec = env->vstvec;

        env->sscratch_hs = env->sscratch;
        env->sscratch = env->vsscratch;

        env->sepc_hs = env->sepc;
        env->sepc = env->vsepc;

        env->scause_hs = env->scause;
        env->scause = env->vscause;

        env->stval_hs = env->sbadaddr;
        env->sbadaddr = env->vstval;

        env->satp_hs = env->satp;
        env->satp = env->vsatp;
    }
}

bool riscv_cpu_virt_enabled(CPURISCVState *env)
{
    if (!riscv_has_ext(env, RVH)) {
        return false;
    }

    return get_field(env->virt, VIRT_ONOFF);
}

void riscv_cpu_set_virt_enabled(CPURISCVState *env, bool enable)
{
    if (!riscv_has_ext(env, RVH)) {
        return;
    }

    /* Flush the TLB on all virt mode changes. */
    if (get_field(env->virt, VIRT_ONOFF) != enable) {
        tlb_flush(env_cpu(env));
    }

    env->virt = set_field(env->virt, VIRT_ONOFF, enable);
}

bool riscv_cpu_force_hs_excep_enabled(CPURISCVState *env)
{
    if (!riscv_has_ext(env, RVH)) {
        return false;
    }

    return get_field(env->virt, FORCE_HS_EXCEP);
}

void riscv_cpu_set_force_hs_excep(CPURISCVState *env, bool enable)
{
    if (!riscv_has_ext(env, RVH)) {
        return;
    }

    env->virt = set_field(env->virt, FORCE_HS_EXCEP, enable);
}

bool riscv_cpu_two_stage_lookup(int mmu_idx)
{
    return mmu_idx & TB_FLAGS_PRIV_HYP_ACCESS_MASK;
}

int riscv_cpu_claim_interrupts(RISCVCPU *cpu, uint32_t interrupts)
{
    CPURISCVState *env = &cpu->env;
    if (env->miclaim & interrupts) {
        return -1;
    } else {
        env->miclaim |= interrupts;
        return 0;
    }
}

uint32_t riscv_cpu_update_mip(RISCVCPU *cpu, uint32_t mask, uint32_t value)
{
    CPURISCVState *env = &cpu->env;
    CPUState *cs = CPU(cpu);
    uint32_t old = env->mip;
    bool locked = false;

    if (!qemu_mutex_iothread_locked()) {
        locked = true;
        qemu_mutex_lock_iothread();
    }

    env->mip = (env->mip & ~mask) | (value & mask);

    if (env->mip) {
        cpu_interrupt(cs, CPU_INTERRUPT_HARD);
    } else {
        cpu_reset_interrupt(cs, CPU_INTERRUPT_HARD);
    }

    if (locked) {
        qemu_mutex_unlock_iothread();
    }

    return old;
}
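
/*
 * Typical callers of riscv_cpu_update_mip() are timer and interrupt
 * controller models. A minimal sketch (hypothetical device code, not
 * part of this file):
 *
 *     // raise or lower the machine timer interrupt for this hart
 *     riscv_cpu_update_mip(cpu, MIP_MTIP, level ? MIP_MTIP : 0);
 *
 * The conditional locking above makes the helper safe to call whether
 * or not the caller already holds the iothread lock.
 */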

void riscv_cpu_set_rdtime_fn(CPURISCVState *env, uint64_t (*fn)(uint32_t),
                             uint32_t arg)
{
    env->rdtime_fn = fn;
    env->rdtime_fn_arg = arg;
}

void riscv_cpu_set_mode(CPURISCVState *env, target_ulong newpriv)
{
    if (newpriv > PRV_M) {
        g_assert_not_reached();
    }
    if (newpriv == PRV_H) {
        newpriv = PRV_U;
    }
    /* tlb_flush is unnecessary as mode is contained in mmu_idx */
    env->priv = newpriv;

    /*
     * Clear the load reservation - otherwise a reservation placed in one
     * context/process can be used by another, resulting in an SC succeeding
     * incorrectly. Version 2.2 of the ISA specification explicitly requires
     * this behaviour, while later revisions say that the kernel "should" use
     * an SC instruction to force the yielding of a load reservation on a
     * preemptive context switch. As a result, do both.
     */
    env->load_res = -1;
}

/* get_physical_address - get the physical address for this virtual address
 *
 * Do a page table walk to obtain the physical address corresponding to a
 * virtual address. Returns 0 (TRANSLATE_SUCCESS) if the translation was
 * successful.
 *
 * Adapted from Spike's mmu_t::translate and mmu_t::walk
 *
 * @env: CPURISCVState
 * @physical: This will be set to the calculated physical address
 * @prot: The returned protection attributes
 * @addr: The virtual address to be translated
 * @fault_pte_addr: If not NULL, this will be set to the fault pte address
 *                  when an error occurs on pte address translation.
 *                  This will already be shifted to match htval.
 * @access_type: The type of MMU access
 * @mmu_idx: Indicates current privilege level
 * @first_stage: Are we in first stage translation?
 *               Second stage is used for hypervisor guest translation
 * @two_stage: Are we going to perform two stage translation
 */
static int get_physical_address(CPURISCVState *env, hwaddr *physical,
                                int *prot, target_ulong addr,
                                target_ulong *fault_pte_addr,
                                int access_type, int mmu_idx,
                                bool first_stage, bool two_stage)
{
    /* NOTE: the env->pc value visible here will not be
     * correct, but the value visible to the exception handler
     * (riscv_cpu_do_interrupt) is correct */
    MemTxResult res;
    MemTxAttrs attrs = MEMTXATTRS_UNSPECIFIED;
    int mode = mmu_idx & TB_FLAGS_PRIV_MMU_MASK;
    bool use_background = false;

    /*
     * Check if we should use the background registers for the two
     * stage translation. We don't need to check if we actually need
     * two stage translation as that happened before this function
     * was called. Background registers will be used if the guest has
     * forced a two stage translation to be on (in HS or M mode).
     */
    if (!riscv_cpu_virt_enabled(env) && riscv_cpu_two_stage_lookup(mmu_idx)) {
        use_background = true;
    }

    if (mode == PRV_M && access_type != MMU_INST_FETCH) {
        if (get_field(env->mstatus, MSTATUS_MPRV)) {
            mode = get_field(env->mstatus, MSTATUS_MPP);
        }
    }

    if (first_stage == false) {
        /* We are in stage 2 translation, this is similar to stage 1. */
        /* Stage 2 is always taken as U-mode */
        mode = PRV_U;
    }

    if (mode == PRV_M || !riscv_feature(env, RISCV_FEATURE_MMU)) {
        *physical = addr;
        *prot = PAGE_READ | PAGE_WRITE | PAGE_EXEC;
        return TRANSLATE_SUCCESS;
    }

    *prot = 0;

    hwaddr base;
    int levels, ptidxbits, ptesize, vm, sum, mxr, widened;

    if (first_stage == true) {
        mxr = get_field(env->mstatus, MSTATUS_MXR);
    } else {
        mxr = get_field(env->vsstatus, MSTATUS_MXR);
    }

    if (first_stage == true) {
        if (use_background) {
            base = (hwaddr)get_field(env->vsatp, SATP_PPN) << PGSHIFT;
            vm = get_field(env->vsatp, SATP_MODE);
        } else {
            base = (hwaddr)get_field(env->satp, SATP_PPN) << PGSHIFT;
            vm = get_field(env->satp, SATP_MODE);
        }
        widened = 0;
    } else {
        base = (hwaddr)get_field(env->hgatp, HGATP_PPN) << PGSHIFT;
        vm = get_field(env->hgatp, HGATP_MODE);
        widened = 2;
    }
    sum = get_field(env->mstatus, MSTATUS_SUM);
    switch (vm) {
    case VM_1_10_SV32:
        levels = 2; ptidxbits = 10; ptesize = 4; break;
    case VM_1_10_SV39:
        levels = 3; ptidxbits = 9; ptesize = 8; break;
    case VM_1_10_SV48:
        levels = 4; ptidxbits = 9; ptesize = 8; break;
    case VM_1_10_SV57:
        levels = 5; ptidxbits = 9; ptesize = 8; break;
    case VM_1_10_MBARE:
        *physical = addr;
        *prot = PAGE_READ | PAGE_WRITE | PAGE_EXEC;
        return TRANSLATE_SUCCESS;
    default:
        g_assert_not_reached();
    }

    CPUState *cs = env_cpu(env);
    int va_bits = PGSHIFT + levels * ptidxbits + widened;
    target_ulong mask, masked_msbs;
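
    /*
     * Worked example for the sign-extension check below (illustrative
     * numbers, not from the source): for Sv39, va_bits = 12 + 3 * 9 = 39,
     * so with TARGET_LONG_BITS = 64 the mask covers bits 63..38 and
     * masked_msbs must be all-zeros or all-ones, i.e. the virtual address
     * must be sign-extended from bit 38. Anything else cannot be mapped
     * and fails the translation immediately.
     */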
    if (TARGET_LONG_BITS > (va_bits - 1)) {
        mask = (1L << (TARGET_LONG_BITS - (va_bits - 1))) - 1;
    } else {
        mask = 0;
    }
    masked_msbs = (addr >> (va_bits - 1)) & mask;

    if (masked_msbs != 0 && masked_msbs != mask) {
        return TRANSLATE_FAIL;
    }

    int ptshift = (levels - 1) * ptidxbits;
    int i;

#if !TCG_OVERSIZED_GUEST
restart:
#endif
    for (i = 0; i < levels; i++, ptshift -= ptidxbits) {
        target_ulong idx;
        if (i == 0) {
            idx = (addr >> (PGSHIFT + ptshift)) &
                  ((1 << (ptidxbits + widened)) - 1);
        } else {
            idx = (addr >> (PGSHIFT + ptshift)) &
                  ((1 << ptidxbits) - 1);
        }

        /* check that physical address of PTE is legal */
        hwaddr pte_addr;

        if (two_stage && first_stage) {
            int vbase_prot;
            hwaddr vbase;

            /* Do the second stage translation on the base PTE address. */
            int vbase_ret = get_physical_address(env, &vbase, &vbase_prot,
                                                 base, NULL, MMU_DATA_LOAD,
                                                 mmu_idx, false, true);

            if (vbase_ret != TRANSLATE_SUCCESS) {
                if (fault_pte_addr) {
                    *fault_pte_addr = (base + idx * ptesize) >> 2;
                }
                return TRANSLATE_G_STAGE_FAIL;
            }

            pte_addr = vbase + idx * ptesize;
        } else {
            pte_addr = base + idx * ptesize;
        }

        if (riscv_feature(env, RISCV_FEATURE_PMP) &&
            !pmp_hart_has_privs(env, pte_addr, sizeof(target_ulong),
                                1 << MMU_DATA_LOAD, PRV_S)) {
            return TRANSLATE_PMP_FAIL;
        }

#if defined(TARGET_RISCV32)
        target_ulong pte = address_space_ldl(cs->as, pte_addr, attrs, &res);
#elif defined(TARGET_RISCV64)
        target_ulong pte = address_space_ldq(cs->as, pte_addr, attrs, &res);
#endif
        if (res != MEMTX_OK) {
            return TRANSLATE_FAIL;
        }
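
        /*
         * PTE layout reminder (a reader's aid; the constants live in
         * cpu_bits.h): bits 9..0 hold the flags (V, R, W, X, U, G, A, D
         * plus two software bits) and the PPN starts at bit 10, hence
         * PTE_PPN_SHIFT below. A valid PTE with R = W = X = 0 is a
         * pointer to the next table level; any other valid encoding is
         * a leaf.
         */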
        hwaddr ppn = pte >> PTE_PPN_SHIFT;

        if (!(pte & PTE_V)) {
            /* Invalid PTE */
            return TRANSLATE_FAIL;
        } else if (!(pte & (PTE_R | PTE_W | PTE_X))) {
            /* Inner PTE, continue walking */
            base = ppn << PGSHIFT;
        } else if ((pte & (PTE_R | PTE_W | PTE_X)) == PTE_W) {
            /* Reserved leaf PTE flags: PTE_W */
            return TRANSLATE_FAIL;
        } else if ((pte & (PTE_R | PTE_W | PTE_X)) == (PTE_W | PTE_X)) {
            /* Reserved leaf PTE flags: PTE_W + PTE_X */
            return TRANSLATE_FAIL;
        } else if ((pte & PTE_U) && ((mode != PRV_U) &&
                   (!sum || access_type == MMU_INST_FETCH))) {
            /* User PTE flags when not U mode and mstatus.SUM is not set,
               or the access type is an instruction fetch */
            return TRANSLATE_FAIL;
        } else if (!(pte & PTE_U) && (mode != PRV_S)) {
            /* Supervisor PTE flags when not S mode */
            return TRANSLATE_FAIL;
        } else if (ppn & ((1ULL << ptshift) - 1)) {
            /* Misaligned PPN */
            return TRANSLATE_FAIL;
        } else if (access_type == MMU_DATA_LOAD && !((pte & PTE_R) ||
                   ((pte & PTE_X) && mxr))) {
            /* Read access check failed */
            return TRANSLATE_FAIL;
        } else if (access_type == MMU_DATA_STORE && !(pte & PTE_W)) {
            /* Write access check failed */
            return TRANSLATE_FAIL;
        } else if (access_type == MMU_INST_FETCH && !(pte & PTE_X)) {
            /* Fetch access check failed */
            return TRANSLATE_FAIL;
        } else {
            /* if necessary, set accessed and dirty bits. */
            target_ulong updated_pte = pte | PTE_A |
                (access_type == MMU_DATA_STORE ? PTE_D : 0);

            /* Page table updates need to be atomic with MTTCG enabled */
            if (updated_pte != pte) {
                /*
                 * - if accessed or dirty bits need updating, and the PTE is
                 *   in RAM, then we do so atomically with a compare and swap.
                 * - if the PTE is in IO space or ROM, then it can't be updated
                 *   and we return TRANSLATE_FAIL.
                 * - if the PTE changed by the time we went to update it, then
                 *   it is no longer valid and we must re-walk the page table.
                 */
                MemoryRegion *mr;
                hwaddr l = sizeof(target_ulong), addr1;
                mr = address_space_translate(cs->as, pte_addr,
                    &addr1, &l, false, MEMTXATTRS_UNSPECIFIED);
                if (memory_region_is_ram(mr)) {
                    target_ulong *pte_pa =
                        qemu_map_ram_ptr(mr->ram_block, addr1);
#if TCG_OVERSIZED_GUEST
                    /* MTTCG is not enabled on oversized TCG guests so
                     * page table updates do not need to be atomic */
                    *pte_pa = pte = updated_pte;
#else
                    target_ulong old_pte =
                        qatomic_cmpxchg(pte_pa, pte, updated_pte);
                    if (old_pte != pte) {
                        goto restart;
                    } else {
                        pte = updated_pte;
                    }
#endif
                } else {
                    /* misconfigured PTE in ROM (AD bits are not preset) or
                     * PTE is in IO space and can't be updated atomically */
                    return TRANSLATE_FAIL;
                }
            }

            /* for superpage mappings, make a fake leaf PTE for the TLB's
               benefit. */
            target_ulong vpn = addr >> PGSHIFT;
            *physical = ((ppn | (vpn & ((1L << ptshift) - 1))) << PGSHIFT) |
                        (addr & ~TARGET_PAGE_MASK);

            /* set permissions on the TLB entry */
            if ((pte & PTE_R) || ((pte & PTE_X) && mxr)) {
                *prot |= PAGE_READ;
            }
            if ((pte & PTE_X)) {
                *prot |= PAGE_EXEC;
            }
            /* add write permission on stores or if the page is already dirty,
               so that we TLB miss on later writes to update the dirty bit */
            if ((pte & PTE_W) &&
                (access_type == MMU_DATA_STORE || (pte & PTE_D))) {
                *prot |= PAGE_WRITE;
            }
            return TRANSLATE_SUCCESS;
        }
    }
    return TRANSLATE_FAIL;
}
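
/*
 * Worked superpage example for the "fake leaf" above (illustrative
 * numbers, not from the source): an Sv39 walk that finds a leaf at
 * level i = 1 has ptshift = 9, i.e. a 2 MiB mapping. The low 9 bits of
 * the leaf PPN must be zero (otherwise "Misaligned PPN" fails the walk)
 * and are filled in from the virtual page number, so the TLB caches a
 * plain 4 KiB entry inside the superpage.
 */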

static void raise_mmu_exception(CPURISCVState *env, target_ulong address,
                                MMUAccessType access_type, bool pmp_violation,
                                bool first_stage, bool two_stage)
{
    CPUState *cs = env_cpu(env);
    int page_fault_exceptions;
    if (first_stage) {
        page_fault_exceptions =
            get_field(env->satp, SATP_MODE) != VM_1_10_MBARE &&
            !pmp_violation;
    } else {
        page_fault_exceptions =
            get_field(env->hgatp, HGATP_MODE) != VM_1_10_MBARE &&
            !pmp_violation;
    }
    switch (access_type) {
    case MMU_INST_FETCH:
        if (riscv_cpu_virt_enabled(env) && !first_stage) {
            cs->exception_index = RISCV_EXCP_INST_GUEST_PAGE_FAULT;
        } else {
            cs->exception_index = page_fault_exceptions ?
                RISCV_EXCP_INST_PAGE_FAULT : RISCV_EXCP_INST_ACCESS_FAULT;
        }
        break;
    case MMU_DATA_LOAD:
        if (two_stage && !first_stage) {
            cs->exception_index = RISCV_EXCP_LOAD_GUEST_ACCESS_FAULT;
        } else {
            cs->exception_index = page_fault_exceptions ?
                RISCV_EXCP_LOAD_PAGE_FAULT : RISCV_EXCP_LOAD_ACCESS_FAULT;
        }
        break;
    case MMU_DATA_STORE:
        if (two_stage && !first_stage) {
            cs->exception_index = RISCV_EXCP_STORE_GUEST_AMO_ACCESS_FAULT;
        } else {
            cs->exception_index = page_fault_exceptions ?
                RISCV_EXCP_STORE_PAGE_FAULT : RISCV_EXCP_STORE_AMO_ACCESS_FAULT;
        }
        break;
    default:
        g_assert_not_reached();
    }
    env->badaddr = address;
}
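
/*
 * Debug translation hook, used e.g. by the gdbstub to resolve a guest
 * virtual address outside normal instruction execution. Returns -1 if
 * the address does not currently translate.
 */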
hwaddr riscv_cpu_get_phys_page_debug(CPUState *cs, vaddr addr)
{
    RISCVCPU *cpu = RISCV_CPU(cs);
    CPURISCVState *env = &cpu->env;
    hwaddr phys_addr;
    int prot;
    int mmu_idx = cpu_mmu_index(&cpu->env, false);

    if (get_physical_address(env, &phys_addr, &prot, addr, NULL, 0, mmu_idx,
                             true, riscv_cpu_virt_enabled(env))) {
        return -1;
    }

    if (riscv_cpu_virt_enabled(env)) {
        if (get_physical_address(env, &phys_addr, &prot, phys_addr, NULL,
                                 0, mmu_idx, false, true)) {
            return -1;
        }
    }

    return phys_addr & TARGET_PAGE_MASK;
}

void riscv_cpu_do_transaction_failed(CPUState *cs, hwaddr physaddr,
                                     vaddr addr, unsigned size,
                                     MMUAccessType access_type,
                                     int mmu_idx, MemTxAttrs attrs,
                                     MemTxResult response, uintptr_t retaddr)
{
    RISCVCPU *cpu = RISCV_CPU(cs);
    CPURISCVState *env = &cpu->env;

    if (access_type == MMU_DATA_STORE) {
        cs->exception_index = RISCV_EXCP_STORE_AMO_ACCESS_FAULT;
    } else {
        cs->exception_index = RISCV_EXCP_LOAD_ACCESS_FAULT;
    }

    env->badaddr = addr;
    riscv_raise_exception(&cpu->env, cs->exception_index, retaddr);
}

void riscv_cpu_do_unaligned_access(CPUState *cs, vaddr addr,
                                   MMUAccessType access_type, int mmu_idx,
                                   uintptr_t retaddr)
{
    RISCVCPU *cpu = RISCV_CPU(cs);
    CPURISCVState *env = &cpu->env;
    switch (access_type) {
    case MMU_INST_FETCH:
        cs->exception_index = RISCV_EXCP_INST_ADDR_MIS;
        break;
    case MMU_DATA_LOAD:
        cs->exception_index = RISCV_EXCP_LOAD_ADDR_MIS;
        break;
    case MMU_DATA_STORE:
        cs->exception_index = RISCV_EXCP_STORE_AMO_ADDR_MIS;
        break;
    default:
        g_assert_not_reached();
    }
    env->badaddr = addr;
    riscv_raise_exception(env, cs->exception_index, retaddr);
}
#endif
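
/*
 * TCG TLB-fill hook. The contract with the generic accel/tcg code is:
 * return true after installing a TLB entry on success; when @probe is
 * set, return false instead of trapping if no mapping exists; otherwise
 * raise the MMU exception, which restores state via @retaddr and never
 * returns to the caller.
 */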
bool riscv_cpu_tlb_fill(CPUState *cs, vaddr address, int size,
                        MMUAccessType access_type, int mmu_idx,
                        bool probe, uintptr_t retaddr)
{
    RISCVCPU *cpu = RISCV_CPU(cs);
    CPURISCVState *env = &cpu->env;
#ifndef CONFIG_USER_ONLY
    vaddr im_address;
    hwaddr pa = 0;
    int prot, prot2;
    bool pmp_violation = false;
    bool first_stage_error = true;
    bool two_stage_lookup = false;
    int ret = TRANSLATE_FAIL;
    int mode = mmu_idx;
    target_ulong tlb_size = 0;

    env->guest_phys_fault_addr = 0;

    qemu_log_mask(CPU_LOG_MMU, "%s addr %" VADDR_PRIx " rw %d mmu_idx %d\n",
                  __func__, address, access_type, mmu_idx);

    if (mode == PRV_M && access_type != MMU_INST_FETCH) {
        if (get_field(env->mstatus, MSTATUS_MPRV)) {
            mode = get_field(env->mstatus, MSTATUS_MPP);
        }
    }

    if (riscv_has_ext(env, RVH) && env->priv == PRV_M &&
        access_type != MMU_INST_FETCH &&
        get_field(env->mstatus, MSTATUS_MPRV) &&
        get_field(env->mstatus, MSTATUS_MPV)) {
        two_stage_lookup = true;
    }

    if (riscv_cpu_virt_enabled(env) ||
        ((riscv_cpu_two_stage_lookup(mmu_idx) || two_stage_lookup) &&
         access_type != MMU_INST_FETCH)) {
        /* Two stage lookup */
        ret = get_physical_address(env, &pa, &prot, address,
                                   &env->guest_phys_fault_addr, access_type,
                                   mmu_idx, true, true);

        /*
         * A G-stage exception may be triggered during two stage lookup.
         * And the env->guest_phys_fault_addr has already been set in
         * get_physical_address().
         */
        if (ret == TRANSLATE_G_STAGE_FAIL) {
            first_stage_error = false;
            access_type = MMU_DATA_LOAD;
        }

        qemu_log_mask(CPU_LOG_MMU,
                      "%s 1st-stage address=%" VADDR_PRIx " ret %d physical "
                      TARGET_FMT_plx " prot %d\n",
                      __func__, address, ret, pa, prot);

        if (ret == TRANSLATE_SUCCESS) {
            /* Second stage lookup */
            im_address = pa;

            ret = get_physical_address(env, &pa, &prot2, im_address, NULL,
                                       access_type, mmu_idx, false, true);

            qemu_log_mask(CPU_LOG_MMU,
                          "%s 2nd-stage address=%" VADDR_PRIx " ret %d physical "
                          TARGET_FMT_plx " prot %d\n",
                          __func__, im_address, ret, pa, prot2);

            prot &= prot2;

            if (riscv_feature(env, RISCV_FEATURE_PMP) &&
                (ret == TRANSLATE_SUCCESS) &&
                !pmp_hart_has_privs(env, pa, size, 1 << access_type, mode)) {
                ret = TRANSLATE_PMP_FAIL;
            }

            if (ret != TRANSLATE_SUCCESS) {
                /*
                 * Guest physical address translation failed, this is a HS
                 * level exception
                 */
                first_stage_error = false;
                env->guest_phys_fault_addr = (im_address |
                                              (address &
                                               (TARGET_PAGE_SIZE - 1))) >> 2;
            }
        }
    } else {
        /* Single stage lookup */
        ret = get_physical_address(env, &pa, &prot, address, NULL,
                                   access_type, mmu_idx, true, false);

        qemu_log_mask(CPU_LOG_MMU,
                      "%s address=%" VADDR_PRIx " ret %d physical "
                      TARGET_FMT_plx " prot %d\n",
                      __func__, address, ret, pa, prot);
    }

    if (riscv_feature(env, RISCV_FEATURE_PMP) &&
        (ret == TRANSLATE_SUCCESS) &&
        !pmp_hart_has_privs(env, pa, size, 1 << access_type, mode)) {
        ret = TRANSLATE_PMP_FAIL;
    }
    if (ret == TRANSLATE_PMP_FAIL) {
        pmp_violation = true;
    }

    if (ret == TRANSLATE_SUCCESS) {
        if (pmp_is_range_in_tlb(env, pa & TARGET_PAGE_MASK, &tlb_size)) {
            tlb_set_page(cs, address & ~(tlb_size - 1), pa & ~(tlb_size - 1),
                         prot, mmu_idx, tlb_size);
        } else {
            tlb_set_page(cs, address & TARGET_PAGE_MASK, pa & TARGET_PAGE_MASK,
                         prot, mmu_idx, TARGET_PAGE_SIZE);
        }
        return true;
    } else if (probe) {
        return false;
    } else {
        raise_mmu_exception(env, address, access_type, pmp_violation,
                            first_stage_error,
                            riscv_cpu_virt_enabled(env) ||
                                riscv_cpu_two_stage_lookup(mmu_idx));
        riscv_raise_exception(env, cs->exception_index, retaddr);
    }

    return true;

#else
    switch (access_type) {
    case MMU_INST_FETCH:
        cs->exception_index = RISCV_EXCP_INST_PAGE_FAULT;
        break;
    case MMU_DATA_LOAD:
        cs->exception_index = RISCV_EXCP_LOAD_PAGE_FAULT;
        break;
    case MMU_DATA_STORE:
        cs->exception_index = RISCV_EXCP_STORE_PAGE_FAULT;
        break;
    default:
        g_assert_not_reached();
    }
    env->badaddr = address;
    cpu_loop_exit_restore(cs, retaddr);
#endif
}

/*
 * Handle Traps
 *
 * Adapted from Spike's processor_t::take_trap.
 *
 */
void riscv_cpu_do_interrupt(CPUState *cs)
{
#if !defined(CONFIG_USER_ONLY)

    RISCVCPU *cpu = RISCV_CPU(cs);
    CPURISCVState *env = &cpu->env;
    bool force_hs_excep = riscv_cpu_force_hs_excep_enabled(env);
    uint64_t s;

    /* cs->exception_index is 32 bits wide unlike mcause which is XLEN-bits
     * wide, so we mask off the MSB and separate into trap type and cause.
     */
    bool async = !!(cs->exception_index & RISCV_EXCP_INT_FLAG);
    target_ulong cause = cs->exception_index & RISCV_EXCP_INT_MASK;
    target_ulong deleg = async ? env->mideleg : env->medeleg;
    bool write_tval = false;
    target_ulong tval = 0;
    target_ulong htval = 0;
    target_ulong mtval2 = 0;

    if (!async) {
        /* set tval to badaddr for traps with address information */
        switch (cause) {
        case RISCV_EXCP_INST_GUEST_PAGE_FAULT:
        case RISCV_EXCP_LOAD_GUEST_ACCESS_FAULT:
        case RISCV_EXCP_STORE_GUEST_AMO_ACCESS_FAULT:
            force_hs_excep = true;
            /* fallthrough */
        case RISCV_EXCP_INST_ADDR_MIS:
        case RISCV_EXCP_INST_ACCESS_FAULT:
        case RISCV_EXCP_LOAD_ADDR_MIS:
        case RISCV_EXCP_STORE_AMO_ADDR_MIS:
        case RISCV_EXCP_LOAD_ACCESS_FAULT:
        case RISCV_EXCP_STORE_AMO_ACCESS_FAULT:
        case RISCV_EXCP_INST_PAGE_FAULT:
        case RISCV_EXCP_LOAD_PAGE_FAULT:
        case RISCV_EXCP_STORE_PAGE_FAULT:
            write_tval = true;
            tval = env->badaddr;
            break;
        default:
            break;
        }
        /* ecall is dispatched as one cause so translate based on mode */
        if (cause == RISCV_EXCP_U_ECALL) {
            assert(env->priv <= 3);

            if (env->priv == PRV_M) {
                cause = RISCV_EXCP_M_ECALL;
            } else if (env->priv == PRV_S && riscv_cpu_virt_enabled(env)) {
                cause = RISCV_EXCP_VS_ECALL;
            } else if (env->priv == PRV_S && !riscv_cpu_virt_enabled(env)) {
                cause = RISCV_EXCP_S_ECALL;
            } else if (env->priv == PRV_U) {
                cause = RISCV_EXCP_U_ECALL;
            }
        }
    }

    trace_riscv_trap(env->mhartid, async, cause, env->pc, tval,
                     riscv_cpu_get_trap_name(cause, async));

    qemu_log_mask(CPU_LOG_INT,
                  "%s: hart:"TARGET_FMT_ld", async:%d, cause:"TARGET_FMT_lx", "
                  "epc:0x"TARGET_FMT_lx", tval:0x"TARGET_FMT_lx", desc=%s\n",
                  __func__, env->mhartid, async, cause, env->pc, tval,
                  riscv_cpu_get_trap_name(cause, async));
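
    /*
     * Routing summary (restating the checks below, not adding to them):
     * a trap taken from U- or S-mode whose bit is set in medeleg (for
     * exceptions) or mideleg (for interrupts) is handled in S-mode, and
     * may be delegated once more to VS-mode via hedeleg/hideleg when
     * V=1; everything else, including all traps taken from M-mode, is
     * handled in M-mode.
     */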
    if (env->priv <= PRV_S &&
            cause < TARGET_LONG_BITS && ((deleg >> cause) & 1)) {
        /* handle the trap in S-mode */
        if (riscv_has_ext(env, RVH)) {
            target_ulong hdeleg = async ? env->hideleg : env->hedeleg;
            bool two_stage_lookup = false;

            if (env->priv == PRV_M ||
                (env->priv == PRV_S && !riscv_cpu_virt_enabled(env)) ||
                (env->priv == PRV_U && !riscv_cpu_virt_enabled(env) &&
                    get_field(env->hstatus, HSTATUS_HU))) {
                two_stage_lookup = true;
            }

            if ((riscv_cpu_virt_enabled(env) || two_stage_lookup) &&
                write_tval) {
                /*
                 * If we are writing a guest virtual address to stval, set
                 * this to 1. If we are trapping to VS we will set this to 0
                 * later.
                 */
                env->hstatus = set_field(env->hstatus, HSTATUS_GVA, 1);
            } else {
                /* For other HS-mode traps, we set this to 0. */
                env->hstatus = set_field(env->hstatus, HSTATUS_GVA, 0);
            }

            if (riscv_cpu_virt_enabled(env) && ((hdeleg >> cause) & 1) &&
                !force_hs_excep) {
                /* Trap to VS mode */
                /*
                 * See if we need to adjust cause: yes if it is a VS-mode
                 * interrupt, no if the hypervisor has delegated one of the
                 * HS-mode interrupts.
                 */
                if (cause == IRQ_VS_TIMER || cause == IRQ_VS_SOFT ||
                    cause == IRQ_VS_EXT) {
                    cause = cause - 1;
                }
                env->hstatus = set_field(env->hstatus, HSTATUS_GVA, 0);
            } else if (riscv_cpu_virt_enabled(env)) {
                /* Trap into HS mode, from virt */
                riscv_cpu_swap_hypervisor_regs(env);
                env->hstatus = set_field(env->hstatus, HSTATUS_SPVP,
                                         env->priv);
                env->hstatus = set_field(env->hstatus, HSTATUS_SPV,
                                         riscv_cpu_virt_enabled(env));

                htval = env->guest_phys_fault_addr;

                riscv_cpu_set_virt_enabled(env, 0);
                riscv_cpu_set_force_hs_excep(env, 0);
            } else {
                /* Trap into HS mode */
                if (!two_stage_lookup) {
                    env->hstatus = set_field(env->hstatus, HSTATUS_SPV,
                                             riscv_cpu_virt_enabled(env));
                }
                htval = env->guest_phys_fault_addr;
            }
        }
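
        /*
         * Architectural S-mode trap entry: save SIE into SPIE, record the
         * previous privilege in SPP, clear SIE, then write scause/sepc/
         * stval and vector through stvec. Vectored example (illustrative):
         * with stvec = 0x80001001 (MODE = 1), an asynchronous cause 5
         * (supervisor timer) enters at 0x80001000 + 4 * 5; synchronous
         * traps always enter at the base address.
         */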
        s = env->mstatus;
        s = set_field(s, MSTATUS_SPIE, get_field(s, MSTATUS_SIE));
        s = set_field(s, MSTATUS_SPP, env->priv);
        s = set_field(s, MSTATUS_SIE, 0);
        env->mstatus = s;
        env->scause = cause | ((target_ulong)async << (TARGET_LONG_BITS - 1));
        env->sepc = env->pc;
        env->sbadaddr = tval;
        env->htval = htval;
        env->pc = (env->stvec >> 2 << 2) +
            ((async && (env->stvec & 3) == 1) ? cause * 4 : 0);
        riscv_cpu_set_mode(env, PRV_S);
    } else {
        /* handle the trap in M-mode */
        if (riscv_has_ext(env, RVH)) {
            if (riscv_cpu_virt_enabled(env)) {
                riscv_cpu_swap_hypervisor_regs(env);
            }
            env->mstatus = set_field(env->mstatus, MSTATUS_MPV,
                                     riscv_cpu_virt_enabled(env));
            if (riscv_cpu_virt_enabled(env) && tval) {
                env->mstatus = set_field(env->mstatus, MSTATUS_GVA, 1);
            }

            mtval2 = env->guest_phys_fault_addr;

            /* Trapping to M mode, virt is disabled */
            riscv_cpu_set_virt_enabled(env, 0);
            riscv_cpu_set_force_hs_excep(env, 0);
        }

        s = env->mstatus;
        s = set_field(s, MSTATUS_MPIE, get_field(s, MSTATUS_MIE));
        s = set_field(s, MSTATUS_MPP, env->priv);
        s = set_field(s, MSTATUS_MIE, 0);
        env->mstatus = s;
        /* ~(...>> async) is 0 when async == 0 and the MSB when async == 1,
         * so this sets the interrupt bit of mcause only for interrupts */
        env->mcause = cause | ~(((target_ulong)-1) >> async);
        env->mepc = env->pc;
        env->mbadaddr = tval;
        env->mtval2 = mtval2;
        env->pc = (env->mtvec >> 2 << 2) +
            ((async && (env->mtvec & 3) == 1) ? cause * 4 : 0);
        riscv_cpu_set_mode(env, PRV_M);
    }

    /* NOTE: it is not necessary to yield load reservations here. It is only
     * necessary for an SC from "another hart" to cause a load reservation
     * to be yielded. Refer to the memory consistency model section of the
     * RISC-V ISA Specification.
     */

#endif
    cs->exception_index = EXCP_NONE; /* mark handled to qemu */
}