/*
 * RISC-V CPU helpers for qemu.
 *
 * Copyright (c) 2016-2017 Sagar Karandikar, sagark@eecs.berkeley.edu
 * Copyright (c) 2017-2018 SiFive, Inc.
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms and conditions of the GNU General Public License,
 * version 2 or later, as published by the Free Software Foundation.
 *
 * This program is distributed in the hope it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
 * more details.
 *
 * You should have received a copy of the GNU General Public License along with
 * this program.  If not, see <http://www.gnu.org/licenses/>.
 */

#include "qemu/osdep.h"
#include "qemu/log.h"
#include "cpu.h"
#include "exec/exec-all.h"
#include "tcg-op.h"

#define RISCV_DEBUG_INTERRUPT 0

int riscv_cpu_mmu_index(CPURISCVState *env, bool ifetch)
{
#ifdef CONFIG_USER_ONLY
    return 0;
#else
    return env->priv;
#endif
}

#ifndef CONFIG_USER_ONLY
static int riscv_cpu_local_irq_pending(CPURISCVState *env)
{
    target_ulong mstatus_mie = get_field(env->mstatus, MSTATUS_MIE);
    target_ulong mstatus_sie = get_field(env->mstatus, MSTATUS_SIE);
    target_ulong pending = atomic_read(&env->mip) & env->mie;
    target_ulong mie = env->priv < PRV_M || (env->priv == PRV_M && mstatus_mie);
    target_ulong sie = env->priv < PRV_S || (env->priv == PRV_S && mstatus_sie);
    target_ulong irqs = (pending & ~env->mideleg & -mie) |
                        (pending & env->mideleg & -sie);

    if (irqs) {
        return ctz64(irqs); /* since non-zero */
    } else {
        return EXCP_NONE; /* indicates no pending interrupt */
    }
}
#endif

bool riscv_cpu_exec_interrupt(CPUState *cs, int interrupt_request)
{
#if !defined(CONFIG_USER_ONLY)
    if (interrupt_request & CPU_INTERRUPT_HARD) {
        RISCVCPU *cpu = RISCV_CPU(cs);
        CPURISCVState *env = &cpu->env;
        int interruptno = riscv_cpu_local_irq_pending(env);
        if (interruptno >= 0) {
            cs->exception_index = RISCV_EXCP_INT_FLAG | interruptno;
            riscv_cpu_do_interrupt(cs);
            return true;
        }
    }
#endif
    return false;
}

#if !defined(CONFIG_USER_ONLY)

/* iothread_mutex must be held */
uint32_t riscv_cpu_update_mip(RISCVCPU *cpu, uint32_t mask, uint32_t value)
{
    CPURISCVState *env = &cpu->env;
    uint32_t old, new, cmp = atomic_read(&env->mip);

    do {
        old = cmp;
        new = (old & ~mask) | (value & mask);
        cmp = atomic_cmpxchg(&env->mip, old, new);
    } while (old != cmp);

    if (new && !old) {
        cpu_interrupt(CPU(cpu), CPU_INTERRUPT_HARD);
    } else if (!new && old) {
        cpu_reset_interrupt(CPU(cpu), CPU_INTERRUPT_HARD);
    }

    return old;
}
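/*
 * Usage sketch (illustrative, not part of the original file): an interrupt
 * source such as a timer or PLIC model is expected to assert or clear a
 * pending bit through this helper while holding the iothread mutex, e.g.
 *
 *     riscv_cpu_update_mip(cpu, MIP_MTIP, level ? MIP_MTIP : 0);
 *
 * where MIP_MTIP is the machine timer pending bit from cpu_bits.h and
 * "level" is a hypothetical line state owned by the caller.
 */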
void riscv_set_mode(CPURISCVState *env, target_ulong newpriv)
{
    if (newpriv > PRV_M) {
        g_assert_not_reached();
    }
    if (newpriv == PRV_H) {
        newpriv = PRV_U;
    }
    /* tlb_flush is unnecessary as mode is contained in mmu_idx */
    env->priv = newpriv;
}

/* get_physical_address - get the physical address for this virtual address
 *
 * Do a page table walk to obtain the physical address corresponding to a
 * virtual address. Returns 0 if the translation was successful
 *
 * Adapted from Spike's mmu_t::translate and mmu_t::walk
 *
 */
static int get_physical_address(CPURISCVState *env, hwaddr *physical,
                                int *prot, target_ulong addr,
                                int access_type, int mmu_idx)
{
    /* NOTE: the env->pc value visible here will not be
     * correct, but the value visible to the exception handler
     * (riscv_cpu_do_interrupt) is correct */

    int mode = mmu_idx;

    if (mode == PRV_M && access_type != MMU_INST_FETCH) {
        if (get_field(env->mstatus, MSTATUS_MPRV)) {
            mode = get_field(env->mstatus, MSTATUS_MPP);
        }
    }

    if (mode == PRV_M || !riscv_feature(env, RISCV_FEATURE_MMU)) {
        *physical = addr;
        *prot = PAGE_READ | PAGE_WRITE | PAGE_EXEC;
        return TRANSLATE_SUCCESS;
    }

    *prot = 0;

    target_ulong base;
    int levels, ptidxbits, ptesize, vm, sum;
    int mxr = get_field(env->mstatus, MSTATUS_MXR);

    if (env->priv_ver >= PRIV_VERSION_1_10_0) {
        base = get_field(env->satp, SATP_PPN) << PGSHIFT;
        sum = get_field(env->mstatus, MSTATUS_SUM);
        vm = get_field(env->satp, SATP_MODE);
        switch (vm) {
        case VM_1_10_SV32:
            levels = 2; ptidxbits = 10; ptesize = 4; break;
        case VM_1_10_SV39:
            levels = 3; ptidxbits = 9; ptesize = 8; break;
        case VM_1_10_SV48:
            levels = 4; ptidxbits = 9; ptesize = 8; break;
        case VM_1_10_SV57:
            levels = 5; ptidxbits = 9; ptesize = 8; break;
        case VM_1_10_MBARE:
            *physical = addr;
            *prot = PAGE_READ | PAGE_WRITE | PAGE_EXEC;
            return TRANSLATE_SUCCESS;
        default:
            g_assert_not_reached();
        }
    } else {
        base = env->sptbr << PGSHIFT;
        sum = !get_field(env->mstatus, MSTATUS_PUM);
        vm = get_field(env->mstatus, MSTATUS_VM);
        switch (vm) {
        case VM_1_09_SV32:
            levels = 2; ptidxbits = 10; ptesize = 4; break;
        case VM_1_09_SV39:
            levels = 3; ptidxbits = 9; ptesize = 8; break;
        case VM_1_09_SV48:
            levels = 4; ptidxbits = 9; ptesize = 8; break;
        case VM_1_09_MBARE:
            *physical = addr;
            *prot = PAGE_READ | PAGE_WRITE | PAGE_EXEC;
            return TRANSLATE_SUCCESS;
        default:
            g_assert_not_reached();
        }
    }

    CPUState *cs = CPU(riscv_env_get_cpu(env));
    int va_bits = PGSHIFT + levels * ptidxbits;
    target_ulong mask = (1L << (TARGET_LONG_BITS - (va_bits - 1))) - 1;
    target_ulong masked_msbs = (addr >> (va_bits - 1)) & mask;
    if (masked_msbs != 0 && masked_msbs != mask) {
        return TRANSLATE_FAIL;
    }
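    /*
     * Worked example (illustrative): for Sv39, va_bits = 12 + 3 * 9 = 39, so
     * a virtual address splits into VPN[2] (bits 38..30), VPN[1] (bits
     * 29..21), VPN[0] (bits 20..12) and a 12-bit page offset.  Each iteration
     * of the walk below extracts the next VPN field as "idx"; ptshift shrinks
     * by ptidxbits per level, so a leaf found at an outer level describes a
     * 1 GiB or 2 MiB superpage.
     */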
    int ptshift = (levels - 1) * ptidxbits;
    int i;

#if !TCG_OVERSIZED_GUEST
restart:
#endif
    for (i = 0; i < levels; i++, ptshift -= ptidxbits) {
        target_ulong idx = (addr >> (PGSHIFT + ptshift)) &
                           ((1 << ptidxbits) - 1);

        /* check that physical address of PTE is legal */
        target_ulong pte_addr = base + idx * ptesize;
#if defined(TARGET_RISCV32)
        target_ulong pte = ldl_phys(cs->as, pte_addr);
#elif defined(TARGET_RISCV64)
        target_ulong pte = ldq_phys(cs->as, pte_addr);
#endif
        target_ulong ppn = pte >> PTE_PPN_SHIFT;

        if (!(pte & PTE_V)) {
            /* Invalid PTE */
            return TRANSLATE_FAIL;
        } else if (!(pte & (PTE_R | PTE_W | PTE_X))) {
            /* Inner PTE, continue walking */
            base = ppn << PGSHIFT;
        } else if ((pte & (PTE_R | PTE_W | PTE_X)) == PTE_W) {
            /* Reserved leaf PTE flags: PTE_W */
            return TRANSLATE_FAIL;
        } else if ((pte & (PTE_R | PTE_W | PTE_X)) == (PTE_W | PTE_X)) {
            /* Reserved leaf PTE flags: PTE_W + PTE_X */
            return TRANSLATE_FAIL;
        } else if ((pte & PTE_U) && ((mode != PRV_U) &&
                   (!sum || access_type == MMU_INST_FETCH))) {
            /* User PTE flags when not U mode and mstatus.SUM is not set,
               or the access type is an instruction fetch */
            return TRANSLATE_FAIL;
        } else if (!(pte & PTE_U) && (mode != PRV_S)) {
            /* Supervisor PTE flags when not S mode */
            return TRANSLATE_FAIL;
        } else if (ppn & ((1ULL << ptshift) - 1)) {
            /* Misaligned PPN */
            return TRANSLATE_FAIL;
        } else if (access_type == MMU_DATA_LOAD && !((pte & PTE_R) ||
                   ((pte & PTE_X) && mxr))) {
            /* Read access check failed */
            return TRANSLATE_FAIL;
        } else if (access_type == MMU_DATA_STORE && !(pte & PTE_W)) {
            /* Write access check failed */
            return TRANSLATE_FAIL;
        } else if (access_type == MMU_INST_FETCH && !(pte & PTE_X)) {
            /* Fetch access check failed */
            return TRANSLATE_FAIL;
        } else {
            /* if necessary, set accessed and dirty bits. */
            target_ulong updated_pte = pte | PTE_A |
                (access_type == MMU_DATA_STORE ? PTE_D : 0);

            /* Page table updates need to be atomic with MTTCG enabled */
            if (updated_pte != pte) {
                /*
                 * - if accessed or dirty bits need updating, and the PTE is
                 *   in RAM, then we do so atomically with a compare and swap.
                 * - if the PTE is in IO space or ROM, then it can't be updated
                 *   and we return TRANSLATE_FAIL.
                 * - if the PTE changed by the time we went to update it, then
                 *   it is no longer valid and we must re-walk the page table.
                 */
                MemoryRegion *mr;
                hwaddr l = sizeof(target_ulong), addr1;
                mr = address_space_translate(cs->as, pte_addr,
                    &addr1, &l, false, MEMTXATTRS_UNSPECIFIED);
                if (memory_region_is_ram(mr)) {
                    target_ulong *pte_pa =
                        qemu_map_ram_ptr(mr->ram_block, addr1);
#if TCG_OVERSIZED_GUEST
                    /* MTTCG is not enabled on oversized TCG guests so
                     * page table updates do not need to be atomic */
                    *pte_pa = pte = updated_pte;
#else
                    target_ulong old_pte =
                        atomic_cmpxchg(pte_pa, pte, updated_pte);
                    if (old_pte != pte) {
                        goto restart;
                    } else {
                        pte = updated_pte;
                    }
#endif
                } else {
                    /* misconfigured PTE in ROM (AD bits are not preset) or
                     * PTE is in IO space and can't be updated atomically */
                    return TRANSLATE_FAIL;
                }
            }

            /* for superpage mappings, make a fake leaf PTE for the TLB's
               benefit. */
            target_ulong vpn = addr >> PGSHIFT;
            *physical = (ppn | (vpn & ((1L << ptshift) - 1))) << PGSHIFT;

            /* set permissions on the TLB entry */
            if ((pte & PTE_R) || ((pte & PTE_X) && mxr)) {
                *prot |= PAGE_READ;
            }
            if ((pte & PTE_X)) {
                *prot |= PAGE_EXEC;
            }
            /* add write permission on stores or if the page is already dirty,
               so that we TLB miss on later writes to update the dirty bit */
            if ((pte & PTE_W) &&
                    (access_type == MMU_DATA_STORE || (pte & PTE_D))) {
                *prot |= PAGE_WRITE;
            }
            return TRANSLATE_SUCCESS;
        }
    }
    return TRANSLATE_FAIL;
}
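/*
 * Superpage example (illustrative): for an Sv39 2 MiB megapage the leaf is
 * found with ptshift == 9, so the misaligned-PPN check above requires the
 * low 9 bits of ppn to be zero, and the fake leaf keeps VPN[0] from the
 * virtual address:
 *
 *     *physical = (ppn | (vpn & 0x1ff)) << PGSHIFT;
 */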
static void raise_mmu_exception(CPURISCVState *env, target_ulong address,
                                MMUAccessType access_type)
{
    CPUState *cs = CPU(riscv_env_get_cpu(env));
    int page_fault_exceptions =
        (env->priv_ver >= PRIV_VERSION_1_10_0) &&
        get_field(env->satp, SATP_MODE) != VM_1_10_MBARE;
    switch (access_type) {
    case MMU_INST_FETCH:
        cs->exception_index = page_fault_exceptions ?
            RISCV_EXCP_INST_PAGE_FAULT : RISCV_EXCP_INST_ACCESS_FAULT;
        break;
    case MMU_DATA_LOAD:
        cs->exception_index = page_fault_exceptions ?
            RISCV_EXCP_LOAD_PAGE_FAULT : RISCV_EXCP_LOAD_ACCESS_FAULT;
        break;
    case MMU_DATA_STORE:
        cs->exception_index = page_fault_exceptions ?
            RISCV_EXCP_STORE_PAGE_FAULT : RISCV_EXCP_STORE_AMO_ACCESS_FAULT;
        break;
    default:
        g_assert_not_reached();
    }
    env->badaddr = address;
}

hwaddr riscv_cpu_get_phys_page_debug(CPUState *cs, vaddr addr)
{
    RISCVCPU *cpu = RISCV_CPU(cs);
    hwaddr phys_addr;
    int prot;
    int mmu_idx = cpu_mmu_index(&cpu->env, false);

    if (get_physical_address(&cpu->env, &phys_addr, &prot, addr, 0, mmu_idx)) {
        return -1;
    }
    return phys_addr;
}

void riscv_cpu_do_unaligned_access(CPUState *cs, vaddr addr,
                                   MMUAccessType access_type, int mmu_idx,
                                   uintptr_t retaddr)
{
    RISCVCPU *cpu = RISCV_CPU(cs);
    CPURISCVState *env = &cpu->env;
    switch (access_type) {
    case MMU_INST_FETCH:
        cs->exception_index = RISCV_EXCP_INST_ADDR_MIS;
        break;
    case MMU_DATA_LOAD:
        cs->exception_index = RISCV_EXCP_LOAD_ADDR_MIS;
        break;
    case MMU_DATA_STORE:
        cs->exception_index = RISCV_EXCP_STORE_AMO_ADDR_MIS;
        break;
    default:
        g_assert_not_reached();
    }
    env->badaddr = addr;
    do_raise_exception_err(env, cs->exception_index, retaddr);
}

/* called by qemu's softmmu to fill the qemu tlb */
void tlb_fill(CPUState *cs, target_ulong addr, int size,
              MMUAccessType access_type, int mmu_idx, uintptr_t retaddr)
{
    int ret;
    ret = riscv_cpu_handle_mmu_fault(cs, addr, size, access_type, mmu_idx);
    if (ret == TRANSLATE_FAIL) {
        RISCVCPU *cpu = RISCV_CPU(cs);
        CPURISCVState *env = &cpu->env;
        do_raise_exception_err(env, cs->exception_index, retaddr);
    }
}

#endif

int riscv_cpu_handle_mmu_fault(CPUState *cs, vaddr address, int size,
                               int rw, int mmu_idx)
{
    RISCVCPU *cpu = RISCV_CPU(cs);
    CPURISCVState *env = &cpu->env;
#if !defined(CONFIG_USER_ONLY)
    hwaddr pa = 0;
    int prot;
#endif
    int ret = TRANSLATE_FAIL;

    qemu_log_mask(CPU_LOG_MMU,
                  "%s pc " TARGET_FMT_lx " ad %" VADDR_PRIx
                  " rw %d mmu_idx %d\n", __func__, env->pc, address, rw,
                  mmu_idx);

#if !defined(CONFIG_USER_ONLY)
    ret = get_physical_address(env, &pa, &prot, address, rw, mmu_idx);
    qemu_log_mask(CPU_LOG_MMU,
                  "%s address=%" VADDR_PRIx " ret %d physical " TARGET_FMT_plx
                  " prot %d\n", __func__, address, ret, pa, prot);
    if (!pmp_hart_has_privs(env, pa, TARGET_PAGE_SIZE, 1 << rw)) {
        ret = TRANSLATE_FAIL;
    }
    if (ret == TRANSLATE_SUCCESS) {
        tlb_set_page(cs, address & TARGET_PAGE_MASK, pa & TARGET_PAGE_MASK,
                     prot, mmu_idx, TARGET_PAGE_SIZE);
    } else if (ret == TRANSLATE_FAIL) {
        raise_mmu_exception(env, address, rw);
    }
#else
    switch (rw) {
    case MMU_INST_FETCH:
        cs->exception_index = RISCV_EXCP_INST_PAGE_FAULT;
        break;
    case MMU_DATA_LOAD:
        cs->exception_index = RISCV_EXCP_LOAD_PAGE_FAULT;
        break;
    case MMU_DATA_STORE:
        cs->exception_index = RISCV_EXCP_STORE_PAGE_FAULT;
        break;
    }
#endif
    return ret;
}
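/*
 * Delegation example (illustrative): a supervisor timer interrupt is cause 5
 * with the interrupt MSB set.  If mideleg bit 5 is set and the hart is
 * executing at S or U privilege, riscv_cpu_do_interrupt() below vectors to
 * stvec in S-mode; otherwise the trap is taken at mtvec in M-mode.
 */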
/*
 * Handle Traps
 *
 * Adapted from Spike's processor_t::take_trap.
 *
 */
void riscv_cpu_do_interrupt(CPUState *cs)
{
#if !defined(CONFIG_USER_ONLY)

    RISCVCPU *cpu = RISCV_CPU(cs);
    CPURISCVState *env = &cpu->env;

    if (RISCV_DEBUG_INTERRUPT) {
        int log_cause = cs->exception_index & RISCV_EXCP_INT_MASK;
        if (cs->exception_index & RISCV_EXCP_INT_FLAG) {
            qemu_log_mask(LOG_TRACE, "core "
                TARGET_FMT_ld ": intr %s, epc 0x" TARGET_FMT_lx "\n",
                env->mhartid, riscv_intr_names[log_cause], env->pc);
        } else {
            qemu_log_mask(LOG_TRACE, "core "
                TARGET_FMT_ld ": trap %s, epc 0x" TARGET_FMT_lx "\n",
                env->mhartid, riscv_excp_names[log_cause], env->pc);
        }
    }

    target_ulong fixed_cause = 0;
    if (cs->exception_index & (RISCV_EXCP_INT_FLAG)) {
        /* hacky for now. the MSB (bit 63) indicates interrupt but cs->exception
           index is only 32 bits wide */
        fixed_cause = cs->exception_index & RISCV_EXCP_INT_MASK;
        fixed_cause |= ((target_ulong)1) << (TARGET_LONG_BITS - 1);
    } else {
        /* fixup User ECALL -> correct priv ECALL */
        if (cs->exception_index == RISCV_EXCP_U_ECALL) {
            switch (env->priv) {
            case PRV_U:
                fixed_cause = RISCV_EXCP_U_ECALL;
                break;
            case PRV_S:
                fixed_cause = RISCV_EXCP_S_ECALL;
                break;
            case PRV_H:
                fixed_cause = RISCV_EXCP_H_ECALL;
                break;
            case PRV_M:
                fixed_cause = RISCV_EXCP_M_ECALL;
                break;
            }
        } else {
            fixed_cause = cs->exception_index;
        }
    }

    target_ulong backup_epc = env->pc;

    target_ulong bit = fixed_cause;
    target_ulong deleg = env->medeleg;

    int hasbadaddr =
        (fixed_cause == RISCV_EXCP_INST_ADDR_MIS) ||
        (fixed_cause == RISCV_EXCP_INST_ACCESS_FAULT) ||
        (fixed_cause == RISCV_EXCP_LOAD_ADDR_MIS) ||
        (fixed_cause == RISCV_EXCP_STORE_AMO_ADDR_MIS) ||
        (fixed_cause == RISCV_EXCP_LOAD_ACCESS_FAULT) ||
        (fixed_cause == RISCV_EXCP_STORE_AMO_ACCESS_FAULT) ||
        (fixed_cause == RISCV_EXCP_INST_PAGE_FAULT) ||
        (fixed_cause == RISCV_EXCP_LOAD_PAGE_FAULT) ||
        (fixed_cause == RISCV_EXCP_STORE_PAGE_FAULT);

    if (bit & ((target_ulong)1 << (TARGET_LONG_BITS - 1))) {
        deleg = env->mideleg;
        bit &= ~((target_ulong)1 << (TARGET_LONG_BITS - 1));
    }

    if (env->priv <= PRV_S && bit < 64 && ((deleg >> bit) & 1)) {
        /* handle the trap in S-mode */
        /* No need to check STVEC for misaligned - lower 2 bits cannot be set */
        env->pc = env->stvec;
        env->scause = fixed_cause;
        env->sepc = backup_epc;

        if (hasbadaddr) {
            if (RISCV_DEBUG_INTERRUPT) {
                qemu_log_mask(LOG_TRACE, "core " TARGET_FMT_ld ": badaddr 0x"
                    TARGET_FMT_lx "\n", env->mhartid, env->badaddr);
            }
            env->sbadaddr = env->badaddr;
        } else {
            /* otherwise we must clear sbadaddr/stval
             * todo: support populating stval on illegal instructions */
            env->sbadaddr = 0;
        }
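        /* Trap entry per the privilege spec: stash the pre-trap interrupt
         * enable bit in SPIE, record the previous privilege in SPP, and
         * clear SIE so the handler starts with interrupts disabled. */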
        target_ulong s = env->mstatus;
        s = set_field(s, MSTATUS_SPIE, env->priv_ver >= PRIV_VERSION_1_10_0 ?
            get_field(s, MSTATUS_SIE) : get_field(s, MSTATUS_UIE << env->priv));
        s = set_field(s, MSTATUS_SPP, env->priv);
        s = set_field(s, MSTATUS_SIE, 0);
        csr_write_helper(env, s, CSR_MSTATUS);
        riscv_set_mode(env, PRV_S);
    } else {
        /* No need to check MTVEC for misaligned - lower 2 bits cannot be set */
        env->pc = env->mtvec;
        env->mepc = backup_epc;
        env->mcause = fixed_cause;

        if (hasbadaddr) {
            if (RISCV_DEBUG_INTERRUPT) {
                qemu_log_mask(LOG_TRACE, "core " TARGET_FMT_ld ": badaddr 0x"
                    TARGET_FMT_lx "\n", env->mhartid, env->badaddr);
            }
            env->mbadaddr = env->badaddr;
        } else {
            /* otherwise we must clear mbadaddr/mtval
             * todo: support populating mtval on illegal instructions */
            env->mbadaddr = 0;
        }

        target_ulong s = env->mstatus;
        s = set_field(s, MSTATUS_MPIE, env->priv_ver >= PRIV_VERSION_1_10_0 ?
            get_field(s, MSTATUS_MIE) : get_field(s, MSTATUS_UIE << env->priv));
        s = set_field(s, MSTATUS_MPP, env->priv);
        s = set_field(s, MSTATUS_MIE, 0);
        csr_write_helper(env, s, CSR_MSTATUS);
        riscv_set_mode(env, PRV_M);
    }
    /* TODO yield load reservation */
#endif
    cs->exception_index = EXCP_NONE; /* mark handled to qemu */
}