/*
 * RISC-V CPU helpers for qemu.
 *
 * Copyright (c) 2016-2017 Sagar Karandikar, sagark@eecs.berkeley.edu
 * Copyright (c) 2017-2018 SiFive, Inc.
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms and conditions of the GNU General Public License,
 * version 2 or later, as published by the Free Software Foundation.
 *
 * This program is distributed in the hope it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
 * more details.
 *
 * You should have received a copy of the GNU General Public License along with
 * this program.  If not, see <http://www.gnu.org/licenses/>.
 */

#include "qemu/osdep.h"
#include "qemu/log.h"
#include "cpu.h"
#include "exec/exec-all.h"
#include "tcg-op.h"
#include "trace.h"

int riscv_cpu_mmu_index(CPURISCVState *env, bool ifetch)
{
#ifdef CONFIG_USER_ONLY
    return 0;
#else
    return env->priv;
#endif
}

#ifndef CONFIG_USER_ONLY
static int riscv_cpu_local_irq_pending(CPURISCVState *env)
{
    target_ulong mstatus_mie = get_field(env->mstatus, MSTATUS_MIE);
    target_ulong mstatus_sie = get_field(env->mstatus, MSTATUS_SIE);
    target_ulong pending = atomic_read(&env->mip) & env->mie;
    target_ulong mie = env->priv < PRV_M || (env->priv == PRV_M && mstatus_mie);
    target_ulong sie = env->priv < PRV_S || (env->priv == PRV_S && mstatus_sie);
    target_ulong irqs = (pending & ~env->mideleg & -mie) |
                        (pending & env->mideleg & -sie);

    if (irqs) {
        return ctz64(irqs); /* since non-zero */
    } else {
        return EXCP_NONE; /* indicates no pending interrupt */
    }
}
#endif

bool riscv_cpu_exec_interrupt(CPUState *cs, int interrupt_request)
{
#if !defined(CONFIG_USER_ONLY)
    if (interrupt_request & CPU_INTERRUPT_HARD) {
        RISCVCPU *cpu = RISCV_CPU(cs);
        CPURISCVState *env = &cpu->env;
        int interruptno = riscv_cpu_local_irq_pending(env);
        if (interruptno >= 0) {
            cs->exception_index = RISCV_EXCP_INT_FLAG | interruptno;
            riscv_cpu_do_interrupt(cs);
            return true;
        }
    }
#endif
    return false;
}

#if !defined(CONFIG_USER_ONLY)

int riscv_cpu_claim_interrupts(RISCVCPU *cpu, uint32_t interrupts)
{
    CPURISCVState *env = &cpu->env;
    if (env->miclaim & interrupts) {
        return -1;
    } else {
        env->miclaim |= interrupts;
        return 0;
    }
}

/* iothread_mutex must be held */
uint32_t riscv_cpu_update_mip(RISCVCPU *cpu, uint32_t mask, uint32_t value)
{
    CPURISCVState *env = &cpu->env;
    uint32_t old, new, cmp = atomic_read(&env->mip);

    do {
        old = cmp;
        new = (old & ~mask) | (value & mask);
        cmp = atomic_cmpxchg(&env->mip, old, new);
    } while (old != cmp);

    if (new) {
        cpu_interrupt(CPU(cpu), CPU_INTERRUPT_HARD);
    } else {
        cpu_reset_interrupt(CPU(cpu), CPU_INTERRUPT_HARD);
    }

    return old;
}

void riscv_cpu_set_mode(CPURISCVState *env, target_ulong newpriv)
{
    if (newpriv > PRV_M) {
        g_assert_not_reached();
    }
    if (newpriv == PRV_H) {
        newpriv = PRV_U;
    }
    /* tlb_flush is unnecessary as mode is contained in mmu_idx */
    env->priv = newpriv;
}

/* get_physical_address - get the physical address for this virtual address
 *
 * Do a page table walk to obtain the physical address corresponding to a
 * virtual address. Returns 0 if the translation was successful
 *
 * Adapted from Spike's mmu_t::translate and mmu_t::walk
 *
 */
static int get_physical_address(CPURISCVState *env, hwaddr *physical,
                                int *prot, target_ulong addr,
                                int access_type, int mmu_idx)
{
    /* NOTE: the env->pc value visible here will not be
     * correct, but the value visible to the exception handler
     * (riscv_cpu_do_interrupt) is correct */

    int mode = mmu_idx;

    if (mode == PRV_M && access_type != MMU_INST_FETCH) {
        if (get_field(env->mstatus, MSTATUS_MPRV)) {
            mode = get_field(env->mstatus, MSTATUS_MPP);
        }
    }

    if (mode == PRV_M || !riscv_feature(env, RISCV_FEATURE_MMU)) {
        *physical = addr;
        *prot = PAGE_READ | PAGE_WRITE | PAGE_EXEC;
        return TRANSLATE_SUCCESS;
    }

    *prot = 0;

    target_ulong base;
    int levels, ptidxbits, ptesize, vm, sum;
    int mxr = get_field(env->mstatus, MSTATUS_MXR);

    if (env->priv_ver >= PRIV_VERSION_1_10_0) {
        base = get_field(env->satp, SATP_PPN) << PGSHIFT;
        sum = get_field(env->mstatus, MSTATUS_SUM);
        vm = get_field(env->satp, SATP_MODE);
        switch (vm) {
        case VM_1_10_SV32:
            levels = 2; ptidxbits = 10; ptesize = 4; break;
        case VM_1_10_SV39:
            levels = 3; ptidxbits = 9; ptesize = 8; break;
        case VM_1_10_SV48:
            levels = 4; ptidxbits = 9; ptesize = 8; break;
        case VM_1_10_SV57:
            levels = 5; ptidxbits = 9; ptesize = 8; break;
        case VM_1_10_MBARE:
            *physical = addr;
            *prot = PAGE_READ | PAGE_WRITE | PAGE_EXEC;
            return TRANSLATE_SUCCESS;
        default:
            g_assert_not_reached();
        }
    } else {
        base = env->sptbr << PGSHIFT;
        sum = !get_field(env->mstatus, MSTATUS_PUM);
        vm = get_field(env->mstatus, MSTATUS_VM);
        switch (vm) {
        case VM_1_09_SV32:
            levels = 2; ptidxbits = 10; ptesize = 4; break;
        case VM_1_09_SV39:
            levels = 3; ptidxbits = 9; ptesize = 8; break;
        case VM_1_09_SV48:
            levels = 4; ptidxbits = 9; ptesize = 8; break;
        case VM_1_09_MBARE:
            *physical = addr;
            *prot = PAGE_READ | PAGE_WRITE | PAGE_EXEC;
            return TRANSLATE_SUCCESS;
        default:
            g_assert_not_reached();
        }
    }

    CPUState *cs = CPU(riscv_env_get_cpu(env));
    int va_bits = PGSHIFT + levels * ptidxbits;
    target_ulong mask = (1L << (TARGET_LONG_BITS - (va_bits - 1))) - 1;
    target_ulong masked_msbs = (addr >> (va_bits - 1)) & mask;
    if (masked_msbs != 0 && masked_msbs != mask) {
        return TRANSLATE_FAIL;
    }

    int ptshift = (levels - 1) * ptidxbits;
    int i;

#if !TCG_OVERSIZED_GUEST
restart:
#endif
    for (i = 0; i < levels; i++, ptshift -= ptidxbits) {
        target_ulong idx = (addr >> (PGSHIFT + ptshift)) &
                           ((1 << ptidxbits) - 1);

        /* check that physical address of PTE is legal */
        target_ulong pte_addr = base + idx * ptesize;
#if defined(TARGET_RISCV32)
        target_ulong pte = ldl_phys(cs->as, pte_addr);
#elif defined(TARGET_RISCV64)
        target_ulong pte = ldq_phys(cs->as, pte_addr);
#endif
        target_ulong ppn = pte >> PTE_PPN_SHIFT;

        if (!(pte & PTE_V)) {
            /* Invalid PTE */
            return TRANSLATE_FAIL;
        } else if (!(pte & (PTE_R | PTE_W | PTE_X))) {
            /* Inner PTE, continue walking */
            base = ppn << PGSHIFT;
        } else if ((pte & (PTE_R | PTE_W | PTE_X)) == PTE_W) {
            /* Reserved leaf PTE flags: PTE_W */
            return TRANSLATE_FAIL;
        } else if ((pte & (PTE_R | PTE_W | PTE_X)) == (PTE_W | PTE_X)) {
            /* Reserved leaf PTE flags: PTE_W + PTE_X */
            return TRANSLATE_FAIL;
        } else if ((pte & PTE_U) && ((mode != PRV_U) &&
                   (!sum || access_type == MMU_INST_FETCH))) {
            /* User PTE flags when not U mode and mstatus.SUM is not set,
               or the access type is an instruction fetch */
            return TRANSLATE_FAIL;
        } else if (!(pte & PTE_U) && (mode != PRV_S)) {
            /* Supervisor PTE flags when not S mode */
            return TRANSLATE_FAIL;
        } else if (ppn & ((1ULL << ptshift) - 1)) {
            /* Misaligned PPN */
            return TRANSLATE_FAIL;
        } else if (access_type == MMU_DATA_LOAD && !((pte & PTE_R) ||
                   ((pte & PTE_X) && mxr))) {
            /* Read access check failed */
            return TRANSLATE_FAIL;
        } else if (access_type == MMU_DATA_STORE && !(pte & PTE_W)) {
            /* Write access check failed */
            return TRANSLATE_FAIL;
        } else if (access_type == MMU_INST_FETCH && !(pte & PTE_X)) {
            /* Fetch access check failed */
            return TRANSLATE_FAIL;
        } else {
            /* if necessary, set accessed and dirty bits. */
            target_ulong updated_pte = pte | PTE_A |
                (access_type == MMU_DATA_STORE ? PTE_D : 0);

            /* Page table updates need to be atomic with MTTCG enabled */
            if (updated_pte != pte) {
                /*
                 * - if accessed or dirty bits need updating, and the PTE is
                 *   in RAM, then we do so atomically with a compare and swap.
                 * - if the PTE is in IO space or ROM, then it can't be updated
                 *   and we return TRANSLATE_FAIL.
                 * - if the PTE changed by the time we went to update it, then
                 *   it is no longer valid and we must re-walk the page table.
                 */
                MemoryRegion *mr;
                hwaddr l = sizeof(target_ulong), addr1;
                mr = address_space_translate(cs->as, pte_addr,
                    &addr1, &l, false, MEMTXATTRS_UNSPECIFIED);
                if (memory_region_is_ram(mr)) {
                    target_ulong *pte_pa =
                        qemu_map_ram_ptr(mr->ram_block, addr1);
#if TCG_OVERSIZED_GUEST
                    /* MTTCG is not enabled on oversized TCG guests so
                     * page table updates do not need to be atomic */
                    *pte_pa = pte = updated_pte;
#else
                    target_ulong old_pte =
                        atomic_cmpxchg(pte_pa, pte, updated_pte);
                    if (old_pte != pte) {
                        goto restart;
                    } else {
                        pte = updated_pte;
                    }
#endif
                } else {
                    /* misconfigured PTE in ROM (AD bits are not preset) or
                     * PTE is in IO space and can't be updated atomically */
                    return TRANSLATE_FAIL;
                }
            }

            /* for superpage mappings, make a fake leaf PTE for the TLB's
               benefit. */
            target_ulong vpn = addr >> PGSHIFT;
            *physical = (ppn | (vpn & ((1L << ptshift) - 1))) << PGSHIFT;

            /* set permissions on the TLB entry */
            if ((pte & PTE_R) || ((pte & PTE_X) && mxr)) {
                *prot |= PAGE_READ;
            }
            if ((pte & PTE_X)) {
                *prot |= PAGE_EXEC;
            }
            /* add write permission on stores or if the page is already dirty,
               so that we TLB miss on later writes to update the dirty bit */
            if ((pte & PTE_W) &&
                    (access_type == MMU_DATA_STORE || (pte & PTE_D))) {
                *prot |= PAGE_WRITE;
            }
            return TRANSLATE_SUCCESS;
        }
    }
    return TRANSLATE_FAIL;
}

static void raise_mmu_exception(CPURISCVState *env, target_ulong address,
                                MMUAccessType access_type)
{
    CPUState *cs = CPU(riscv_env_get_cpu(env));
    int page_fault_exceptions =
        (env->priv_ver >= PRIV_VERSION_1_10_0) &&
        get_field(env->satp, SATP_MODE) != VM_1_10_MBARE;
    switch (access_type) {
    case MMU_INST_FETCH:
        cs->exception_index = page_fault_exceptions ?
            RISCV_EXCP_INST_PAGE_FAULT : RISCV_EXCP_INST_ACCESS_FAULT;
        break;
    case MMU_DATA_LOAD:
        cs->exception_index = page_fault_exceptions ?
            RISCV_EXCP_LOAD_PAGE_FAULT : RISCV_EXCP_LOAD_ACCESS_FAULT;
        break;
    case MMU_DATA_STORE:
        cs->exception_index = page_fault_exceptions ?
            RISCV_EXCP_STORE_PAGE_FAULT : RISCV_EXCP_STORE_AMO_ACCESS_FAULT;
        break;
    default:
        g_assert_not_reached();
    }
    env->badaddr = address;
}

hwaddr riscv_cpu_get_phys_page_debug(CPUState *cs, vaddr addr)
{
    RISCVCPU *cpu = RISCV_CPU(cs);
    hwaddr phys_addr;
    int prot;
    int mmu_idx = cpu_mmu_index(&cpu->env, false);

    if (get_physical_address(&cpu->env, &phys_addr, &prot, addr, 0, mmu_idx)) {
        return -1;
    }
    return phys_addr;
}

void riscv_cpu_do_unaligned_access(CPUState *cs, vaddr addr,
                                   MMUAccessType access_type, int mmu_idx,
                                   uintptr_t retaddr)
{
    RISCVCPU *cpu = RISCV_CPU(cs);
    CPURISCVState *env = &cpu->env;
    switch (access_type) {
    case MMU_INST_FETCH:
        cs->exception_index = RISCV_EXCP_INST_ADDR_MIS;
        break;
    case MMU_DATA_LOAD:
        cs->exception_index = RISCV_EXCP_LOAD_ADDR_MIS;
        break;
    case MMU_DATA_STORE:
        cs->exception_index = RISCV_EXCP_STORE_AMO_ADDR_MIS;
        break;
    default:
        g_assert_not_reached();
    }
    env->badaddr = addr;
    riscv_raise_exception(env, cs->exception_index, retaddr);
}
#endif

bool riscv_cpu_tlb_fill(CPUState *cs, vaddr address, int size,
                        MMUAccessType access_type, int mmu_idx,
                        bool probe, uintptr_t retaddr)
{
#ifndef CONFIG_USER_ONLY
    RISCVCPU *cpu = RISCV_CPU(cs);
    CPURISCVState *env = &cpu->env;
    hwaddr pa = 0;
    int prot;
    int ret = TRANSLATE_FAIL;

    qemu_log_mask(CPU_LOG_MMU, "%s ad %" VADDR_PRIx " rw %d mmu_idx %d\n",
                  __func__, address, access_type, mmu_idx);

    ret = get_physical_address(env, &pa, &prot, address, access_type, mmu_idx);

    qemu_log_mask(CPU_LOG_MMU,
                  "%s address=%" VADDR_PRIx " ret %d physical " TARGET_FMT_plx
                  " prot %d\n", __func__, address, ret, pa, prot);

    if (riscv_feature(env, RISCV_FEATURE_PMP) &&
        !pmp_hart_has_privs(env, pa, TARGET_PAGE_SIZE, 1 << access_type)) {
        ret = TRANSLATE_FAIL;
    }
    if (ret == TRANSLATE_SUCCESS) {
        tlb_set_page(cs, address & TARGET_PAGE_MASK, pa & TARGET_PAGE_MASK,
                     prot, mmu_idx, TARGET_PAGE_SIZE);
        return true;
    } else if (probe) {
        return false;
    } else {
        raise_mmu_exception(env, address, access_type);
        riscv_raise_exception(env, cs->exception_index, retaddr);
    }
#else
    switch (access_type) {
    case MMU_INST_FETCH:
        cs->exception_index = RISCV_EXCP_INST_PAGE_FAULT;
        break;
    case MMU_DATA_LOAD:
        cs->exception_index = RISCV_EXCP_LOAD_PAGE_FAULT;
        break;
    case MMU_DATA_STORE:
        cs->exception_index = RISCV_EXCP_STORE_PAGE_FAULT;
        break;
    }
    cpu_loop_exit_restore(cs, retaddr);
#endif
}

/*
 * Handle Traps
 *
 * Adapted from Spike's processor_t::take_trap.
 *
 */
void riscv_cpu_do_interrupt(CPUState *cs)
{
#if !defined(CONFIG_USER_ONLY)

    RISCVCPU *cpu = RISCV_CPU(cs);
    CPURISCVState *env = &cpu->env;

    /* cs->exception is 32-bits wide unlike mcause which is XLEN-bits wide
     * so we mask off the MSB and separate into trap type and cause.
     */
    bool async = !!(cs->exception_index & RISCV_EXCP_INT_FLAG);
    target_ulong cause = cs->exception_index & RISCV_EXCP_INT_MASK;
    target_ulong deleg = async ? env->mideleg : env->medeleg;
    target_ulong tval = 0;

    static const int ecall_cause_map[] = {
        [PRV_U] = RISCV_EXCP_U_ECALL,
        [PRV_S] = RISCV_EXCP_S_ECALL,
        [PRV_H] = RISCV_EXCP_H_ECALL,
        [PRV_M] = RISCV_EXCP_M_ECALL
    };

    if (!async) {
        /* set tval to badaddr for traps with address information */
        switch (cause) {
        case RISCV_EXCP_INST_ADDR_MIS:
        case RISCV_EXCP_INST_ACCESS_FAULT:
        case RISCV_EXCP_LOAD_ADDR_MIS:
        case RISCV_EXCP_STORE_AMO_ADDR_MIS:
        case RISCV_EXCP_LOAD_ACCESS_FAULT:
        case RISCV_EXCP_STORE_AMO_ACCESS_FAULT:
        case RISCV_EXCP_INST_PAGE_FAULT:
        case RISCV_EXCP_LOAD_PAGE_FAULT:
        case RISCV_EXCP_STORE_PAGE_FAULT:
            tval = env->badaddr;
            break;
        default:
            break;
        }
        /* ecall is dispatched as one cause so translate based on mode */
        if (cause == RISCV_EXCP_U_ECALL) {
            assert(env->priv <= 3);
            cause = ecall_cause_map[env->priv];
        }
    }

    trace_riscv_trap(env->mhartid, async, cause, env->pc, tval, cause < 16 ?
        (async ? riscv_intr_names : riscv_excp_names)[cause] : "(unknown)");

    if (env->priv <= PRV_S &&
            cause < TARGET_LONG_BITS && ((deleg >> cause) & 1)) {
        /* handle the trap in S-mode */
        target_ulong s = env->mstatus;
        s = set_field(s, MSTATUS_SPIE, env->priv_ver >= PRIV_VERSION_1_10_0 ?
            get_field(s, MSTATUS_SIE) : get_field(s, MSTATUS_UIE << env->priv));
        s = set_field(s, MSTATUS_SPP, env->priv);
        s = set_field(s, MSTATUS_SIE, 0);
        env->mstatus = s;
        env->scause = cause | ~(((target_ulong)-1) >> async);
        env->sepc = env->pc;
        env->sbadaddr = tval;
        env->pc = (env->stvec >> 2 << 2) +
            ((async && (env->stvec & 3) == 1) ? cause * 4 : 0);
        riscv_cpu_set_mode(env, PRV_S);
    } else {
        /* handle the trap in M-mode */
        target_ulong s = env->mstatus;
        s = set_field(s, MSTATUS_MPIE, env->priv_ver >= PRIV_VERSION_1_10_0 ?
            get_field(s, MSTATUS_MIE) : get_field(s, MSTATUS_UIE << env->priv));
        s = set_field(s, MSTATUS_MPP, env->priv);
        s = set_field(s, MSTATUS_MIE, 0);
        env->mstatus = s;
        env->mcause = cause | ~(((target_ulong)-1) >> async);
        env->mepc = env->pc;
        env->mbadaddr = tval;
        env->pc = (env->mtvec >> 2 << 2) +
            ((async && (env->mtvec & 3) == 1) ? cause * 4 : 0);
        riscv_cpu_set_mode(env, PRV_M);
    }

    /* NOTE: it is not necessary to yield load reservations here. It is only
     * necessary for an SC from "another hart" to cause a load reservation
     * to be yielded. Refer to the memory consistency model section of the
     * RISC-V ISA Specification.
     */

#endif
    cs->exception_index = EXCP_NONE; /* mark handled to qemu */
}