/*
 * RISC-V CPU helpers for qemu.
 *
 * Copyright (c) 2016-2017 Sagar Karandikar, sagark@eecs.berkeley.edu
 * Copyright (c) 2017-2018 SiFive, Inc.
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms and conditions of the GNU General Public License,
 * version 2 or later, as published by the Free Software Foundation.
 *
 * This program is distributed in the hope it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
 * more details.
 *
 * You should have received a copy of the GNU General Public License along with
 * this program.  If not, see <http://www.gnu.org/licenses/>.
 */

#include "qemu/osdep.h"
#include "qemu/log.h"
#include "cpu.h"
#include "exec/exec-all.h"
#include "tcg-op.h"
#include "trace.h"

int riscv_cpu_mmu_index(CPURISCVState *env, bool ifetch)
{
#ifdef CONFIG_USER_ONLY
    return 0;
#else
    return env->priv;
#endif
}

#ifndef CONFIG_USER_ONLY
static int riscv_cpu_local_irq_pending(CPURISCVState *env)
{
    target_ulong mstatus_mie = get_field(env->mstatus, MSTATUS_MIE);
    target_ulong mstatus_sie = get_field(env->mstatus, MSTATUS_SIE);
    target_ulong pending = atomic_read(&env->mip) & env->mie;
    target_ulong mie = env->priv < PRV_M || (env->priv == PRV_M && mstatus_mie);
    target_ulong sie = env->priv < PRV_S || (env->priv == PRV_S && mstatus_sie);
    target_ulong irqs = (pending & ~env->mideleg & -mie) |
                        (pending & env->mideleg & -sie);

    if (irqs) {
        return ctz64(irqs); /* since non-zero */
    } else {
        return EXCP_NONE; /* indicates no pending interrupt */
    }
}
#endif

bool riscv_cpu_exec_interrupt(CPUState *cs, int interrupt_request)
{
#if !defined(CONFIG_USER_ONLY)
    if (interrupt_request & CPU_INTERRUPT_HARD) {
        RISCVCPU *cpu = RISCV_CPU(cs);
        CPURISCVState *env = &cpu->env;
        int interruptno = riscv_cpu_local_irq_pending(env);
        if (interruptno >= 0) {
            cs->exception_index = RISCV_EXCP_INT_FLAG | interruptno;
            riscv_cpu_do_interrupt(cs);
            return true;
        }
    }
#endif
    return false;
}

#if !defined(CONFIG_USER_ONLY)

int riscv_cpu_claim_interrupts(RISCVCPU *cpu, uint32_t interrupts)
{
    CPURISCVState *env = &cpu->env;
    if (env->miclaim & interrupts) {
        return -1;
    } else {
        env->miclaim |= interrupts;
        return 0;
    }
}

/* iothread_mutex must be held */
uint32_t riscv_cpu_update_mip(RISCVCPU *cpu, uint32_t mask, uint32_t value)
{
    CPURISCVState *env = &cpu->env;
    uint32_t old, new, cmp = atomic_read(&env->mip);

    do {
        old = cmp;
        new = (old & ~mask) | (value & mask);
        cmp = atomic_cmpxchg(&env->mip, old, new);
    } while (old != cmp);

    if (new) {
        cpu_interrupt(CPU(cpu), CPU_INTERRUPT_HARD);
    } else {
        cpu_reset_interrupt(CPU(cpu), CPU_INTERRUPT_HARD);
    }

    return old;
}

void riscv_cpu_set_mode(CPURISCVState *env, target_ulong newpriv)
{
    if (newpriv > PRV_M) {
        g_assert_not_reached();
    }
    if (newpriv == PRV_H) {
        newpriv = PRV_U;
    }
    /* tlb_flush is unnecessary as mode is contained in mmu_idx */
    env->priv = newpriv;
}

/* get_physical_address - get the physical address for this virtual address
 *
 * Do a page table walk to obtain the physical address corresponding to a
 * virtual address. Returns 0 if the translation was successful
 *
 * Adapted from Spike's mmu_t::translate and mmu_t::walk
 *
 */
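/*
 * Worked example (illustrative only, not used by the code below): with
 * Sv39, a virtual address has a 12-bit page offset and three 9-bit VPN
 * fields, so levels = 3, ptidxbits = 9 and ptesize = 8. The walk starts
 * at base = satp.PPN << PGSHIFT and at each level selects
 *     idx = (addr >> (PGSHIFT + ptshift)) & 0x1ff
 * reading the 8-byte PTE at base + idx * ptesize. Sv32 differs only in
 * using two 10-bit VPN fields and 4-byte PTEs.
 */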
static int get_physical_address(CPURISCVState *env, hwaddr *physical,
                                int *prot, target_ulong addr,
                                int access_type, int mmu_idx)
{
    /* NOTE: the env->pc value visible here will not be
     * correct, but the value visible to the exception handler
     * (riscv_cpu_do_interrupt) is correct */

    int mode = mmu_idx;

    if (mode == PRV_M && access_type != MMU_INST_FETCH) {
        if (get_field(env->mstatus, MSTATUS_MPRV)) {
            mode = get_field(env->mstatus, MSTATUS_MPP);
        }
    }

    if (mode == PRV_M || !riscv_feature(env, RISCV_FEATURE_MMU)) {
        *physical = addr;
        *prot = PAGE_READ | PAGE_WRITE | PAGE_EXEC;
        return TRANSLATE_SUCCESS;
    }

    *prot = 0;

    target_ulong base;
    int levels, ptidxbits, ptesize, vm, sum;
    int mxr = get_field(env->mstatus, MSTATUS_MXR);

    if (env->priv_ver >= PRIV_VERSION_1_10_0) {
        base = get_field(env->satp, SATP_PPN) << PGSHIFT;
        sum = get_field(env->mstatus, MSTATUS_SUM);
        vm = get_field(env->satp, SATP_MODE);
        switch (vm) {
        case VM_1_10_SV32:
            levels = 2; ptidxbits = 10; ptesize = 4; break;
        case VM_1_10_SV39:
            levels = 3; ptidxbits = 9; ptesize = 8; break;
        case VM_1_10_SV48:
            levels = 4; ptidxbits = 9; ptesize = 8; break;
        case VM_1_10_SV57:
            levels = 5; ptidxbits = 9; ptesize = 8; break;
        case VM_1_10_MBARE:
            *physical = addr;
            *prot = PAGE_READ | PAGE_WRITE | PAGE_EXEC;
            return TRANSLATE_SUCCESS;
        default:
            g_assert_not_reached();
        }
    } else {
        base = env->sptbr << PGSHIFT;
        sum = !get_field(env->mstatus, MSTATUS_PUM);
        vm = get_field(env->mstatus, MSTATUS_VM);
        switch (vm) {
        case VM_1_09_SV32:
            levels = 2; ptidxbits = 10; ptesize = 4; break;
        case VM_1_09_SV39:
            levels = 3; ptidxbits = 9; ptesize = 8; break;
        case VM_1_09_SV48:
            levels = 4; ptidxbits = 9; ptesize = 8; break;
        case VM_1_09_MBARE:
            *physical = addr;
            *prot = PAGE_READ | PAGE_WRITE | PAGE_EXEC;
            return TRANSLATE_SUCCESS;
        default:
            g_assert_not_reached();
        }
    }

    CPUState *cs = CPU(riscv_env_get_cpu(env));
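    /*
     * Illustrative note: the check below enforces the requirement that
     * virtual addresses be sign-extended from bit (va_bits - 1); the bits
     * above the translated range must therefore be either all zeros or
     * all ones, which is what comparing masked_msbs against 0 and mask
     * verifies.
     */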
    int va_bits = PGSHIFT + levels * ptidxbits;
    target_ulong mask = (1L << (TARGET_LONG_BITS - (va_bits - 1))) - 1;
    target_ulong masked_msbs = (addr >> (va_bits - 1)) & mask;
    if (masked_msbs != 0 && masked_msbs != mask) {
        return TRANSLATE_FAIL;
    }

    int ptshift = (levels - 1) * ptidxbits;
    int i;

#if !TCG_OVERSIZED_GUEST
restart:
#endif
    for (i = 0; i < levels; i++, ptshift -= ptidxbits) {
        target_ulong idx = (addr >> (PGSHIFT + ptshift)) &
                           ((1 << ptidxbits) - 1);

        /* check that physical address of PTE is legal */
        target_ulong pte_addr = base + idx * ptesize;
#if defined(TARGET_RISCV32)
        target_ulong pte = ldl_phys(cs->as, pte_addr);
#elif defined(TARGET_RISCV64)
        target_ulong pte = ldq_phys(cs->as, pte_addr);
#endif
        target_ulong ppn = pte >> PTE_PPN_SHIFT;

        if (!(pte & PTE_V)) {
            /* Invalid PTE */
            return TRANSLATE_FAIL;
        } else if (!(pte & (PTE_R | PTE_W | PTE_X))) {
            /* Inner PTE, continue walking */
            base = ppn << PGSHIFT;
        } else if ((pte & (PTE_R | PTE_W | PTE_X)) == PTE_W) {
            /* Reserved leaf PTE flags: PTE_W */
            return TRANSLATE_FAIL;
        } else if ((pte & (PTE_R | PTE_W | PTE_X)) == (PTE_W | PTE_X)) {
            /* Reserved leaf PTE flags: PTE_W + PTE_X */
            return TRANSLATE_FAIL;
        } else if ((pte & PTE_U) && ((mode != PRV_U) &&
                   (!sum || access_type == MMU_INST_FETCH))) {
            /* User PTE flags when not U mode and mstatus.SUM is not set,
               or the access type is an instruction fetch */
            return TRANSLATE_FAIL;
        } else if (!(pte & PTE_U) && (mode != PRV_S)) {
            /* Supervisor PTE flags when not S mode */
            return TRANSLATE_FAIL;
        } else if (ppn & ((1ULL << ptshift) - 1)) {
            /* Misaligned PPN */
            return TRANSLATE_FAIL;
        } else if (access_type == MMU_DATA_LOAD && !((pte & PTE_R) ||
                   ((pte & PTE_X) && mxr))) {
            /* Read access check failed */
            return TRANSLATE_FAIL;
        } else if (access_type == MMU_DATA_STORE && !(pte & PTE_W)) {
            /* Write access check failed */
            return TRANSLATE_FAIL;
        } else if (access_type == MMU_INST_FETCH && !(pte & PTE_X)) {
            /* Fetch access check failed */
            return TRANSLATE_FAIL;
        } else {
            /* if necessary, set accessed and dirty bits. */
            target_ulong updated_pte = pte | PTE_A |
                (access_type == MMU_DATA_STORE ? PTE_D : 0);

            /* Page table updates need to be atomic with MTTCG enabled */
            if (updated_pte != pte) {
                /*
                 * - if accessed or dirty bits need updating, and the PTE is
                 *   in RAM, then we do so atomically with a compare and swap.
                 * - if the PTE is in IO space or ROM, then it can't be updated
                 *   and we return TRANSLATE_FAIL.
                 * - if the PTE changed by the time we went to update it, then
                 *   it is no longer valid and we must re-walk the page table.
                 */
                MemoryRegion *mr;
                hwaddr l = sizeof(target_ulong), addr1;
                mr = address_space_translate(cs->as, pte_addr,
                    &addr1, &l, false, MEMTXATTRS_UNSPECIFIED);
                if (memory_region_is_ram(mr)) {
                    target_ulong *pte_pa =
                        qemu_map_ram_ptr(mr->ram_block, addr1);
#if TCG_OVERSIZED_GUEST
                    /* MTTCG is not enabled on oversized TCG guests so
                     * page table updates do not need to be atomic */
                    *pte_pa = pte = updated_pte;
#else
                    target_ulong old_pte =
                        atomic_cmpxchg(pte_pa, pte, updated_pte);
                    if (old_pte != pte) {
                        goto restart;
                    } else {
                        pte = updated_pte;
                    }
#endif
                } else {
                    /* misconfigured PTE in ROM (AD bits are not preset) or
                     * PTE is in IO space and can't be updated atomically */
                    return TRANSLATE_FAIL;
                }
            }

            /* for superpage mappings, make a fake leaf PTE for the TLB's
               benefit. */
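            /*
             * Illustrative example: an Sv39 leaf found at level 1 maps a
             * 2 MiB region, and at that point ptshift == 9, so the low 9
             * bits of the virtual page number are folded into the PPN to
             * produce an ordinary 4 KiB translation for QEMU's TLB.
             */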
            target_ulong vpn = addr >> PGSHIFT;
            *physical = (ppn | (vpn & ((1L << ptshift) - 1))) << PGSHIFT;

            /* set permissions on the TLB entry */
            if ((pte & PTE_R) || ((pte & PTE_X) && mxr)) {
                *prot |= PAGE_READ;
            }
            if ((pte & PTE_X)) {
                *prot |= PAGE_EXEC;
            }
            /* add write permission on stores or if the page is already dirty,
               so that we TLB miss on later writes to update the dirty bit */
            if ((pte & PTE_W) &&
                    (access_type == MMU_DATA_STORE || (pte & PTE_D))) {
                *prot |= PAGE_WRITE;
            }
            return TRANSLATE_SUCCESS;
        }
    }
    return TRANSLATE_FAIL;
}

static void raise_mmu_exception(CPURISCVState *env, target_ulong address,
                                MMUAccessType access_type)
{
    CPUState *cs = CPU(riscv_env_get_cpu(env));
    int page_fault_exceptions =
        (env->priv_ver >= PRIV_VERSION_1_10_0) &&
        get_field(env->satp, SATP_MODE) != VM_1_10_MBARE;
    switch (access_type) {
    case MMU_INST_FETCH:
        cs->exception_index = page_fault_exceptions ?
            RISCV_EXCP_INST_PAGE_FAULT : RISCV_EXCP_INST_ACCESS_FAULT;
        break;
    case MMU_DATA_LOAD:
        cs->exception_index = page_fault_exceptions ?
            RISCV_EXCP_LOAD_PAGE_FAULT : RISCV_EXCP_LOAD_ACCESS_FAULT;
        break;
    case MMU_DATA_STORE:
        cs->exception_index = page_fault_exceptions ?
            RISCV_EXCP_STORE_PAGE_FAULT : RISCV_EXCP_STORE_AMO_ACCESS_FAULT;
        break;
    default:
        g_assert_not_reached();
    }
    env->badaddr = address;
}

hwaddr riscv_cpu_get_phys_page_debug(CPUState *cs, vaddr addr)
{
    RISCVCPU *cpu = RISCV_CPU(cs);
    hwaddr phys_addr;
    int prot;
    int mmu_idx = cpu_mmu_index(&cpu->env, false);

    if (get_physical_address(&cpu->env, &phys_addr, &prot, addr, 0, mmu_idx)) {
        return -1;
    }
    return phys_addr;
}

void riscv_cpu_do_unaligned_access(CPUState *cs, vaddr addr,
                                   MMUAccessType access_type, int mmu_idx,
                                   uintptr_t retaddr)
{
    RISCVCPU *cpu = RISCV_CPU(cs);
    CPURISCVState *env = &cpu->env;
    switch (access_type) {
    case MMU_INST_FETCH:
        cs->exception_index = RISCV_EXCP_INST_ADDR_MIS;
        break;
    case MMU_DATA_LOAD:
        cs->exception_index = RISCV_EXCP_LOAD_ADDR_MIS;
        break;
    case MMU_DATA_STORE:
        cs->exception_index = RISCV_EXCP_STORE_AMO_ADDR_MIS;
        break;
    default:
        g_assert_not_reached();
    }
    env->badaddr = addr;
    riscv_raise_exception(env, cs->exception_index, retaddr);
}

/* called by qemu's softmmu to fill the qemu tlb */
void tlb_fill(CPUState *cs, target_ulong addr, int size,
        MMUAccessType access_type, int mmu_idx, uintptr_t retaddr)
{
    int ret;
    ret = riscv_cpu_handle_mmu_fault(cs, addr, size, access_type, mmu_idx);
    if (ret == TRANSLATE_FAIL) {
        RISCVCPU *cpu = RISCV_CPU(cs);
        CPURISCVState *env = &cpu->env;
        riscv_raise_exception(env, cs->exception_index, retaddr);
    }
}

#endif

int riscv_cpu_handle_mmu_fault(CPUState *cs, vaddr address, int size,
        int rw, int mmu_idx)
{
    RISCVCPU *cpu = RISCV_CPU(cs);
    CPURISCVState *env = &cpu->env;
#if !defined(CONFIG_USER_ONLY)
    hwaddr pa = 0;
    int prot;
#endif
    int ret = TRANSLATE_FAIL;

    qemu_log_mask(CPU_LOG_MMU,
                  "%s pc " TARGET_FMT_lx " ad %" VADDR_PRIx " rw %d mmu_idx %d\n",
                  __func__, env->pc, address, rw, mmu_idx);

#if !defined(CONFIG_USER_ONLY)
    ret = get_physical_address(env, &pa, &prot, address, rw, mmu_idx);
    qemu_log_mask(CPU_LOG_MMU,
                  "%s address=%" VADDR_PRIx " ret %d physical " TARGET_FMT_plx
                  " prot %d\n", __func__, address, ret, pa, prot);
    if (riscv_feature(env, RISCV_FEATURE_PMP) &&
        !pmp_hart_has_privs(env, pa, TARGET_PAGE_SIZE, 1 << rw)) {
        ret = TRANSLATE_FAIL;
    }
    if (ret == TRANSLATE_SUCCESS) {
        tlb_set_page(cs, address & TARGET_PAGE_MASK, pa & TARGET_PAGE_MASK,
                     prot, mmu_idx, TARGET_PAGE_SIZE);
    } else if (ret == TRANSLATE_FAIL) {
        raise_mmu_exception(env, address, rw);
    }
#else
    switch (rw) {
    case MMU_INST_FETCH:
        cs->exception_index = RISCV_EXCP_INST_PAGE_FAULT;
        break;
    case MMU_DATA_LOAD:
        cs->exception_index = RISCV_EXCP_LOAD_PAGE_FAULT;
        break;
    case MMU_DATA_STORE:
        cs->exception_index = RISCV_EXCP_STORE_PAGE_FAULT;
        break;
    }
#endif
    return ret;
}

/*
 * Handle Traps
 *
 * Adapted from Spike's processor_t::take_trap.
 *
 */
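/*
 * For reference: mcause/scause set their most-significant bit for
 * interrupts. The expression cause | ~(((target_ulong)-1) >> async)
 * below evaluates to the bare cause for synchronous traps (async == 0)
 * and to the cause with the MSB set for interrupts (async == 1). When
 * xtvec selects vectored mode (low two bits == 1), asynchronous traps
 * jump to base + 4 * cause; synchronous traps and direct mode use the
 * base address unchanged.
 */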
void riscv_cpu_do_interrupt(CPUState *cs)
{
#if !defined(CONFIG_USER_ONLY)

    RISCVCPU *cpu = RISCV_CPU(cs);
    CPURISCVState *env = &cpu->env;

    /* cs->exception_index is 32 bits wide unlike mcause which is XLEN bits
     * wide, so we mask off the MSB and separate into trap type and cause.
     */
    bool async = !!(cs->exception_index & RISCV_EXCP_INT_FLAG);
    target_ulong cause = cs->exception_index & RISCV_EXCP_INT_MASK;
    target_ulong deleg = async ? env->mideleg : env->medeleg;
    target_ulong tval = 0;

    static const int ecall_cause_map[] = {
        [PRV_U] = RISCV_EXCP_U_ECALL,
        [PRV_S] = RISCV_EXCP_S_ECALL,
        [PRV_H] = RISCV_EXCP_H_ECALL,
        [PRV_M] = RISCV_EXCP_M_ECALL
    };

    if (!async) {
        /* set tval to badaddr for traps with address information */
        switch (cause) {
        case RISCV_EXCP_INST_ADDR_MIS:
        case RISCV_EXCP_INST_ACCESS_FAULT:
        case RISCV_EXCP_LOAD_ADDR_MIS:
        case RISCV_EXCP_STORE_AMO_ADDR_MIS:
        case RISCV_EXCP_LOAD_ACCESS_FAULT:
        case RISCV_EXCP_STORE_AMO_ACCESS_FAULT:
        case RISCV_EXCP_INST_PAGE_FAULT:
        case RISCV_EXCP_LOAD_PAGE_FAULT:
        case RISCV_EXCP_STORE_PAGE_FAULT:
            tval = env->badaddr;
            break;
        default:
            break;
        }
        /* ecall is dispatched as one cause so translate based on mode */
        if (cause == RISCV_EXCP_U_ECALL) {
            assert(env->priv <= 3);
            cause = ecall_cause_map[env->priv];
        }
    }

    trace_riscv_trap(env->mhartid, async, cause, env->pc, tval, cause < 16 ?
        (async ? riscv_intr_names : riscv_excp_names)[cause] : "(unknown)");

    if (env->priv <= PRV_S &&
            cause < TARGET_LONG_BITS && ((deleg >> cause) & 1)) {
        /* handle the trap in S-mode */
        target_ulong s = env->mstatus;
        s = set_field(s, MSTATUS_SPIE, env->priv_ver >= PRIV_VERSION_1_10_0 ?
            get_field(s, MSTATUS_SIE) : get_field(s, MSTATUS_UIE << env->priv));
        s = set_field(s, MSTATUS_SPP, env->priv);
        s = set_field(s, MSTATUS_SIE, 0);
        env->mstatus = s;
        env->scause = cause | ~(((target_ulong)-1) >> async);
        env->sepc = env->pc;
        env->sbadaddr = tval;
        env->pc = (env->stvec >> 2 << 2) +
            ((async && (env->stvec & 3) == 1) ? cause * 4 : 0);
        riscv_cpu_set_mode(env, PRV_S);
    } else {
        /* handle the trap in M-mode */
        target_ulong s = env->mstatus;
        s = set_field(s, MSTATUS_MPIE, env->priv_ver >= PRIV_VERSION_1_10_0 ?
            get_field(s, MSTATUS_MIE) : get_field(s, MSTATUS_UIE << env->priv));
        s = set_field(s, MSTATUS_MPP, env->priv);
        s = set_field(s, MSTATUS_MIE, 0);
        env->mstatus = s;
        env->mcause = cause | ~(((target_ulong)-1) >> async);
        env->mepc = env->pc;
        env->mbadaddr = tval;
        env->pc = (env->mtvec >> 2 << 2) +
            ((async && (env->mtvec & 3) == 1) ? cause * 4 : 0);
        riscv_cpu_set_mode(env, PRV_M);
    }

    /* NOTE: it is not necessary to yield load reservations here. It is only
     * necessary for an SC from "another hart" to cause a load reservation
     * to be yielded. Refer to the memory consistency model section of the
     * RISC-V ISA Specification.
     */

#endif
    cs->exception_index = EXCP_NONE; /* mark handled to qemu */
}