1 /* 2 * Sparc MMU helpers 3 * 4 * Copyright (c) 2003-2005 Fabrice Bellard 5 * 6 * This library is free software; you can redistribute it and/or 7 * modify it under the terms of the GNU Lesser General Public 8 * License as published by the Free Software Foundation; either 9 * version 2.1 of the License, or (at your option) any later version. 10 * 11 * This library is distributed in the hope that it will be useful, 12 * but WITHOUT ANY WARRANTY; without even the implied warranty of 13 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU 14 * Lesser General Public License for more details. 15 * 16 * You should have received a copy of the GNU Lesser General Public 17 * License along with this library; if not, see <http://www.gnu.org/licenses/>. 18 */ 19 20 #include "qemu/osdep.h" 21 #include "qemu/log.h" 22 #include "cpu.h" 23 #include "exec/cputlb.h" 24 #include "accel/tcg/cpu-mmu-index.h" 25 #include "exec/page-protection.h" 26 #include "exec/tlb-flags.h" 27 #include "system/memory.h" 28 #include "qemu/qemu-print.h" 29 #include "trace.h" 30 31 /* Sparc MMU emulation */ 32 33 #ifndef TARGET_SPARC64 34 /* 35 * Sparc V8 Reference MMU (SRMMU) 36 */ 37 static const int access_table[8][8] = { 38 { 0, 0, 0, 0, 8, 0, 12, 12 }, 39 { 0, 0, 0, 0, 8, 0, 0, 0 }, 40 { 8, 8, 0, 0, 0, 8, 12, 12 }, 41 { 8, 8, 0, 0, 0, 8, 0, 0 }, 42 { 8, 0, 8, 0, 8, 8, 12, 12 }, 43 { 8, 0, 8, 0, 8, 0, 8, 0 }, 44 { 8, 8, 8, 0, 8, 8, 12, 12 }, 45 { 8, 8, 8, 0, 8, 8, 8, 0 } 46 }; 47 48 static const int perm_table[2][8] = { 49 { 50 PAGE_READ, 51 PAGE_READ | PAGE_WRITE, 52 PAGE_READ | PAGE_EXEC, 53 PAGE_READ | PAGE_WRITE | PAGE_EXEC, 54 PAGE_EXEC, 55 PAGE_READ | PAGE_WRITE, 56 PAGE_READ | PAGE_EXEC, 57 PAGE_READ | PAGE_WRITE | PAGE_EXEC 58 }, 59 { 60 PAGE_READ, 61 PAGE_READ | PAGE_WRITE, 62 PAGE_READ | PAGE_EXEC, 63 PAGE_READ | PAGE_WRITE | PAGE_EXEC, 64 PAGE_EXEC, 65 PAGE_READ, 66 0, 67 0, 68 } 69 }; 70 71 static int get_physical_address(CPUSPARCState *env, CPUTLBEntryFull *full, 72 int *access_index, target_ulong address, 73 int rw, int mmu_idx) 74 { 75 int access_perms = 0; 76 hwaddr pde_ptr; 77 uint32_t pde; 78 int error_code = 0, is_dirty, is_user; 79 unsigned long page_offset; 80 CPUState *cs = env_cpu(env); 81 MemTxResult result; 82 83 is_user = mmu_idx == MMU_USER_IDX; 84 85 if (mmu_idx == MMU_PHYS_IDX) { 86 full->lg_page_size = TARGET_PAGE_BITS; 87 /* Boot mode: instruction fetches are taken from PROM */ 88 if (rw == 2 && (env->mmuregs[0] & env->def.mmu_bm)) { 89 full->phys_addr = env->prom_addr | (address & 0x7ffffULL); 90 full->prot = PAGE_READ | PAGE_EXEC; 91 return 0; 92 } 93 full->phys_addr = address; 94 full->prot = PAGE_READ | PAGE_WRITE | PAGE_EXEC; 95 return 0; 96 } 97 98 *access_index = ((rw & 1) << 2) | (rw & 2) | (is_user ? 0 : 1); 99 full->phys_addr = 0xffffffffffff0000ULL; 100 101 /* SPARC reference MMU table walk: Context table->L1->L2->PTE */ 102 /* Context base + context number */ 103 pde_ptr = (env->mmuregs[1] << 4) + (env->mmuregs[2] << 2); 104 pde = address_space_ldl(cs->as, pde_ptr, MEMTXATTRS_UNSPECIFIED, &result); 105 if (result != MEMTX_OK) { 106 return 4 << 2; /* Translation fault, L = 0 */ 107 } 108 109 /* Ctx pde */ 110 switch (pde & PTE_ENTRYTYPE_MASK) { 111 default: 112 case 0: /* Invalid */ 113 return 1 << 2; 114 case 2: /* L0 PTE, maybe should not happen? */ 115 case 3: /* Reserved */ 116 return 4 << 2; 117 case 1: /* L0 PDE */ 118 pde_ptr = ((address >> 22) & ~3) + ((pde & ~3) << 4); 119 pde = address_space_ldl(cs->as, pde_ptr, 120 MEMTXATTRS_UNSPECIFIED, &result); 121 if (result != MEMTX_OK) { 122 return (1 << 8) | (4 << 2); /* Translation fault, L = 1 */ 123 } 124 125 switch (pde & PTE_ENTRYTYPE_MASK) { 126 default: 127 case 0: /* Invalid */ 128 return (1 << 8) | (1 << 2); 129 case 3: /* Reserved */ 130 return (1 << 8) | (4 << 2); 131 case 1: /* L1 PDE */ 132 pde_ptr = ((address & 0xfc0000) >> 16) + ((pde & ~3) << 4); 133 pde = address_space_ldl(cs->as, pde_ptr, 134 MEMTXATTRS_UNSPECIFIED, &result); 135 if (result != MEMTX_OK) { 136 return (2 << 8) | (4 << 2); /* Translation fault, L = 2 */ 137 } 138 139 switch (pde & PTE_ENTRYTYPE_MASK) { 140 default: 141 case 0: /* Invalid */ 142 return (2 << 8) | (1 << 2); 143 case 3: /* Reserved */ 144 return (2 << 8) | (4 << 2); 145 case 1: /* L2 PDE */ 146 pde_ptr = ((address & 0x3f000) >> 10) + ((pde & ~3) << 4); 147 pde = address_space_ldl(cs->as, pde_ptr, 148 MEMTXATTRS_UNSPECIFIED, &result); 149 if (result != MEMTX_OK) { 150 return (3 << 8) | (4 << 2); /* Translation fault, L = 3 */ 151 } 152 153 switch (pde & PTE_ENTRYTYPE_MASK) { 154 default: 155 case 0: /* Invalid */ 156 return (3 << 8) | (1 << 2); 157 case 1: /* PDE, should not happen */ 158 case 3: /* Reserved */ 159 return (3 << 8) | (4 << 2); 160 case 2: /* L3 PTE */ 161 page_offset = 0; 162 } 163 full->lg_page_size = TARGET_PAGE_BITS; 164 break; 165 case 2: /* L2 PTE */ 166 page_offset = address & 0x3f000; 167 full->lg_page_size = 18; 168 } 169 break; 170 case 2: /* L1 PTE */ 171 page_offset = address & 0xfff000; 172 full->lg_page_size = 24; 173 break; 174 } 175 } 176 177 /* check access */ 178 access_perms = (pde & PTE_ACCESS_MASK) >> PTE_ACCESS_SHIFT; 179 error_code = access_table[*access_index][access_perms]; 180 if (error_code && !((env->mmuregs[0] & MMU_NF) && is_user)) { 181 return error_code; 182 } 183 184 /* update page modified and dirty bits */ 185 is_dirty = (rw & 1) && !(pde & PG_MODIFIED_MASK); 186 if (!(pde & PG_ACCESSED_MASK) || is_dirty) { 187 pde |= PG_ACCESSED_MASK; 188 if (is_dirty) { 189 pde |= PG_MODIFIED_MASK; 190 } 191 stl_phys_notdirty(cs->as, pde_ptr, pde); 192 } 193 194 /* the page can be put in the TLB */ 195 full->prot = perm_table[is_user][access_perms]; 196 if (!(pde & PG_MODIFIED_MASK)) { 197 /* only set write access if already dirty... otherwise wait 198 for dirty access */ 199 full->prot &= ~PAGE_WRITE; 200 } 201 202 /* Even if large ptes, we map only one 4KB page in the cache to 203 avoid filling it too fast */ 204 full->phys_addr = ((hwaddr)(pde & PTE_ADDR_MASK) << 4) + page_offset; 205 return error_code; 206 } 207 208 /* Perform address translation */ 209 bool sparc_cpu_tlb_fill(CPUState *cs, vaddr address, int size, 210 MMUAccessType access_type, int mmu_idx, 211 bool probe, uintptr_t retaddr) 212 { 213 CPUSPARCState *env = cpu_env(cs); 214 CPUTLBEntryFull full = {}; 215 target_ulong vaddr; 216 int error_code = 0, access_index; 217 218 /* 219 * TODO: If we ever need tlb_vaddr_to_host for this target, 220 * then we must figure out how to manipulate FSR and FAR 221 * when both MMU_NF and probe are set. In the meantime, 222 * do not support this use case. 223 */ 224 assert(!probe); 225 226 address &= TARGET_PAGE_MASK; 227 error_code = get_physical_address(env, &full, &access_index, 228 address, access_type, mmu_idx); 229 vaddr = address; 230 if (likely(error_code == 0)) { 231 qemu_log_mask(CPU_LOG_MMU, 232 "Translate at %" VADDR_PRIx " -> " 233 HWADDR_FMT_plx ", vaddr " TARGET_FMT_lx "\n", 234 address, full.phys_addr, vaddr); 235 tlb_set_page_full(cs, mmu_idx, vaddr, &full); 236 return true; 237 } 238 239 if (env->mmuregs[3]) { /* Fault status register */ 240 env->mmuregs[3] = 1; /* overflow (not read before another fault) */ 241 } 242 env->mmuregs[3] |= (access_index << 5) | error_code | 2; 243 env->mmuregs[4] = address; /* Fault address register */ 244 245 if ((env->mmuregs[0] & MMU_NF) || env->psret == 0) { 246 /* No fault mode: if a mapping is available, just override 247 permissions. If no mapping is available, redirect accesses to 248 neverland. Fake/overridden mappings will be flushed when 249 switching to normal mode. */ 250 full.prot = PAGE_READ | PAGE_WRITE | PAGE_EXEC; 251 tlb_set_page_full(cs, mmu_idx, vaddr, &full); 252 return true; 253 } else { 254 if (access_type == MMU_INST_FETCH) { 255 cs->exception_index = TT_TFAULT; 256 } else { 257 cs->exception_index = TT_DFAULT; 258 } 259 cpu_loop_exit_restore(cs, retaddr); 260 } 261 } 262 263 target_ulong mmu_probe(CPUSPARCState *env, target_ulong address, int mmulev) 264 { 265 CPUState *cs = env_cpu(env); 266 hwaddr pde_ptr; 267 uint32_t pde; 268 MemTxResult result; 269 270 /* 271 * TODO: MMU probe operations are supposed to set the fault 272 * status registers, but we don't do this. 273 */ 274 275 /* Context base + context number */ 276 pde_ptr = (hwaddr)(env->mmuregs[1] << 4) + 277 (env->mmuregs[2] << 2); 278 pde = address_space_ldl(cs->as, pde_ptr, MEMTXATTRS_UNSPECIFIED, &result); 279 if (result != MEMTX_OK) { 280 return 0; 281 } 282 283 switch (pde & PTE_ENTRYTYPE_MASK) { 284 default: 285 case 0: /* Invalid */ 286 case 2: /* PTE, maybe should not happen? */ 287 case 3: /* Reserved */ 288 return 0; 289 case 1: /* L1 PDE */ 290 if (mmulev == 3) { 291 return pde; 292 } 293 pde_ptr = ((address >> 22) & ~3) + ((pde & ~3) << 4); 294 pde = address_space_ldl(cs->as, pde_ptr, 295 MEMTXATTRS_UNSPECIFIED, &result); 296 if (result != MEMTX_OK) { 297 return 0; 298 } 299 300 switch (pde & PTE_ENTRYTYPE_MASK) { 301 default: 302 case 0: /* Invalid */ 303 case 3: /* Reserved */ 304 return 0; 305 case 2: /* L1 PTE */ 306 return pde; 307 case 1: /* L2 PDE */ 308 if (mmulev == 2) { 309 return pde; 310 } 311 pde_ptr = ((address & 0xfc0000) >> 16) + ((pde & ~3) << 4); 312 pde = address_space_ldl(cs->as, pde_ptr, 313 MEMTXATTRS_UNSPECIFIED, &result); 314 if (result != MEMTX_OK) { 315 return 0; 316 } 317 318 switch (pde & PTE_ENTRYTYPE_MASK) { 319 default: 320 case 0: /* Invalid */ 321 case 3: /* Reserved */ 322 return 0; 323 case 2: /* L2 PTE */ 324 return pde; 325 case 1: /* L3 PDE */ 326 if (mmulev == 1) { 327 return pde; 328 } 329 pde_ptr = ((address & 0x3f000) >> 10) + ((pde & ~3) << 4); 330 pde = address_space_ldl(cs->as, pde_ptr, 331 MEMTXATTRS_UNSPECIFIED, &result); 332 if (result != MEMTX_OK) { 333 return 0; 334 } 335 336 switch (pde & PTE_ENTRYTYPE_MASK) { 337 default: 338 case 0: /* Invalid */ 339 case 1: /* PDE, should not happen */ 340 case 3: /* Reserved */ 341 return 0; 342 case 2: /* L3 PTE */ 343 return pde; 344 } 345 } 346 } 347 } 348 return 0; 349 } 350 351 void dump_mmu(CPUSPARCState *env) 352 { 353 CPUState *cs = env_cpu(env); 354 target_ulong va, va1, va2; 355 unsigned int n, m, o; 356 hwaddr pa; 357 uint32_t pde; 358 359 qemu_printf("Root ptr: " HWADDR_FMT_plx ", ctx: %d\n", 360 (hwaddr)env->mmuregs[1] << 4, env->mmuregs[2]); 361 for (n = 0, va = 0; n < 256; n++, va += 16 * 1024 * 1024) { 362 pde = mmu_probe(env, va, 2); 363 if (pde) { 364 pa = cpu_get_phys_page_debug(cs, va); 365 qemu_printf("VA: " TARGET_FMT_lx ", PA: " HWADDR_FMT_plx 366 " PDE: " TARGET_FMT_lx "\n", va, pa, pde); 367 for (m = 0, va1 = va; m < 64; m++, va1 += 256 * 1024) { 368 pde = mmu_probe(env, va1, 1); 369 if (pde) { 370 pa = cpu_get_phys_page_debug(cs, va1); 371 qemu_printf(" VA: " TARGET_FMT_lx ", PA: " 372 HWADDR_FMT_plx " PDE: " TARGET_FMT_lx "\n", 373 va1, pa, pde); 374 for (o = 0, va2 = va1; o < 64; o++, va2 += 4 * 1024) { 375 pde = mmu_probe(env, va2, 0); 376 if (pde) { 377 pa = cpu_get_phys_page_debug(cs, va2); 378 qemu_printf(" VA: " TARGET_FMT_lx ", PA: " 379 HWADDR_FMT_plx " PTE: " 380 TARGET_FMT_lx "\n", 381 va2, pa, pde); 382 } 383 } 384 } 385 } 386 } 387 } 388 } 389 390 /* Gdb expects all registers windows to be flushed in ram. This function handles 391 * reads (and only reads) in stack frames as if windows were flushed. We assume 392 * that the sparc ABI is followed. 393 */ 394 int sparc_cpu_memory_rw_debug(CPUState *cs, vaddr address, 395 uint8_t *buf, size_t len, bool is_write) 396 { 397 CPUSPARCState *env = cpu_env(cs); 398 target_ulong addr = address; 399 int i; 400 int len1; 401 int cwp = env->cwp; 402 403 if (!is_write) { 404 for (i = 0; i < env->nwindows; i++) { 405 int off; 406 target_ulong fp = env->regbase[cwp * 16 + 22]; 407 408 /* Assume fp == 0 means end of frame. */ 409 if (fp == 0) { 410 break; 411 } 412 413 cwp = cpu_cwp_inc(env, cwp + 1); 414 415 /* Invalid window ? */ 416 if (env->wim & (1 << cwp)) { 417 break; 418 } 419 420 /* According to the ABI, the stack is growing downward. */ 421 if (addr + len < fp) { 422 break; 423 } 424 425 /* Not in this frame. */ 426 if (addr > fp + 64) { 427 continue; 428 } 429 430 /* Handle access before this window. */ 431 if (addr < fp) { 432 len1 = fp - addr; 433 if (cpu_memory_rw_debug(cs, addr, buf, len1, is_write) != 0) { 434 return -1; 435 } 436 addr += len1; 437 len -= len1; 438 buf += len1; 439 } 440 441 /* Access byte per byte to registers. Not very efficient but speed 442 * is not critical. 443 */ 444 off = addr - fp; 445 len1 = 64 - off; 446 447 if (len1 > len) { 448 len1 = len; 449 } 450 451 for (; len1; len1--) { 452 int reg = cwp * 16 + 8 + (off >> 2); 453 union { 454 uint32_t v; 455 uint8_t c[4]; 456 } u; 457 u.v = cpu_to_be32(env->regbase[reg]); 458 *buf++ = u.c[off & 3]; 459 addr++; 460 len--; 461 off++; 462 } 463 464 if (len == 0) { 465 return 0; 466 } 467 } 468 } 469 return cpu_memory_rw_debug(cs, addr, buf, len, is_write); 470 } 471 472 #else /* !TARGET_SPARC64 */ 473 474 /* 41 bit physical address space */ 475 static inline hwaddr ultrasparc_truncate_physical(uint64_t x) 476 { 477 return x & 0x1ffffffffffULL; 478 } 479 480 /* 481 * UltraSparc IIi I/DMMUs 482 */ 483 484 /* Returns true if TTE tag is valid and matches virtual address value 485 in context requires virtual address mask value calculated from TTE 486 entry size */ 487 static inline int ultrasparc_tag_match(SparcTLBEntry *tlb, 488 uint64_t address, uint64_t context, 489 hwaddr *physical) 490 { 491 uint64_t mask = -(8192ULL << 3 * TTE_PGSIZE(tlb->tte)); 492 493 /* valid, context match, virtual address match? */ 494 if (TTE_IS_VALID(tlb->tte) && 495 (TTE_IS_GLOBAL(tlb->tte) || tlb_compare_context(tlb, context)) 496 && compare_masked(address, tlb->tag, mask)) { 497 /* decode physical address */ 498 *physical = ((tlb->tte & mask) | (address & ~mask)) & 0x1ffffffe000ULL; 499 return 1; 500 } 501 502 return 0; 503 } 504 505 static uint64_t build_sfsr(CPUSPARCState *env, int mmu_idx, int rw) 506 { 507 uint64_t sfsr = SFSR_VALID_BIT; 508 509 switch (mmu_idx) { 510 case MMU_PHYS_IDX: 511 sfsr |= SFSR_CT_NOTRANS; 512 break; 513 case MMU_USER_IDX: 514 case MMU_KERNEL_IDX: 515 sfsr |= SFSR_CT_PRIMARY; 516 break; 517 case MMU_USER_SECONDARY_IDX: 518 case MMU_KERNEL_SECONDARY_IDX: 519 sfsr |= SFSR_CT_SECONDARY; 520 break; 521 case MMU_NUCLEUS_IDX: 522 sfsr |= SFSR_CT_NUCLEUS; 523 break; 524 default: 525 g_assert_not_reached(); 526 } 527 528 if (rw == 1) { 529 sfsr |= SFSR_WRITE_BIT; 530 } else if (rw == 4) { 531 sfsr |= SFSR_NF_BIT; 532 } 533 534 if (env->pstate & PS_PRIV) { 535 sfsr |= SFSR_PR_BIT; 536 } 537 538 if (env->dmmu.sfsr & SFSR_VALID_BIT) { /* Fault status register */ 539 sfsr |= SFSR_OW_BIT; /* overflow (not read before another fault) */ 540 } 541 542 /* FIXME: ASI field in SFSR must be set */ 543 544 return sfsr; 545 } 546 547 static int get_physical_address_data(CPUSPARCState *env, CPUTLBEntryFull *full, 548 target_ulong address, int rw, int mmu_idx) 549 { 550 CPUState *cs = env_cpu(env); 551 unsigned int i; 552 uint64_t sfsr; 553 uint64_t context; 554 bool is_user = false; 555 556 sfsr = build_sfsr(env, mmu_idx, rw); 557 558 switch (mmu_idx) { 559 case MMU_PHYS_IDX: 560 g_assert_not_reached(); 561 case MMU_USER_IDX: 562 is_user = true; 563 /* fallthru */ 564 case MMU_KERNEL_IDX: 565 context = env->dmmu.mmu_primary_context & 0x1fff; 566 break; 567 case MMU_USER_SECONDARY_IDX: 568 is_user = true; 569 /* fallthru */ 570 case MMU_KERNEL_SECONDARY_IDX: 571 context = env->dmmu.mmu_secondary_context & 0x1fff; 572 break; 573 default: 574 context = 0; 575 break; 576 } 577 578 for (i = 0; i < 64; i++) { 579 /* ctx match, vaddr match, valid? */ 580 if (ultrasparc_tag_match(&env->dtlb[i], address, context, 581 &full->phys_addr)) { 582 int do_fault = 0; 583 584 if (TTE_IS_IE(env->dtlb[i].tte)) { 585 full->tlb_fill_flags |= TLB_BSWAP; 586 } 587 588 /* access ok? */ 589 /* multiple bits in SFSR.FT may be set on TT_DFAULT */ 590 if (TTE_IS_PRIV(env->dtlb[i].tte) && is_user) { 591 do_fault = 1; 592 sfsr |= SFSR_FT_PRIV_BIT; /* privilege violation */ 593 trace_mmu_helper_dfault(address, context, mmu_idx, env->tl); 594 } 595 if (rw == 4) { 596 if (TTE_IS_SIDEEFFECT(env->dtlb[i].tte)) { 597 do_fault = 1; 598 sfsr |= SFSR_FT_NF_E_BIT; 599 } 600 } else { 601 if (TTE_IS_NFO(env->dtlb[i].tte)) { 602 do_fault = 1; 603 sfsr |= SFSR_FT_NFO_BIT; 604 } 605 } 606 607 if (do_fault) { 608 /* faults above are reported with TT_DFAULT. */ 609 cs->exception_index = TT_DFAULT; 610 } else if (!TTE_IS_W_OK(env->dtlb[i].tte) && (rw == 1)) { 611 do_fault = 1; 612 cs->exception_index = TT_DPROT; 613 614 trace_mmu_helper_dprot(address, context, mmu_idx, env->tl); 615 } 616 617 if (!do_fault) { 618 full->prot = PAGE_READ; 619 if (TTE_IS_W_OK(env->dtlb[i].tte)) { 620 full->prot |= PAGE_WRITE; 621 } 622 623 TTE_SET_USED(env->dtlb[i].tte); 624 625 return 0; 626 } 627 628 env->dmmu.sfsr = sfsr; 629 env->dmmu.sfar = address; /* Fault address register */ 630 env->dmmu.tag_access = (address & ~0x1fffULL) | context; 631 return 1; 632 } 633 } 634 635 trace_mmu_helper_dmiss(address, context); 636 637 /* 638 * On MMU misses: 639 * - UltraSPARC IIi: SFSR and SFAR unmodified 640 * - JPS1: SFAR updated and some fields of SFSR updated 641 */ 642 env->dmmu.tag_access = (address & ~0x1fffULL) | context; 643 cs->exception_index = TT_DMISS; 644 return 1; 645 } 646 647 static int get_physical_address_code(CPUSPARCState *env, CPUTLBEntryFull *full, 648 target_ulong address, int mmu_idx) 649 { 650 CPUState *cs = env_cpu(env); 651 unsigned int i; 652 uint64_t context; 653 bool is_user = false; 654 655 switch (mmu_idx) { 656 case MMU_PHYS_IDX: 657 case MMU_USER_SECONDARY_IDX: 658 case MMU_KERNEL_SECONDARY_IDX: 659 g_assert_not_reached(); 660 case MMU_USER_IDX: 661 is_user = true; 662 /* fallthru */ 663 case MMU_KERNEL_IDX: 664 context = env->dmmu.mmu_primary_context & 0x1fff; 665 break; 666 default: 667 context = 0; 668 break; 669 } 670 671 if (env->tl == 0) { 672 /* PRIMARY context */ 673 context = env->dmmu.mmu_primary_context & 0x1fff; 674 } else { 675 /* NUCLEUS context */ 676 context = 0; 677 } 678 679 for (i = 0; i < 64; i++) { 680 /* ctx match, vaddr match, valid? */ 681 if (ultrasparc_tag_match(&env->itlb[i], 682 address, context, &full->phys_addr)) { 683 /* access ok? */ 684 if (TTE_IS_PRIV(env->itlb[i].tte) && is_user) { 685 /* Fault status register */ 686 if (env->immu.sfsr & SFSR_VALID_BIT) { 687 env->immu.sfsr = SFSR_OW_BIT; /* overflow (not read before 688 another fault) */ 689 } else { 690 env->immu.sfsr = 0; 691 } 692 if (env->pstate & PS_PRIV) { 693 env->immu.sfsr |= SFSR_PR_BIT; 694 } 695 if (env->tl > 0) { 696 env->immu.sfsr |= SFSR_CT_NUCLEUS; 697 } 698 699 /* FIXME: ASI field in SFSR must be set */ 700 env->immu.sfsr |= SFSR_FT_PRIV_BIT | SFSR_VALID_BIT; 701 cs->exception_index = TT_TFAULT; 702 703 env->immu.tag_access = (address & ~0x1fffULL) | context; 704 705 trace_mmu_helper_tfault(address, context); 706 707 return 1; 708 } 709 full->prot = PAGE_EXEC; 710 TTE_SET_USED(env->itlb[i].tte); 711 return 0; 712 } 713 } 714 715 trace_mmu_helper_tmiss(address, context); 716 717 /* Context is stored in DMMU (dmmuregs[1]) also for IMMU */ 718 env->immu.tag_access = (address & ~0x1fffULL) | context; 719 cs->exception_index = TT_TMISS; 720 return 1; 721 } 722 723 static int get_physical_address(CPUSPARCState *env, CPUTLBEntryFull *full, 724 int *access_index, target_ulong address, 725 int rw, int mmu_idx) 726 { 727 /* ??? We treat everything as a small page, then explicitly flush 728 everything when an entry is evicted. */ 729 full->lg_page_size = TARGET_PAGE_BITS; 730 731 /* safety net to catch wrong softmmu index use from dynamic code */ 732 if (env->tl > 0 && mmu_idx != MMU_NUCLEUS_IDX) { 733 if (rw == 2) { 734 trace_mmu_helper_get_phys_addr_code(env->tl, mmu_idx, 735 env->dmmu.mmu_primary_context, 736 env->dmmu.mmu_secondary_context, 737 address); 738 } else { 739 trace_mmu_helper_get_phys_addr_data(env->tl, mmu_idx, 740 env->dmmu.mmu_primary_context, 741 env->dmmu.mmu_secondary_context, 742 address); 743 } 744 } 745 746 if (mmu_idx == MMU_PHYS_IDX) { 747 full->phys_addr = ultrasparc_truncate_physical(address); 748 full->prot = PAGE_READ | PAGE_WRITE | PAGE_EXEC; 749 return 0; 750 } 751 752 if (rw == 2) { 753 return get_physical_address_code(env, full, address, mmu_idx); 754 } else { 755 return get_physical_address_data(env, full, address, rw, mmu_idx); 756 } 757 } 758 759 /* Perform address translation */ 760 bool sparc_cpu_tlb_fill(CPUState *cs, vaddr address, int size, 761 MMUAccessType access_type, int mmu_idx, 762 bool probe, uintptr_t retaddr) 763 { 764 CPUSPARCState *env = cpu_env(cs); 765 CPUTLBEntryFull full = {}; 766 int error_code = 0, access_index; 767 768 address &= TARGET_PAGE_MASK; 769 error_code = get_physical_address(env, &full, &access_index, 770 address, access_type, mmu_idx); 771 if (likely(error_code == 0)) { 772 trace_mmu_helper_mmu_fault(address, full.phys_addr, mmu_idx, env->tl, 773 env->dmmu.mmu_primary_context, 774 env->dmmu.mmu_secondary_context); 775 tlb_set_page_full(cs, mmu_idx, address, &full); 776 return true; 777 } 778 if (probe) { 779 return false; 780 } 781 cpu_loop_exit_restore(cs, retaddr); 782 } 783 784 void dump_mmu(CPUSPARCState *env) 785 { 786 unsigned int i; 787 const char *mask; 788 789 qemu_printf("MMU contexts: Primary: %" PRId64 ", Secondary: %" 790 PRId64 "\n", 791 env->dmmu.mmu_primary_context, 792 env->dmmu.mmu_secondary_context); 793 qemu_printf("DMMU Tag Access: %" PRIx64 ", TSB Tag Target: %" PRIx64 794 "\n", env->dmmu.tag_access, env->dmmu.tsb_tag_target); 795 if ((env->lsu & DMMU_E) == 0) { 796 qemu_printf("DMMU disabled\n"); 797 } else { 798 qemu_printf("DMMU dump\n"); 799 for (i = 0; i < 64; i++) { 800 switch (TTE_PGSIZE(env->dtlb[i].tte)) { 801 default: 802 case 0x0: 803 mask = " 8k"; 804 break; 805 case 0x1: 806 mask = " 64k"; 807 break; 808 case 0x2: 809 mask = "512k"; 810 break; 811 case 0x3: 812 mask = " 4M"; 813 break; 814 } 815 if (TTE_IS_VALID(env->dtlb[i].tte)) { 816 qemu_printf("[%02u] VA: %" PRIx64 ", PA: %llx" 817 ", %s, %s, %s, %s, ie %s, ctx %" PRId64 " %s\n", 818 i, 819 env->dtlb[i].tag & (uint64_t)~0x1fffULL, 820 TTE_PA(env->dtlb[i].tte), 821 mask, 822 TTE_IS_PRIV(env->dtlb[i].tte) ? "priv" : "user", 823 TTE_IS_W_OK(env->dtlb[i].tte) ? "RW" : "RO", 824 TTE_IS_LOCKED(env->dtlb[i].tte) ? 825 "locked" : "unlocked", 826 TTE_IS_IE(env->dtlb[i].tte) ? 827 "yes" : "no", 828 env->dtlb[i].tag & (uint64_t)0x1fffULL, 829 TTE_IS_GLOBAL(env->dtlb[i].tte) ? 830 "global" : "local"); 831 } 832 } 833 } 834 if ((env->lsu & IMMU_E) == 0) { 835 qemu_printf("IMMU disabled\n"); 836 } else { 837 qemu_printf("IMMU dump\n"); 838 for (i = 0; i < 64; i++) { 839 switch (TTE_PGSIZE(env->itlb[i].tte)) { 840 default: 841 case 0x0: 842 mask = " 8k"; 843 break; 844 case 0x1: 845 mask = " 64k"; 846 break; 847 case 0x2: 848 mask = "512k"; 849 break; 850 case 0x3: 851 mask = " 4M"; 852 break; 853 } 854 if (TTE_IS_VALID(env->itlb[i].tte)) { 855 qemu_printf("[%02u] VA: %" PRIx64 ", PA: %llx" 856 ", %s, %s, %s, ctx %" PRId64 " %s\n", 857 i, 858 env->itlb[i].tag & (uint64_t)~0x1fffULL, 859 TTE_PA(env->itlb[i].tte), 860 mask, 861 TTE_IS_PRIV(env->itlb[i].tte) ? "priv" : "user", 862 TTE_IS_LOCKED(env->itlb[i].tte) ? 863 "locked" : "unlocked", 864 env->itlb[i].tag & (uint64_t)0x1fffULL, 865 TTE_IS_GLOBAL(env->itlb[i].tte) ? 866 "global" : "local"); 867 } 868 } 869 } 870 } 871 872 #endif /* TARGET_SPARC64 */ 873 874 static int cpu_sparc_get_phys_page(CPUSPARCState *env, hwaddr *phys, 875 target_ulong addr, int rw, int mmu_idx) 876 { 877 CPUTLBEntryFull full = {}; 878 int access_index, ret; 879 880 ret = get_physical_address(env, &full, &access_index, addr, rw, mmu_idx); 881 if (ret == 0) { 882 *phys = full.phys_addr; 883 } 884 return ret; 885 } 886 887 #if defined(TARGET_SPARC64) 888 hwaddr cpu_get_phys_page_nofault(CPUSPARCState *env, target_ulong addr, 889 int mmu_idx) 890 { 891 hwaddr phys_addr; 892 893 if (cpu_sparc_get_phys_page(env, &phys_addr, addr, 4, mmu_idx) != 0) { 894 return -1; 895 } 896 return phys_addr; 897 } 898 #endif 899 900 hwaddr sparc_cpu_get_phys_page_debug(CPUState *cs, vaddr addr) 901 { 902 CPUSPARCState *env = cpu_env(cs); 903 hwaddr phys_addr; 904 int mmu_idx = cpu_mmu_index(cs, false); 905 906 if (cpu_sparc_get_phys_page(env, &phys_addr, addr, 2, mmu_idx) != 0) { 907 if (cpu_sparc_get_phys_page(env, &phys_addr, addr, 0, mmu_idx) != 0) { 908 return -1; 909 } 910 } 911 return phys_addr; 912 } 913 914 G_NORETURN void sparc_cpu_do_unaligned_access(CPUState *cs, vaddr addr, 915 MMUAccessType access_type, 916 int mmu_idx, 917 uintptr_t retaddr) 918 { 919 CPUSPARCState *env = cpu_env(cs); 920 921 #ifdef TARGET_SPARC64 922 env->dmmu.sfsr = build_sfsr(env, mmu_idx, access_type); 923 env->dmmu.sfar = addr; 924 #else 925 env->mmuregs[4] = addr; 926 #endif 927 928 cpu_raise_exception_ra(env, TT_UNALIGNED, retaddr); 929 } 930