1 /* 2 * Sparc MMU helpers 3 * 4 * Copyright (c) 2003-2005 Fabrice Bellard 5 * 6 * This library is free software; you can redistribute it and/or 7 * modify it under the terms of the GNU Lesser General Public 8 * License as published by the Free Software Foundation; either 9 * version 2.1 of the License, or (at your option) any later version. 10 * 11 * This library is distributed in the hope that it will be useful, 12 * but WITHOUT ANY WARRANTY; without even the implied warranty of 13 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU 14 * Lesser General Public License for more details. 15 * 16 * You should have received a copy of the GNU Lesser General Public 17 * License along with this library; if not, see <http://www.gnu.org/licenses/>. 18 */ 19 20 #include "qemu/osdep.h" 21 #include "cpu.h" 22 #include "exec/exec-all.h" 23 #include "qemu/qemu-print.h" 24 #include "trace.h" 25 26 /* Sparc MMU emulation */ 27 28 #ifndef TARGET_SPARC64 29 /* 30 * Sparc V8 Reference MMU (SRMMU) 31 */ 32 static const int access_table[8][8] = { 33 { 0, 0, 0, 0, 8, 0, 12, 12 }, 34 { 0, 0, 0, 0, 8, 0, 0, 0 }, 35 { 8, 8, 0, 0, 0, 8, 12, 12 }, 36 { 8, 8, 0, 0, 0, 8, 0, 0 }, 37 { 8, 0, 8, 0, 8, 8, 12, 12 }, 38 { 8, 0, 8, 0, 8, 0, 8, 0 }, 39 { 8, 8, 8, 0, 8, 8, 12, 12 }, 40 { 8, 8, 8, 0, 8, 8, 8, 0 } 41 }; 42 43 static const int perm_table[2][8] = { 44 { 45 PAGE_READ, 46 PAGE_READ | PAGE_WRITE, 47 PAGE_READ | PAGE_EXEC, 48 PAGE_READ | PAGE_WRITE | PAGE_EXEC, 49 PAGE_EXEC, 50 PAGE_READ | PAGE_WRITE, 51 PAGE_READ | PAGE_EXEC, 52 PAGE_READ | PAGE_WRITE | PAGE_EXEC 53 }, 54 { 55 PAGE_READ, 56 PAGE_READ | PAGE_WRITE, 57 PAGE_READ | PAGE_EXEC, 58 PAGE_READ | PAGE_WRITE | PAGE_EXEC, 59 PAGE_EXEC, 60 PAGE_READ, 61 0, 62 0, 63 } 64 }; 65 66 static int get_physical_address(CPUSPARCState *env, hwaddr *physical, 67 int *prot, int *access_index, MemTxAttrs *attrs, 68 target_ulong address, int rw, int mmu_idx, 69 target_ulong *page_size) 70 { 71 int access_perms = 0; 72 hwaddr pde_ptr; 73 uint32_t pde; 74 int error_code = 0, is_dirty, is_user; 75 unsigned long page_offset; 76 CPUState *cs = env_cpu(env); 77 MemTxResult result; 78 79 is_user = mmu_idx == MMU_USER_IDX; 80 81 if (mmu_idx == MMU_PHYS_IDX) { 82 *page_size = TARGET_PAGE_SIZE; 83 /* Boot mode: instruction fetches are taken from PROM */ 84 if (rw == 2 && (env->mmuregs[0] & env->def.mmu_bm)) { 85 *physical = env->prom_addr | (address & 0x7ffffULL); 86 *prot = PAGE_READ | PAGE_EXEC; 87 return 0; 88 } 89 *physical = address; 90 *prot = PAGE_READ | PAGE_WRITE | PAGE_EXEC; 91 return 0; 92 } 93 94 *access_index = ((rw & 1) << 2) | (rw & 2) | (is_user ? 0 : 1); 95 *physical = 0xffffffffffff0000ULL; 96 97 /* SPARC reference MMU table walk: Context table->L1->L2->PTE */ 98 /* Context base + context number */ 99 pde_ptr = (env->mmuregs[1] << 4) + (env->mmuregs[2] << 2); 100 pde = address_space_ldl(cs->as, pde_ptr, MEMTXATTRS_UNSPECIFIED, &result); 101 if (result != MEMTX_OK) { 102 return 4 << 2; /* Translation fault, L = 0 */ 103 } 104 105 /* Ctx pde */ 106 switch (pde & PTE_ENTRYTYPE_MASK) { 107 default: 108 case 0: /* Invalid */ 109 return 1 << 2; 110 case 2: /* L0 PTE, maybe should not happen? */ 111 case 3: /* Reserved */ 112 return 4 << 2; 113 case 1: /* L0 PDE */ 114 pde_ptr = ((address >> 22) & ~3) + ((pde & ~3) << 4); 115 pde = address_space_ldl(cs->as, pde_ptr, 116 MEMTXATTRS_UNSPECIFIED, &result); 117 if (result != MEMTX_OK) { 118 return (1 << 8) | (4 << 2); /* Translation fault, L = 1 */ 119 } 120 121 switch (pde & PTE_ENTRYTYPE_MASK) { 122 default: 123 case 0: /* Invalid */ 124 return (1 << 8) | (1 << 2); 125 case 3: /* Reserved */ 126 return (1 << 8) | (4 << 2); 127 case 1: /* L1 PDE */ 128 pde_ptr = ((address & 0xfc0000) >> 16) + ((pde & ~3) << 4); 129 pde = address_space_ldl(cs->as, pde_ptr, 130 MEMTXATTRS_UNSPECIFIED, &result); 131 if (result != MEMTX_OK) { 132 return (2 << 8) | (4 << 2); /* Translation fault, L = 2 */ 133 } 134 135 switch (pde & PTE_ENTRYTYPE_MASK) { 136 default: 137 case 0: /* Invalid */ 138 return (2 << 8) | (1 << 2); 139 case 3: /* Reserved */ 140 return (2 << 8) | (4 << 2); 141 case 1: /* L2 PDE */ 142 pde_ptr = ((address & 0x3f000) >> 10) + ((pde & ~3) << 4); 143 pde = address_space_ldl(cs->as, pde_ptr, 144 MEMTXATTRS_UNSPECIFIED, &result); 145 if (result != MEMTX_OK) { 146 return (3 << 8) | (4 << 2); /* Translation fault, L = 3 */ 147 } 148 149 switch (pde & PTE_ENTRYTYPE_MASK) { 150 default: 151 case 0: /* Invalid */ 152 return (3 << 8) | (1 << 2); 153 case 1: /* PDE, should not happen */ 154 case 3: /* Reserved */ 155 return (3 << 8) | (4 << 2); 156 case 2: /* L3 PTE */ 157 page_offset = 0; 158 } 159 *page_size = TARGET_PAGE_SIZE; 160 break; 161 case 2: /* L2 PTE */ 162 page_offset = address & 0x3f000; 163 *page_size = 0x40000; 164 } 165 break; 166 case 2: /* L1 PTE */ 167 page_offset = address & 0xfff000; 168 *page_size = 0x1000000; 169 } 170 } 171 172 /* check access */ 173 access_perms = (pde & PTE_ACCESS_MASK) >> PTE_ACCESS_SHIFT; 174 error_code = access_table[*access_index][access_perms]; 175 if (error_code && !((env->mmuregs[0] & MMU_NF) && is_user)) { 176 return error_code; 177 } 178 179 /* update page modified and dirty bits */ 180 is_dirty = (rw & 1) && !(pde & PG_MODIFIED_MASK); 181 if (!(pde & PG_ACCESSED_MASK) || is_dirty) { 182 pde |= PG_ACCESSED_MASK; 183 if (is_dirty) { 184 pde |= PG_MODIFIED_MASK; 185 } 186 stl_phys_notdirty(cs->as, pde_ptr, pde); 187 } 188 189 /* the page can be put in the TLB */ 190 *prot = perm_table[is_user][access_perms]; 191 if (!(pde & PG_MODIFIED_MASK)) { 192 /* only set write access if already dirty... otherwise wait 193 for dirty access */ 194 *prot &= ~PAGE_WRITE; 195 } 196 197 /* Even if large ptes, we map only one 4KB page in the cache to 198 avoid filling it too fast */ 199 *physical = ((hwaddr)(pde & PTE_ADDR_MASK) << 4) + page_offset; 200 return error_code; 201 } 202 203 /* Perform address translation */ 204 bool sparc_cpu_tlb_fill(CPUState *cs, vaddr address, int size, 205 MMUAccessType access_type, int mmu_idx, 206 bool probe, uintptr_t retaddr) 207 { 208 SPARCCPU *cpu = SPARC_CPU(cs); 209 CPUSPARCState *env = &cpu->env; 210 hwaddr paddr; 211 target_ulong vaddr; 212 target_ulong page_size; 213 int error_code = 0, prot, access_index; 214 MemTxAttrs attrs = {}; 215 216 /* 217 * TODO: If we ever need tlb_vaddr_to_host for this target, 218 * then we must figure out how to manipulate FSR and FAR 219 * when both MMU_NF and probe are set. In the meantime, 220 * do not support this use case. 221 */ 222 assert(!probe); 223 224 address &= TARGET_PAGE_MASK; 225 error_code = get_physical_address(env, &paddr, &prot, &access_index, &attrs, 226 address, access_type, 227 mmu_idx, &page_size); 228 vaddr = address; 229 if (likely(error_code == 0)) { 230 qemu_log_mask(CPU_LOG_MMU, 231 "Translate at %" VADDR_PRIx " -> " 232 TARGET_FMT_plx ", vaddr " TARGET_FMT_lx "\n", 233 address, paddr, vaddr); 234 tlb_set_page(cs, vaddr, paddr, prot, mmu_idx, page_size); 235 return true; 236 } 237 238 if (env->mmuregs[3]) { /* Fault status register */ 239 env->mmuregs[3] = 1; /* overflow (not read before another fault) */ 240 } 241 env->mmuregs[3] |= (access_index << 5) | error_code | 2; 242 env->mmuregs[4] = address; /* Fault address register */ 243 244 if ((env->mmuregs[0] & MMU_NF) || env->psret == 0) { 245 /* No fault mode: if a mapping is available, just override 246 permissions. If no mapping is available, redirect accesses to 247 neverland. Fake/overridden mappings will be flushed when 248 switching to normal mode. */ 249 prot = PAGE_READ | PAGE_WRITE | PAGE_EXEC; 250 tlb_set_page(cs, vaddr, paddr, prot, mmu_idx, TARGET_PAGE_SIZE); 251 return true; 252 } else { 253 if (access_type == MMU_INST_FETCH) { 254 cs->exception_index = TT_TFAULT; 255 } else { 256 cs->exception_index = TT_DFAULT; 257 } 258 cpu_loop_exit_restore(cs, retaddr); 259 } 260 } 261 262 target_ulong mmu_probe(CPUSPARCState *env, target_ulong address, int mmulev) 263 { 264 CPUState *cs = env_cpu(env); 265 hwaddr pde_ptr; 266 uint32_t pde; 267 MemTxResult result; 268 269 /* 270 * TODO: MMU probe operations are supposed to set the fault 271 * status registers, but we don't do this. 272 */ 273 274 /* Context base + context number */ 275 pde_ptr = (hwaddr)(env->mmuregs[1] << 4) + 276 (env->mmuregs[2] << 2); 277 pde = address_space_ldl(cs->as, pde_ptr, MEMTXATTRS_UNSPECIFIED, &result); 278 if (result != MEMTX_OK) { 279 return 0; 280 } 281 282 switch (pde & PTE_ENTRYTYPE_MASK) { 283 default: 284 case 0: /* Invalid */ 285 case 2: /* PTE, maybe should not happen? */ 286 case 3: /* Reserved */ 287 return 0; 288 case 1: /* L1 PDE */ 289 if (mmulev == 3) { 290 return pde; 291 } 292 pde_ptr = ((address >> 22) & ~3) + ((pde & ~3) << 4); 293 pde = address_space_ldl(cs->as, pde_ptr, 294 MEMTXATTRS_UNSPECIFIED, &result); 295 if (result != MEMTX_OK) { 296 return 0; 297 } 298 299 switch (pde & PTE_ENTRYTYPE_MASK) { 300 default: 301 case 0: /* Invalid */ 302 case 3: /* Reserved */ 303 return 0; 304 case 2: /* L1 PTE */ 305 return pde; 306 case 1: /* L2 PDE */ 307 if (mmulev == 2) { 308 return pde; 309 } 310 pde_ptr = ((address & 0xfc0000) >> 16) + ((pde & ~3) << 4); 311 pde = address_space_ldl(cs->as, pde_ptr, 312 MEMTXATTRS_UNSPECIFIED, &result); 313 if (result != MEMTX_OK) { 314 return 0; 315 } 316 317 switch (pde & PTE_ENTRYTYPE_MASK) { 318 default: 319 case 0: /* Invalid */ 320 case 3: /* Reserved */ 321 return 0; 322 case 2: /* L2 PTE */ 323 return pde; 324 case 1: /* L3 PDE */ 325 if (mmulev == 1) { 326 return pde; 327 } 328 pde_ptr = ((address & 0x3f000) >> 10) + ((pde & ~3) << 4); 329 pde = address_space_ldl(cs->as, pde_ptr, 330 MEMTXATTRS_UNSPECIFIED, &result); 331 if (result != MEMTX_OK) { 332 return 0; 333 } 334 335 switch (pde & PTE_ENTRYTYPE_MASK) { 336 default: 337 case 0: /* Invalid */ 338 case 1: /* PDE, should not happen */ 339 case 3: /* Reserved */ 340 return 0; 341 case 2: /* L3 PTE */ 342 return pde; 343 } 344 } 345 } 346 } 347 return 0; 348 } 349 350 void dump_mmu(CPUSPARCState *env) 351 { 352 CPUState *cs = env_cpu(env); 353 target_ulong va, va1, va2; 354 unsigned int n, m, o; 355 hwaddr pa; 356 uint32_t pde; 357 358 qemu_printf("Root ptr: " TARGET_FMT_plx ", ctx: %d\n", 359 (hwaddr)env->mmuregs[1] << 4, env->mmuregs[2]); 360 for (n = 0, va = 0; n < 256; n++, va += 16 * 1024 * 1024) { 361 pde = mmu_probe(env, va, 2); 362 if (pde) { 363 pa = cpu_get_phys_page_debug(cs, va); 364 qemu_printf("VA: " TARGET_FMT_lx ", PA: " TARGET_FMT_plx 365 " PDE: " TARGET_FMT_lx "\n", va, pa, pde); 366 for (m = 0, va1 = va; m < 64; m++, va1 += 256 * 1024) { 367 pde = mmu_probe(env, va1, 1); 368 if (pde) { 369 pa = cpu_get_phys_page_debug(cs, va1); 370 qemu_printf(" VA: " TARGET_FMT_lx ", PA: " 371 TARGET_FMT_plx " PDE: " TARGET_FMT_lx "\n", 372 va1, pa, pde); 373 for (o = 0, va2 = va1; o < 64; o++, va2 += 4 * 1024) { 374 pde = mmu_probe(env, va2, 0); 375 if (pde) { 376 pa = cpu_get_phys_page_debug(cs, va2); 377 qemu_printf(" VA: " TARGET_FMT_lx ", PA: " 378 TARGET_FMT_plx " PTE: " 379 TARGET_FMT_lx "\n", 380 va2, pa, pde); 381 } 382 } 383 } 384 } 385 } 386 } 387 } 388 389 /* Gdb expects all registers windows to be flushed in ram. This function handles 390 * reads (and only reads) in stack frames as if windows were flushed. We assume 391 * that the sparc ABI is followed. 392 */ 393 int sparc_cpu_memory_rw_debug(CPUState *cs, vaddr address, 394 uint8_t *buf, int len, bool is_write) 395 { 396 SPARCCPU *cpu = SPARC_CPU(cs); 397 CPUSPARCState *env = &cpu->env; 398 target_ulong addr = address; 399 int i; 400 int len1; 401 int cwp = env->cwp; 402 403 if (!is_write) { 404 for (i = 0; i < env->nwindows; i++) { 405 int off; 406 target_ulong fp = env->regbase[cwp * 16 + 22]; 407 408 /* Assume fp == 0 means end of frame. */ 409 if (fp == 0) { 410 break; 411 } 412 413 cwp = cpu_cwp_inc(env, cwp + 1); 414 415 /* Invalid window ? */ 416 if (env->wim & (1 << cwp)) { 417 break; 418 } 419 420 /* According to the ABI, the stack is growing downward. */ 421 if (addr + len < fp) { 422 break; 423 } 424 425 /* Not in this frame. */ 426 if (addr > fp + 64) { 427 continue; 428 } 429 430 /* Handle access before this window. */ 431 if (addr < fp) { 432 len1 = fp - addr; 433 if (cpu_memory_rw_debug(cs, addr, buf, len1, is_write) != 0) { 434 return -1; 435 } 436 addr += len1; 437 len -= len1; 438 buf += len1; 439 } 440 441 /* Access byte per byte to registers. Not very efficient but speed 442 * is not critical. 443 */ 444 off = addr - fp; 445 len1 = 64 - off; 446 447 if (len1 > len) { 448 len1 = len; 449 } 450 451 for (; len1; len1--) { 452 int reg = cwp * 16 + 8 + (off >> 2); 453 union { 454 uint32_t v; 455 uint8_t c[4]; 456 } u; 457 u.v = cpu_to_be32(env->regbase[reg]); 458 *buf++ = u.c[off & 3]; 459 addr++; 460 len--; 461 off++; 462 } 463 464 if (len == 0) { 465 return 0; 466 } 467 } 468 } 469 return cpu_memory_rw_debug(cs, addr, buf, len, is_write); 470 } 471 472 #else /* !TARGET_SPARC64 */ 473 474 /* 41 bit physical address space */ 475 static inline hwaddr ultrasparc_truncate_physical(uint64_t x) 476 { 477 return x & 0x1ffffffffffULL; 478 } 479 480 /* 481 * UltraSparc IIi I/DMMUs 482 */ 483 484 /* Returns true if TTE tag is valid and matches virtual address value 485 in context requires virtual address mask value calculated from TTE 486 entry size */ 487 static inline int ultrasparc_tag_match(SparcTLBEntry *tlb, 488 uint64_t address, uint64_t context, 489 hwaddr *physical) 490 { 491 uint64_t mask = -(8192ULL << 3 * TTE_PGSIZE(tlb->tte)); 492 493 /* valid, context match, virtual address match? */ 494 if (TTE_IS_VALID(tlb->tte) && 495 (TTE_IS_GLOBAL(tlb->tte) || tlb_compare_context(tlb, context)) 496 && compare_masked(address, tlb->tag, mask)) { 497 /* decode physical address */ 498 *physical = ((tlb->tte & mask) | (address & ~mask)) & 0x1ffffffe000ULL; 499 return 1; 500 } 501 502 return 0; 503 } 504 505 static uint64_t build_sfsr(CPUSPARCState *env, int mmu_idx, int rw) 506 { 507 uint64_t sfsr = SFSR_VALID_BIT; 508 509 switch (mmu_idx) { 510 case MMU_PHYS_IDX: 511 sfsr |= SFSR_CT_NOTRANS; 512 break; 513 case MMU_USER_IDX: 514 case MMU_KERNEL_IDX: 515 sfsr |= SFSR_CT_PRIMARY; 516 break; 517 case MMU_USER_SECONDARY_IDX: 518 case MMU_KERNEL_SECONDARY_IDX: 519 sfsr |= SFSR_CT_SECONDARY; 520 break; 521 case MMU_NUCLEUS_IDX: 522 sfsr |= SFSR_CT_NUCLEUS; 523 break; 524 default: 525 g_assert_not_reached(); 526 } 527 528 if (rw == 1) { 529 sfsr |= SFSR_WRITE_BIT; 530 } else if (rw == 4) { 531 sfsr |= SFSR_NF_BIT; 532 } 533 534 if (env->pstate & PS_PRIV) { 535 sfsr |= SFSR_PR_BIT; 536 } 537 538 if (env->dmmu.sfsr & SFSR_VALID_BIT) { /* Fault status register */ 539 sfsr |= SFSR_OW_BIT; /* overflow (not read before another fault) */ 540 } 541 542 /* FIXME: ASI field in SFSR must be set */ 543 544 return sfsr; 545 } 546 547 static int get_physical_address_data(CPUSPARCState *env, hwaddr *physical, 548 int *prot, MemTxAttrs *attrs, 549 target_ulong address, int rw, int mmu_idx) 550 { 551 CPUState *cs = env_cpu(env); 552 unsigned int i; 553 uint64_t sfsr; 554 uint64_t context; 555 bool is_user = false; 556 557 sfsr = build_sfsr(env, mmu_idx, rw); 558 559 switch (mmu_idx) { 560 case MMU_PHYS_IDX: 561 g_assert_not_reached(); 562 case MMU_USER_IDX: 563 is_user = true; 564 /* fallthru */ 565 case MMU_KERNEL_IDX: 566 context = env->dmmu.mmu_primary_context & 0x1fff; 567 break; 568 case MMU_USER_SECONDARY_IDX: 569 is_user = true; 570 /* fallthru */ 571 case MMU_KERNEL_SECONDARY_IDX: 572 context = env->dmmu.mmu_secondary_context & 0x1fff; 573 break; 574 default: 575 context = 0; 576 break; 577 } 578 579 for (i = 0; i < 64; i++) { 580 /* ctx match, vaddr match, valid? */ 581 if (ultrasparc_tag_match(&env->dtlb[i], address, context, physical)) { 582 int do_fault = 0; 583 584 if (TTE_IS_IE(env->dtlb[i].tte)) { 585 attrs->byte_swap = true; 586 } 587 588 /* access ok? */ 589 /* multiple bits in SFSR.FT may be set on TT_DFAULT */ 590 if (TTE_IS_PRIV(env->dtlb[i].tte) && is_user) { 591 do_fault = 1; 592 sfsr |= SFSR_FT_PRIV_BIT; /* privilege violation */ 593 trace_mmu_helper_dfault(address, context, mmu_idx, env->tl); 594 } 595 if (rw == 4) { 596 if (TTE_IS_SIDEEFFECT(env->dtlb[i].tte)) { 597 do_fault = 1; 598 sfsr |= SFSR_FT_NF_E_BIT; 599 } 600 } else { 601 if (TTE_IS_NFO(env->dtlb[i].tte)) { 602 do_fault = 1; 603 sfsr |= SFSR_FT_NFO_BIT; 604 } 605 } 606 607 if (do_fault) { 608 /* faults above are reported with TT_DFAULT. */ 609 cs->exception_index = TT_DFAULT; 610 } else if (!TTE_IS_W_OK(env->dtlb[i].tte) && (rw == 1)) { 611 do_fault = 1; 612 cs->exception_index = TT_DPROT; 613 614 trace_mmu_helper_dprot(address, context, mmu_idx, env->tl); 615 } 616 617 if (!do_fault) { 618 *prot = PAGE_READ; 619 if (TTE_IS_W_OK(env->dtlb[i].tte)) { 620 *prot |= PAGE_WRITE; 621 } 622 623 TTE_SET_USED(env->dtlb[i].tte); 624 625 return 0; 626 } 627 628 env->dmmu.sfsr = sfsr; 629 env->dmmu.sfar = address; /* Fault address register */ 630 env->dmmu.tag_access = (address & ~0x1fffULL) | context; 631 return 1; 632 } 633 } 634 635 trace_mmu_helper_dmiss(address, context); 636 637 /* 638 * On MMU misses: 639 * - UltraSPARC IIi: SFSR and SFAR unmodified 640 * - JPS1: SFAR updated and some fields of SFSR updated 641 */ 642 env->dmmu.tag_access = (address & ~0x1fffULL) | context; 643 cs->exception_index = TT_DMISS; 644 return 1; 645 } 646 647 static int get_physical_address_code(CPUSPARCState *env, hwaddr *physical, 648 int *prot, MemTxAttrs *attrs, 649 target_ulong address, int mmu_idx) 650 { 651 CPUState *cs = env_cpu(env); 652 unsigned int i; 653 uint64_t context; 654 bool is_user = false; 655 656 switch (mmu_idx) { 657 case MMU_PHYS_IDX: 658 case MMU_USER_SECONDARY_IDX: 659 case MMU_KERNEL_SECONDARY_IDX: 660 g_assert_not_reached(); 661 case MMU_USER_IDX: 662 is_user = true; 663 /* fallthru */ 664 case MMU_KERNEL_IDX: 665 context = env->dmmu.mmu_primary_context & 0x1fff; 666 break; 667 default: 668 context = 0; 669 break; 670 } 671 672 if (env->tl == 0) { 673 /* PRIMARY context */ 674 context = env->dmmu.mmu_primary_context & 0x1fff; 675 } else { 676 /* NUCLEUS context */ 677 context = 0; 678 } 679 680 for (i = 0; i < 64; i++) { 681 /* ctx match, vaddr match, valid? */ 682 if (ultrasparc_tag_match(&env->itlb[i], 683 address, context, physical)) { 684 /* access ok? */ 685 if (TTE_IS_PRIV(env->itlb[i].tte) && is_user) { 686 /* Fault status register */ 687 if (env->immu.sfsr & SFSR_VALID_BIT) { 688 env->immu.sfsr = SFSR_OW_BIT; /* overflow (not read before 689 another fault) */ 690 } else { 691 env->immu.sfsr = 0; 692 } 693 if (env->pstate & PS_PRIV) { 694 env->immu.sfsr |= SFSR_PR_BIT; 695 } 696 if (env->tl > 0) { 697 env->immu.sfsr |= SFSR_CT_NUCLEUS; 698 } 699 700 /* FIXME: ASI field in SFSR must be set */ 701 env->immu.sfsr |= SFSR_FT_PRIV_BIT | SFSR_VALID_BIT; 702 cs->exception_index = TT_TFAULT; 703 704 env->immu.tag_access = (address & ~0x1fffULL) | context; 705 706 trace_mmu_helper_tfault(address, context); 707 708 return 1; 709 } 710 *prot = PAGE_EXEC; 711 TTE_SET_USED(env->itlb[i].tte); 712 return 0; 713 } 714 } 715 716 trace_mmu_helper_tmiss(address, context); 717 718 /* Context is stored in DMMU (dmmuregs[1]) also for IMMU */ 719 env->immu.tag_access = (address & ~0x1fffULL) | context; 720 cs->exception_index = TT_TMISS; 721 return 1; 722 } 723 724 static int get_physical_address(CPUSPARCState *env, hwaddr *physical, 725 int *prot, int *access_index, MemTxAttrs *attrs, 726 target_ulong address, int rw, int mmu_idx, 727 target_ulong *page_size) 728 { 729 /* ??? We treat everything as a small page, then explicitly flush 730 everything when an entry is evicted. */ 731 *page_size = TARGET_PAGE_SIZE; 732 733 /* safety net to catch wrong softmmu index use from dynamic code */ 734 if (env->tl > 0 && mmu_idx != MMU_NUCLEUS_IDX) { 735 if (rw == 2) { 736 trace_mmu_helper_get_phys_addr_code(env->tl, mmu_idx, 737 env->dmmu.mmu_primary_context, 738 env->dmmu.mmu_secondary_context, 739 address); 740 } else { 741 trace_mmu_helper_get_phys_addr_data(env->tl, mmu_idx, 742 env->dmmu.mmu_primary_context, 743 env->dmmu.mmu_secondary_context, 744 address); 745 } 746 } 747 748 if (mmu_idx == MMU_PHYS_IDX) { 749 *physical = ultrasparc_truncate_physical(address); 750 *prot = PAGE_READ | PAGE_WRITE | PAGE_EXEC; 751 return 0; 752 } 753 754 if (rw == 2) { 755 return get_physical_address_code(env, physical, prot, attrs, address, 756 mmu_idx); 757 } else { 758 return get_physical_address_data(env, physical, prot, attrs, address, 759 rw, mmu_idx); 760 } 761 } 762 763 /* Perform address translation */ 764 bool sparc_cpu_tlb_fill(CPUState *cs, vaddr address, int size, 765 MMUAccessType access_type, int mmu_idx, 766 bool probe, uintptr_t retaddr) 767 { 768 SPARCCPU *cpu = SPARC_CPU(cs); 769 CPUSPARCState *env = &cpu->env; 770 target_ulong vaddr; 771 hwaddr paddr; 772 target_ulong page_size; 773 MemTxAttrs attrs = {}; 774 int error_code = 0, prot, access_index; 775 776 address &= TARGET_PAGE_MASK; 777 error_code = get_physical_address(env, &paddr, &prot, &access_index, &attrs, 778 address, access_type, 779 mmu_idx, &page_size); 780 if (likely(error_code == 0)) { 781 vaddr = address; 782 783 trace_mmu_helper_mmu_fault(address, paddr, mmu_idx, env->tl, 784 env->dmmu.mmu_primary_context, 785 env->dmmu.mmu_secondary_context); 786 787 tlb_set_page_with_attrs(cs, vaddr, paddr, attrs, prot, mmu_idx, 788 page_size); 789 return true; 790 } 791 if (probe) { 792 return false; 793 } 794 cpu_loop_exit_restore(cs, retaddr); 795 } 796 797 void dump_mmu(CPUSPARCState *env) 798 { 799 unsigned int i; 800 const char *mask; 801 802 qemu_printf("MMU contexts: Primary: %" PRId64 ", Secondary: %" 803 PRId64 "\n", 804 env->dmmu.mmu_primary_context, 805 env->dmmu.mmu_secondary_context); 806 qemu_printf("DMMU Tag Access: %" PRIx64 ", TSB Tag Target: %" PRIx64 807 "\n", env->dmmu.tag_access, env->dmmu.tsb_tag_target); 808 if ((env->lsu & DMMU_E) == 0) { 809 qemu_printf("DMMU disabled\n"); 810 } else { 811 qemu_printf("DMMU dump\n"); 812 for (i = 0; i < 64; i++) { 813 switch (TTE_PGSIZE(env->dtlb[i].tte)) { 814 default: 815 case 0x0: 816 mask = " 8k"; 817 break; 818 case 0x1: 819 mask = " 64k"; 820 break; 821 case 0x2: 822 mask = "512k"; 823 break; 824 case 0x3: 825 mask = " 4M"; 826 break; 827 } 828 if (TTE_IS_VALID(env->dtlb[i].tte)) { 829 qemu_printf("[%02u] VA: %" PRIx64 ", PA: %llx" 830 ", %s, %s, %s, %s, ie %s, ctx %" PRId64 " %s\n", 831 i, 832 env->dtlb[i].tag & (uint64_t)~0x1fffULL, 833 TTE_PA(env->dtlb[i].tte), 834 mask, 835 TTE_IS_PRIV(env->dtlb[i].tte) ? "priv" : "user", 836 TTE_IS_W_OK(env->dtlb[i].tte) ? "RW" : "RO", 837 TTE_IS_LOCKED(env->dtlb[i].tte) ? 838 "locked" : "unlocked", 839 TTE_IS_IE(env->dtlb[i].tte) ? 840 "yes" : "no", 841 env->dtlb[i].tag & (uint64_t)0x1fffULL, 842 TTE_IS_GLOBAL(env->dtlb[i].tte) ? 843 "global" : "local"); 844 } 845 } 846 } 847 if ((env->lsu & IMMU_E) == 0) { 848 qemu_printf("IMMU disabled\n"); 849 } else { 850 qemu_printf("IMMU dump\n"); 851 for (i = 0; i < 64; i++) { 852 switch (TTE_PGSIZE(env->itlb[i].tte)) { 853 default: 854 case 0x0: 855 mask = " 8k"; 856 break; 857 case 0x1: 858 mask = " 64k"; 859 break; 860 case 0x2: 861 mask = "512k"; 862 break; 863 case 0x3: 864 mask = " 4M"; 865 break; 866 } 867 if (TTE_IS_VALID(env->itlb[i].tte)) { 868 qemu_printf("[%02u] VA: %" PRIx64 ", PA: %llx" 869 ", %s, %s, %s, ctx %" PRId64 " %s\n", 870 i, 871 env->itlb[i].tag & (uint64_t)~0x1fffULL, 872 TTE_PA(env->itlb[i].tte), 873 mask, 874 TTE_IS_PRIV(env->itlb[i].tte) ? "priv" : "user", 875 TTE_IS_LOCKED(env->itlb[i].tte) ? 876 "locked" : "unlocked", 877 env->itlb[i].tag & (uint64_t)0x1fffULL, 878 TTE_IS_GLOBAL(env->itlb[i].tte) ? 879 "global" : "local"); 880 } 881 } 882 } 883 } 884 885 #endif /* TARGET_SPARC64 */ 886 887 static int cpu_sparc_get_phys_page(CPUSPARCState *env, hwaddr *phys, 888 target_ulong addr, int rw, int mmu_idx) 889 { 890 target_ulong page_size; 891 int prot, access_index; 892 MemTxAttrs attrs = {}; 893 894 return get_physical_address(env, phys, &prot, &access_index, &attrs, addr, 895 rw, mmu_idx, &page_size); 896 } 897 898 #if defined(TARGET_SPARC64) 899 hwaddr cpu_get_phys_page_nofault(CPUSPARCState *env, target_ulong addr, 900 int mmu_idx) 901 { 902 hwaddr phys_addr; 903 904 if (cpu_sparc_get_phys_page(env, &phys_addr, addr, 4, mmu_idx) != 0) { 905 return -1; 906 } 907 return phys_addr; 908 } 909 #endif 910 911 hwaddr sparc_cpu_get_phys_page_debug(CPUState *cs, vaddr addr) 912 { 913 SPARCCPU *cpu = SPARC_CPU(cs); 914 CPUSPARCState *env = &cpu->env; 915 hwaddr phys_addr; 916 int mmu_idx = cpu_mmu_index(env, false); 917 918 if (cpu_sparc_get_phys_page(env, &phys_addr, addr, 2, mmu_idx) != 0) { 919 if (cpu_sparc_get_phys_page(env, &phys_addr, addr, 0, mmu_idx) != 0) { 920 return -1; 921 } 922 } 923 return phys_addr; 924 } 925 926 #ifndef CONFIG_USER_ONLY 927 void QEMU_NORETURN sparc_cpu_do_unaligned_access(CPUState *cs, vaddr addr, 928 MMUAccessType access_type, 929 int mmu_idx, 930 uintptr_t retaddr) 931 { 932 SPARCCPU *cpu = SPARC_CPU(cs); 933 CPUSPARCState *env = &cpu->env; 934 935 #ifdef TARGET_SPARC64 936 env->dmmu.sfsr = build_sfsr(env, mmu_idx, access_type); 937 env->dmmu.sfar = addr; 938 #else 939 env->mmuregs[4] = addr; 940 #endif 941 942 cpu_raise_exception_ra(env, TT_UNALIGNED, retaddr); 943 } 944 #endif /* !CONFIG_USER_ONLY */ 945