1 /* 2 * Sparc MMU helpers 3 * 4 * Copyright (c) 2003-2005 Fabrice Bellard 5 * 6 * This library is free software; you can redistribute it and/or 7 * modify it under the terms of the GNU Lesser General Public 8 * License as published by the Free Software Foundation; either 9 * version 2.1 of the License, or (at your option) any later version. 10 * 11 * This library is distributed in the hope that it will be useful, 12 * but WITHOUT ANY WARRANTY; without even the implied warranty of 13 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU 14 * Lesser General Public License for more details. 15 * 16 * You should have received a copy of the GNU Lesser General Public 17 * License along with this library; if not, see <http://www.gnu.org/licenses/>. 18 */ 19 20 #include "qemu/osdep.h" 21 #include "qemu/log.h" 22 #include "cpu.h" 23 #include "exec/exec-all.h" 24 #include "qemu/qemu-print.h" 25 #include "trace.h" 26 27 /* Sparc MMU emulation */ 28 29 #ifndef TARGET_SPARC64 30 /* 31 * Sparc V8 Reference MMU (SRMMU) 32 */ 33 static const int access_table[8][8] = { 34 { 0, 0, 0, 0, 8, 0, 12, 12 }, 35 { 0, 0, 0, 0, 8, 0, 0, 0 }, 36 { 8, 8, 0, 0, 0, 8, 12, 12 }, 37 { 8, 8, 0, 0, 0, 8, 0, 0 }, 38 { 8, 0, 8, 0, 8, 8, 12, 12 }, 39 { 8, 0, 8, 0, 8, 0, 8, 0 }, 40 { 8, 8, 8, 0, 8, 8, 12, 12 }, 41 { 8, 8, 8, 0, 8, 8, 8, 0 } 42 }; 43 44 static const int perm_table[2][8] = { 45 { 46 PAGE_READ, 47 PAGE_READ | PAGE_WRITE, 48 PAGE_READ | PAGE_EXEC, 49 PAGE_READ | PAGE_WRITE | PAGE_EXEC, 50 PAGE_EXEC, 51 PAGE_READ | PAGE_WRITE, 52 PAGE_READ | PAGE_EXEC, 53 PAGE_READ | PAGE_WRITE | PAGE_EXEC 54 }, 55 { 56 PAGE_READ, 57 PAGE_READ | PAGE_WRITE, 58 PAGE_READ | PAGE_EXEC, 59 PAGE_READ | PAGE_WRITE | PAGE_EXEC, 60 PAGE_EXEC, 61 PAGE_READ, 62 0, 63 0, 64 } 65 }; 66 67 static int get_physical_address(CPUSPARCState *env, hwaddr *physical, 68 int *prot, int *access_index, MemTxAttrs *attrs, 69 target_ulong address, int rw, int mmu_idx, 70 target_ulong *page_size) 71 { 72 int access_perms = 0; 73 hwaddr pde_ptr; 74 uint32_t pde; 75 int error_code = 0, is_dirty, is_user; 76 unsigned long page_offset; 77 CPUState *cs = env_cpu(env); 78 MemTxResult result; 79 80 is_user = mmu_idx == MMU_USER_IDX; 81 82 if (mmu_idx == MMU_PHYS_IDX) { 83 *page_size = TARGET_PAGE_SIZE; 84 /* Boot mode: instruction fetches are taken from PROM */ 85 if (rw == 2 && (env->mmuregs[0] & env->def.mmu_bm)) { 86 *physical = env->prom_addr | (address & 0x7ffffULL); 87 *prot = PAGE_READ | PAGE_EXEC; 88 return 0; 89 } 90 *physical = address; 91 *prot = PAGE_READ | PAGE_WRITE | PAGE_EXEC; 92 return 0; 93 } 94 95 *access_index = ((rw & 1) << 2) | (rw & 2) | (is_user ? 0 : 1); 96 *physical = 0xffffffffffff0000ULL; 97 98 /* SPARC reference MMU table walk: Context table->L1->L2->PTE */ 99 /* Context base + context number */ 100 pde_ptr = (env->mmuregs[1] << 4) + (env->mmuregs[2] << 2); 101 pde = address_space_ldl(cs->as, pde_ptr, MEMTXATTRS_UNSPECIFIED, &result); 102 if (result != MEMTX_OK) { 103 return 4 << 2; /* Translation fault, L = 0 */ 104 } 105 106 /* Ctx pde */ 107 switch (pde & PTE_ENTRYTYPE_MASK) { 108 default: 109 case 0: /* Invalid */ 110 return 1 << 2; 111 case 2: /* L0 PTE, maybe should not happen? */ 112 case 3: /* Reserved */ 113 return 4 << 2; 114 case 1: /* L0 PDE */ 115 pde_ptr = ((address >> 22) & ~3) + ((pde & ~3) << 4); 116 pde = address_space_ldl(cs->as, pde_ptr, 117 MEMTXATTRS_UNSPECIFIED, &result); 118 if (result != MEMTX_OK) { 119 return (1 << 8) | (4 << 2); /* Translation fault, L = 1 */ 120 } 121 122 switch (pde & PTE_ENTRYTYPE_MASK) { 123 default: 124 case 0: /* Invalid */ 125 return (1 << 8) | (1 << 2); 126 case 3: /* Reserved */ 127 return (1 << 8) | (4 << 2); 128 case 1: /* L1 PDE */ 129 pde_ptr = ((address & 0xfc0000) >> 16) + ((pde & ~3) << 4); 130 pde = address_space_ldl(cs->as, pde_ptr, 131 MEMTXATTRS_UNSPECIFIED, &result); 132 if (result != MEMTX_OK) { 133 return (2 << 8) | (4 << 2); /* Translation fault, L = 2 */ 134 } 135 136 switch (pde & PTE_ENTRYTYPE_MASK) { 137 default: 138 case 0: /* Invalid */ 139 return (2 << 8) | (1 << 2); 140 case 3: /* Reserved */ 141 return (2 << 8) | (4 << 2); 142 case 1: /* L2 PDE */ 143 pde_ptr = ((address & 0x3f000) >> 10) + ((pde & ~3) << 4); 144 pde = address_space_ldl(cs->as, pde_ptr, 145 MEMTXATTRS_UNSPECIFIED, &result); 146 if (result != MEMTX_OK) { 147 return (3 << 8) | (4 << 2); /* Translation fault, L = 3 */ 148 } 149 150 switch (pde & PTE_ENTRYTYPE_MASK) { 151 default: 152 case 0: /* Invalid */ 153 return (3 << 8) | (1 << 2); 154 case 1: /* PDE, should not happen */ 155 case 3: /* Reserved */ 156 return (3 << 8) | (4 << 2); 157 case 2: /* L3 PTE */ 158 page_offset = 0; 159 } 160 *page_size = TARGET_PAGE_SIZE; 161 break; 162 case 2: /* L2 PTE */ 163 page_offset = address & 0x3f000; 164 *page_size = 0x40000; 165 } 166 break; 167 case 2: /* L1 PTE */ 168 page_offset = address & 0xfff000; 169 *page_size = 0x1000000; 170 } 171 } 172 173 /* check access */ 174 access_perms = (pde & PTE_ACCESS_MASK) >> PTE_ACCESS_SHIFT; 175 error_code = access_table[*access_index][access_perms]; 176 if (error_code && !((env->mmuregs[0] & MMU_NF) && is_user)) { 177 return error_code; 178 } 179 180 /* update page modified and dirty bits */ 181 is_dirty = (rw & 1) && !(pde & PG_MODIFIED_MASK); 182 if (!(pde & PG_ACCESSED_MASK) || is_dirty) { 183 pde |= PG_ACCESSED_MASK; 184 if (is_dirty) { 185 pde |= PG_MODIFIED_MASK; 186 } 187 stl_phys_notdirty(cs->as, pde_ptr, pde); 188 } 189 190 /* the page can be put in the TLB */ 191 *prot = perm_table[is_user][access_perms]; 192 if (!(pde & PG_MODIFIED_MASK)) { 193 /* only set write access if already dirty... otherwise wait 194 for dirty access */ 195 *prot &= ~PAGE_WRITE; 196 } 197 198 /* Even if large ptes, we map only one 4KB page in the cache to 199 avoid filling it too fast */ 200 *physical = ((hwaddr)(pde & PTE_ADDR_MASK) << 4) + page_offset; 201 return error_code; 202 } 203 204 /* Perform address translation */ 205 bool sparc_cpu_tlb_fill(CPUState *cs, vaddr address, int size, 206 MMUAccessType access_type, int mmu_idx, 207 bool probe, uintptr_t retaddr) 208 { 209 SPARCCPU *cpu = SPARC_CPU(cs); 210 CPUSPARCState *env = &cpu->env; 211 hwaddr paddr; 212 target_ulong vaddr; 213 target_ulong page_size; 214 int error_code = 0, prot, access_index; 215 MemTxAttrs attrs = {}; 216 217 /* 218 * TODO: If we ever need tlb_vaddr_to_host for this target, 219 * then we must figure out how to manipulate FSR and FAR 220 * when both MMU_NF and probe are set. In the meantime, 221 * do not support this use case. 222 */ 223 assert(!probe); 224 225 address &= TARGET_PAGE_MASK; 226 error_code = get_physical_address(env, &paddr, &prot, &access_index, &attrs, 227 address, access_type, 228 mmu_idx, &page_size); 229 vaddr = address; 230 if (likely(error_code == 0)) { 231 qemu_log_mask(CPU_LOG_MMU, 232 "Translate at %" VADDR_PRIx " -> " 233 TARGET_FMT_plx ", vaddr " TARGET_FMT_lx "\n", 234 address, paddr, vaddr); 235 tlb_set_page(cs, vaddr, paddr, prot, mmu_idx, page_size); 236 return true; 237 } 238 239 if (env->mmuregs[3]) { /* Fault status register */ 240 env->mmuregs[3] = 1; /* overflow (not read before another fault) */ 241 } 242 env->mmuregs[3] |= (access_index << 5) | error_code | 2; 243 env->mmuregs[4] = address; /* Fault address register */ 244 245 if ((env->mmuregs[0] & MMU_NF) || env->psret == 0) { 246 /* No fault mode: if a mapping is available, just override 247 permissions. If no mapping is available, redirect accesses to 248 neverland. Fake/overridden mappings will be flushed when 249 switching to normal mode. */ 250 prot = PAGE_READ | PAGE_WRITE | PAGE_EXEC; 251 tlb_set_page(cs, vaddr, paddr, prot, mmu_idx, TARGET_PAGE_SIZE); 252 return true; 253 } else { 254 if (access_type == MMU_INST_FETCH) { 255 cs->exception_index = TT_TFAULT; 256 } else { 257 cs->exception_index = TT_DFAULT; 258 } 259 cpu_loop_exit_restore(cs, retaddr); 260 } 261 } 262 263 target_ulong mmu_probe(CPUSPARCState *env, target_ulong address, int mmulev) 264 { 265 CPUState *cs = env_cpu(env); 266 hwaddr pde_ptr; 267 uint32_t pde; 268 MemTxResult result; 269 270 /* 271 * TODO: MMU probe operations are supposed to set the fault 272 * status registers, but we don't do this. 273 */ 274 275 /* Context base + context number */ 276 pde_ptr = (hwaddr)(env->mmuregs[1] << 4) + 277 (env->mmuregs[2] << 2); 278 pde = address_space_ldl(cs->as, pde_ptr, MEMTXATTRS_UNSPECIFIED, &result); 279 if (result != MEMTX_OK) { 280 return 0; 281 } 282 283 switch (pde & PTE_ENTRYTYPE_MASK) { 284 default: 285 case 0: /* Invalid */ 286 case 2: /* PTE, maybe should not happen? */ 287 case 3: /* Reserved */ 288 return 0; 289 case 1: /* L1 PDE */ 290 if (mmulev == 3) { 291 return pde; 292 } 293 pde_ptr = ((address >> 22) & ~3) + ((pde & ~3) << 4); 294 pde = address_space_ldl(cs->as, pde_ptr, 295 MEMTXATTRS_UNSPECIFIED, &result); 296 if (result != MEMTX_OK) { 297 return 0; 298 } 299 300 switch (pde & PTE_ENTRYTYPE_MASK) { 301 default: 302 case 0: /* Invalid */ 303 case 3: /* Reserved */ 304 return 0; 305 case 2: /* L1 PTE */ 306 return pde; 307 case 1: /* L2 PDE */ 308 if (mmulev == 2) { 309 return pde; 310 } 311 pde_ptr = ((address & 0xfc0000) >> 16) + ((pde & ~3) << 4); 312 pde = address_space_ldl(cs->as, pde_ptr, 313 MEMTXATTRS_UNSPECIFIED, &result); 314 if (result != MEMTX_OK) { 315 return 0; 316 } 317 318 switch (pde & PTE_ENTRYTYPE_MASK) { 319 default: 320 case 0: /* Invalid */ 321 case 3: /* Reserved */ 322 return 0; 323 case 2: /* L2 PTE */ 324 return pde; 325 case 1: /* L3 PDE */ 326 if (mmulev == 1) { 327 return pde; 328 } 329 pde_ptr = ((address & 0x3f000) >> 10) + ((pde & ~3) << 4); 330 pde = address_space_ldl(cs->as, pde_ptr, 331 MEMTXATTRS_UNSPECIFIED, &result); 332 if (result != MEMTX_OK) { 333 return 0; 334 } 335 336 switch (pde & PTE_ENTRYTYPE_MASK) { 337 default: 338 case 0: /* Invalid */ 339 case 1: /* PDE, should not happen */ 340 case 3: /* Reserved */ 341 return 0; 342 case 2: /* L3 PTE */ 343 return pde; 344 } 345 } 346 } 347 } 348 return 0; 349 } 350 351 void dump_mmu(CPUSPARCState *env) 352 { 353 CPUState *cs = env_cpu(env); 354 target_ulong va, va1, va2; 355 unsigned int n, m, o; 356 hwaddr pa; 357 uint32_t pde; 358 359 qemu_printf("Root ptr: " TARGET_FMT_plx ", ctx: %d\n", 360 (hwaddr)env->mmuregs[1] << 4, env->mmuregs[2]); 361 for (n = 0, va = 0; n < 256; n++, va += 16 * 1024 * 1024) { 362 pde = mmu_probe(env, va, 2); 363 if (pde) { 364 pa = cpu_get_phys_page_debug(cs, va); 365 qemu_printf("VA: " TARGET_FMT_lx ", PA: " TARGET_FMT_plx 366 " PDE: " TARGET_FMT_lx "\n", va, pa, pde); 367 for (m = 0, va1 = va; m < 64; m++, va1 += 256 * 1024) { 368 pde = mmu_probe(env, va1, 1); 369 if (pde) { 370 pa = cpu_get_phys_page_debug(cs, va1); 371 qemu_printf(" VA: " TARGET_FMT_lx ", PA: " 372 TARGET_FMT_plx " PDE: " TARGET_FMT_lx "\n", 373 va1, pa, pde); 374 for (o = 0, va2 = va1; o < 64; o++, va2 += 4 * 1024) { 375 pde = mmu_probe(env, va2, 0); 376 if (pde) { 377 pa = cpu_get_phys_page_debug(cs, va2); 378 qemu_printf(" VA: " TARGET_FMT_lx ", PA: " 379 TARGET_FMT_plx " PTE: " 380 TARGET_FMT_lx "\n", 381 va2, pa, pde); 382 } 383 } 384 } 385 } 386 } 387 } 388 } 389 390 /* Gdb expects all registers windows to be flushed in ram. This function handles 391 * reads (and only reads) in stack frames as if windows were flushed. We assume 392 * that the sparc ABI is followed. 393 */ 394 int sparc_cpu_memory_rw_debug(CPUState *cs, vaddr address, 395 uint8_t *buf, int len, bool is_write) 396 { 397 SPARCCPU *cpu = SPARC_CPU(cs); 398 CPUSPARCState *env = &cpu->env; 399 target_ulong addr = address; 400 int i; 401 int len1; 402 int cwp = env->cwp; 403 404 if (!is_write) { 405 for (i = 0; i < env->nwindows; i++) { 406 int off; 407 target_ulong fp = env->regbase[cwp * 16 + 22]; 408 409 /* Assume fp == 0 means end of frame. */ 410 if (fp == 0) { 411 break; 412 } 413 414 cwp = cpu_cwp_inc(env, cwp + 1); 415 416 /* Invalid window ? */ 417 if (env->wim & (1 << cwp)) { 418 break; 419 } 420 421 /* According to the ABI, the stack is growing downward. */ 422 if (addr + len < fp) { 423 break; 424 } 425 426 /* Not in this frame. */ 427 if (addr > fp + 64) { 428 continue; 429 } 430 431 /* Handle access before this window. */ 432 if (addr < fp) { 433 len1 = fp - addr; 434 if (cpu_memory_rw_debug(cs, addr, buf, len1, is_write) != 0) { 435 return -1; 436 } 437 addr += len1; 438 len -= len1; 439 buf += len1; 440 } 441 442 /* Access byte per byte to registers. Not very efficient but speed 443 * is not critical. 444 */ 445 off = addr - fp; 446 len1 = 64 - off; 447 448 if (len1 > len) { 449 len1 = len; 450 } 451 452 for (; len1; len1--) { 453 int reg = cwp * 16 + 8 + (off >> 2); 454 union { 455 uint32_t v; 456 uint8_t c[4]; 457 } u; 458 u.v = cpu_to_be32(env->regbase[reg]); 459 *buf++ = u.c[off & 3]; 460 addr++; 461 len--; 462 off++; 463 } 464 465 if (len == 0) { 466 return 0; 467 } 468 } 469 } 470 return cpu_memory_rw_debug(cs, addr, buf, len, is_write); 471 } 472 473 #else /* !TARGET_SPARC64 */ 474 475 /* 41 bit physical address space */ 476 static inline hwaddr ultrasparc_truncate_physical(uint64_t x) 477 { 478 return x & 0x1ffffffffffULL; 479 } 480 481 /* 482 * UltraSparc IIi I/DMMUs 483 */ 484 485 /* Returns true if TTE tag is valid and matches virtual address value 486 in context requires virtual address mask value calculated from TTE 487 entry size */ 488 static inline int ultrasparc_tag_match(SparcTLBEntry *tlb, 489 uint64_t address, uint64_t context, 490 hwaddr *physical) 491 { 492 uint64_t mask = -(8192ULL << 3 * TTE_PGSIZE(tlb->tte)); 493 494 /* valid, context match, virtual address match? */ 495 if (TTE_IS_VALID(tlb->tte) && 496 (TTE_IS_GLOBAL(tlb->tte) || tlb_compare_context(tlb, context)) 497 && compare_masked(address, tlb->tag, mask)) { 498 /* decode physical address */ 499 *physical = ((tlb->tte & mask) | (address & ~mask)) & 0x1ffffffe000ULL; 500 return 1; 501 } 502 503 return 0; 504 } 505 506 static uint64_t build_sfsr(CPUSPARCState *env, int mmu_idx, int rw) 507 { 508 uint64_t sfsr = SFSR_VALID_BIT; 509 510 switch (mmu_idx) { 511 case MMU_PHYS_IDX: 512 sfsr |= SFSR_CT_NOTRANS; 513 break; 514 case MMU_USER_IDX: 515 case MMU_KERNEL_IDX: 516 sfsr |= SFSR_CT_PRIMARY; 517 break; 518 case MMU_USER_SECONDARY_IDX: 519 case MMU_KERNEL_SECONDARY_IDX: 520 sfsr |= SFSR_CT_SECONDARY; 521 break; 522 case MMU_NUCLEUS_IDX: 523 sfsr |= SFSR_CT_NUCLEUS; 524 break; 525 default: 526 g_assert_not_reached(); 527 } 528 529 if (rw == 1) { 530 sfsr |= SFSR_WRITE_BIT; 531 } else if (rw == 4) { 532 sfsr |= SFSR_NF_BIT; 533 } 534 535 if (env->pstate & PS_PRIV) { 536 sfsr |= SFSR_PR_BIT; 537 } 538 539 if (env->dmmu.sfsr & SFSR_VALID_BIT) { /* Fault status register */ 540 sfsr |= SFSR_OW_BIT; /* overflow (not read before another fault) */ 541 } 542 543 /* FIXME: ASI field in SFSR must be set */ 544 545 return sfsr; 546 } 547 548 static int get_physical_address_data(CPUSPARCState *env, hwaddr *physical, 549 int *prot, MemTxAttrs *attrs, 550 target_ulong address, int rw, int mmu_idx) 551 { 552 CPUState *cs = env_cpu(env); 553 unsigned int i; 554 uint64_t sfsr; 555 uint64_t context; 556 bool is_user = false; 557 558 sfsr = build_sfsr(env, mmu_idx, rw); 559 560 switch (mmu_idx) { 561 case MMU_PHYS_IDX: 562 g_assert_not_reached(); 563 case MMU_USER_IDX: 564 is_user = true; 565 /* fallthru */ 566 case MMU_KERNEL_IDX: 567 context = env->dmmu.mmu_primary_context & 0x1fff; 568 break; 569 case MMU_USER_SECONDARY_IDX: 570 is_user = true; 571 /* fallthru */ 572 case MMU_KERNEL_SECONDARY_IDX: 573 context = env->dmmu.mmu_secondary_context & 0x1fff; 574 break; 575 default: 576 context = 0; 577 break; 578 } 579 580 for (i = 0; i < 64; i++) { 581 /* ctx match, vaddr match, valid? */ 582 if (ultrasparc_tag_match(&env->dtlb[i], address, context, physical)) { 583 int do_fault = 0; 584 585 if (TTE_IS_IE(env->dtlb[i].tte)) { 586 attrs->byte_swap = true; 587 } 588 589 /* access ok? */ 590 /* multiple bits in SFSR.FT may be set on TT_DFAULT */ 591 if (TTE_IS_PRIV(env->dtlb[i].tte) && is_user) { 592 do_fault = 1; 593 sfsr |= SFSR_FT_PRIV_BIT; /* privilege violation */ 594 trace_mmu_helper_dfault(address, context, mmu_idx, env->tl); 595 } 596 if (rw == 4) { 597 if (TTE_IS_SIDEEFFECT(env->dtlb[i].tte)) { 598 do_fault = 1; 599 sfsr |= SFSR_FT_NF_E_BIT; 600 } 601 } else { 602 if (TTE_IS_NFO(env->dtlb[i].tte)) { 603 do_fault = 1; 604 sfsr |= SFSR_FT_NFO_BIT; 605 } 606 } 607 608 if (do_fault) { 609 /* faults above are reported with TT_DFAULT. */ 610 cs->exception_index = TT_DFAULT; 611 } else if (!TTE_IS_W_OK(env->dtlb[i].tte) && (rw == 1)) { 612 do_fault = 1; 613 cs->exception_index = TT_DPROT; 614 615 trace_mmu_helper_dprot(address, context, mmu_idx, env->tl); 616 } 617 618 if (!do_fault) { 619 *prot = PAGE_READ; 620 if (TTE_IS_W_OK(env->dtlb[i].tte)) { 621 *prot |= PAGE_WRITE; 622 } 623 624 TTE_SET_USED(env->dtlb[i].tte); 625 626 return 0; 627 } 628 629 env->dmmu.sfsr = sfsr; 630 env->dmmu.sfar = address; /* Fault address register */ 631 env->dmmu.tag_access = (address & ~0x1fffULL) | context; 632 return 1; 633 } 634 } 635 636 trace_mmu_helper_dmiss(address, context); 637 638 /* 639 * On MMU misses: 640 * - UltraSPARC IIi: SFSR and SFAR unmodified 641 * - JPS1: SFAR updated and some fields of SFSR updated 642 */ 643 env->dmmu.tag_access = (address & ~0x1fffULL) | context; 644 cs->exception_index = TT_DMISS; 645 return 1; 646 } 647 648 static int get_physical_address_code(CPUSPARCState *env, hwaddr *physical, 649 int *prot, MemTxAttrs *attrs, 650 target_ulong address, int mmu_idx) 651 { 652 CPUState *cs = env_cpu(env); 653 unsigned int i; 654 uint64_t context; 655 bool is_user = false; 656 657 switch (mmu_idx) { 658 case MMU_PHYS_IDX: 659 case MMU_USER_SECONDARY_IDX: 660 case MMU_KERNEL_SECONDARY_IDX: 661 g_assert_not_reached(); 662 case MMU_USER_IDX: 663 is_user = true; 664 /* fallthru */ 665 case MMU_KERNEL_IDX: 666 context = env->dmmu.mmu_primary_context & 0x1fff; 667 break; 668 default: 669 context = 0; 670 break; 671 } 672 673 if (env->tl == 0) { 674 /* PRIMARY context */ 675 context = env->dmmu.mmu_primary_context & 0x1fff; 676 } else { 677 /* NUCLEUS context */ 678 context = 0; 679 } 680 681 for (i = 0; i < 64; i++) { 682 /* ctx match, vaddr match, valid? */ 683 if (ultrasparc_tag_match(&env->itlb[i], 684 address, context, physical)) { 685 /* access ok? */ 686 if (TTE_IS_PRIV(env->itlb[i].tte) && is_user) { 687 /* Fault status register */ 688 if (env->immu.sfsr & SFSR_VALID_BIT) { 689 env->immu.sfsr = SFSR_OW_BIT; /* overflow (not read before 690 another fault) */ 691 } else { 692 env->immu.sfsr = 0; 693 } 694 if (env->pstate & PS_PRIV) { 695 env->immu.sfsr |= SFSR_PR_BIT; 696 } 697 if (env->tl > 0) { 698 env->immu.sfsr |= SFSR_CT_NUCLEUS; 699 } 700 701 /* FIXME: ASI field in SFSR must be set */ 702 env->immu.sfsr |= SFSR_FT_PRIV_BIT | SFSR_VALID_BIT; 703 cs->exception_index = TT_TFAULT; 704 705 env->immu.tag_access = (address & ~0x1fffULL) | context; 706 707 trace_mmu_helper_tfault(address, context); 708 709 return 1; 710 } 711 *prot = PAGE_EXEC; 712 TTE_SET_USED(env->itlb[i].tte); 713 return 0; 714 } 715 } 716 717 trace_mmu_helper_tmiss(address, context); 718 719 /* Context is stored in DMMU (dmmuregs[1]) also for IMMU */ 720 env->immu.tag_access = (address & ~0x1fffULL) | context; 721 cs->exception_index = TT_TMISS; 722 return 1; 723 } 724 725 static int get_physical_address(CPUSPARCState *env, hwaddr *physical, 726 int *prot, int *access_index, MemTxAttrs *attrs, 727 target_ulong address, int rw, int mmu_idx, 728 target_ulong *page_size) 729 { 730 /* ??? We treat everything as a small page, then explicitly flush 731 everything when an entry is evicted. */ 732 *page_size = TARGET_PAGE_SIZE; 733 734 /* safety net to catch wrong softmmu index use from dynamic code */ 735 if (env->tl > 0 && mmu_idx != MMU_NUCLEUS_IDX) { 736 if (rw == 2) { 737 trace_mmu_helper_get_phys_addr_code(env->tl, mmu_idx, 738 env->dmmu.mmu_primary_context, 739 env->dmmu.mmu_secondary_context, 740 address); 741 } else { 742 trace_mmu_helper_get_phys_addr_data(env->tl, mmu_idx, 743 env->dmmu.mmu_primary_context, 744 env->dmmu.mmu_secondary_context, 745 address); 746 } 747 } 748 749 if (mmu_idx == MMU_PHYS_IDX) { 750 *physical = ultrasparc_truncate_physical(address); 751 *prot = PAGE_READ | PAGE_WRITE | PAGE_EXEC; 752 return 0; 753 } 754 755 if (rw == 2) { 756 return get_physical_address_code(env, physical, prot, attrs, address, 757 mmu_idx); 758 } else { 759 return get_physical_address_data(env, physical, prot, attrs, address, 760 rw, mmu_idx); 761 } 762 } 763 764 /* Perform address translation */ 765 bool sparc_cpu_tlb_fill(CPUState *cs, vaddr address, int size, 766 MMUAccessType access_type, int mmu_idx, 767 bool probe, uintptr_t retaddr) 768 { 769 SPARCCPU *cpu = SPARC_CPU(cs); 770 CPUSPARCState *env = &cpu->env; 771 target_ulong vaddr; 772 hwaddr paddr; 773 target_ulong page_size; 774 MemTxAttrs attrs = {}; 775 int error_code = 0, prot, access_index; 776 777 address &= TARGET_PAGE_MASK; 778 error_code = get_physical_address(env, &paddr, &prot, &access_index, &attrs, 779 address, access_type, 780 mmu_idx, &page_size); 781 if (likely(error_code == 0)) { 782 vaddr = address; 783 784 trace_mmu_helper_mmu_fault(address, paddr, mmu_idx, env->tl, 785 env->dmmu.mmu_primary_context, 786 env->dmmu.mmu_secondary_context); 787 788 tlb_set_page_with_attrs(cs, vaddr, paddr, attrs, prot, mmu_idx, 789 page_size); 790 return true; 791 } 792 if (probe) { 793 return false; 794 } 795 cpu_loop_exit_restore(cs, retaddr); 796 } 797 798 void dump_mmu(CPUSPARCState *env) 799 { 800 unsigned int i; 801 const char *mask; 802 803 qemu_printf("MMU contexts: Primary: %" PRId64 ", Secondary: %" 804 PRId64 "\n", 805 env->dmmu.mmu_primary_context, 806 env->dmmu.mmu_secondary_context); 807 qemu_printf("DMMU Tag Access: %" PRIx64 ", TSB Tag Target: %" PRIx64 808 "\n", env->dmmu.tag_access, env->dmmu.tsb_tag_target); 809 if ((env->lsu & DMMU_E) == 0) { 810 qemu_printf("DMMU disabled\n"); 811 } else { 812 qemu_printf("DMMU dump\n"); 813 for (i = 0; i < 64; i++) { 814 switch (TTE_PGSIZE(env->dtlb[i].tte)) { 815 default: 816 case 0x0: 817 mask = " 8k"; 818 break; 819 case 0x1: 820 mask = " 64k"; 821 break; 822 case 0x2: 823 mask = "512k"; 824 break; 825 case 0x3: 826 mask = " 4M"; 827 break; 828 } 829 if (TTE_IS_VALID(env->dtlb[i].tte)) { 830 qemu_printf("[%02u] VA: %" PRIx64 ", PA: %llx" 831 ", %s, %s, %s, %s, ie %s, ctx %" PRId64 " %s\n", 832 i, 833 env->dtlb[i].tag & (uint64_t)~0x1fffULL, 834 TTE_PA(env->dtlb[i].tte), 835 mask, 836 TTE_IS_PRIV(env->dtlb[i].tte) ? "priv" : "user", 837 TTE_IS_W_OK(env->dtlb[i].tte) ? "RW" : "RO", 838 TTE_IS_LOCKED(env->dtlb[i].tte) ? 839 "locked" : "unlocked", 840 TTE_IS_IE(env->dtlb[i].tte) ? 841 "yes" : "no", 842 env->dtlb[i].tag & (uint64_t)0x1fffULL, 843 TTE_IS_GLOBAL(env->dtlb[i].tte) ? 844 "global" : "local"); 845 } 846 } 847 } 848 if ((env->lsu & IMMU_E) == 0) { 849 qemu_printf("IMMU disabled\n"); 850 } else { 851 qemu_printf("IMMU dump\n"); 852 for (i = 0; i < 64; i++) { 853 switch (TTE_PGSIZE(env->itlb[i].tte)) { 854 default: 855 case 0x0: 856 mask = " 8k"; 857 break; 858 case 0x1: 859 mask = " 64k"; 860 break; 861 case 0x2: 862 mask = "512k"; 863 break; 864 case 0x3: 865 mask = " 4M"; 866 break; 867 } 868 if (TTE_IS_VALID(env->itlb[i].tte)) { 869 qemu_printf("[%02u] VA: %" PRIx64 ", PA: %llx" 870 ", %s, %s, %s, ctx %" PRId64 " %s\n", 871 i, 872 env->itlb[i].tag & (uint64_t)~0x1fffULL, 873 TTE_PA(env->itlb[i].tte), 874 mask, 875 TTE_IS_PRIV(env->itlb[i].tte) ? "priv" : "user", 876 TTE_IS_LOCKED(env->itlb[i].tte) ? 877 "locked" : "unlocked", 878 env->itlb[i].tag & (uint64_t)0x1fffULL, 879 TTE_IS_GLOBAL(env->itlb[i].tte) ? 880 "global" : "local"); 881 } 882 } 883 } 884 } 885 886 #endif /* TARGET_SPARC64 */ 887 888 static int cpu_sparc_get_phys_page(CPUSPARCState *env, hwaddr *phys, 889 target_ulong addr, int rw, int mmu_idx) 890 { 891 target_ulong page_size; 892 int prot, access_index; 893 MemTxAttrs attrs = {}; 894 895 return get_physical_address(env, phys, &prot, &access_index, &attrs, addr, 896 rw, mmu_idx, &page_size); 897 } 898 899 #if defined(TARGET_SPARC64) 900 hwaddr cpu_get_phys_page_nofault(CPUSPARCState *env, target_ulong addr, 901 int mmu_idx) 902 { 903 hwaddr phys_addr; 904 905 if (cpu_sparc_get_phys_page(env, &phys_addr, addr, 4, mmu_idx) != 0) { 906 return -1; 907 } 908 return phys_addr; 909 } 910 #endif 911 912 hwaddr sparc_cpu_get_phys_page_debug(CPUState *cs, vaddr addr) 913 { 914 SPARCCPU *cpu = SPARC_CPU(cs); 915 CPUSPARCState *env = &cpu->env; 916 hwaddr phys_addr; 917 int mmu_idx = cpu_mmu_index(env, false); 918 919 if (cpu_sparc_get_phys_page(env, &phys_addr, addr, 2, mmu_idx) != 0) { 920 if (cpu_sparc_get_phys_page(env, &phys_addr, addr, 0, mmu_idx) != 0) { 921 return -1; 922 } 923 } 924 return phys_addr; 925 } 926 927 #ifndef CONFIG_USER_ONLY 928 void QEMU_NORETURN sparc_cpu_do_unaligned_access(CPUState *cs, vaddr addr, 929 MMUAccessType access_type, 930 int mmu_idx, 931 uintptr_t retaddr) 932 { 933 SPARCCPU *cpu = SPARC_CPU(cs); 934 CPUSPARCState *env = &cpu->env; 935 936 #ifdef TARGET_SPARC64 937 env->dmmu.sfsr = build_sfsr(env, mmu_idx, access_type); 938 env->dmmu.sfar = addr; 939 #else 940 env->mmuregs[4] = addr; 941 #endif 942 943 cpu_raise_exception_ra(env, TT_UNALIGNED, retaddr); 944 } 945 #endif /* !CONFIG_USER_ONLY */ 946