/*
 * Sparc MMU helpers
 *
 * Copyright (c) 2003-2005 Fabrice Bellard
 *
 * This library is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License as published by the Free Software Foundation; either
 * version 2 of the License, or (at your option) any later version.
 *
 * This library is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with this library; if not, see <http://www.gnu.org/licenses/>.
 */

#include "qemu/osdep.h"
#include "cpu.h"
#include "exec/exec-all.h"
#include "trace.h"
#include "exec/address-spaces.h"

/* Sparc MMU emulation */

#if defined(CONFIG_USER_ONLY)

int sparc_cpu_handle_mmu_fault(CPUState *cs, vaddr address, int rw,
                               int mmu_idx)
{
    SPARCCPU *cpu = SPARC_CPU(cs);
    CPUSPARCState *env = &cpu->env;

    if (rw & 2) {
        cs->exception_index = TT_TFAULT;
    } else {
        cs->exception_index = TT_DFAULT;
#ifdef TARGET_SPARC64
        env->dmmu.mmuregs[4] = address;
#else
        env->mmuregs[4] = address;
#endif
    }
    return 1;
}

#else

#ifndef TARGET_SPARC64
/*
 * Sparc V8 Reference MMU (SRMMU)
 */
static const int access_table[8][8] = {
    { 0, 0, 0, 0, 8, 0, 12, 12 },
    { 0, 0, 0, 0, 8, 0, 0, 0 },
    { 8, 8, 0, 0, 0, 8, 12, 12 },
    { 8, 8, 0, 0, 0, 8, 0, 0 },
    { 8, 0, 8, 0, 8, 8, 12, 12 },
    { 8, 0, 8, 0, 8, 0, 8, 0 },
    { 8, 8, 8, 0, 8, 8, 12, 12 },
    { 8, 8, 8, 0, 8, 8, 8, 0 }
};

static const int perm_table[2][8] = {
    {
        PAGE_READ,
        PAGE_READ | PAGE_WRITE,
        PAGE_READ | PAGE_EXEC,
        PAGE_READ | PAGE_WRITE | PAGE_EXEC,
        PAGE_EXEC,
        PAGE_READ | PAGE_WRITE,
        PAGE_READ | PAGE_EXEC,
        PAGE_READ | PAGE_WRITE | PAGE_EXEC
    },
    {
        PAGE_READ,
        PAGE_READ | PAGE_WRITE,
        PAGE_READ | PAGE_EXEC,
        PAGE_READ | PAGE_WRITE | PAGE_EXEC,
        PAGE_EXEC,
        PAGE_READ,
        0,
        0,
    }
};

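/*
 * How the tables above are used (a summary following the SPARC V8 FSR
 * fault-type encoding; the code below is authoritative): access_table is
 * indexed as [access_index][ACC], where access_index is built in
 * get_physical_address() as ((rw & 1) << 2) | (rw & 2) | (is_user ? 0 : 1)
 * (bit 2 = write, bit 1 = instruction fetch, bit 0 = supervisor) and ACC
 * is the 3-bit access-permission field of the PTE.  A zero entry allows
 * the access; non-zero entries are fault-type codes pre-shifted for the
 * fault status register (8 = 2 << 2, protection error; 12 = 3 << 2,
 * privilege violation).  perm_table then gives the QEMU page protection
 * bits for [is_user][ACC] once the access has been accepted.
 */
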
static int get_physical_address(CPUSPARCState *env, hwaddr *physical,
                                int *prot, int *access_index,
                                target_ulong address, int rw, int mmu_idx,
                                target_ulong *page_size)
{
    int access_perms = 0;
    hwaddr pde_ptr;
    uint32_t pde;
    int error_code = 0, is_dirty, is_user;
    unsigned long page_offset;
    CPUState *cs = CPU(sparc_env_get_cpu(env));

    is_user = mmu_idx == MMU_USER_IDX;

    if (mmu_idx == MMU_PHYS_IDX) {
        *page_size = TARGET_PAGE_SIZE;
        /* Boot mode: instruction fetches are taken from PROM */
        if (rw == 2 && (env->mmuregs[0] & env->def.mmu_bm)) {
            *physical = env->prom_addr | (address & 0x7ffffULL);
            *prot = PAGE_READ | PAGE_EXEC;
            return 0;
        }
        *physical = address;
        *prot = PAGE_READ | PAGE_WRITE | PAGE_EXEC;
        return 0;
    }

    *access_index = ((rw & 1) << 2) | (rw & 2) | (is_user ? 0 : 1);
    *physical = 0xffffffffffff0000ULL;

    /* SPARC reference MMU table walk: Context table->L1->L2->PTE */
    /* Context base + context number */
    pde_ptr = (env->mmuregs[1] << 4) + (env->mmuregs[2] << 2);
    pde = ldl_phys(cs->as, pde_ptr);

    /* Ctx pde */
    switch (pde & PTE_ENTRYTYPE_MASK) {
    default:
    case 0: /* Invalid */
        return 1 << 2;
    case 2: /* L0 PTE, maybe should not happen? */
    case 3: /* Reserved */
        return 4 << 2;
    case 1: /* L0 PDE */
        pde_ptr = ((address >> 22) & ~3) + ((pde & ~3) << 4);
        pde = ldl_phys(cs->as, pde_ptr);

        switch (pde & PTE_ENTRYTYPE_MASK) {
        default:
        case 0: /* Invalid */
            return (1 << 8) | (1 << 2);
        case 3: /* Reserved */
            return (1 << 8) | (4 << 2);
        case 1: /* L1 PDE */
            pde_ptr = ((address & 0xfc0000) >> 16) + ((pde & ~3) << 4);
            pde = ldl_phys(cs->as, pde_ptr);

            switch (pde & PTE_ENTRYTYPE_MASK) {
            default:
            case 0: /* Invalid */
                return (2 << 8) | (1 << 2);
            case 3: /* Reserved */
                return (2 << 8) | (4 << 2);
            case 1: /* L2 PDE */
                pde_ptr = ((address & 0x3f000) >> 10) + ((pde & ~3) << 4);
                pde = ldl_phys(cs->as, pde_ptr);

                switch (pde & PTE_ENTRYTYPE_MASK) {
                default:
                case 0: /* Invalid */
                    return (3 << 8) | (1 << 2);
                case 1: /* PDE, should not happen */
                case 3: /* Reserved */
                    return (3 << 8) | (4 << 2);
                case 2: /* L3 PTE */
                    page_offset = 0;
                }
                *page_size = TARGET_PAGE_SIZE;
                break;
            case 2: /* L2 PTE */
                page_offset = address & 0x3f000;
                *page_size = 0x40000;
            }
            break;
        case 2: /* L1 PTE */
            page_offset = address & 0xfff000;
            *page_size = 0x1000000;
        }
    }

    /* check access */
    access_perms = (pde & PTE_ACCESS_MASK) >> PTE_ACCESS_SHIFT;
    error_code = access_table[*access_index][access_perms];
    if (error_code && !((env->mmuregs[0] & MMU_NF) && is_user)) {
        return error_code;
    }

    /* update page modified and dirty bits */
    is_dirty = (rw & 1) && !(pde & PG_MODIFIED_MASK);
    if (!(pde & PG_ACCESSED_MASK) || is_dirty) {
        pde |= PG_ACCESSED_MASK;
        if (is_dirty) {
            pde |= PG_MODIFIED_MASK;
        }
        stl_phys_notdirty(cs->as, pde_ptr, pde);
    }

    /* the page can be put in the TLB */
    *prot = perm_table[is_user][access_perms];
    if (!(pde & PG_MODIFIED_MASK)) {
        /* only set write access if already dirty... otherwise wait
           for dirty access */
        *prot &= ~PAGE_WRITE;
    }

    /* Even with large PTEs, we map only one 4 KB page in the TLB to
       avoid filling it too fast */
    *physical = ((hwaddr)(pde & PTE_ADDR_MASK) << 4) + page_offset;
    return error_code;
}

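/*
 * The error codes returned by get_physical_address() are already laid out
 * for the SRMMU fault status register: bits [4:2] carry the fault type
 * (1 = invalid address, 2 = protection, 3 = privilege violation,
 * 4 = translation error) and bits [9:8] the table level at which the walk
 * failed.  The fault handler below only has to OR in the access type
 * (access_index << 5) and the fault-address-valid bit (2) before storing
 * the result in mmuregs[3].  This description follows the SPARC V8
 * reference MMU; the code above remains the authoritative source.
 */
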
/* Perform address translation */
int sparc_cpu_handle_mmu_fault(CPUState *cs, vaddr address, int rw,
                               int mmu_idx)
{
    SPARCCPU *cpu = SPARC_CPU(cs);
    CPUSPARCState *env = &cpu->env;
    hwaddr paddr;
    target_ulong vaddr;
    target_ulong page_size;
    int error_code = 0, prot, access_index;

    address &= TARGET_PAGE_MASK;
    error_code = get_physical_address(env, &paddr, &prot, &access_index,
                                      address, rw, mmu_idx, &page_size);
    vaddr = address;
    if (error_code == 0) {
        qemu_log_mask(CPU_LOG_MMU,
                      "Translate at %" VADDR_PRIx " -> " TARGET_FMT_plx ", vaddr "
                      TARGET_FMT_lx "\n", address, paddr, vaddr);
        tlb_set_page(cs, vaddr, paddr, prot, mmu_idx, page_size);
        return 0;
    }

    if (env->mmuregs[3]) { /* Fault status register */
        env->mmuregs[3] = 1; /* overflow (not read before another fault) */
    }
    env->mmuregs[3] |= (access_index << 5) | error_code | 2;
    env->mmuregs[4] = address; /* Fault address register */

    if ((env->mmuregs[0] & MMU_NF) || env->psret == 0) {
        /* No fault mode: if a mapping is available, just override
           permissions. If no mapping is available, redirect accesses to
           neverland. Fake/overridden mappings will be flushed when
           switching to normal mode. */
        prot = PAGE_READ | PAGE_WRITE | PAGE_EXEC;
        tlb_set_page(cs, vaddr, paddr, prot, mmu_idx, TARGET_PAGE_SIZE);
        return 0;
    } else {
        if (rw & 2) {
            cs->exception_index = TT_TFAULT;
        } else {
            cs->exception_index = TT_DFAULT;
        }
        return 1;
    }
}

target_ulong mmu_probe(CPUSPARCState *env, target_ulong address, int mmulev)
{
    CPUState *cs = CPU(sparc_env_get_cpu(env));
    hwaddr pde_ptr;
    uint32_t pde;

    /* Context base + context number */
    pde_ptr = (hwaddr)(env->mmuregs[1] << 4) +
        (env->mmuregs[2] << 2);
    pde = ldl_phys(cs->as, pde_ptr);

    switch (pde & PTE_ENTRYTYPE_MASK) {
    default:
    case 0: /* Invalid */
    case 2: /* PTE, maybe should not happen? */
    case 3: /* Reserved */
        return 0;
    case 1: /* L1 PDE */
        if (mmulev == 3) {
            return pde;
        }
        pde_ptr = ((address >> 22) & ~3) + ((pde & ~3) << 4);
        pde = ldl_phys(cs->as, pde_ptr);

        switch (pde & PTE_ENTRYTYPE_MASK) {
        default:
        case 0: /* Invalid */
        case 3: /* Reserved */
            return 0;
        case 2: /* L1 PTE */
            return pde;
        case 1: /* L2 PDE */
            if (mmulev == 2) {
                return pde;
            }
            pde_ptr = ((address & 0xfc0000) >> 16) + ((pde & ~3) << 4);
            pde = ldl_phys(cs->as, pde_ptr);

            switch (pde & PTE_ENTRYTYPE_MASK) {
            default:
            case 0: /* Invalid */
            case 3: /* Reserved */
                return 0;
            case 2: /* L2 PTE */
                return pde;
            case 1: /* L3 PDE */
                if (mmulev == 1) {
                    return pde;
                }
                pde_ptr = ((address & 0x3f000) >> 10) + ((pde & ~3) << 4);
                pde = ldl_phys(cs->as, pde_ptr);

                switch (pde & PTE_ENTRYTYPE_MASK) {
                default:
                case 0: /* Invalid */
                case 1: /* PDE, should not happen */
                case 3: /* Reserved */
                    return 0;
                case 2: /* L3 PTE */
                    return pde;
                }
            }
        }
    }
    return 0;
}

void dump_mmu(FILE *f, fprintf_function cpu_fprintf, CPUSPARCState *env)
{
    CPUState *cs = CPU(sparc_env_get_cpu(env));
    target_ulong va, va1, va2;
    unsigned int n, m, o;
    hwaddr pde_ptr, pa;
    uint32_t pde;

    pde_ptr = (env->mmuregs[1] << 4) + (env->mmuregs[2] << 2);
    pde = ldl_phys(cs->as, pde_ptr);
    (*cpu_fprintf)(f, "Root ptr: " TARGET_FMT_plx ", ctx: %d\n",
                   (hwaddr)env->mmuregs[1] << 4, env->mmuregs[2]);
    for (n = 0, va = 0; n < 256; n++, va += 16 * 1024 * 1024) {
        pde = mmu_probe(env, va, 2);
        if (pde) {
            pa = cpu_get_phys_page_debug(cs, va);
            (*cpu_fprintf)(f, "VA: " TARGET_FMT_lx ", PA: " TARGET_FMT_plx
                           " PDE: " TARGET_FMT_lx "\n", va, pa, pde);
            for (m = 0, va1 = va; m < 64; m++, va1 += 256 * 1024) {
                pde = mmu_probe(env, va1, 1);
                if (pde) {
                    pa = cpu_get_phys_page_debug(cs, va1);
                    (*cpu_fprintf)(f, " VA: " TARGET_FMT_lx ", PA: "
                                   TARGET_FMT_plx " PDE: " TARGET_FMT_lx "\n",
                                   va1, pa, pde);
                    for (o = 0, va2 = va1; o < 64; o++, va2 += 4 * 1024) {
                        pde = mmu_probe(env, va2, 0);
                        if (pde) {
                            pa = cpu_get_phys_page_debug(cs, va2);
                            (*cpu_fprintf)(f, "  VA: " TARGET_FMT_lx ", PA: "
                                           TARGET_FMT_plx " PTE: "
                                           TARGET_FMT_lx "\n",
                                           va2, pa, pde);
                        }
                    }
                }
            }
        }
    }
}

/* GDB expects all register windows to be flushed to RAM. This function
 * handles reads (and only reads) in stack frames as if windows were
 * flushed. We assume that the SPARC ABI is followed.
 */
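/*
 * A note on the register file layout assumed below (derived from the
 * indexing in this function, not from a separate specification): for
 * window 'cwp' the %l registers live at env->regbase[cwp * 16 + 8..15]
 * and the %i registers at [cwp * 16 + 16..23], so %fp (%i6) is
 * regbase[cwp * 16 + 22] and the 16 registers that a window flush would
 * store at %sp .. %sp + 63 start at regbase[cwp * 16 + 8].
 */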
int sparc_cpu_memory_rw_debug(CPUState *cs, vaddr address,
                              uint8_t *buf, int len, bool is_write)
{
    SPARCCPU *cpu = SPARC_CPU(cs);
    CPUSPARCState *env = &cpu->env;
    target_ulong addr = address;
    int i;
    int len1;
    int cwp = env->cwp;

    if (!is_write) {
        for (i = 0; i < env->nwindows; i++) {
            int off;
            target_ulong fp = env->regbase[cwp * 16 + 22];

            /* Assume fp == 0 means end of frame. */
            if (fp == 0) {
                break;
            }

            cwp = cpu_cwp_inc(env, cwp + 1);

            /* Invalid window? */
            if (env->wim & (1 << cwp)) {
                break;
            }

            /* According to the ABI, the stack is growing downward. */
            if (addr + len < fp) {
                break;
            }

            /* Not in this frame. */
            if (addr > fp + 64) {
                continue;
            }

            /* Handle access before this window. */
            if (addr < fp) {
                len1 = fp - addr;
                if (cpu_memory_rw_debug(cs, addr, buf, len1, is_write) != 0) {
                    return -1;
                }
                addr += len1;
                len -= len1;
                buf += len1;
            }

            /* Access the registers byte per byte. Not very efficient, but
             * speed is not critical.
             */
            off = addr - fp;
            len1 = 64 - off;

            if (len1 > len) {
                len1 = len;
            }

            for (; len1; len1--) {
                int reg = cwp * 16 + 8 + (off >> 2);
                union {
                    uint32_t v;
                    uint8_t c[4];
                } u;
                u.v = cpu_to_be32(env->regbase[reg]);
                *buf++ = u.c[off & 3];
                addr++;
                len--;
                off++;
            }

            if (len == 0) {
                return 0;
            }
        }
    }
    return cpu_memory_rw_debug(cs, addr, buf, len, is_write);
}

#else /* !TARGET_SPARC64 */

/* 41 bit physical address space */
static inline hwaddr ultrasparc_truncate_physical(uint64_t x)
{
    return x & 0x1ffffffffffULL;
}

/*
 * UltraSparc IIi I/DMMUs
 */

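/*
 * TTE_PGSIZE() returns the 2-bit page size field of a TTE: 0 -> 8KB,
 * 1 -> 64KB, 2 -> 512KB, 3 -> 4MB (each step multiplies the size by 8,
 * hence the "<< 3 * TTE_PGSIZE(...)" below).  For example, a 64KB entry
 * gives mask = -(8192ULL << 3) = 0xffffffffffff0000, so the low 16 bits of
 * the translation come from the virtual address and the rest from the TTE.
 */
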
/* Returns true if the TTE tag is valid and matches the virtual address
   value in the given context. Requires the virtual address mask value
   calculated from the TTE entry size. */
static inline int ultrasparc_tag_match(SparcTLBEntry *tlb,
                                       uint64_t address, uint64_t context,
                                       hwaddr *physical)
{
    uint64_t mask = -(8192ULL << 3 * TTE_PGSIZE(tlb->tte));

    /* valid, context match, virtual address match? */
    if (TTE_IS_VALID(tlb->tte) &&
        (TTE_IS_GLOBAL(tlb->tte) || tlb_compare_context(tlb, context))
        && compare_masked(address, tlb->tag, mask)) {
        /* decode physical address */
        *physical = ((tlb->tte & mask) | (address & ~mask)) & 0x1ffffffe000ULL;
        return 1;
    }

    return 0;
}

static int get_physical_address_data(CPUSPARCState *env,
                                     hwaddr *physical, int *prot,
                                     target_ulong address, int rw, int mmu_idx)
{
    CPUState *cs = CPU(sparc_env_get_cpu(env));
    unsigned int i;
    uint64_t context;
    uint64_t sfsr = 0;
    bool is_user = false;

    switch (mmu_idx) {
    case MMU_PHYS_IDX:
        g_assert_not_reached();
    case MMU_USER_IDX:
        is_user = true;
        /* fallthru */
    case MMU_KERNEL_IDX:
        context = env->dmmu.mmu_primary_context & 0x1fff;
        sfsr |= SFSR_CT_PRIMARY;
        break;
    case MMU_USER_SECONDARY_IDX:
        is_user = true;
        /* fallthru */
    case MMU_KERNEL_SECONDARY_IDX:
        context = env->dmmu.mmu_secondary_context & 0x1fff;
        sfsr |= SFSR_CT_SECONDARY;
        break;
    case MMU_NUCLEUS_IDX:
        sfsr |= SFSR_CT_NUCLEUS;
        /* FALLTHRU */
    default:
        context = 0;
        break;
    }

    if (rw == 1) {
        sfsr |= SFSR_WRITE_BIT;
    } else if (rw == 4) {
        sfsr |= SFSR_NF_BIT;
    }

    for (i = 0; i < 64; i++) {
        /* ctx match, vaddr match, valid? */
        if (ultrasparc_tag_match(&env->dtlb[i], address, context, physical)) {
            int do_fault = 0;

            /* access ok? */
            /* multiple bits in SFSR.FT may be set on TT_DFAULT */
            if (TTE_IS_PRIV(env->dtlb[i].tte) && is_user) {
                do_fault = 1;
                sfsr |= SFSR_FT_PRIV_BIT; /* privilege violation */
                trace_mmu_helper_dfault(address, context, mmu_idx, env->tl);
            }
            if (rw == 4) {
                if (TTE_IS_SIDEEFFECT(env->dtlb[i].tte)) {
                    do_fault = 1;
                    sfsr |= SFSR_FT_NF_E_BIT;
                }
            } else {
                if (TTE_IS_NFO(env->dtlb[i].tte)) {
                    do_fault = 1;
                    sfsr |= SFSR_FT_NFO_BIT;
                }
            }

            if (do_fault) {
                /* faults above are reported with TT_DFAULT. */
                cs->exception_index = TT_DFAULT;
            } else if (!TTE_IS_W_OK(env->dtlb[i].tte) && (rw == 1)) {
                do_fault = 1;
                cs->exception_index = TT_DPROT;

                trace_mmu_helper_dprot(address, context, mmu_idx, env->tl);
            }

            if (!do_fault) {
                *prot = PAGE_READ;
                if (TTE_IS_W_OK(env->dtlb[i].tte)) {
                    *prot |= PAGE_WRITE;
                }

                TTE_SET_USED(env->dtlb[i].tte);

                return 0;
            }

            if (env->dmmu.sfsr & SFSR_VALID_BIT) { /* Fault status register */
                sfsr |= SFSR_OW_BIT; /* overflow (not read before
                                        another fault) */
            }

            if (env->pstate & PS_PRIV) {
                sfsr |= SFSR_PR_BIT;
            }

            /* FIXME: ASI field in SFSR must be set */
            env->dmmu.sfsr = sfsr | SFSR_VALID_BIT;

            env->dmmu.sfar = address; /* Fault address register */

            env->dmmu.tag_access = (address & ~0x1fffULL) | context;

            return 1;
        }
    }

    trace_mmu_helper_dmiss(address, context);

    /*
     * On MMU misses:
     * - UltraSPARC IIi: SFSR and SFAR unmodified
     * - JPS1: SFAR updated and some fields of SFSR updated
     */
    env->dmmu.tag_access = (address & ~0x1fffULL) | context;
    cs->exception_index = TT_DMISS;
    return 1;
}

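/*
 * The instruction-side lookup below mirrors get_physical_address_data():
 * it uses the primary context at TL == 0 and the nucleus context otherwise,
 * honours the TTE privileged bit for user accesses, walks the 64 ITLB
 * entries, grants only PAGE_EXEC, and raises TT_TFAULT / TT_TMISS instead
 * of the data-side fault types.
 */
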
static int get_physical_address_code(CPUSPARCState *env,
                                     hwaddr *physical, int *prot,
                                     target_ulong address, int mmu_idx)
{
    CPUState *cs = CPU(sparc_env_get_cpu(env));
    unsigned int i;
    uint64_t context;
    bool is_user = false;

    switch (mmu_idx) {
    case MMU_PHYS_IDX:
    case MMU_USER_SECONDARY_IDX:
    case MMU_KERNEL_SECONDARY_IDX:
        g_assert_not_reached();
    case MMU_USER_IDX:
        is_user = true;
        /* fallthru */
    case MMU_KERNEL_IDX:
        context = env->dmmu.mmu_primary_context & 0x1fff;
        break;
    default:
        context = 0;
        break;
    }

    if (env->tl == 0) {
        /* PRIMARY context */
        context = env->dmmu.mmu_primary_context & 0x1fff;
    } else {
        /* NUCLEUS context */
        context = 0;
    }

    for (i = 0; i < 64; i++) {
        /* ctx match, vaddr match, valid? */
        if (ultrasparc_tag_match(&env->itlb[i],
                                 address, context, physical)) {
            /* access ok? */
            if (TTE_IS_PRIV(env->itlb[i].tte) && is_user) {
                /* Fault status register */
                if (env->immu.sfsr & SFSR_VALID_BIT) {
                    env->immu.sfsr = SFSR_OW_BIT; /* overflow (not read before
                                                     another fault) */
                } else {
                    env->immu.sfsr = 0;
                }
                if (env->pstate & PS_PRIV) {
                    env->immu.sfsr |= SFSR_PR_BIT;
                }
                if (env->tl > 0) {
                    env->immu.sfsr |= SFSR_CT_NUCLEUS;
                }

                /* FIXME: ASI field in SFSR must be set */
                env->immu.sfsr |= SFSR_FT_PRIV_BIT | SFSR_VALID_BIT;
                cs->exception_index = TT_TFAULT;

                env->immu.tag_access = (address & ~0x1fffULL) | context;

                trace_mmu_helper_tfault(address, context);

                return 1;
            }
            *prot = PAGE_EXEC;
            TTE_SET_USED(env->itlb[i].tte);
            return 0;
        }
    }

    trace_mmu_helper_tmiss(address, context);

    /* The context is stored in the DMMU (dmmuregs[1]) also for the IMMU */
    env->immu.tag_access = (address & ~0x1fffULL) | context;
    cs->exception_index = TT_TMISS;
    return 1;
}

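/*
 * Convention for "rw" in the helpers above and below: 0 = data read,
 * 1 = data write, 2 = instruction fetch, 4 = no-fault data access (used by
 * cpu_get_phys_page_nofault() near the end of this file).  On failure the
 * helpers above have already set cs->exception_index (TT_DFAULT, TT_DPROT,
 * TT_DMISS, TT_TFAULT or TT_TMISS), so sparc_cpu_handle_mmu_fault() below
 * only has to report the error to its caller.
 */
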
static int get_physical_address(CPUSPARCState *env, hwaddr *physical,
                                int *prot, int *access_index,
                                target_ulong address, int rw, int mmu_idx,
                                target_ulong *page_size)
{
    /* ??? We treat everything as a small page, then explicitly flush
       everything when an entry is evicted. */
    *page_size = TARGET_PAGE_SIZE;

    /* safety net to catch wrong softmmu index use from dynamic code */
    if (env->tl > 0 && mmu_idx != MMU_NUCLEUS_IDX) {
        if (rw == 2) {
            trace_mmu_helper_get_phys_addr_code(env->tl, mmu_idx,
                                                env->dmmu.mmu_primary_context,
                                                env->dmmu.mmu_secondary_context,
                                                address);
        } else {
            trace_mmu_helper_get_phys_addr_data(env->tl, mmu_idx,
                                                env->dmmu.mmu_primary_context,
                                                env->dmmu.mmu_secondary_context,
                                                address);
        }
    }

    if (mmu_idx == MMU_PHYS_IDX) {
        *physical = ultrasparc_truncate_physical(address);
        *prot = PAGE_READ | PAGE_WRITE | PAGE_EXEC;
        return 0;
    }

    if (rw == 2) {
        return get_physical_address_code(env, physical, prot, address,
                                         mmu_idx);
    } else {
        return get_physical_address_data(env, physical, prot, address, rw,
                                         mmu_idx);
    }
}

/* Perform address translation */
int sparc_cpu_handle_mmu_fault(CPUState *cs, vaddr address, int rw,
                               int mmu_idx)
{
    SPARCCPU *cpu = SPARC_CPU(cs);
    CPUSPARCState *env = &cpu->env;
    target_ulong vaddr;
    hwaddr paddr;
    target_ulong page_size;
    int error_code = 0, prot, access_index;

    address &= TARGET_PAGE_MASK;
    error_code = get_physical_address(env, &paddr, &prot, &access_index,
                                      address, rw, mmu_idx, &page_size);
    if (error_code == 0) {
        vaddr = address;

        trace_mmu_helper_mmu_fault(address, paddr, mmu_idx, env->tl,
                                   env->dmmu.mmu_primary_context,
                                   env->dmmu.mmu_secondary_context);

        tlb_set_page(cs, vaddr, paddr, prot, mmu_idx, page_size);
        return 0;
    }
    /* XXX */
    return 1;
}

void dump_mmu(FILE *f, fprintf_function cpu_fprintf, CPUSPARCState *env)
{
    unsigned int i;
    const char *mask;

    (*cpu_fprintf)(f, "MMU contexts: Primary: %" PRId64 ", Secondary: %"
                   PRId64 "\n",
                   env->dmmu.mmu_primary_context,
                   env->dmmu.mmu_secondary_context);
    (*cpu_fprintf)(f, "DMMU Tag Access: %" PRIx64 ", TSB Tag Target: %" PRIx64
                   "\n", env->dmmu.tag_access, env->dmmu.tsb_tag_target);
    if ((env->lsu & DMMU_E) == 0) {
        (*cpu_fprintf)(f, "DMMU disabled\n");
    } else {
        (*cpu_fprintf)(f, "DMMU dump\n");
        for (i = 0; i < 64; i++) {
            switch (TTE_PGSIZE(env->dtlb[i].tte)) {
            default:
            case 0x0:
                mask = "  8k";
                break;
            case 0x1:
                mask = " 64k";
                break;
            case 0x2:
                mask = "512k";
                break;
            case 0x3:
                mask = "  4M";
                break;
            }
            if (TTE_IS_VALID(env->dtlb[i].tte)) {
                (*cpu_fprintf)(f, "[%02u] VA: %" PRIx64 ", PA: %llx"
                               ", %s, %s, %s, %s, ctx %" PRId64 " %s\n",
                               i,
                               env->dtlb[i].tag & (uint64_t)~0x1fffULL,
                               TTE_PA(env->dtlb[i].tte),
                               mask,
                               TTE_IS_PRIV(env->dtlb[i].tte) ? "priv" : "user",
                               TTE_IS_W_OK(env->dtlb[i].tte) ? "RW" : "RO",
                               TTE_IS_LOCKED(env->dtlb[i].tte) ?
                               "locked" : "unlocked",
                               env->dtlb[i].tag & (uint64_t)0x1fffULL,
                               TTE_IS_GLOBAL(env->dtlb[i].tte) ?
                               "global" : "local");
            }
        }
    }
    if ((env->lsu & IMMU_E) == 0) {
        (*cpu_fprintf)(f, "IMMU disabled\n");
    } else {
        (*cpu_fprintf)(f, "IMMU dump\n");
        for (i = 0; i < 64; i++) {
            switch (TTE_PGSIZE(env->itlb[i].tte)) {
            default:
            case 0x0:
                mask = "  8k";
                break;
            case 0x1:
                mask = " 64k";
                break;
            case 0x2:
                mask = "512k";
                break;
            case 0x3:
                mask = "  4M";
                break;
            }
            if (TTE_IS_VALID(env->itlb[i].tte)) {
                (*cpu_fprintf)(f, "[%02u] VA: %" PRIx64 ", PA: %llx"
                               ", %s, %s, %s, ctx %" PRId64 " %s\n",
                               i,
                               env->itlb[i].tag & (uint64_t)~0x1fffULL,
                               TTE_PA(env->itlb[i].tte),
                               mask,
                               TTE_IS_PRIV(env->itlb[i].tte) ? "priv" : "user",
                               TTE_IS_LOCKED(env->itlb[i].tte) ?
                               "locked" : "unlocked",
                               env->itlb[i].tag & (uint64_t)0x1fffULL,
                               TTE_IS_GLOBAL(env->itlb[i].tte) ?
                               "global" : "local");
            }
        }
    }
}

#endif /* TARGET_SPARC64 */

static int cpu_sparc_get_phys_page(CPUSPARCState *env, hwaddr *phys,
                                   target_ulong addr, int rw, int mmu_idx)
{
    target_ulong page_size;
    int prot, access_index;

    return get_physical_address(env, phys, &prot, &access_index, addr, rw,
                                mmu_idx, &page_size);
}

#if defined(TARGET_SPARC64)
hwaddr cpu_get_phys_page_nofault(CPUSPARCState *env, target_ulong addr,
                                 int mmu_idx)
{
    hwaddr phys_addr;

    if (cpu_sparc_get_phys_page(env, &phys_addr, addr, 4, mmu_idx) != 0) {
        return -1;
    }
    return phys_addr;
}
#endif

hwaddr sparc_cpu_get_phys_page_debug(CPUState *cs, vaddr addr)
{
    SPARCCPU *cpu = SPARC_CPU(cs);
    CPUSPARCState *env = &cpu->env;
    hwaddr phys_addr;
    int mmu_idx = cpu_mmu_index(env, false);

    if (cpu_sparc_get_phys_page(env, &phys_addr, addr, 2, mmu_idx) != 0) {
        if (cpu_sparc_get_phys_page(env, &phys_addr, addr, 0, mmu_idx) != 0) {
            return -1;
        }
    }
    return phys_addr;
}
#endif