/*
 * Sparc MMU helpers
 *
 * Copyright (c) 2003-2005 Fabrice Bellard
 *
 * This library is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License as published by the Free Software Foundation; either
 * version 2 of the License, or (at your option) any later version.
 *
 * This library is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with this library; if not, see <http://www.gnu.org/licenses/>.
 */

#include "qemu/osdep.h"
#include "cpu.h"
#include "exec/exec-all.h"
#include "trace.h"

/* Sparc MMU emulation */

#if defined(CONFIG_USER_ONLY)

int sparc_cpu_handle_mmu_fault(CPUState *cs, vaddr address, int size, int rw,
                               int mmu_idx)
{
    SPARCCPU *cpu = SPARC_CPU(cs);
    CPUSPARCState *env = &cpu->env;

    if (rw & 2) {
        cs->exception_index = TT_TFAULT;
    } else {
        cs->exception_index = TT_DFAULT;
#ifdef TARGET_SPARC64
        env->dmmu.mmuregs[4] = address;
#else
        env->mmuregs[4] = address;
#endif
    }
    return 1;
}

#else

#ifndef TARGET_SPARC64
/*
 * Sparc V8 Reference MMU (SRMMU)
 */
static const int access_table[8][8] = {
    { 0, 0, 0, 0, 8, 0, 12, 12 },
    { 0, 0, 0, 0, 8, 0, 0, 0 },
    { 8, 8, 0, 0, 0, 8, 12, 12 },
    { 8, 8, 0, 0, 0, 8, 0, 0 },
    { 8, 0, 8, 0, 8, 8, 12, 12 },
    { 8, 0, 8, 0, 8, 0, 8, 0 },
    { 8, 8, 8, 0, 8, 8, 12, 12 },
    { 8, 8, 8, 0, 8, 8, 8, 0 }
};

static const int perm_table[2][8] = {
    {
        PAGE_READ,
        PAGE_READ | PAGE_WRITE,
        PAGE_READ | PAGE_EXEC,
        PAGE_READ | PAGE_WRITE | PAGE_EXEC,
        PAGE_EXEC,
        PAGE_READ | PAGE_WRITE,
        PAGE_READ | PAGE_EXEC,
        PAGE_READ | PAGE_WRITE | PAGE_EXEC
    },
    {
        PAGE_READ,
        PAGE_READ | PAGE_WRITE,
        PAGE_READ | PAGE_EXEC,
        PAGE_READ | PAGE_WRITE | PAGE_EXEC,
        PAGE_EXEC,
        PAGE_READ,
        0,
        0,
    }
};

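/*
 * Note on the tables above: access_table is indexed as
 * [access_index][ACC], where access_index is
 * (write << 2) | (instruction_fetch << 1) | supervisor as computed in
 * get_physical_address() below, and ACC is the 3-bit access permission
 * field of the PTE.  A non-zero entry is an SRMMU fault type already
 * shifted into the FT position of the fault status register (8 = FT 2,
 * protection error; 12 = FT 3, privilege violation), per the SPARC V8
 * reference MMU encoding.  For a permitted access,
 * perm_table[is_user][ACC] gives the QEMU page protection bits.
 */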

static int get_physical_address(CPUSPARCState *env, hwaddr *physical,
                                int *prot, int *access_index,
                                target_ulong address, int rw, int mmu_idx,
                                target_ulong *page_size)
{
    int access_perms = 0;
    hwaddr pde_ptr;
    uint32_t pde;
    int error_code = 0, is_dirty, is_user;
    unsigned long page_offset;
    CPUState *cs = CPU(sparc_env_get_cpu(env));

    is_user = mmu_idx == MMU_USER_IDX;

    if (mmu_idx == MMU_PHYS_IDX) {
        *page_size = TARGET_PAGE_SIZE;
        /* Boot mode: instruction fetches are taken from PROM */
        if (rw == 2 && (env->mmuregs[0] & env->def.mmu_bm)) {
            *physical = env->prom_addr | (address & 0x7ffffULL);
            *prot = PAGE_READ | PAGE_EXEC;
            return 0;
        }
        *physical = address;
        *prot = PAGE_READ | PAGE_WRITE | PAGE_EXEC;
        return 0;
    }

    *access_index = ((rw & 1) << 2) | (rw & 2) | (is_user ? 0 : 1);
    *physical = 0xffffffffffff0000ULL;

    /* SPARC reference MMU table walk: Context table->L1->L2->PTE */
    /* Context base + context number */
    pde_ptr = (env->mmuregs[1] << 4) + (env->mmuregs[2] << 2);
    pde = ldl_phys(cs->as, pde_ptr);

    /* Ctx pde */
    switch (pde & PTE_ENTRYTYPE_MASK) {
    default:
    case 0: /* Invalid */
        return 1 << 2;
    case 2: /* L0 PTE, maybe should not happen? */
    case 3: /* Reserved */
        return 4 << 2;
    case 1: /* L0 PDE */
        pde_ptr = ((address >> 22) & ~3) + ((pde & ~3) << 4);
        pde = ldl_phys(cs->as, pde_ptr);

        switch (pde & PTE_ENTRYTYPE_MASK) {
        default:
        case 0: /* Invalid */
            return (1 << 8) | (1 << 2);
        case 3: /* Reserved */
            return (1 << 8) | (4 << 2);
        case 1: /* L1 PDE */
            pde_ptr = ((address & 0xfc0000) >> 16) + ((pde & ~3) << 4);
            pde = ldl_phys(cs->as, pde_ptr);

            switch (pde & PTE_ENTRYTYPE_MASK) {
            default:
            case 0: /* Invalid */
                return (2 << 8) | (1 << 2);
            case 3: /* Reserved */
                return (2 << 8) | (4 << 2);
            case 1: /* L2 PDE */
                pde_ptr = ((address & 0x3f000) >> 10) + ((pde & ~3) << 4);
                pde = ldl_phys(cs->as, pde_ptr);

                switch (pde & PTE_ENTRYTYPE_MASK) {
                default:
                case 0: /* Invalid */
                    return (3 << 8) | (1 << 2);
                case 1: /* PDE, should not happen */
                case 3: /* Reserved */
                    return (3 << 8) | (4 << 2);
                case 2: /* L3 PTE */
                    page_offset = 0;
                }
                *page_size = TARGET_PAGE_SIZE;
                break;
            case 2: /* L2 PTE */
                page_offset = address & 0x3f000;
                *page_size = 0x40000;
            }
            break;
        case 2: /* L1 PTE */
            page_offset = address & 0xfff000;
            *page_size = 0x1000000;
        }
    }

    /* check access */
    access_perms = (pde & PTE_ACCESS_MASK) >> PTE_ACCESS_SHIFT;
    error_code = access_table[*access_index][access_perms];
    if (error_code && !((env->mmuregs[0] & MMU_NF) && is_user)) {
        return error_code;
    }

    /* update page modified and dirty bits */
    is_dirty = (rw & 1) && !(pde & PG_MODIFIED_MASK);
    if (!(pde & PG_ACCESSED_MASK) || is_dirty) {
        pde |= PG_ACCESSED_MASK;
        if (is_dirty) {
            pde |= PG_MODIFIED_MASK;
        }
        stl_phys_notdirty(cs->as, pde_ptr, pde);
    }

    /* the page can be put in the TLB */
    *prot = perm_table[is_user][access_perms];
    if (!(pde & PG_MODIFIED_MASK)) {
        /* only set write access if already dirty... otherwise wait
           for dirty access */
        *prot &= ~PAGE_WRITE;
    }

    /* Even if large ptes, we map only one 4KB page in the cache to
       avoid filling it too fast */
    *physical = ((hwaddr)(pde & PTE_ADDR_MASK) << 4) + page_offset;
    return error_code;
}

/* Perform address translation */
int sparc_cpu_handle_mmu_fault(CPUState *cs, vaddr address, int size, int rw,
                               int mmu_idx)
{
    SPARCCPU *cpu = SPARC_CPU(cs);
    CPUSPARCState *env = &cpu->env;
    hwaddr paddr;
    target_ulong vaddr;
    target_ulong page_size;
    int error_code = 0, prot, access_index;

    address &= TARGET_PAGE_MASK;
    error_code = get_physical_address(env, &paddr, &prot, &access_index,
                                      address, rw, mmu_idx, &page_size);
    vaddr = address;
    if (error_code == 0) {
        qemu_log_mask(CPU_LOG_MMU,
                      "Translate at %" VADDR_PRIx " -> " TARGET_FMT_plx ", vaddr "
                      TARGET_FMT_lx "\n", address, paddr, vaddr);
        tlb_set_page(cs, vaddr, paddr, prot, mmu_idx, page_size);
        return 0;
    }

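    /*
     * mmuregs[3] models the SRMMU fault status register: OW is bit 0, FAV
     * bit 1, FT bits 4:2, AT bits 7:5 and L bits 9:8 (SPARC V8 reference
     * MMU encoding).  error_code already carries FT and L in place, so
     * ORing in (access_index << 5) fills AT and the final "| 2" sets FAV;
     * mmuregs[4] is the fault address register.
     */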
    if (env->mmuregs[3]) { /* Fault status register */
        env->mmuregs[3] = 1; /* overflow (not read before another fault) */
    }
    env->mmuregs[3] |= (access_index << 5) | error_code | 2;
    env->mmuregs[4] = address; /* Fault address register */

    if ((env->mmuregs[0] & MMU_NF) || env->psret == 0) {
        /* No fault mode: if a mapping is available, just override
           permissions. If no mapping is available, redirect accesses to
           neverland. Fake/overridden mappings will be flushed when
           switching to normal mode. */
        prot = PAGE_READ | PAGE_WRITE | PAGE_EXEC;
        tlb_set_page(cs, vaddr, paddr, prot, mmu_idx, TARGET_PAGE_SIZE);
        return 0;
    } else {
        if (rw & 2) {
            cs->exception_index = TT_TFAULT;
        } else {
            cs->exception_index = TT_DFAULT;
        }
        return 1;
    }
}

target_ulong mmu_probe(CPUSPARCState *env, target_ulong address, int mmulev)
{
    CPUState *cs = CPU(sparc_env_get_cpu(env));
    hwaddr pde_ptr;
    uint32_t pde;

    /* Context base + context number */
    pde_ptr = (hwaddr)(env->mmuregs[1] << 4) +
        (env->mmuregs[2] << 2);
    pde = ldl_phys(cs->as, pde_ptr);

    switch (pde & PTE_ENTRYTYPE_MASK) {
    default:
    case 0: /* Invalid */
    case 2: /* PTE, maybe should not happen? */
    case 3: /* Reserved */
        return 0;
    case 1: /* L1 PDE */
        if (mmulev == 3) {
            return pde;
        }
        pde_ptr = ((address >> 22) & ~3) + ((pde & ~3) << 4);
        pde = ldl_phys(cs->as, pde_ptr);

        switch (pde & PTE_ENTRYTYPE_MASK) {
        default:
        case 0: /* Invalid */
        case 3: /* Reserved */
            return 0;
        case 2: /* L1 PTE */
            return pde;
        case 1: /* L2 PDE */
            if (mmulev == 2) {
                return pde;
            }
            pde_ptr = ((address & 0xfc0000) >> 16) + ((pde & ~3) << 4);
            pde = ldl_phys(cs->as, pde_ptr);

            switch (pde & PTE_ENTRYTYPE_MASK) {
            default:
            case 0: /* Invalid */
            case 3: /* Reserved */
                return 0;
            case 2: /* L2 PTE */
                return pde;
            case 1: /* L3 PDE */
                if (mmulev == 1) {
                    return pde;
                }
                pde_ptr = ((address & 0x3f000) >> 10) + ((pde & ~3) << 4);
                pde = ldl_phys(cs->as, pde_ptr);

                switch (pde & PTE_ENTRYTYPE_MASK) {
                default:
                case 0: /* Invalid */
                case 1: /* PDE, should not happen */
                case 3: /* Reserved */
                    return 0;
                case 2: /* L3 PTE */
                    return pde;
                }
            }
        }
    }
    return 0;
}

void dump_mmu(FILE *f, fprintf_function cpu_fprintf, CPUSPARCState *env)
{
    CPUState *cs = CPU(sparc_env_get_cpu(env));
    target_ulong va, va1, va2;
    unsigned int n, m, o;
    hwaddr pde_ptr, pa;
    uint32_t pde;

    pde_ptr = (env->mmuregs[1] << 4) + (env->mmuregs[2] << 2);
    pde = ldl_phys(cs->as, pde_ptr);
    (*cpu_fprintf)(f, "Root ptr: " TARGET_FMT_plx ", ctx: %d\n",
                   (hwaddr)env->mmuregs[1] << 4, env->mmuregs[2]);
    for (n = 0, va = 0; n < 256; n++, va += 16 * 1024 * 1024) {
        pde = mmu_probe(env, va, 2);
        if (pde) {
            pa = cpu_get_phys_page_debug(cs, va);
            (*cpu_fprintf)(f, "VA: " TARGET_FMT_lx ", PA: " TARGET_FMT_plx
                           " PDE: " TARGET_FMT_lx "\n", va, pa, pde);
            for (m = 0, va1 = va; m < 64; m++, va1 += 256 * 1024) {
                pde = mmu_probe(env, va1, 1);
                if (pde) {
                    pa = cpu_get_phys_page_debug(cs, va1);
                    (*cpu_fprintf)(f, " VA: " TARGET_FMT_lx ", PA: "
                                   TARGET_FMT_plx " PDE: " TARGET_FMT_lx "\n",
                                   va1, pa, pde);
                    for (o = 0, va2 = va1; o < 64; o++, va2 += 4 * 1024) {
                        pde = mmu_probe(env, va2, 0);
                        if (pde) {
                            pa = cpu_get_phys_page_debug(cs, va2);
                            (*cpu_fprintf)(f, "  VA: " TARGET_FMT_lx ", PA: "
                                           TARGET_FMT_plx " PTE: "
                                           TARGET_FMT_lx "\n",
                                           va2, pa, pde);
                        }
                    }
                }
            }
        }
    }
}

/* GDB expects all register windows to be flushed to RAM. This function handles
 * reads (and only reads) in stack frames as if the windows were flushed. We
 * assume that the SPARC ABI is followed.
 */
int sparc_cpu_memory_rw_debug(CPUState *cs, vaddr address,
                              uint8_t *buf, int len, bool is_write)
{
    SPARCCPU *cpu = SPARC_CPU(cs);
    CPUSPARCState *env = &cpu->env;
    target_ulong addr = address;
    int i;
    int len1;
    int cwp = env->cwp;

    if (!is_write) {
        for (i = 0; i < env->nwindows; i++) {
            int off;
            target_ulong fp = env->regbase[cwp * 16 + 22];

            /* Assume fp == 0 means end of frame. */
            if (fp == 0) {
                break;
            }

            cwp = cpu_cwp_inc(env, cwp + 1);

            /* Invalid window? */
            if (env->wim & (1 << cwp)) {
                break;
            }

            /* According to the ABI, the stack grows downward. */
            if (addr + len < fp) {
                break;
            }

            /* Not in this frame. */
            if (addr > fp + 64) {
                continue;
            }

            /* Handle access before this window. */
            if (addr < fp) {
                len1 = fp - addr;
                if (cpu_memory_rw_debug(cs, addr, buf, len1, is_write) != 0) {
                    return -1;
                }
                addr += len1;
                len -= len1;
                buf += len1;
            }

            /* Access byte per byte to registers. Not very efficient but speed
             * is not critical.
             */
            off = addr - fp;
            len1 = 64 - off;

            if (len1 > len) {
                len1 = len;
            }

            for (; len1; len1--) {
                int reg = cwp * 16 + 8 + (off >> 2);
                union {
                    uint32_t v;
                    uint8_t c[4];
                } u;
                u.v = cpu_to_be32(env->regbase[reg]);
                *buf++ = u.c[off & 3];
                addr++;
                len--;
                off++;
            }

            if (len == 0) {
                return 0;
            }
        }
    }
    return cpu_memory_rw_debug(cs, addr, buf, len, is_write);
}

#else /* !TARGET_SPARC64 */

/* 41 bit physical address space */
static inline hwaddr ultrasparc_truncate_physical(uint64_t x)
{
    return x & 0x1ffffffffffULL;
}

/*
 * UltraSparc IIi I/DMMUs
 */

/* Returns true if the TTE tag is valid and matches the virtual address
   value in the given context; requires the virtual address mask value
   calculated from the TTE entry size */
static inline int ultrasparc_tag_match(SparcTLBEntry *tlb,
                                       uint64_t address, uint64_t context,
                                       hwaddr *physical)
{
    uint64_t mask = -(8192ULL << 3 * TTE_PGSIZE(tlb->tte));
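    /*
     * For example, a 512 KB mapping has TTE size field 2, giving
     * mask = -(8192ULL << 6) = 0xfffffffffff80000ULL: bits [18:0] of the
     * physical address come from the access address, the rest from the TTE.
     */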

    /* valid, context match, virtual address match? */
    if (TTE_IS_VALID(tlb->tte) &&
        (TTE_IS_GLOBAL(tlb->tte) || tlb_compare_context(tlb, context))
        && compare_masked(address, tlb->tag, mask)) {
        /* decode physical address */
        *physical = ((tlb->tte & mask) | (address & ~mask)) & 0x1ffffffe000ULL;
        return 1;
    }

    return 0;
}

static int get_physical_address_data(CPUSPARCState *env,
                                     hwaddr *physical, int *prot,
                                     target_ulong address, int rw, int mmu_idx)
{
    CPUState *cs = CPU(sparc_env_get_cpu(env));
    unsigned int i;
    uint64_t context;
    uint64_t sfsr = 0;
    bool is_user = false;

    switch (mmu_idx) {
    case MMU_PHYS_IDX:
        g_assert_not_reached();
    case MMU_USER_IDX:
        is_user = true;
        /* fallthru */
    case MMU_KERNEL_IDX:
        context = env->dmmu.mmu_primary_context & 0x1fff;
        sfsr |= SFSR_CT_PRIMARY;
        break;
    case MMU_USER_SECONDARY_IDX:
        is_user = true;
        /* fallthru */
    case MMU_KERNEL_SECONDARY_IDX:
        context = env->dmmu.mmu_secondary_context & 0x1fff;
        sfsr |= SFSR_CT_SECONDARY;
        break;
    case MMU_NUCLEUS_IDX:
        sfsr |= SFSR_CT_NUCLEUS;
        /* FALLTHRU */
    default:
        context = 0;
        break;
    }

    if (rw == 1) {
        sfsr |= SFSR_WRITE_BIT;
    } else if (rw == 4) {
        sfsr |= SFSR_NF_BIT;
    }

    for (i = 0; i < 64; i++) {
        /* ctx match, vaddr match, valid? */
        if (ultrasparc_tag_match(&env->dtlb[i], address, context, physical)) {
            int do_fault = 0;

            /* access ok? */
            /* multiple bits in SFSR.FT may be set on TT_DFAULT */
            if (TTE_IS_PRIV(env->dtlb[i].tte) && is_user) {
                do_fault = 1;
                sfsr |= SFSR_FT_PRIV_BIT; /* privilege violation */
                trace_mmu_helper_dfault(address, context, mmu_idx, env->tl);
            }
            if (rw == 4) {
                if (TTE_IS_SIDEEFFECT(env->dtlb[i].tte)) {
                    do_fault = 1;
                    sfsr |= SFSR_FT_NF_E_BIT;
                }
            } else {
                if (TTE_IS_NFO(env->dtlb[i].tte)) {
                    do_fault = 1;
                    sfsr |= SFSR_FT_NFO_BIT;
                }
            }

            if (do_fault) {
                /* faults above are reported with TT_DFAULT. */
                cs->exception_index = TT_DFAULT;
            } else if (!TTE_IS_W_OK(env->dtlb[i].tte) && (rw == 1)) {
                do_fault = 1;
                cs->exception_index = TT_DPROT;

                trace_mmu_helper_dprot(address, context, mmu_idx, env->tl);
            }

            if (!do_fault) {
                *prot = PAGE_READ;
                if (TTE_IS_W_OK(env->dtlb[i].tte)) {
                    *prot |= PAGE_WRITE;
                }

                TTE_SET_USED(env->dtlb[i].tte);

                return 0;
            }

            if (env->dmmu.sfsr & SFSR_VALID_BIT) { /* Fault status register */
                sfsr |= SFSR_OW_BIT; /* overflow (not read before
                                        another fault) */
            }

            if (env->pstate & PS_PRIV) {
                sfsr |= SFSR_PR_BIT;
            }

            /* FIXME: ASI field in SFSR must be set */
            env->dmmu.sfsr = sfsr | SFSR_VALID_BIT;

            env->dmmu.sfar = address; /* Fault address register */

            env->dmmu.tag_access = (address & ~0x1fffULL) | context;

            return 1;
        }
    }

    trace_mmu_helper_dmiss(address, context);

    /*
     * On MMU misses:
     * - UltraSPARC IIi: SFSR and SFAR unmodified
     * - JPS1: SFAR updated and some fields of SFSR updated
     */
    env->dmmu.tag_access = (address & ~0x1fffULL) | context;
    cs->exception_index = TT_DMISS;
    return 1;
}

static int get_physical_address_code(CPUSPARCState *env,
                                     hwaddr *physical, int *prot,
                                     target_ulong address, int mmu_idx)
{
    CPUState *cs = CPU(sparc_env_get_cpu(env));
    unsigned int i;
    uint64_t context;
    bool is_user = false;

    switch (mmu_idx) {
    case MMU_PHYS_IDX:
    case MMU_USER_SECONDARY_IDX:
    case MMU_KERNEL_SECONDARY_IDX:
        g_assert_not_reached();
    case MMU_USER_IDX:
        is_user = true;
        /* fallthru */
    case MMU_KERNEL_IDX:
        context = env->dmmu.mmu_primary_context & 0x1fff;
        break;
    default:
        context = 0;
        break;
    }

    if (env->tl == 0) {
        /* PRIMARY context */
        context = env->dmmu.mmu_primary_context & 0x1fff;
    } else {
        /* NUCLEUS context */
        context = 0;
    }

    for (i = 0; i < 64; i++) {
        /* ctx match, vaddr match, valid? */
        if (ultrasparc_tag_match(&env->itlb[i],
                                 address, context, physical)) {
            /* access ok? */
            if (TTE_IS_PRIV(env->itlb[i].tte) && is_user) {
                /* Fault status register */
                if (env->immu.sfsr & SFSR_VALID_BIT) {
                    env->immu.sfsr = SFSR_OW_BIT; /* overflow (not read before
                                                     another fault) */
                } else {
                    env->immu.sfsr = 0;
                }
                if (env->pstate & PS_PRIV) {
                    env->immu.sfsr |= SFSR_PR_BIT;
                }
                if (env->tl > 0) {
                    env->immu.sfsr |= SFSR_CT_NUCLEUS;
                }

                /* FIXME: ASI field in SFSR must be set */
                env->immu.sfsr |= SFSR_FT_PRIV_BIT | SFSR_VALID_BIT;
                cs->exception_index = TT_TFAULT;

                env->immu.tag_access = (address & ~0x1fffULL) | context;

                trace_mmu_helper_tfault(address, context);

                return 1;
            }
            *prot = PAGE_EXEC;
            TTE_SET_USED(env->itlb[i].tte);
            return 0;
        }
    }

    trace_mmu_helper_tmiss(address, context);

    /* Context is stored in DMMU (dmmuregs[1]) also for IMMU */
    env->immu.tag_access = (address & ~0x1fffULL) | context;
    cs->exception_index = TT_TMISS;
    return 1;
}

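/*
 * Throughout this file rw encodes the access type: 0 is a data load, 1 a
 * data store, 2 an instruction fetch and 4 a no-fault (non-faulting ASI)
 * load, the latter passed in by cpu_get_phys_page_nofault() below.
 */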
static int get_physical_address(CPUSPARCState *env, hwaddr *physical,
                                int *prot, int *access_index,
                                target_ulong address, int rw, int mmu_idx,
                                target_ulong *page_size)
{
    /* ??? We treat everything as a small page, then explicitly flush
       everything when an entry is evicted. */
    *page_size = TARGET_PAGE_SIZE;

    /* safety net to catch wrong softmmu index use from dynamic code */
    if (env->tl > 0 && mmu_idx != MMU_NUCLEUS_IDX) {
        if (rw == 2) {
            trace_mmu_helper_get_phys_addr_code(env->tl, mmu_idx,
                                                env->dmmu.mmu_primary_context,
                                                env->dmmu.mmu_secondary_context,
                                                address);
        } else {
            trace_mmu_helper_get_phys_addr_data(env->tl, mmu_idx,
                                                env->dmmu.mmu_primary_context,
                                                env->dmmu.mmu_secondary_context,
                                                address);
        }
    }

    if (mmu_idx == MMU_PHYS_IDX) {
        *physical = ultrasparc_truncate_physical(address);
        *prot = PAGE_READ | PAGE_WRITE | PAGE_EXEC;
        return 0;
    }

    if (rw == 2) {
        return get_physical_address_code(env, physical, prot, address,
                                         mmu_idx);
    } else {
        return get_physical_address_data(env, physical, prot, address, rw,
                                         mmu_idx);
    }
}

/* Perform address translation */
int sparc_cpu_handle_mmu_fault(CPUState *cs, vaddr address, int size, int rw,
                               int mmu_idx)
{
    SPARCCPU *cpu = SPARC_CPU(cs);
    CPUSPARCState *env = &cpu->env;
    target_ulong vaddr;
    hwaddr paddr;
    target_ulong page_size;
    int error_code = 0, prot, access_index;

    address &= TARGET_PAGE_MASK;
    error_code = get_physical_address(env, &paddr, &prot, &access_index,
                                      address, rw, mmu_idx, &page_size);
    if (error_code == 0) {
        vaddr = address;

        trace_mmu_helper_mmu_fault(address, paddr, mmu_idx, env->tl,
                                   env->dmmu.mmu_primary_context,
                                   env->dmmu.mmu_secondary_context);

        tlb_set_page(cs, vaddr, paddr, prot, mmu_idx, page_size);
        return 0;
    }
    /* XXX */
    return 1;
}

void dump_mmu(FILE *f, fprintf_function cpu_fprintf, CPUSPARCState *env)
{
    unsigned int i;
    const char *mask;

    (*cpu_fprintf)(f, "MMU contexts: Primary: %" PRId64 ", Secondary: %"
                   PRId64 "\n",
                   env->dmmu.mmu_primary_context,
                   env->dmmu.mmu_secondary_context);
    (*cpu_fprintf)(f, "DMMU Tag Access: %" PRIx64 ", TSB Tag Target: %" PRIx64
                   "\n", env->dmmu.tag_access, env->dmmu.tsb_tag_target);
    if ((env->lsu & DMMU_E) == 0) {
        (*cpu_fprintf)(f, "DMMU disabled\n");
    } else {
        (*cpu_fprintf)(f, "DMMU dump\n");
        for (i = 0; i < 64; i++) {
            switch (TTE_PGSIZE(env->dtlb[i].tte)) {
            default:
            case 0x0:
                mask = "  8k";
                break;
            case 0x1:
                mask = " 64k";
                break;
            case 0x2:
                mask = "512k";
                break;
            case 0x3:
                mask = "  4M";
                break;
            }
            if (TTE_IS_VALID(env->dtlb[i].tte)) {
                (*cpu_fprintf)(f, "[%02u] VA: %" PRIx64 ", PA: %llx"
                               ", %s, %s, %s, %s, ctx %" PRId64 " %s\n",
                               i,
                               env->dtlb[i].tag & (uint64_t)~0x1fffULL,
                               TTE_PA(env->dtlb[i].tte),
                               mask,
                               TTE_IS_PRIV(env->dtlb[i].tte) ? "priv" : "user",
                               TTE_IS_W_OK(env->dtlb[i].tte) ? "RW" : "RO",
                               TTE_IS_LOCKED(env->dtlb[i].tte) ?
                               "locked" : "unlocked",
                               env->dtlb[i].tag & (uint64_t)0x1fffULL,
                               TTE_IS_GLOBAL(env->dtlb[i].tte) ?
                               "global" : "local");
            }
        }
    }
    if ((env->lsu & IMMU_E) == 0) {
        (*cpu_fprintf)(f, "IMMU disabled\n");
    } else {
        (*cpu_fprintf)(f, "IMMU dump\n");
        for (i = 0; i < 64; i++) {
            switch (TTE_PGSIZE(env->itlb[i].tte)) {
            default:
            case 0x0:
                mask = "  8k";
                break;
            case 0x1:
                mask = " 64k";
                break;
            case 0x2:
                mask = "512k";
                break;
            case 0x3:
                mask = "  4M";
                break;
            }
            if (TTE_IS_VALID(env->itlb[i].tte)) {
                (*cpu_fprintf)(f, "[%02u] VA: %" PRIx64 ", PA: %llx"
                               ", %s, %s, %s, ctx %" PRId64 " %s\n",
                               i,
                               env->itlb[i].tag & (uint64_t)~0x1fffULL,
                               TTE_PA(env->itlb[i].tte),
                               mask,
                               TTE_IS_PRIV(env->itlb[i].tte) ? "priv" : "user",
                               TTE_IS_LOCKED(env->itlb[i].tte) ?
                               "locked" : "unlocked",
                               env->itlb[i].tag & (uint64_t)0x1fffULL,
                               TTE_IS_GLOBAL(env->itlb[i].tte) ?
                               "global" : "local");
            }
        }
    }
}

#endif /* TARGET_SPARC64 */

static int cpu_sparc_get_phys_page(CPUSPARCState *env, hwaddr *phys,
                                   target_ulong addr, int rw, int mmu_idx)
{
    target_ulong page_size;
    int prot, access_index;

    return get_physical_address(env, phys, &prot, &access_index, addr, rw,
                                mmu_idx, &page_size);
}

#if defined(TARGET_SPARC64)
hwaddr cpu_get_phys_page_nofault(CPUSPARCState *env, target_ulong addr,
                                 int mmu_idx)
{
    hwaddr phys_addr;

    if (cpu_sparc_get_phys_page(env, &phys_addr, addr, 4, mmu_idx) != 0) {
        return -1;
    }
    return phys_addr;
}
#endif

hwaddr sparc_cpu_get_phys_page_debug(CPUState *cs, vaddr addr)
{
    SPARCCPU *cpu = SPARC_CPU(cs);
    CPUSPARCState *env = &cpu->env;
    hwaddr phys_addr;
    int mmu_idx = cpu_mmu_index(env, false);

    if (cpu_sparc_get_phys_page(env, &phys_addr, addr, 2, mmu_idx) != 0) {
        if (cpu_sparc_get_phys_page(env, &phys_addr, addr, 0, mmu_idx) != 0) {
            return -1;
        }
    }
    return phys_addr;
}
#endif