1 /* 2 * Sparc MMU helpers 3 * 4 * Copyright (c) 2003-2005 Fabrice Bellard 5 * 6 * This library is free software; you can redistribute it and/or 7 * modify it under the terms of the GNU Lesser General Public 8 * License as published by the Free Software Foundation; either 9 * version 2 of the License, or (at your option) any later version. 10 * 11 * This library is distributed in the hope that it will be useful, 12 * but WITHOUT ANY WARRANTY; without even the implied warranty of 13 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU 14 * Lesser General Public License for more details. 15 * 16 * You should have received a copy of the GNU Lesser General Public 17 * License along with this library; if not, see <http://www.gnu.org/licenses/>. 18 */ 19 20 #include "qemu/osdep.h" 21 #include "cpu.h" 22 #include "exec/exec-all.h" 23 #include "qemu/qemu-print.h" 24 #include "trace.h" 25 26 /* Sparc MMU emulation */ 27 28 #if defined(CONFIG_USER_ONLY) 29 30 bool sparc_cpu_tlb_fill(CPUState *cs, vaddr address, int size, 31 MMUAccessType access_type, int mmu_idx, 32 bool probe, uintptr_t retaddr) 33 { 34 SPARCCPU *cpu = SPARC_CPU(cs); 35 CPUSPARCState *env = &cpu->env; 36 37 if (access_type == MMU_INST_FETCH) { 38 cs->exception_index = TT_TFAULT; 39 } else { 40 cs->exception_index = TT_DFAULT; 41 #ifdef TARGET_SPARC64 42 env->dmmu.mmuregs[4] = address; 43 #else 44 env->mmuregs[4] = address; 45 #endif 46 } 47 cpu_loop_exit_restore(cs, retaddr); 48 } 49 50 #else 51 52 #ifndef TARGET_SPARC64 53 /* 54 * Sparc V8 Reference MMU (SRMMU) 55 */ 56 static const int access_table[8][8] = { 57 { 0, 0, 0, 0, 8, 0, 12, 12 }, 58 { 0, 0, 0, 0, 8, 0, 0, 0 }, 59 { 8, 8, 0, 0, 0, 8, 12, 12 }, 60 { 8, 8, 0, 0, 0, 8, 0, 0 }, 61 { 8, 0, 8, 0, 8, 8, 12, 12 }, 62 { 8, 0, 8, 0, 8, 0, 8, 0 }, 63 { 8, 8, 8, 0, 8, 8, 12, 12 }, 64 { 8, 8, 8, 0, 8, 8, 8, 0 } 65 }; 66 67 static const int perm_table[2][8] = { 68 { 69 PAGE_READ, 70 PAGE_READ | PAGE_WRITE, 71 PAGE_READ | PAGE_EXEC, 72 PAGE_READ | PAGE_WRITE | PAGE_EXEC, 73 PAGE_EXEC, 74 PAGE_READ | PAGE_WRITE, 75 PAGE_READ | PAGE_EXEC, 76 PAGE_READ | PAGE_WRITE | PAGE_EXEC 77 }, 78 { 79 PAGE_READ, 80 PAGE_READ | PAGE_WRITE, 81 PAGE_READ | PAGE_EXEC, 82 PAGE_READ | PAGE_WRITE | PAGE_EXEC, 83 PAGE_EXEC, 84 PAGE_READ, 85 0, 86 0, 87 } 88 }; 89 90 static int get_physical_address(CPUSPARCState *env, hwaddr *physical, 91 int *prot, int *access_index, 92 target_ulong address, int rw, int mmu_idx, 93 target_ulong *page_size) 94 { 95 int access_perms = 0; 96 hwaddr pde_ptr; 97 uint32_t pde; 98 int error_code = 0, is_dirty, is_user; 99 unsigned long page_offset; 100 CPUState *cs = CPU(sparc_env_get_cpu(env)); 101 102 is_user = mmu_idx == MMU_USER_IDX; 103 104 if (mmu_idx == MMU_PHYS_IDX) { 105 *page_size = TARGET_PAGE_SIZE; 106 /* Boot mode: instruction fetches are taken from PROM */ 107 if (rw == 2 && (env->mmuregs[0] & env->def.mmu_bm)) { 108 *physical = env->prom_addr | (address & 0x7ffffULL); 109 *prot = PAGE_READ | PAGE_EXEC; 110 return 0; 111 } 112 *physical = address; 113 *prot = PAGE_READ | PAGE_WRITE | PAGE_EXEC; 114 return 0; 115 } 116 117 *access_index = ((rw & 1) << 2) | (rw & 2) | (is_user ? 0 : 1); 118 *physical = 0xffffffffffff0000ULL; 119 120 /* SPARC reference MMU table walk: Context table->L1->L2->PTE */ 121 /* Context base + context number */ 122 pde_ptr = (env->mmuregs[1] << 4) + (env->mmuregs[2] << 2); 123 pde = ldl_phys(cs->as, pde_ptr); 124 125 /* Ctx pde */ 126 switch (pde & PTE_ENTRYTYPE_MASK) { 127 default: 128 case 0: /* Invalid */ 129 return 1 << 2; 130 case 2: /* L0 PTE, maybe should not happen? */ 131 case 3: /* Reserved */ 132 return 4 << 2; 133 case 1: /* L0 PDE */ 134 pde_ptr = ((address >> 22) & ~3) + ((pde & ~3) << 4); 135 pde = ldl_phys(cs->as, pde_ptr); 136 137 switch (pde & PTE_ENTRYTYPE_MASK) { 138 default: 139 case 0: /* Invalid */ 140 return (1 << 8) | (1 << 2); 141 case 3: /* Reserved */ 142 return (1 << 8) | (4 << 2); 143 case 1: /* L1 PDE */ 144 pde_ptr = ((address & 0xfc0000) >> 16) + ((pde & ~3) << 4); 145 pde = ldl_phys(cs->as, pde_ptr); 146 147 switch (pde & PTE_ENTRYTYPE_MASK) { 148 default: 149 case 0: /* Invalid */ 150 return (2 << 8) | (1 << 2); 151 case 3: /* Reserved */ 152 return (2 << 8) | (4 << 2); 153 case 1: /* L2 PDE */ 154 pde_ptr = ((address & 0x3f000) >> 10) + ((pde & ~3) << 4); 155 pde = ldl_phys(cs->as, pde_ptr); 156 157 switch (pde & PTE_ENTRYTYPE_MASK) { 158 default: 159 case 0: /* Invalid */ 160 return (3 << 8) | (1 << 2); 161 case 1: /* PDE, should not happen */ 162 case 3: /* Reserved */ 163 return (3 << 8) | (4 << 2); 164 case 2: /* L3 PTE */ 165 page_offset = 0; 166 } 167 *page_size = TARGET_PAGE_SIZE; 168 break; 169 case 2: /* L2 PTE */ 170 page_offset = address & 0x3f000; 171 *page_size = 0x40000; 172 } 173 break; 174 case 2: /* L1 PTE */ 175 page_offset = address & 0xfff000; 176 *page_size = 0x1000000; 177 } 178 } 179 180 /* check access */ 181 access_perms = (pde & PTE_ACCESS_MASK) >> PTE_ACCESS_SHIFT; 182 error_code = access_table[*access_index][access_perms]; 183 if (error_code && !((env->mmuregs[0] & MMU_NF) && is_user)) { 184 return error_code; 185 } 186 187 /* update page modified and dirty bits */ 188 is_dirty = (rw & 1) && !(pde & PG_MODIFIED_MASK); 189 if (!(pde & PG_ACCESSED_MASK) || is_dirty) { 190 pde |= PG_ACCESSED_MASK; 191 if (is_dirty) { 192 pde |= PG_MODIFIED_MASK; 193 } 194 stl_phys_notdirty(cs->as, pde_ptr, pde); 195 } 196 197 /* the page can be put in the TLB */ 198 *prot = perm_table[is_user][access_perms]; 199 if (!(pde & PG_MODIFIED_MASK)) { 200 /* only set write access if already dirty... otherwise wait 201 for dirty access */ 202 *prot &= ~PAGE_WRITE; 203 } 204 205 /* Even if large ptes, we map only one 4KB page in the cache to 206 avoid filling it too fast */ 207 *physical = ((hwaddr)(pde & PTE_ADDR_MASK) << 4) + page_offset; 208 return error_code; 209 } 210 211 /* Perform address translation */ 212 bool sparc_cpu_tlb_fill(CPUState *cs, vaddr address, int size, 213 MMUAccessType access_type, int mmu_idx, 214 bool probe, uintptr_t retaddr) 215 { 216 SPARCCPU *cpu = SPARC_CPU(cs); 217 CPUSPARCState *env = &cpu->env; 218 hwaddr paddr; 219 target_ulong vaddr; 220 target_ulong page_size; 221 int error_code = 0, prot, access_index; 222 223 /* 224 * TODO: If we ever need tlb_vaddr_to_host for this target, 225 * then we must figure out how to manipulate FSR and FAR 226 * when both MMU_NF and probe are set. In the meantime, 227 * do not support this use case. 228 */ 229 assert(!probe); 230 231 address &= TARGET_PAGE_MASK; 232 error_code = get_physical_address(env, &paddr, &prot, &access_index, 233 address, access_type, 234 mmu_idx, &page_size); 235 vaddr = address; 236 if (likely(error_code == 0)) { 237 qemu_log_mask(CPU_LOG_MMU, 238 "Translate at %" VADDR_PRIx " -> " 239 TARGET_FMT_plx ", vaddr " TARGET_FMT_lx "\n", 240 address, paddr, vaddr); 241 tlb_set_page(cs, vaddr, paddr, prot, mmu_idx, page_size); 242 return true; 243 } 244 245 if (env->mmuregs[3]) { /* Fault status register */ 246 env->mmuregs[3] = 1; /* overflow (not read before another fault) */ 247 } 248 env->mmuregs[3] |= (access_index << 5) | error_code | 2; 249 env->mmuregs[4] = address; /* Fault address register */ 250 251 if ((env->mmuregs[0] & MMU_NF) || env->psret == 0) { 252 /* No fault mode: if a mapping is available, just override 253 permissions. If no mapping is available, redirect accesses to 254 neverland. Fake/overridden mappings will be flushed when 255 switching to normal mode. */ 256 prot = PAGE_READ | PAGE_WRITE | PAGE_EXEC; 257 tlb_set_page(cs, vaddr, paddr, prot, mmu_idx, TARGET_PAGE_SIZE); 258 return true; 259 } else { 260 if (access_type == MMU_INST_FETCH) { 261 cs->exception_index = TT_TFAULT; 262 } else { 263 cs->exception_index = TT_DFAULT; 264 } 265 cpu_loop_exit_restore(cs, retaddr); 266 } 267 } 268 269 target_ulong mmu_probe(CPUSPARCState *env, target_ulong address, int mmulev) 270 { 271 CPUState *cs = CPU(sparc_env_get_cpu(env)); 272 hwaddr pde_ptr; 273 uint32_t pde; 274 275 /* Context base + context number */ 276 pde_ptr = (hwaddr)(env->mmuregs[1] << 4) + 277 (env->mmuregs[2] << 2); 278 pde = ldl_phys(cs->as, pde_ptr); 279 280 switch (pde & PTE_ENTRYTYPE_MASK) { 281 default: 282 case 0: /* Invalid */ 283 case 2: /* PTE, maybe should not happen? */ 284 case 3: /* Reserved */ 285 return 0; 286 case 1: /* L1 PDE */ 287 if (mmulev == 3) { 288 return pde; 289 } 290 pde_ptr = ((address >> 22) & ~3) + ((pde & ~3) << 4); 291 pde = ldl_phys(cs->as, pde_ptr); 292 293 switch (pde & PTE_ENTRYTYPE_MASK) { 294 default: 295 case 0: /* Invalid */ 296 case 3: /* Reserved */ 297 return 0; 298 case 2: /* L1 PTE */ 299 return pde; 300 case 1: /* L2 PDE */ 301 if (mmulev == 2) { 302 return pde; 303 } 304 pde_ptr = ((address & 0xfc0000) >> 16) + ((pde & ~3) << 4); 305 pde = ldl_phys(cs->as, pde_ptr); 306 307 switch (pde & PTE_ENTRYTYPE_MASK) { 308 default: 309 case 0: /* Invalid */ 310 case 3: /* Reserved */ 311 return 0; 312 case 2: /* L2 PTE */ 313 return pde; 314 case 1: /* L3 PDE */ 315 if (mmulev == 1) { 316 return pde; 317 } 318 pde_ptr = ((address & 0x3f000) >> 10) + ((pde & ~3) << 4); 319 pde = ldl_phys(cs->as, pde_ptr); 320 321 switch (pde & PTE_ENTRYTYPE_MASK) { 322 default: 323 case 0: /* Invalid */ 324 case 1: /* PDE, should not happen */ 325 case 3: /* Reserved */ 326 return 0; 327 case 2: /* L3 PTE */ 328 return pde; 329 } 330 } 331 } 332 } 333 return 0; 334 } 335 336 void dump_mmu(CPUSPARCState *env) 337 { 338 CPUState *cs = CPU(sparc_env_get_cpu(env)); 339 target_ulong va, va1, va2; 340 unsigned int n, m, o; 341 hwaddr pde_ptr, pa; 342 uint32_t pde; 343 344 pde_ptr = (env->mmuregs[1] << 4) + (env->mmuregs[2] << 2); 345 pde = ldl_phys(cs->as, pde_ptr); 346 qemu_printf("Root ptr: " TARGET_FMT_plx ", ctx: %d\n", 347 (hwaddr)env->mmuregs[1] << 4, env->mmuregs[2]); 348 for (n = 0, va = 0; n < 256; n++, va += 16 * 1024 * 1024) { 349 pde = mmu_probe(env, va, 2); 350 if (pde) { 351 pa = cpu_get_phys_page_debug(cs, va); 352 qemu_printf("VA: " TARGET_FMT_lx ", PA: " TARGET_FMT_plx 353 " PDE: " TARGET_FMT_lx "\n", va, pa, pde); 354 for (m = 0, va1 = va; m < 64; m++, va1 += 256 * 1024) { 355 pde = mmu_probe(env, va1, 1); 356 if (pde) { 357 pa = cpu_get_phys_page_debug(cs, va1); 358 qemu_printf(" VA: " TARGET_FMT_lx ", PA: " 359 TARGET_FMT_plx " PDE: " TARGET_FMT_lx "\n", 360 va1, pa, pde); 361 for (o = 0, va2 = va1; o < 64; o++, va2 += 4 * 1024) { 362 pde = mmu_probe(env, va2, 0); 363 if (pde) { 364 pa = cpu_get_phys_page_debug(cs, va2); 365 qemu_printf(" VA: " TARGET_FMT_lx ", PA: " 366 TARGET_FMT_plx " PTE: " 367 TARGET_FMT_lx "\n", 368 va2, pa, pde); 369 } 370 } 371 } 372 } 373 } 374 } 375 } 376 377 /* Gdb expects all registers windows to be flushed in ram. This function handles 378 * reads (and only reads) in stack frames as if windows were flushed. We assume 379 * that the sparc ABI is followed. 380 */ 381 int sparc_cpu_memory_rw_debug(CPUState *cs, vaddr address, 382 uint8_t *buf, int len, bool is_write) 383 { 384 SPARCCPU *cpu = SPARC_CPU(cs); 385 CPUSPARCState *env = &cpu->env; 386 target_ulong addr = address; 387 int i; 388 int len1; 389 int cwp = env->cwp; 390 391 if (!is_write) { 392 for (i = 0; i < env->nwindows; i++) { 393 int off; 394 target_ulong fp = env->regbase[cwp * 16 + 22]; 395 396 /* Assume fp == 0 means end of frame. */ 397 if (fp == 0) { 398 break; 399 } 400 401 cwp = cpu_cwp_inc(env, cwp + 1); 402 403 /* Invalid window ? */ 404 if (env->wim & (1 << cwp)) { 405 break; 406 } 407 408 /* According to the ABI, the stack is growing downward. */ 409 if (addr + len < fp) { 410 break; 411 } 412 413 /* Not in this frame. */ 414 if (addr > fp + 64) { 415 continue; 416 } 417 418 /* Handle access before this window. */ 419 if (addr < fp) { 420 len1 = fp - addr; 421 if (cpu_memory_rw_debug(cs, addr, buf, len1, is_write) != 0) { 422 return -1; 423 } 424 addr += len1; 425 len -= len1; 426 buf += len1; 427 } 428 429 /* Access byte per byte to registers. Not very efficient but speed 430 * is not critical. 431 */ 432 off = addr - fp; 433 len1 = 64 - off; 434 435 if (len1 > len) { 436 len1 = len; 437 } 438 439 for (; len1; len1--) { 440 int reg = cwp * 16 + 8 + (off >> 2); 441 union { 442 uint32_t v; 443 uint8_t c[4]; 444 } u; 445 u.v = cpu_to_be32(env->regbase[reg]); 446 *buf++ = u.c[off & 3]; 447 addr++; 448 len--; 449 off++; 450 } 451 452 if (len == 0) { 453 return 0; 454 } 455 } 456 } 457 return cpu_memory_rw_debug(cs, addr, buf, len, is_write); 458 } 459 460 #else /* !TARGET_SPARC64 */ 461 462 /* 41 bit physical address space */ 463 static inline hwaddr ultrasparc_truncate_physical(uint64_t x) 464 { 465 return x & 0x1ffffffffffULL; 466 } 467 468 /* 469 * UltraSparc IIi I/DMMUs 470 */ 471 472 /* Returns true if TTE tag is valid and matches virtual address value 473 in context requires virtual address mask value calculated from TTE 474 entry size */ 475 static inline int ultrasparc_tag_match(SparcTLBEntry *tlb, 476 uint64_t address, uint64_t context, 477 hwaddr *physical) 478 { 479 uint64_t mask = -(8192ULL << 3 * TTE_PGSIZE(tlb->tte)); 480 481 /* valid, context match, virtual address match? */ 482 if (TTE_IS_VALID(tlb->tte) && 483 (TTE_IS_GLOBAL(tlb->tte) || tlb_compare_context(tlb, context)) 484 && compare_masked(address, tlb->tag, mask)) { 485 /* decode physical address */ 486 *physical = ((tlb->tte & mask) | (address & ~mask)) & 0x1ffffffe000ULL; 487 return 1; 488 } 489 490 return 0; 491 } 492 493 static int get_physical_address_data(CPUSPARCState *env, 494 hwaddr *physical, int *prot, 495 target_ulong address, int rw, int mmu_idx) 496 { 497 CPUState *cs = CPU(sparc_env_get_cpu(env)); 498 unsigned int i; 499 uint64_t context; 500 uint64_t sfsr = 0; 501 bool is_user = false; 502 503 switch (mmu_idx) { 504 case MMU_PHYS_IDX: 505 g_assert_not_reached(); 506 case MMU_USER_IDX: 507 is_user = true; 508 /* fallthru */ 509 case MMU_KERNEL_IDX: 510 context = env->dmmu.mmu_primary_context & 0x1fff; 511 sfsr |= SFSR_CT_PRIMARY; 512 break; 513 case MMU_USER_SECONDARY_IDX: 514 is_user = true; 515 /* fallthru */ 516 case MMU_KERNEL_SECONDARY_IDX: 517 context = env->dmmu.mmu_secondary_context & 0x1fff; 518 sfsr |= SFSR_CT_SECONDARY; 519 break; 520 case MMU_NUCLEUS_IDX: 521 sfsr |= SFSR_CT_NUCLEUS; 522 /* FALLTHRU */ 523 default: 524 context = 0; 525 break; 526 } 527 528 if (rw == 1) { 529 sfsr |= SFSR_WRITE_BIT; 530 } else if (rw == 4) { 531 sfsr |= SFSR_NF_BIT; 532 } 533 534 for (i = 0; i < 64; i++) { 535 /* ctx match, vaddr match, valid? */ 536 if (ultrasparc_tag_match(&env->dtlb[i], address, context, physical)) { 537 int do_fault = 0; 538 539 /* access ok? */ 540 /* multiple bits in SFSR.FT may be set on TT_DFAULT */ 541 if (TTE_IS_PRIV(env->dtlb[i].tte) && is_user) { 542 do_fault = 1; 543 sfsr |= SFSR_FT_PRIV_BIT; /* privilege violation */ 544 trace_mmu_helper_dfault(address, context, mmu_idx, env->tl); 545 } 546 if (rw == 4) { 547 if (TTE_IS_SIDEEFFECT(env->dtlb[i].tte)) { 548 do_fault = 1; 549 sfsr |= SFSR_FT_NF_E_BIT; 550 } 551 } else { 552 if (TTE_IS_NFO(env->dtlb[i].tte)) { 553 do_fault = 1; 554 sfsr |= SFSR_FT_NFO_BIT; 555 } 556 } 557 558 if (do_fault) { 559 /* faults above are reported with TT_DFAULT. */ 560 cs->exception_index = TT_DFAULT; 561 } else if (!TTE_IS_W_OK(env->dtlb[i].tte) && (rw == 1)) { 562 do_fault = 1; 563 cs->exception_index = TT_DPROT; 564 565 trace_mmu_helper_dprot(address, context, mmu_idx, env->tl); 566 } 567 568 if (!do_fault) { 569 *prot = PAGE_READ; 570 if (TTE_IS_W_OK(env->dtlb[i].tte)) { 571 *prot |= PAGE_WRITE; 572 } 573 574 TTE_SET_USED(env->dtlb[i].tte); 575 576 return 0; 577 } 578 579 if (env->dmmu.sfsr & SFSR_VALID_BIT) { /* Fault status register */ 580 sfsr |= SFSR_OW_BIT; /* overflow (not read before 581 another fault) */ 582 } 583 584 if (env->pstate & PS_PRIV) { 585 sfsr |= SFSR_PR_BIT; 586 } 587 588 /* FIXME: ASI field in SFSR must be set */ 589 env->dmmu.sfsr = sfsr | SFSR_VALID_BIT; 590 591 env->dmmu.sfar = address; /* Fault address register */ 592 593 env->dmmu.tag_access = (address & ~0x1fffULL) | context; 594 595 return 1; 596 } 597 } 598 599 trace_mmu_helper_dmiss(address, context); 600 601 /* 602 * On MMU misses: 603 * - UltraSPARC IIi: SFSR and SFAR unmodified 604 * - JPS1: SFAR updated and some fields of SFSR updated 605 */ 606 env->dmmu.tag_access = (address & ~0x1fffULL) | context; 607 cs->exception_index = TT_DMISS; 608 return 1; 609 } 610 611 static int get_physical_address_code(CPUSPARCState *env, 612 hwaddr *physical, int *prot, 613 target_ulong address, int mmu_idx) 614 { 615 CPUState *cs = CPU(sparc_env_get_cpu(env)); 616 unsigned int i; 617 uint64_t context; 618 bool is_user = false; 619 620 switch (mmu_idx) { 621 case MMU_PHYS_IDX: 622 case MMU_USER_SECONDARY_IDX: 623 case MMU_KERNEL_SECONDARY_IDX: 624 g_assert_not_reached(); 625 case MMU_USER_IDX: 626 is_user = true; 627 /* fallthru */ 628 case MMU_KERNEL_IDX: 629 context = env->dmmu.mmu_primary_context & 0x1fff; 630 break; 631 default: 632 context = 0; 633 break; 634 } 635 636 if (env->tl == 0) { 637 /* PRIMARY context */ 638 context = env->dmmu.mmu_primary_context & 0x1fff; 639 } else { 640 /* NUCLEUS context */ 641 context = 0; 642 } 643 644 for (i = 0; i < 64; i++) { 645 /* ctx match, vaddr match, valid? */ 646 if (ultrasparc_tag_match(&env->itlb[i], 647 address, context, physical)) { 648 /* access ok? */ 649 if (TTE_IS_PRIV(env->itlb[i].tte) && is_user) { 650 /* Fault status register */ 651 if (env->immu.sfsr & SFSR_VALID_BIT) { 652 env->immu.sfsr = SFSR_OW_BIT; /* overflow (not read before 653 another fault) */ 654 } else { 655 env->immu.sfsr = 0; 656 } 657 if (env->pstate & PS_PRIV) { 658 env->immu.sfsr |= SFSR_PR_BIT; 659 } 660 if (env->tl > 0) { 661 env->immu.sfsr |= SFSR_CT_NUCLEUS; 662 } 663 664 /* FIXME: ASI field in SFSR must be set */ 665 env->immu.sfsr |= SFSR_FT_PRIV_BIT | SFSR_VALID_BIT; 666 cs->exception_index = TT_TFAULT; 667 668 env->immu.tag_access = (address & ~0x1fffULL) | context; 669 670 trace_mmu_helper_tfault(address, context); 671 672 return 1; 673 } 674 *prot = PAGE_EXEC; 675 TTE_SET_USED(env->itlb[i].tte); 676 return 0; 677 } 678 } 679 680 trace_mmu_helper_tmiss(address, context); 681 682 /* Context is stored in DMMU (dmmuregs[1]) also for IMMU */ 683 env->immu.tag_access = (address & ~0x1fffULL) | context; 684 cs->exception_index = TT_TMISS; 685 return 1; 686 } 687 688 static int get_physical_address(CPUSPARCState *env, hwaddr *physical, 689 int *prot, int *access_index, 690 target_ulong address, int rw, int mmu_idx, 691 target_ulong *page_size) 692 { 693 /* ??? We treat everything as a small page, then explicitly flush 694 everything when an entry is evicted. */ 695 *page_size = TARGET_PAGE_SIZE; 696 697 /* safety net to catch wrong softmmu index use from dynamic code */ 698 if (env->tl > 0 && mmu_idx != MMU_NUCLEUS_IDX) { 699 if (rw == 2) { 700 trace_mmu_helper_get_phys_addr_code(env->tl, mmu_idx, 701 env->dmmu.mmu_primary_context, 702 env->dmmu.mmu_secondary_context, 703 address); 704 } else { 705 trace_mmu_helper_get_phys_addr_data(env->tl, mmu_idx, 706 env->dmmu.mmu_primary_context, 707 env->dmmu.mmu_secondary_context, 708 address); 709 } 710 } 711 712 if (mmu_idx == MMU_PHYS_IDX) { 713 *physical = ultrasparc_truncate_physical(address); 714 *prot = PAGE_READ | PAGE_WRITE | PAGE_EXEC; 715 return 0; 716 } 717 718 if (rw == 2) { 719 return get_physical_address_code(env, physical, prot, address, 720 mmu_idx); 721 } else { 722 return get_physical_address_data(env, physical, prot, address, rw, 723 mmu_idx); 724 } 725 } 726 727 /* Perform address translation */ 728 bool sparc_cpu_tlb_fill(CPUState *cs, vaddr address, int size, 729 MMUAccessType access_type, int mmu_idx, 730 bool probe, uintptr_t retaddr) 731 { 732 SPARCCPU *cpu = SPARC_CPU(cs); 733 CPUSPARCState *env = &cpu->env; 734 target_ulong vaddr; 735 hwaddr paddr; 736 target_ulong page_size; 737 int error_code = 0, prot, access_index; 738 739 address &= TARGET_PAGE_MASK; 740 error_code = get_physical_address(env, &paddr, &prot, &access_index, 741 address, access_type, 742 mmu_idx, &page_size); 743 if (likely(error_code == 0)) { 744 vaddr = address; 745 746 trace_mmu_helper_mmu_fault(address, paddr, mmu_idx, env->tl, 747 env->dmmu.mmu_primary_context, 748 env->dmmu.mmu_secondary_context); 749 750 tlb_set_page(cs, vaddr, paddr, prot, mmu_idx, page_size); 751 return true; 752 } 753 if (probe) { 754 return false; 755 } 756 cpu_loop_exit_restore(cs, retaddr); 757 } 758 759 void dump_mmu(CPUSPARCState *env) 760 { 761 unsigned int i; 762 const char *mask; 763 764 qemu_printf("MMU contexts: Primary: %" PRId64 ", Secondary: %" 765 PRId64 "\n", 766 env->dmmu.mmu_primary_context, 767 env->dmmu.mmu_secondary_context); 768 qemu_printf("DMMU Tag Access: %" PRIx64 ", TSB Tag Target: %" PRIx64 769 "\n", env->dmmu.tag_access, env->dmmu.tsb_tag_target); 770 if ((env->lsu & DMMU_E) == 0) { 771 qemu_printf("DMMU disabled\n"); 772 } else { 773 qemu_printf("DMMU dump\n"); 774 for (i = 0; i < 64; i++) { 775 switch (TTE_PGSIZE(env->dtlb[i].tte)) { 776 default: 777 case 0x0: 778 mask = " 8k"; 779 break; 780 case 0x1: 781 mask = " 64k"; 782 break; 783 case 0x2: 784 mask = "512k"; 785 break; 786 case 0x3: 787 mask = " 4M"; 788 break; 789 } 790 if (TTE_IS_VALID(env->dtlb[i].tte)) { 791 qemu_printf("[%02u] VA: %" PRIx64 ", PA: %llx" 792 ", %s, %s, %s, %s, ctx %" PRId64 " %s\n", 793 i, 794 env->dtlb[i].tag & (uint64_t)~0x1fffULL, 795 TTE_PA(env->dtlb[i].tte), 796 mask, 797 TTE_IS_PRIV(env->dtlb[i].tte) ? "priv" : "user", 798 TTE_IS_W_OK(env->dtlb[i].tte) ? "RW" : "RO", 799 TTE_IS_LOCKED(env->dtlb[i].tte) ? 800 "locked" : "unlocked", 801 env->dtlb[i].tag & (uint64_t)0x1fffULL, 802 TTE_IS_GLOBAL(env->dtlb[i].tte) ? 803 "global" : "local"); 804 } 805 } 806 } 807 if ((env->lsu & IMMU_E) == 0) { 808 qemu_printf("IMMU disabled\n"); 809 } else { 810 qemu_printf("IMMU dump\n"); 811 for (i = 0; i < 64; i++) { 812 switch (TTE_PGSIZE(env->itlb[i].tte)) { 813 default: 814 case 0x0: 815 mask = " 8k"; 816 break; 817 case 0x1: 818 mask = " 64k"; 819 break; 820 case 0x2: 821 mask = "512k"; 822 break; 823 case 0x3: 824 mask = " 4M"; 825 break; 826 } 827 if (TTE_IS_VALID(env->itlb[i].tte)) { 828 qemu_printf("[%02u] VA: %" PRIx64 ", PA: %llx" 829 ", %s, %s, %s, ctx %" PRId64 " %s\n", 830 i, 831 env->itlb[i].tag & (uint64_t)~0x1fffULL, 832 TTE_PA(env->itlb[i].tte), 833 mask, 834 TTE_IS_PRIV(env->itlb[i].tte) ? "priv" : "user", 835 TTE_IS_LOCKED(env->itlb[i].tte) ? 836 "locked" : "unlocked", 837 env->itlb[i].tag & (uint64_t)0x1fffULL, 838 TTE_IS_GLOBAL(env->itlb[i].tte) ? 839 "global" : "local"); 840 } 841 } 842 } 843 } 844 845 #endif /* TARGET_SPARC64 */ 846 847 static int cpu_sparc_get_phys_page(CPUSPARCState *env, hwaddr *phys, 848 target_ulong addr, int rw, int mmu_idx) 849 { 850 target_ulong page_size; 851 int prot, access_index; 852 853 return get_physical_address(env, phys, &prot, &access_index, addr, rw, 854 mmu_idx, &page_size); 855 } 856 857 #if defined(TARGET_SPARC64) 858 hwaddr cpu_get_phys_page_nofault(CPUSPARCState *env, target_ulong addr, 859 int mmu_idx) 860 { 861 hwaddr phys_addr; 862 863 if (cpu_sparc_get_phys_page(env, &phys_addr, addr, 4, mmu_idx) != 0) { 864 return -1; 865 } 866 return phys_addr; 867 } 868 #endif 869 870 hwaddr sparc_cpu_get_phys_page_debug(CPUState *cs, vaddr addr) 871 { 872 SPARCCPU *cpu = SPARC_CPU(cs); 873 CPUSPARCState *env = &cpu->env; 874 hwaddr phys_addr; 875 int mmu_idx = cpu_mmu_index(env, false); 876 877 if (cpu_sparc_get_phys_page(env, &phys_addr, addr, 2, mmu_idx) != 0) { 878 if (cpu_sparc_get_phys_page(env, &phys_addr, addr, 0, mmu_idx) != 0) { 879 return -1; 880 } 881 } 882 return phys_addr; 883 } 884 #endif 885