/*
 * Helpers for loads and stores
 *
 * Copyright (c) 2003-2005 Fabrice Bellard
 *
 * This library is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License as published by the Free Software Foundation; either
 * version 2 of the License, or (at your option) any later version.
 *
 * This library is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with this library; if not, see <http://www.gnu.org/licenses/>.
 */

#include "qemu/osdep.h"
#include "cpu.h"
#include "tcg.h"
#include "exec/helper-proto.h"
#include "exec/exec-all.h"
#include "exec/cpu_ldst.h"
#include "asi.h"

//#define DEBUG_MMU
//#define DEBUG_MXCC
//#define DEBUG_UNALIGNED
//#define DEBUG_UNASSIGNED
//#define DEBUG_ASI
//#define DEBUG_CACHE_CONTROL

#ifdef DEBUG_MMU
#define DPRINTF_MMU(fmt, ...) \
    do { printf("MMU: " fmt , ## __VA_ARGS__); } while (0)
#else
#define DPRINTF_MMU(fmt, ...) do {} while (0)
#endif

#ifdef DEBUG_MXCC
#define DPRINTF_MXCC(fmt, ...) \
    do { printf("MXCC: " fmt , ## __VA_ARGS__); } while (0)
#else
#define DPRINTF_MXCC(fmt, ...) do {} while (0)
#endif

#ifdef DEBUG_ASI
#define DPRINTF_ASI(fmt, ...) \
    do { printf("ASI: " fmt , ## __VA_ARGS__); } while (0)
#endif

#ifdef DEBUG_CACHE_CONTROL
#define DPRINTF_CACHE_CONTROL(fmt, ...) \
    do { printf("CACHE_CONTROL: " fmt , ## __VA_ARGS__); } while (0)
#else
#define DPRINTF_CACHE_CONTROL(fmt, ...) do {} while (0)
#endif

#ifdef TARGET_SPARC64
#ifndef TARGET_ABI32
#define AM_CHECK(env1) ((env1)->pstate & PS_AM)
#else
#define AM_CHECK(env1) (1)
#endif
#endif

#define QT0 (env->qt0)
#define QT1 (env->qt1)

#if defined(TARGET_SPARC64) && !defined(CONFIG_USER_ONLY)
/* Calculates TSB pointer value for fault page size 8k or 64k */
static uint64_t ultrasparc_tsb_pointer(uint64_t tsb_register,
                                       uint64_t tag_access_register,
                                       int page_size)
{
    uint64_t tsb_base = tsb_register & ~0x1fffULL;
    int tsb_split = (tsb_register & 0x1000ULL) ? 1 : 0;
    int tsb_size = tsb_register & 0xf;

    /* discard lower 13 bits which hold tag access context */
    uint64_t tag_access_va = tag_access_register & ~0x1fffULL;

    /* now reorder bits */
    uint64_t tsb_base_mask = ~0x1fffULL;
    uint64_t va = tag_access_va;

    /* move va bits to correct position */
    if (page_size == 8*1024) {
        va >>= 9;
    } else if (page_size == 64*1024) {
        va >>= 12;
    }

    if (tsb_size) {
        tsb_base_mask <<= tsb_size;
    }

    /* calculate tsb_base mask and adjust va if split is in use */
    if (tsb_split) {
        if (page_size == 8*1024) {
            va &= ~(1ULL << (13 + tsb_size));
        } else if (page_size == 64*1024) {
            va |= (1ULL << (13 + tsb_size));
        }
        tsb_base_mask <<= 1;
    }

    return ((tsb_base & tsb_base_mask) | (va & ~tsb_base_mask)) & ~0xfULL;
}

/* Calculates tag target register value by reordering bits
   in tag access register */
static uint64_t ultrasparc_tag_target(uint64_t tag_access_register)
{
    return ((tag_access_register & 0x1fff) << 48) | (tag_access_register >> 22);
}

static void replace_tlb_entry(SparcTLBEntry *tlb,
                              uint64_t tlb_tag, uint64_t tlb_tte,
                              CPUSPARCState *env1)
{
    target_ulong mask, size, va, offset;

    /* flush page range if translation is valid */
    if (TTE_IS_VALID(tlb->tte)) {
        CPUState *cs = CPU(sparc_env_get_cpu(env1));

        mask = 0xffffffffffffe000ULL;
        mask <<= 3 * ((tlb->tte >> 61) & 3);
        size = ~mask + 1;

        va = tlb->tag & mask;

        for (offset = 0; offset < size; offset += TARGET_PAGE_SIZE) {
            tlb_flush_page(cs, va + offset);
        }
    }

    tlb->tag = tlb_tag;
    tlb->tte = tlb_tte;
}

static void demap_tlb(SparcTLBEntry *tlb, target_ulong demap_addr,
                      const char *strmmu, CPUSPARCState *env1)
{
    unsigned int i;
    target_ulong mask;
    uint64_t context;

    int is_demap_context = (demap_addr >> 6) & 1;

    /* demap context */
    switch ((demap_addr >> 4) & 3) {
    case 0: /* primary */
        context = env1->dmmu.mmu_primary_context;
        break;
    case 1: /* secondary */
        context = env1->dmmu.mmu_secondary_context;
        break;
    case 2: /* nucleus */
        context = 0;
        break;
    case 3: /* reserved */
    default:
        return;
    }

    for (i = 0; i < 64; i++) {
        if (TTE_IS_VALID(tlb[i].tte)) {

            if (is_demap_context) {
                /* will remove non-global entries matching context value */
                if (TTE_IS_GLOBAL(tlb[i].tte) ||
                    !tlb_compare_context(&tlb[i], context)) {
                    continue;
                }
            } else {
                /* demap page
                   will remove any entry matching VA */
                mask = 0xffffffffffffe000ULL;
                mask <<= 3 * ((tlb[i].tte >> 61) & 3);

                if (!compare_masked(demap_addr, tlb[i].tag, mask)) {
                    continue;
                }

                /* entry should be global or matching context value */
                if (!TTE_IS_GLOBAL(tlb[i].tte) &&
                    !tlb_compare_context(&tlb[i], context)) {
                    continue;
                }
            }

            replace_tlb_entry(&tlb[i], 0, 0, env1);
#ifdef DEBUG_MMU
            DPRINTF_MMU("%s demap invalidated entry [%02u]\n", strmmu, i);
            dump_mmu(stdout, fprintf, env1);
#endif
        }
    }
}

static void replace_tlb_1bit_lru(SparcTLBEntry *tlb,
                                 uint64_t tlb_tag, uint64_t tlb_tte,
                                 const char *strmmu, CPUSPARCState *env1)
{
    unsigned int i, replace_used;

    /* Try replacing invalid entry */
    for (i = 0; i < 64; i++) {
        if (!TTE_IS_VALID(tlb[i].tte)) {
            replace_tlb_entry(&tlb[i], tlb_tag, tlb_tte, env1);
#ifdef DEBUG_MMU
            DPRINTF_MMU("%s lru replaced invalid entry [%i]\n", strmmu, i);
            dump_mmu(stdout, fprintf, env1);
#endif
            return;
        }
    }

    /* All entries are valid, try replacing unlocked entry */

    for (replace_used = 0; replace_used < 2; ++replace_used) {

        /* Used entries are not replaced on first pass */

        for (i = 0; i < 64; i++) {
            if (!TTE_IS_LOCKED(tlb[i].tte) && !TTE_IS_USED(tlb[i].tte)) {

                replace_tlb_entry(&tlb[i], tlb_tag, tlb_tte, env1);
#ifdef DEBUG_MMU
                DPRINTF_MMU("%s lru replaced unlocked %s entry [%i]\n",
                            strmmu, (replace_used ? "used" : "unused"), i);
                dump_mmu(stdout, fprintf, env1);
#endif
                return;
            }
        }

        /* Now reset used bit and search for unused entries again */

        for (i = 0; i < 64; i++) {
            TTE_SET_UNUSED(tlb[i].tte);
        }
    }

#ifdef DEBUG_MMU
    DPRINTF_MMU("%s lru replacement failed: no entries available\n", strmmu);
#endif
    /* error state? */
}

#endif

#ifdef TARGET_SPARC64
/* returns true if access using this ASI is to have address translated by MMU
   otherwise access is to raw physical address */
/* TODO: check sparc32 bits */
static inline int is_translating_asi(int asi)
{
    /* Ultrasparc IIi translating asi
       - note this list is defined by cpu implementation
    */
    switch (asi) {
    case 0x04 ... 0x11:
    case 0x16 ... 0x19:
    case 0x1E ... 0x1F:
    case 0x24 ... 0x2C:
    case 0x70 ... 0x73:
    case 0x78 ... 0x79:
    case 0x80 ... 0xFF:
        return 1;

    default:
        return 0;
    }
}

static inline target_ulong address_mask(CPUSPARCState *env1, target_ulong addr)
{
    if (AM_CHECK(env1)) {
        addr &= 0xffffffffULL;
    }
    return addr;
}

static inline target_ulong asi_address_mask(CPUSPARCState *env,
                                            int asi, target_ulong addr)
{
    if (is_translating_asi(asi)) {
        addr = address_mask(env, addr);
    }
    return addr;
}
#endif

static void do_check_align(CPUSPARCState *env, target_ulong addr,
                           uint32_t align, uintptr_t ra)
{
    if (addr & align) {
#ifdef DEBUG_UNALIGNED
        printf("Unaligned access to 0x" TARGET_FMT_lx " from 0x" TARGET_FMT_lx
               "\n", addr, env->pc);
#endif
        cpu_raise_exception_ra(env, TT_UNALIGNED, ra);
    }
}

void helper_check_align(CPUSPARCState *env, target_ulong addr, uint32_t align)
{
    do_check_align(env, addr, align, GETPC());
}

#if !defined(TARGET_SPARC64) && !defined(CONFIG_USER_ONLY) && \
    defined(DEBUG_MXCC)
static void dump_mxcc(CPUSPARCState *env)
{
    printf("mxccdata: %016" PRIx64 " %016" PRIx64 " %016" PRIx64 " %016" PRIx64
           "\n",
           env->mxccdata[0], env->mxccdata[1],
           env->mxccdata[2], env->mxccdata[3]);
    printf("mxccregs: %016" PRIx64 " %016" PRIx64 " %016" PRIx64 " %016" PRIx64
           "\n"
           "          %016" PRIx64 " %016" PRIx64 " %016" PRIx64 " %016" PRIx64
           "\n",
           env->mxccregs[0], env->mxccregs[1],
           env->mxccregs[2], env->mxccregs[3],
           env->mxccregs[4], env->mxccregs[5],
           env->mxccregs[6], env->mxccregs[7]);
}
#endif

#if (defined(TARGET_SPARC64) || !defined(CONFIG_USER_ONLY)) \
    && defined(DEBUG_ASI)
static void dump_asi(const char *txt, target_ulong addr, int asi, int size,
                     uint64_t r1)
{
    switch (size) {
    case 1:
        DPRINTF_ASI("%s "TARGET_FMT_lx " asi 0x%02x = %02" PRIx64 "\n", txt,
                    addr, asi, r1 & 0xff);
        break;
    case 2:
        DPRINTF_ASI("%s "TARGET_FMT_lx " asi 0x%02x = %04" PRIx64 "\n", txt,
                    addr, asi, r1 & 0xffff);
        break;
    case 4:
        DPRINTF_ASI("%s "TARGET_FMT_lx " asi 0x%02x = %08" PRIx64 "\n", txt,
                    addr, asi, r1 & 0xffffffff);
        break;
    case 8:
        DPRINTF_ASI("%s "TARGET_FMT_lx " asi 0x%02x = %016" PRIx64 "\n", txt,
                    addr, asi, r1);
        break;
    }
}
#endif

#ifndef TARGET_SPARC64
#ifndef CONFIG_USER_ONLY


/* Leon3 cache control */

static void leon3_cache_control_st(CPUSPARCState *env, target_ulong addr,
                                   uint64_t val, int size)
{
    DPRINTF_CACHE_CONTROL("st addr:%08x, val:%" PRIx64 ", size:%d\n",
                          addr, val, size);

    if (size != 4) {
        DPRINTF_CACHE_CONTROL("32bits only\n");
        return;
    }

    switch (addr) {
    case 0x00:              /* Cache control */

        /* These values must always be read as zeros */
        val &= ~CACHE_CTRL_FD;
        val &= ~CACHE_CTRL_FI;
        val &= ~CACHE_CTRL_IB;
        val &= ~CACHE_CTRL_IP;
        val &= ~CACHE_CTRL_DP;

        env->cache_control = val;
        break;
    case 0x04:              /* Instruction cache configuration */
    case 0x08:              /* Data cache configuration */
        /* Read Only */
        break;
    default:
        DPRINTF_CACHE_CONTROL("write unknown register %08x\n", addr);
        break;
    }
}

static uint64_t leon3_cache_control_ld(CPUSPARCState *env, target_ulong addr,
                                       int size)
{
    uint64_t ret = 0;

    if (size != 4) {
        DPRINTF_CACHE_CONTROL("32bits only\n");
        return 0;
    }

    switch (addr) {
    case 0x00:              /* Cache control */
        ret = env->cache_control;
        break;

        /* Configuration registers are read-only and always keep
           their predefined values */

    case 0x04:              /* Instruction cache configuration */
        ret = 0x10220000;
        break;
    case 0x08:              /* Data cache configuration */
        ret = 0x18220000;
        break;
    default:
        DPRINTF_CACHE_CONTROL("read unknown register %08x\n", addr);
        break;
    }
    DPRINTF_CACHE_CONTROL("ld addr:%08x, ret:0x%" PRIx64 ", size:%d\n",
                          addr, ret, size);
    return ret;
}

uint64_t helper_ld_asi(CPUSPARCState *env, target_ulong addr,
                       int asi, uint32_t memop)
{
    int size = 1 << (memop & MO_SIZE);
    int sign = memop & MO_SIGN;
    CPUState *cs = CPU(sparc_env_get_cpu(env));
    uint64_t ret = 0;
#if defined(DEBUG_MXCC) || defined(DEBUG_ASI)
    uint32_t last_addr = addr;
#endif

    do_check_align(env, addr, size - 1, GETPC());
    switch (asi) {
    case ASI_M_MXCC: /* SuperSparc MXCC registers, or... */
    /* case ASI_LEON_CACHEREGS:  Leon3 cache control */
        switch (addr) {
        case 0x00:          /* Leon3 Cache Control */
        case 0x08:          /* Leon3 Instruction Cache config */
        case 0x0C:          /* Leon3 Data Cache config */
            if (env->def->features & CPU_FEATURE_CACHE_CTRL) {
                ret = leon3_cache_control_ld(env, addr, size);
            }
            break;
        case 0x01c00a00: /* MXCC control register */
            if (size == 8) {
                ret = env->mxccregs[3];
            } else {
                qemu_log_mask(LOG_UNIMP,
                              "%08x: unimplemented access size: %d\n", addr,
                              size);
            }
            break;
        case 0x01c00a04: /* MXCC control register */
            if (size == 4) {
                ret = env->mxccregs[3];
            } else {
                qemu_log_mask(LOG_UNIMP,
                              "%08x: unimplemented access size: %d\n", addr,
                              size);
            }
            break;
        case 0x01c00c00: /* Module reset register */
            if (size == 8) {
                ret = env->mxccregs[5];
                /* should we do something here? */
            } else {
                qemu_log_mask(LOG_UNIMP,
                              "%08x: unimplemented access size: %d\n", addr,
                              size);
            }
            break;
        case 0x01c00f00: /* MBus port address register */
            if (size == 8) {
                ret = env->mxccregs[7];
            } else {
                qemu_log_mask(LOG_UNIMP,
                              "%08x: unimplemented access size: %d\n", addr,
                              size);
            }
            break;
        default:
            qemu_log_mask(LOG_UNIMP,
                          "%08x: unimplemented address, size: %d\n", addr,
                          size);
            break;
        }
        DPRINTF_MXCC("asi = %d, size = %d, sign = %d, "
                     "addr = %08x -> ret = %" PRIx64 ","
                     "addr = %08x\n", asi, size, sign, last_addr, ret, addr);
#ifdef DEBUG_MXCC
        dump_mxcc(env);
#endif
        break;
    case ASI_M_FLUSH_PROBE: /* SuperSparc MMU probe */
    case ASI_LEON_MMUFLUSH: /* LEON3 MMU probe */
        {
            int mmulev;

            mmulev = (addr >> 8) & 15;
            if (mmulev > 4) {
                ret = 0;
            } else {
                ret = mmu_probe(env, addr, mmulev);
            }
            DPRINTF_MMU("mmu_probe: 0x%08x (lev %d) -> 0x%08" PRIx64 "\n",
                        addr, mmulev, ret);
        }
        break;
    case ASI_M_MMUREGS: /* SuperSparc MMU regs */
    case ASI_LEON_MMUREGS: /* LEON3 MMU regs */
        {
            int reg = (addr >> 8) & 0x1f;

            ret = env->mmuregs[reg];
            if (reg == 3) { /* Fault status cleared on read */
                env->mmuregs[3] = 0;
            } else if (reg == 0x13) { /* Fault status read */
                ret = env->mmuregs[3];
            } else if (reg == 0x14) { /* Fault address read */
                ret = env->mmuregs[4];
            }
            DPRINTF_MMU("mmu_read: reg[%d] = 0x%08" PRIx64 "\n", reg, ret);
        }
        break;
    case ASI_M_TLBDIAG: /* Turbosparc ITLB Diagnostic */
    case ASI_M_DIAGS:   /* Turbosparc DTLB Diagnostic */
    case ASI_M_IODIAG:  /* Turbosparc IOTLB Diagnostic */
        break;
    case ASI_KERNELTXT: /* Supervisor code access */
        switch (size) {
        case 1:
            ret = cpu_ldub_code(env, addr);
            break;
        case 2:
            ret = cpu_lduw_code(env, addr);
            break;
        default:
        case 4:
            ret = cpu_ldl_code(env, addr);
            break;
        case 8:
            ret = cpu_ldq_code(env, addr);
            break;
        }
        break;
    case ASI_M_TXTC_TAG:   /* SparcStation 5 I-cache tag */
    case ASI_M_TXTC_DATA:  /* SparcStation 5 I-cache data */
    case ASI_M_DATAC_TAG:  /* SparcStation 5 D-cache tag */
    case ASI_M_DATAC_DATA: /* SparcStation 5 D-cache data */
        break;
    case 0x21 ... 0x2f: /* MMU passthrough, 0x100000000 to 0xfffffffff */
        switch (size) {
        case 1:
            ret = ldub_phys(cs->as, (hwaddr)addr
                            | ((hwaddr)(asi & 0xf) << 32));
            break;
        case 2:
            ret = lduw_phys(cs->as, (hwaddr)addr
                            | ((hwaddr)(asi & 0xf) << 32));
            break;
        default:
        case 4:
            ret = ldl_phys(cs->as, (hwaddr)addr
                           | ((hwaddr)(asi & 0xf) << 32));
            break;
        case 8:
            ret = ldq_phys(cs->as, (hwaddr)addr
                           | ((hwaddr)(asi & 0xf) << 32));
            break;
        }
        break;
    case 0x30: /* Turbosparc secondary cache diagnostic */
    case 0x31: /* Turbosparc RAM snoop */
    case 0x32: /* Turbosparc page table descriptor diagnostic */
    case 0x39: /* data cache diagnostic register */
        ret = 0;
        break;
    case 0x38: /* SuperSPARC MMU Breakpoint Control Registers */
        {
            int reg = (addr >> 8) & 3;

            switch (reg) {
            case 0: /* Breakpoint Value (Addr) */
                ret = env->mmubpregs[reg];
                break;
            case 1: /* Breakpoint Mask */
                ret = env->mmubpregs[reg];
                break;
            case 2: /* Breakpoint Control */
                ret = env->mmubpregs[reg];
                break;
            case 3: /* Breakpoint Status */
                ret = env->mmubpregs[reg];
                env->mmubpregs[reg] = 0ULL;
                break;
            }
            DPRINTF_MMU("read breakpoint reg[%d] 0x%016" PRIx64 "\n", reg,
                        ret);
        }
        break;
    case 0x49: /* SuperSPARC MMU Counter Breakpoint Value */
        ret = env->mmubpctrv;
        break;
    case 0x4a: /* SuperSPARC MMU Counter Breakpoint Control */
        ret = env->mmubpctrc;
        break;
    case 0x4b: /* SuperSPARC MMU Counter Breakpoint Status */
        ret = env->mmubpctrs;
        break;
    case 0x4c: /* SuperSPARC MMU Breakpoint Action */
        ret = env->mmubpaction;
        break;
    case ASI_USERTXT: /* User code access, XXX */
    default:
        cpu_unassigned_access(cs, addr, false, false, asi, size);
        ret = 0;
        break;

    case ASI_USERDATA: /* User data access */
    case ASI_KERNELDATA: /* Supervisor data access */
    case ASI_P: /* Implicit primary context data access (v9 only?) */
    case ASI_M_BYPASS: /* MMU passthrough */
    case ASI_LEON_BYPASS: /* LEON MMU passthrough */
        /* These are always handled inline. */
        g_assert_not_reached();
    }
    if (sign) {
        switch (size) {
        case 1:
            ret = (int8_t) ret;
            break;
        case 2:
            ret = (int16_t) ret;
            break;
        case 4:
            ret = (int32_t) ret;
            break;
        default:
            break;
        }
    }
#ifdef DEBUG_ASI
    dump_asi("read ", last_addr, asi, size, ret);
#endif
    return ret;
}

void helper_st_asi(CPUSPARCState *env, target_ulong addr, uint64_t val,
                   int asi, uint32_t memop)
{
    int size = 1 << (memop & MO_SIZE);
    SPARCCPU *cpu = sparc_env_get_cpu(env);
    CPUState *cs = CPU(cpu);

    do_check_align(env, addr, size - 1, GETPC());
    switch (asi) {
    case ASI_M_MXCC: /* SuperSparc MXCC registers, or... */
    /* case ASI_LEON_CACHEREGS:  Leon3 cache control */
        switch (addr) {
        case 0x00:          /* Leon3 Cache Control */
        case 0x08:          /* Leon3 Instruction Cache config */
        case 0x0C:          /* Leon3 Data Cache config */
            if (env->def->features & CPU_FEATURE_CACHE_CTRL) {
                leon3_cache_control_st(env, addr, val, size);
            }
            break;

        case 0x01c00000: /* MXCC stream data register 0 */
            if (size == 8) {
                env->mxccdata[0] = val;
            } else {
                qemu_log_mask(LOG_UNIMP,
                              "%08x: unimplemented access size: %d\n", addr,
                              size);
            }
            break;
        case 0x01c00008: /* MXCC stream data register 1 */
            if (size == 8) {
                env->mxccdata[1] = val;
            } else {
                qemu_log_mask(LOG_UNIMP,
                              "%08x: unimplemented access size: %d\n", addr,
                              size);
            }
            break;
        case 0x01c00010: /* MXCC stream data register 2 */
            if (size == 8) {
                env->mxccdata[2] = val;
            } else {
                qemu_log_mask(LOG_UNIMP,
                              "%08x: unimplemented access size: %d\n", addr,
                              size);
            }
            break;
        case 0x01c00018: /* MXCC stream data register 3 */
            if (size == 8) {
                env->mxccdata[3] = val;
            } else {
                qemu_log_mask(LOG_UNIMP,
                              "%08x: unimplemented access size: %d\n", addr,
                              size);
            }
            break;
        case 0x01c00100: /* MXCC stream source */
            if (size == 8) {
                env->mxccregs[0] = val;
            } else {
                qemu_log_mask(LOG_UNIMP,
                              "%08x: unimplemented access size: %d\n", addr,
                              size);
            }
            env->mxccdata[0] = ldq_phys(cs->as,
                                        (env->mxccregs[0] & 0xffffffffULL) +
                                        0);
            env->mxccdata[1] = ldq_phys(cs->as,
                                        (env->mxccregs[0] & 0xffffffffULL) +
                                        8);
            env->mxccdata[2] = ldq_phys(cs->as,
                                        (env->mxccregs[0] & 0xffffffffULL) +
                                        16);
            env->mxccdata[3] = ldq_phys(cs->as,
                                        (env->mxccregs[0] & 0xffffffffULL) +
                                        24);
            break;
        case 0x01c00200: /* MXCC stream destination */
            if (size == 8) {
                env->mxccregs[1] = val;
            } else {
                qemu_log_mask(LOG_UNIMP,
                              "%08x: unimplemented access size: %d\n", addr,
                              size);
            }
            stq_phys(cs->as, (env->mxccregs[1] & 0xffffffffULL) + 0,
                     env->mxccdata[0]);
            stq_phys(cs->as, (env->mxccregs[1] & 0xffffffffULL) + 8,
                     env->mxccdata[1]);
            stq_phys(cs->as, (env->mxccregs[1] & 0xffffffffULL) + 16,
                     env->mxccdata[2]);
            stq_phys(cs->as, (env->mxccregs[1] & 0xffffffffULL) + 24,
                     env->mxccdata[3]);
            break;
        case 0x01c00a00: /* MXCC control register */
            if (size == 8) {
                env->mxccregs[3] = val;
            } else {
                qemu_log_mask(LOG_UNIMP,
                              "%08x: unimplemented access size: %d\n", addr,
                              size);
            }
            break;
        case 0x01c00a04: /* MXCC control register */
            if (size == 4) {
                env->mxccregs[3] = (env->mxccregs[3] & 0xffffffff00000000ULL)
                    | val;
            } else {
                qemu_log_mask(LOG_UNIMP,
                              "%08x: unimplemented access size: %d\n", addr,
                              size);
            }
            break;
        case 0x01c00e00: /* MXCC error register */
            /* writing a 1 bit clears the error */
            if (size == 8) {
                env->mxccregs[6] &= ~val;
            } else {
                qemu_log_mask(LOG_UNIMP,
                              "%08x: unimplemented access size: %d\n", addr,
                              size);
            }
            break;
        case 0x01c00f00: /* MBus port address register */
            if (size == 8) {
                env->mxccregs[7] = val;
            } else {
                qemu_log_mask(LOG_UNIMP,
                              "%08x: unimplemented access size: %d\n", addr,
                              size);
            }
            break;
        default:
            qemu_log_mask(LOG_UNIMP,
                          "%08x: unimplemented address, size: %d\n", addr,
                          size);
            break;
        }
        DPRINTF_MXCC("asi = %d, size = %d, addr = %08x, val = %" PRIx64 "\n",
                     asi, size, addr, val);
#ifdef DEBUG_MXCC
        dump_mxcc(env);
#endif
        break;
    case ASI_M_FLUSH_PROBE: /* SuperSparc MMU flush */
    case ASI_LEON_MMUFLUSH: /* LEON3 MMU flush */
        {
            int mmulev;

            mmulev = (addr >> 8) & 15;
            DPRINTF_MMU("mmu flush level %d\n", mmulev);
            switch (mmulev) {
            case 0: /* flush page */
                tlb_flush_page(CPU(cpu), addr & 0xfffff000);
                break;
            case 1: /* flush segment (256k) */
            case 2: /* flush region (16M) */
            case 3: /* flush context (4G) */
            case 4: /* flush entire */
                tlb_flush(CPU(cpu));
                break;
            default:
                break;
            }
#ifdef DEBUG_MMU
            dump_mmu(stdout, fprintf, env);
#endif
        }
        break;
    case ASI_M_MMUREGS: /* write MMU regs */
    case ASI_LEON_MMUREGS: /* LEON3 write MMU regs */
        {
            int reg = (addr >> 8) & 0x1f;
            uint32_t oldreg;

            oldreg = env->mmuregs[reg];
            switch (reg) {
            case 0: /* Control Register */
                env->mmuregs[reg] = (env->mmuregs[reg] & 0xff000000) |
                    (val & 0x00ffffff);
                /* Mappings generated during no-fault mode
                   are invalid in normal mode. */
                if ((oldreg ^ env->mmuregs[reg])
                    & (MMU_NF | env->def->mmu_bm)) {
                    tlb_flush(CPU(cpu));
                }
                break;
            case 1: /* Context Table Pointer Register */
                env->mmuregs[reg] = val & env->def->mmu_ctpr_mask;
                break;
            case 2: /* Context Register */
                env->mmuregs[reg] = val & env->def->mmu_cxr_mask;
                if (oldreg != env->mmuregs[reg]) {
                    /* we flush when the MMU context changes because
                       QEMU has no MMU context support */
                    tlb_flush(CPU(cpu));
                }
                break;
            case 3: /* Synchronous Fault Status Register with Clear */
            case 4: /* Synchronous Fault Address Register */
                break;
            case 0x10: /* TLB Replacement Control Register */
                env->mmuregs[reg] = val & env->def->mmu_trcr_mask;
                break;
            case 0x13: /* Synchronous Fault Status Register with Read
                          and Clear */
                env->mmuregs[3] = val & env->def->mmu_sfsr_mask;
                break;
            case 0x14: /* Synchronous Fault Address Register */
                env->mmuregs[4] = val;
                break;
            default:
                env->mmuregs[reg] = val;
                break;
            }
            if (oldreg != env->mmuregs[reg]) {
                DPRINTF_MMU("mmu change reg[%d]: 0x%08x -> 0x%08x\n",
                            reg, oldreg, env->mmuregs[reg]);
            }
#ifdef DEBUG_MMU
            dump_mmu(stdout, fprintf, env);
#endif
        }
        break;
    case ASI_M_TLBDIAG: /* Turbosparc ITLB Diagnostic */
    case ASI_M_DIAGS:   /* Turbosparc DTLB Diagnostic */
    case ASI_M_IODIAG:  /* Turbosparc IOTLB Diagnostic */
        break;
    case ASI_M_TXTC_TAG:   /* I-cache tag */
    case ASI_M_TXTC_DATA:  /* I-cache data */
    case ASI_M_DATAC_TAG:  /* D-cache tag */
    case ASI_M_DATAC_DATA: /* D-cache data */
    case ASI_M_FLUSH_PAGE:   /* I/D-cache flush page */
    case ASI_M_FLUSH_SEG:    /* I/D-cache flush segment */
    case ASI_M_FLUSH_REGION: /* I/D-cache flush region */
    case ASI_M_FLUSH_CTX:    /* I/D-cache flush context */
    case ASI_M_FLUSH_USER:   /* I/D-cache flush user */
        break;
    case 0x21 ... 0x2f: /* MMU passthrough, 0x100000000 to 0xfffffffff */
        {
            switch (size) {
            case 1:
                stb_phys(cs->as, (hwaddr)addr
                         | ((hwaddr)(asi & 0xf) << 32), val);
                break;
            case 2:
                stw_phys(cs->as, (hwaddr)addr
                         | ((hwaddr)(asi & 0xf) << 32), val);
                break;
            case 4:
            default:
                stl_phys(cs->as, (hwaddr)addr
                         | ((hwaddr)(asi & 0xf) << 32), val);
                break;
            case 8:
                stq_phys(cs->as, (hwaddr)addr
                         | ((hwaddr)(asi & 0xf) << 32), val);
                break;
            }
        }
        break;
    case 0x30: /* store buffer tags or Turbosparc secondary cache diagnostic */
    case 0x31: /* store buffer data, Ross RT620 I-cache flush or
                  Turbosparc snoop RAM */
    case 0x32: /* store buffer control or Turbosparc page table
                  descriptor diagnostic */
    case 0x36: /* I-cache flash clear */
    case 0x37: /* D-cache flash clear */
        break;
    case 0x38: /* SuperSPARC MMU Breakpoint Control Registers */
        {
            int reg = (addr >> 8) & 3;

            switch (reg) {
            case 0: /* Breakpoint Value (Addr) */
                env->mmubpregs[reg] = (val & 0xfffffffffULL);
                break;
            case 1: /* Breakpoint Mask */
                env->mmubpregs[reg] = (val & 0xfffffffffULL);
                break;
            case 2: /* Breakpoint Control */
                env->mmubpregs[reg] = (val & 0x7fULL);
                break;
            case 3: /* Breakpoint Status */
                env->mmubpregs[reg] = (val & 0xfULL);
                break;
            }
            DPRINTF_MMU("write breakpoint reg[%d] 0x%016" PRIx64 "\n", reg,
                        env->mmubpregs[reg]);
        }
        break;
    case 0x49: /* SuperSPARC MMU Counter Breakpoint Value */
        env->mmubpctrv = val & 0xffffffff;
        break;
    case 0x4a: /* SuperSPARC MMU Counter Breakpoint Control */
        env->mmubpctrc = val & 0x3;
        break;
    case 0x4b: /* SuperSPARC MMU Counter Breakpoint Status */
        env->mmubpctrs = val & 0x3;
        break;
    case 0x4c: /* SuperSPARC MMU Breakpoint Action */
        env->mmubpaction = val & 0x1fff;
        break;
    case ASI_USERTXT: /* User code access, XXX */
    case ASI_KERNELTXT: /* Supervisor code access, XXX */
    default:
        cpu_unassigned_access(CPU(sparc_env_get_cpu(env)),
                              addr, true, false, asi, size);
        break;

    case ASI_USERDATA: /* User data access */
    case ASI_KERNELDATA: /* Supervisor data access */
    case ASI_P:
    case ASI_M_BYPASS:    /* MMU passthrough */
    case ASI_LEON_BYPASS: /* LEON MMU passthrough */
    case ASI_M_BCOPY: /* Block copy, sta access */
    case ASI_M_BFILL: /* Block fill, stda access */
        /* These are always handled inline. */
        g_assert_not_reached();
    }
#ifdef DEBUG_ASI
    dump_asi("write", addr, asi, size, val);
#endif
}

#endif /* CONFIG_USER_ONLY */
#else /* TARGET_SPARC64 */

#ifdef CONFIG_USER_ONLY
uint64_t helper_ld_asi(CPUSPARCState *env, target_ulong addr,
                       int asi, uint32_t memop)
{
    int size = 1 << (memop & MO_SIZE);
    int sign = memop & MO_SIGN;
    uint64_t ret = 0;

    if (asi < 0x80) {
        cpu_raise_exception_ra(env, TT_PRIV_ACT, GETPC());
    }
    do_check_align(env, addr, size - 1, GETPC());
    addr = asi_address_mask(env, asi, addr);

    switch (asi) {
    case ASI_PNF:  /* Primary no-fault */
    case ASI_PNFL: /* Primary no-fault LE */
    case ASI_SNF:  /* Secondary no-fault */
    case ASI_SNFL: /* Secondary no-fault LE */
        if (page_check_range(addr, size, PAGE_READ) == -1) {
            ret = 0;
            break;
        }
        switch (size) {
        case 1:
            ret = cpu_ldub_data(env, addr);
            break;
        case 2:
            ret = cpu_lduw_data(env, addr);
            break;
        case 4:
            ret = cpu_ldl_data(env, addr);
            break;
        case 8:
            ret = cpu_ldq_data(env, addr);
            break;
        default:
            g_assert_not_reached();
        }
        break;

    case ASI_P:  /* Primary */
    case ASI_PL: /* Primary LE */
    case ASI_S:  /* Secondary */
    case ASI_SL: /* Secondary LE */
        /* These are always handled inline. */
        g_assert_not_reached();

    default:
        cpu_raise_exception_ra(env, TT_DATA_ACCESS, GETPC());
    }

    /* Convert from little endian */
    switch (asi) {
    case ASI_PNFL: /* Primary no-fault LE */
    case ASI_SNFL: /* Secondary no-fault LE */
        switch (size) {
        case 2:
            ret = bswap16(ret);
            break;
        case 4:
            ret = bswap32(ret);
            break;
        case 8:
            ret = bswap64(ret);
            break;
        }
    }

    /* Convert to signed number */
    if (sign) {
        switch (size) {
        case 1:
            ret = (int8_t) ret;
            break;
        case 2:
            ret = (int16_t) ret;
            break;
        case 4:
            ret = (int32_t) ret;
            break;
        }
    }
#ifdef DEBUG_ASI
    dump_asi("read", addr, asi, size, ret);
#endif
    return ret;
}

void helper_st_asi(CPUSPARCState *env, target_ulong addr, target_ulong val,
                   int asi, uint32_t memop)
{
    int size = 1 << (memop & MO_SIZE);
#ifdef DEBUG_ASI
    dump_asi("write", addr, asi, size, val);
#endif
    if (asi < 0x80) {
        cpu_raise_exception_ra(env, TT_PRIV_ACT, GETPC());
    }
    do_check_align(env, addr, size - 1, GETPC());

    switch (asi) {
    case ASI_P:  /* Primary */
    case ASI_PL: /* Primary LE */
    case ASI_S:  /* Secondary */
    case ASI_SL: /* Secondary LE */
        /* These are always handled inline. */
        g_assert_not_reached();

    case ASI_PNF:  /* Primary no-fault, RO */
    case ASI_SNF:  /* Secondary no-fault, RO */
    case ASI_PNFL: /* Primary no-fault LE, RO */
    case ASI_SNFL: /* Secondary no-fault LE, RO */
    default:
        cpu_raise_exception_ra(env, TT_DATA_ACCESS, GETPC());
    }
}

#else /* CONFIG_USER_ONLY */

uint64_t helper_ld_asi(CPUSPARCState *env, target_ulong addr,
                       int asi, uint32_t memop)
{
    int size = 1 << (memop & MO_SIZE);
    int sign = memop & MO_SIGN;
    CPUState *cs = CPU(sparc_env_get_cpu(env));
    uint64_t ret = 0;
#if defined(DEBUG_ASI)
    target_ulong last_addr = addr;
#endif

    asi &= 0xff;

    if ((asi < 0x80 && (env->pstate & PS_PRIV) == 0)
        || (cpu_has_hypervisor(env)
            && asi >= 0x30 && asi < 0x80
            && !(env->hpstate & HS_PRIV))) {
        cpu_raise_exception_ra(env, TT_PRIV_ACT, GETPC());
    }

    do_check_align(env, addr, size - 1, GETPC());
    addr = asi_address_mask(env, asi, addr);

    switch (asi) {
    case ASI_PNF:
    case ASI_PNFL:
    case ASI_SNF:
    case ASI_SNFL:
        {
            TCGMemOpIdx oi;
            int idx = (env->pstate & PS_PRIV
                       ? (asi & 1 ? MMU_KERNEL_SECONDARY_IDX : MMU_KERNEL_IDX)
                       : (asi & 1 ? MMU_USER_SECONDARY_IDX : MMU_USER_IDX));

            if (cpu_get_phys_page_nofault(env, addr, idx) == -1ULL) {
#ifdef DEBUG_ASI
                dump_asi("read ", last_addr, asi, size, ret);
#endif
                /* exception_index is set in get_physical_address_data. */
                cpu_raise_exception_ra(env, cs->exception_index, GETPC());
            }
            oi = make_memop_idx(memop, idx);
            switch (size) {
            case 1:
                ret = helper_ret_ldub_mmu(env, addr, oi, GETPC());
                break;
            case 2:
                if (asi & 8) {
                    ret = helper_le_lduw_mmu(env, addr, oi, GETPC());
                } else {
                    ret = helper_be_lduw_mmu(env, addr, oi, GETPC());
                }
                break;
            case 4:
                if (asi & 8) {
                    ret = helper_le_ldul_mmu(env, addr, oi, GETPC());
                } else {
                    ret = helper_be_ldul_mmu(env, addr, oi, GETPC());
                }
                break;
            case 8:
                if (asi & 8) {
                    ret = helper_le_ldq_mmu(env, addr, oi, GETPC());
                } else {
                    ret = helper_be_ldq_mmu(env, addr, oi, GETPC());
                }
                break;
            default:
                g_assert_not_reached();
            }
        }
        break;

    case ASI_AIUP:  /* As if user primary */
    case ASI_AIUS:  /* As if user secondary */
    case ASI_AIUPL: /* As if user primary LE */
    case ASI_AIUSL: /* As if user secondary LE */
    case ASI_P:  /* Primary */
    case ASI_S:  /* Secondary */
    case ASI_PL: /* Primary LE */
    case ASI_SL: /* Secondary LE */
    case ASI_REAL:      /* Bypass */
    case ASI_REAL_IO:   /* Bypass, non-cacheable */
    case ASI_REAL_L:    /* Bypass LE */
    case ASI_REAL_IO_L: /* Bypass, non-cacheable LE */
    case ASI_N:  /* Nucleus */
    case ASI_NL: /* Nucleus Little Endian (LE) */
    case ASI_NUCLEUS_QUAD_LDD:   /* Nucleus quad LDD 128 bit atomic */
    case ASI_NUCLEUS_QUAD_LDD_L: /* Nucleus quad LDD 128 bit atomic LE */
    case ASI_TWINX_AIUP:   /* As if user primary, twinx */
    case ASI_TWINX_AIUS:   /* As if user secondary, twinx */
    case ASI_TWINX_REAL:   /* Real address, twinx */
    case ASI_TWINX_AIUP_L: /* As if user primary, twinx, LE */
    case ASI_TWINX_AIUS_L: /* As if user secondary, twinx, LE */
    case ASI_TWINX_REAL_L: /* Real address, twinx, LE */
    case ASI_TWINX_N:  /* Nucleus, twinx */
    case ASI_TWINX_NL: /* Nucleus, twinx, LE */
    /* ??? From the UA2011 document; overlaps BLK_INIT_QUAD_LDD_* */
    case ASI_TWINX_P:  /* Primary, twinx */
    case ASI_TWINX_PL: /* Primary, twinx, LE */
    case ASI_TWINX_S:  /* Secondary, twinx */
    case ASI_TWINX_SL: /* Secondary, twinx, LE */
        /* These are always handled inline. */
        g_assert_not_reached();

    case ASI_UPA_CONFIG: /* UPA config */
        /* XXX */
        break;
    case ASI_LSU_CONTROL: /* LSU */
        ret = env->lsu;
        break;
    case ASI_IMMU: /* I-MMU regs */
        {
            int reg = (addr >> 3) & 0xf;

            if (reg == 0) {
                /* I-TSB Tag Target register */
                ret = ultrasparc_tag_target(env->immu.tag_access);
            } else {
                ret = env->immuregs[reg];
            }

            break;
        }
    case ASI_IMMU_TSB_8KB_PTR: /* I-MMU 8k TSB pointer */
        {
            /* env->immuregs[5] holds I-MMU TSB register value
               env->immuregs[6] holds I-MMU Tag Access register value */
            ret = ultrasparc_tsb_pointer(env->immu.tsb, env->immu.tag_access,
                                         8*1024);
            break;
        }
    case ASI_IMMU_TSB_64KB_PTR: /* I-MMU 64k TSB pointer */
        {
            /* env->immuregs[5] holds I-MMU TSB register value
               env->immuregs[6] holds I-MMU Tag Access register value */
            ret = ultrasparc_tsb_pointer(env->immu.tsb, env->immu.tag_access,
                                         64*1024);
            break;
        }
    case ASI_ITLB_DATA_ACCESS: /* I-MMU data access */
        {
            int reg = (addr >> 3) & 0x3f;

            ret = env->itlb[reg].tte;
            break;
        }
    case ASI_ITLB_TAG_READ: /* I-MMU tag read */
        {
            int reg = (addr >> 3) & 0x3f;

            ret = env->itlb[reg].tag;
            break;
        }
    case ASI_DMMU: /* D-MMU regs */
        {
            int reg = (addr >> 3) & 0xf;

            if (reg == 0) {
                /* D-TSB Tag Target register */
                ret = ultrasparc_tag_target(env->dmmu.tag_access);
            } else {
                ret = env->dmmuregs[reg];
            }
            break;
        }
    case ASI_DMMU_TSB_8KB_PTR: /* D-MMU 8k TSB pointer */
        {
            /* env->dmmuregs[5] holds D-MMU TSB register value
               env->dmmuregs[6] holds D-MMU Tag Access register value */
            ret = ultrasparc_tsb_pointer(env->dmmu.tsb, env->dmmu.tag_access,
                                         8*1024);
            break;
        }
    case ASI_DMMU_TSB_64KB_PTR: /* D-MMU 64k TSB pointer */
        {
            /* env->dmmuregs[5] holds D-MMU TSB register value
               env->dmmuregs[6] holds D-MMU Tag Access register value */
            ret = ultrasparc_tsb_pointer(env->dmmu.tsb, env->dmmu.tag_access,
                                         64*1024);
            break;
        }
    case ASI_DTLB_DATA_ACCESS: /* D-MMU data access */
        {
            int reg = (addr >> 3) & 0x3f;

            ret = env->dtlb[reg].tte;
            break;
        }
    case ASI_DTLB_TAG_READ: /* D-MMU tag read */
        {
            int reg = (addr >> 3) & 0x3f;

            ret = env->dtlb[reg].tag;
            break;
        }
    case ASI_INTR_DISPATCH_STAT: /* Interrupt dispatch, RO */
        break;
    case ASI_INTR_RECEIVE: /* Interrupt data receive */
        ret = env->ivec_status;
        break;
    case ASI_INTR_R: /* Incoming interrupt vector, RO */
        {
            int reg = (addr >> 4) & 0x3;
            if (reg < 3) {
                ret = env->ivec_data[reg];
            }
            break;
        }
    case ASI_DCACHE_DATA: /* D-cache data */
    case ASI_DCACHE_TAG: /* D-cache tag access */
    case ASI_ESTATE_ERROR_EN: /* E-cache error enable */
    case ASI_AFSR: /* E-cache asynchronous fault status */
    case ASI_AFAR: /* E-cache asynchronous fault address */
    case ASI_EC_TAG_DATA: /* E-cache tag data */
    case ASI_IC_INSTR: /* I-cache instruction access */
    case ASI_IC_TAG: /* I-cache tag access */
    case ASI_IC_PRE_DECODE: /* I-cache predecode */
    case ASI_IC_NEXT_FIELD: /* I-cache LRU etc. */
    case ASI_EC_W: /* E-cache tag */
    case ASI_EC_R: /* E-cache tag */
        break;
    case ASI_DMMU_TSB_DIRECT_PTR: /* D-MMU data pointer */
    case ASI_ITLB_DATA_IN: /* I-MMU data in, WO */
    case ASI_IMMU_DEMAP: /* I-MMU demap, WO */
    case ASI_DTLB_DATA_IN: /* D-MMU data in, WO */
    case ASI_DMMU_DEMAP: /* D-MMU demap, WO */
    case ASI_INTR_W: /* Interrupt vector, WO */
    default:
        cpu_unassigned_access(cs, addr, false, false, 1, size);
        ret = 0;
        break;
    }

    /* Convert to signed number */
    if (sign) {
        switch (size) {
        case 1:
            ret = (int8_t) ret;
            break;
        case 2:
            ret = (int16_t) ret;
            break;
        case 4:
            ret = (int32_t) ret;
            break;
        default:
            break;
        }
    }
#ifdef DEBUG_ASI
    dump_asi("read ", last_addr, asi, size, ret);
#endif
    return ret;
}

void helper_st_asi(CPUSPARCState *env, target_ulong addr, target_ulong val,
                   int asi, uint32_t memop)
{
    int size = 1 << (memop & MO_SIZE);
    SPARCCPU *cpu = sparc_env_get_cpu(env);
    CPUState *cs = CPU(cpu);

#ifdef DEBUG_ASI
    dump_asi("write", addr, asi, size, val);
#endif

    asi &= 0xff;

    if ((asi < 0x80 && (env->pstate & PS_PRIV) == 0)
        || (cpu_has_hypervisor(env)
            && asi >= 0x30 && asi < 0x80
            && !(env->hpstate & HS_PRIV))) {
        cpu_raise_exception_ra(env, TT_PRIV_ACT, GETPC());
    }

    do_check_align(env, addr, size - 1, GETPC());
    addr = asi_address_mask(env, asi, addr);

    switch (asi) {
    case ASI_AIUP:  /* As if user primary */
    case ASI_AIUS:  /* As if user secondary */
    case ASI_AIUPL: /* As if user primary LE */
    case ASI_AIUSL: /* As if user secondary LE */
    case ASI_P:  /* Primary */
    case ASI_S:  /* Secondary */
    case ASI_PL: /* Primary LE */
    case ASI_SL: /* Secondary LE */
    case ASI_REAL:      /* Bypass */
    case ASI_REAL_IO:   /* Bypass, non-cacheable */
    case ASI_REAL_L:    /* Bypass LE */
    case ASI_REAL_IO_L: /* Bypass, non-cacheable LE */
    case ASI_N:  /* Nucleus */
    case ASI_NL: /* Nucleus Little Endian (LE) */
    case ASI_NUCLEUS_QUAD_LDD:   /* Nucleus quad LDD 128 bit atomic */
    case ASI_NUCLEUS_QUAD_LDD_L: /* Nucleus quad LDD 128 bit atomic LE */
    case ASI_TWINX_AIUP:   /* As if user primary, twinx */
    case ASI_TWINX_AIUS:   /* As if user secondary, twinx */
    case ASI_TWINX_REAL:   /* Real address, twinx */
    case ASI_TWINX_AIUP_L: /* As if user primary, twinx, LE */
    case ASI_TWINX_AIUS_L: /* As if user secondary, twinx, LE */
    case ASI_TWINX_REAL_L: /* Real address, twinx, LE */
    case ASI_TWINX_N:  /* Nucleus, twinx */
    case ASI_TWINX_NL: /* Nucleus, twinx, LE */
    /* ??? From the UA2011 document; overlaps BLK_INIT_QUAD_LDD_* */
    case ASI_TWINX_P:  /* Primary, twinx */
    case ASI_TWINX_PL: /* Primary, twinx, LE */
    case ASI_TWINX_S:  /* Secondary, twinx */
    case ASI_TWINX_SL: /* Secondary, twinx, LE */
        /* These are always handled inline. */
        g_assert_not_reached();

    case ASI_UPA_CONFIG: /* UPA config */
        /* XXX */
        return;
    case ASI_LSU_CONTROL: /* LSU */
        env->lsu = val & (DMMU_E | IMMU_E);
        return;
    case ASI_IMMU: /* I-MMU regs */
        {
            int reg = (addr >> 3) & 0xf;
            uint64_t oldreg;

            oldreg = env->immuregs[reg];
            switch (reg) {
            case 0: /* RO */
                return;
            case 1: /* Not in I-MMU */
            case 2:
                return;
            case 3: /* SFSR */
                if ((val & 1) == 0) {
                    val = 0; /* Clear SFSR */
                }
                env->immu.sfsr = val;
                break;
            case 4: /* RO */
                return;
            case 5: /* TSB access */
                DPRINTF_MMU("immu TSB write: 0x%016" PRIx64 " -> 0x%016"
                            PRIx64 "\n", env->immu.tsb, val);
                env->immu.tsb = val;
                break;
            case 6: /* Tag access */
                env->immu.tag_access = val;
                break;
            case 7:
            case 8:
                return;
            default:
                break;
            }

            if (oldreg != env->immuregs[reg]) {
                DPRINTF_MMU("immu change reg[%d]: 0x%016" PRIx64 " -> 0x%016"
                            PRIx64 "\n", reg, oldreg, env->immuregs[reg]);
            }
#ifdef DEBUG_MMU
            dump_mmu(stdout, fprintf, env);
#endif
            return;
        }
    case ASI_ITLB_DATA_IN: /* I-MMU data in */
        replace_tlb_1bit_lru(env->itlb, env->immu.tag_access, val, "immu", env);
        return;
    case ASI_ITLB_DATA_ACCESS: /* I-MMU data access */
        {
            /* TODO: auto demap */

            unsigned int i = (addr >> 3) & 0x3f;

            replace_tlb_entry(&env->itlb[i], env->immu.tag_access, val, env);

#ifdef DEBUG_MMU
            DPRINTF_MMU("immu data access replaced entry [%i]\n", i);
            dump_mmu(stdout, fprintf, env);
#endif
            return;
        }
    case ASI_IMMU_DEMAP: /* I-MMU demap */
        demap_tlb(env->itlb, addr, "immu", env);
        return;
    case ASI_DMMU: /* D-MMU regs */
        {
            int reg = (addr >> 3) & 0xf;
            uint64_t oldreg;

            oldreg = env->dmmuregs[reg];
            switch (reg) {
            case 0: /* RO */
            case 4:
                return;
            case 3: /* SFSR */
                if ((val & 1) == 0) {
                    val = 0; /* Clear SFSR, Fault address */
                    env->dmmu.sfar = 0;
                }
                env->dmmu.sfsr = val;
                break;
            case 1: /* Primary context */
                env->dmmu.mmu_primary_context = val;
                /* can be optimized to only flush MMU_USER_IDX
                   and MMU_KERNEL_IDX entries */
                tlb_flush(CPU(cpu));
                break;
            case 2: /* Secondary context */
                env->dmmu.mmu_secondary_context = val;
                /* can be optimized to only flush MMU_USER_SECONDARY_IDX
                   and MMU_KERNEL_SECONDARY_IDX entries */
                tlb_flush(CPU(cpu));
                break;
            case 5: /* TSB access */
                DPRINTF_MMU("dmmu TSB write: 0x%016" PRIx64 " -> 0x%016"
                            PRIx64 "\n", env->dmmu.tsb, val);
                env->dmmu.tsb = val;
                break;
            case 6: /* Tag access */
                env->dmmu.tag_access = val;
                break;
            case 7: /* Virtual Watchpoint */
            case 8: /* Physical Watchpoint */
            default:
                env->dmmuregs[reg] = val;
                break;
            }

            if (oldreg != env->dmmuregs[reg]) {
                DPRINTF_MMU("dmmu change reg[%d]: 0x%016" PRIx64 " -> 0x%016"
                            PRIx64 "\n", reg, oldreg, env->dmmuregs[reg]);
            }
#ifdef DEBUG_MMU
            dump_mmu(stdout, fprintf, env);
#endif
            return;
        }
    case ASI_DTLB_DATA_IN: /* D-MMU data in */
        replace_tlb_1bit_lru(env->dtlb, env->dmmu.tag_access, val, "dmmu", env);
        return;
    case ASI_DTLB_DATA_ACCESS: /* D-MMU data access */
        {
            unsigned int i = (addr >> 3) & 0x3f;

            replace_tlb_entry(&env->dtlb[i], env->dmmu.tag_access, val, env);

#ifdef DEBUG_MMU
            DPRINTF_MMU("dmmu data access replaced entry [%i]\n", i);
            dump_mmu(stdout, fprintf, env);
#endif
            return;
        }
    case ASI_DMMU_DEMAP: /* D-MMU demap */
        demap_tlb(env->dtlb, addr, "dmmu", env);
        return;
    case ASI_INTR_RECEIVE: /* Interrupt data receive */
        env->ivec_status = val & 0x20;
        return;
    case ASI_DCACHE_DATA: /* D-cache data */
    case ASI_DCACHE_TAG: /* D-cache tag access */
    case ASI_ESTATE_ERROR_EN: /* E-cache error enable */
    case ASI_AFSR: /* E-cache asynchronous fault status */
    case ASI_AFAR: /* E-cache asynchronous fault address */
    case ASI_EC_TAG_DATA: /* E-cache tag data */
    case ASI_IC_INSTR: /* I-cache instruction access */
    case ASI_IC_TAG: /* I-cache tag access */
    case ASI_IC_PRE_DECODE: /* I-cache predecode */
    case ASI_IC_NEXT_FIELD: /* I-cache LRU etc. */
    case ASI_EC_W: /* E-cache tag */
    case ASI_EC_R: /* E-cache tag */
        return;
    case ASI_IMMU_TSB_8KB_PTR: /* I-MMU 8k TSB pointer, RO */
    case ASI_IMMU_TSB_64KB_PTR: /* I-MMU 64k TSB pointer, RO */
    case ASI_ITLB_TAG_READ: /* I-MMU tag read, RO */
    case ASI_DMMU_TSB_8KB_PTR: /* D-MMU 8k TSB pointer, RO */
    case ASI_DMMU_TSB_64KB_PTR: /* D-MMU 64k TSB pointer, RO */
    case ASI_DMMU_TSB_DIRECT_PTR: /* D-MMU data pointer, RO */
    case ASI_DTLB_TAG_READ: /* D-MMU tag read, RO */
    case ASI_INTR_DISPATCH_STAT: /* Interrupt dispatch, RO */
    case ASI_INTR_R: /* Incoming interrupt vector, RO */
    case ASI_PNF: /* Primary no-fault, RO */
    case ASI_SNF: /* Secondary no-fault, RO */
    case ASI_PNFL: /* Primary no-fault LE, RO */
    case ASI_SNFL: /* Secondary no-fault LE, RO */
    default:
        cpu_unassigned_access(cs, addr, true, false, 1, size);
        return;
    }
}
#endif /* CONFIG_USER_ONLY */
#endif /* TARGET_SPARC64 */

#if !defined(CONFIG_USER_ONLY)
#ifndef TARGET_SPARC64
void sparc_cpu_unassigned_access(CPUState *cs, hwaddr addr,
                                 bool is_write, bool is_exec, int is_asi,
                                 unsigned size)
{
    SPARCCPU *cpu = SPARC_CPU(cs);
    CPUSPARCState *env = &cpu->env;
    int fault_type;

#ifdef DEBUG_UNASSIGNED
    if (is_asi) {
        printf("Unassigned mem %s access of %d byte%s to " TARGET_FMT_plx
               " asi 0x%02x from " TARGET_FMT_lx "\n",
               is_exec ? "exec" : is_write ? "write" : "read", size,
               size == 1 ? "" : "s", addr, is_asi, env->pc);
    } else {
        printf("Unassigned mem %s access of %d byte%s to " TARGET_FMT_plx
               " from " TARGET_FMT_lx "\n",
               is_exec ? "exec" : is_write ? "write" : "read", size,
               size == 1 ? "" : "s", addr, env->pc);
    }
#endif
    /* Don't overwrite translation and access faults */
    fault_type = (env->mmuregs[3] & 0x1c) >> 2;
    if ((fault_type > 4) || (fault_type == 0)) {
        env->mmuregs[3] = 0; /* Fault status register */
        if (is_asi) {
            env->mmuregs[3] |= 1 << 16;
        }
        if (env->psrs) {
            env->mmuregs[3] |= 1 << 5;
        }
        if (is_exec) {
            env->mmuregs[3] |= 1 << 6;
        }
        if (is_write) {
            env->mmuregs[3] |= 1 << 7;
        }
        env->mmuregs[3] |= (5 << 2) | 2;
        /* SuperSPARC will never place instruction fault addresses in the FAR */
        if (!is_exec) {
            env->mmuregs[4] = addr; /* Fault address register */
        }
    }
    /* overflow (same type fault was not read before another fault) */
    if (fault_type == ((env->mmuregs[3] & 0x1c)) >> 2) {
        env->mmuregs[3] |= 1;
    }

    if ((env->mmuregs[0] & MMU_E) && !(env->mmuregs[0] & MMU_NF)) {
        int tt = is_exec ? TT_CODE_ACCESS : TT_DATA_ACCESS;
        cpu_raise_exception_ra(env, tt, GETPC());
    }

    /* flush neverland mappings created during no-fault mode,
       so the subsequent MMU faults report proper fault types */
    if (env->mmuregs[0] & MMU_NF) {
        tlb_flush(cs);
    }
}
#else
void sparc_cpu_unassigned_access(CPUState *cs, hwaddr addr,
                                 bool is_write, bool is_exec, int is_asi,
                                 unsigned size)
{
    SPARCCPU *cpu = SPARC_CPU(cs);
    CPUSPARCState *env = &cpu->env;
    int tt = is_exec ? TT_CODE_ACCESS : TT_DATA_ACCESS;

#ifdef DEBUG_UNASSIGNED
    printf("Unassigned mem access to " TARGET_FMT_plx " from " TARGET_FMT_lx
           "\n", addr, env->pc);
#endif

    cpu_raise_exception_ra(env, tt, GETPC());
}
#endif
#endif

#if !defined(CONFIG_USER_ONLY)
void QEMU_NORETURN sparc_cpu_do_unaligned_access(CPUState *cs, vaddr addr,
                                                 MMUAccessType access_type,
                                                 int mmu_idx,
                                                 uintptr_t retaddr)
{
    SPARCCPU *cpu = SPARC_CPU(cs);
    CPUSPARCState *env = &cpu->env;

#ifdef DEBUG_UNALIGNED
    printf("Unaligned access to 0x" TARGET_FMT_lx " from 0x" TARGET_FMT_lx
           "\n", addr, env->pc);
#endif
    cpu_raise_exception_ra(env, TT_UNALIGNED, retaddr);
}

/* try to fill the TLB and return an exception if error. If retaddr is
   NULL, it means that the function was called in C code (i.e. not
   from generated code or from helper.c) */
/* XXX: fix it to restore all registers */
void tlb_fill(CPUState *cs, target_ulong addr, MMUAccessType access_type,
              int mmu_idx, uintptr_t retaddr)
{
    int ret;

    ret = sparc_cpu_handle_mmu_fault(cs, addr, access_type, mmu_idx);
    if (ret) {
        cpu_loop_exit_restore(cs, retaddr);
    }
}
#endif