/*
 *  PowerPC MMU, TLB, SLB and BAT emulation helpers for QEMU.
 *
 *  Copyright (c) 2003-2007 Jocelyn Mayer
 *
 * This library is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License as published by the Free Software Foundation; either
 * version 2.1 of the License, or (at your option) any later version.
 *
 * This library is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with this library; if not, see <http://www.gnu.org/licenses/>.
 */

#include "qemu/osdep.h"
#include "qemu/units.h"
#include "cpu.h"
#include "sysemu/kvm.h"
#include "kvm_ppc.h"
#include "mmu-hash64.h"
#include "mmu-hash32.h"
#include "exec/exec-all.h"
#include "exec/log.h"
#include "helper_regs.h"
#include "qemu/error-report.h"
#include "qemu/main-loop.h"
#include "qemu/qemu-print.h"
#include "internal.h"
#include "mmu-book3s-v3.h"
#include "mmu-radix64.h"

/* #define DEBUG_MMU */
/* #define DEBUG_BATS */
/* #define DEBUG_SOFTWARE_TLB */
/* #define DUMP_PAGE_TABLES */
/* #define FLUSH_ALL_TLBS */

#ifdef DEBUG_MMU
#  define LOG_MMU_STATE(cpu) log_cpu_state_mask(CPU_LOG_MMU, (cpu), 0)
#else
#  define LOG_MMU_STATE(cpu) do { } while (0)
#endif

#ifdef DEBUG_SOFTWARE_TLB
#  define LOG_SWTLB(...) qemu_log_mask(CPU_LOG_MMU, __VA_ARGS__)
#else
#  define LOG_SWTLB(...) do { } while (0)
#endif

#ifdef DEBUG_BATS
#  define LOG_BATS(...) qemu_log_mask(CPU_LOG_MMU, __VA_ARGS__)
#else
#  define LOG_BATS(...) do { } while (0)
#endif

/*****************************************************************************/
/* PowerPC MMU emulation */

static int pp_check(int key, int pp, int nx)
{
    int access;

    /* Compute access rights */
    access = 0;
    if (key == 0) {
        switch (pp) {
        case 0x0:
        case 0x1:
        case 0x2:
            access |= PAGE_WRITE;
            /* fall through */
        case 0x3:
            access |= PAGE_READ;
            break;
        }
    } else {
        switch (pp) {
        case 0x0:
            access = 0;
            break;
        case 0x1:
        case 0x3:
            access = PAGE_READ;
            break;
        case 0x2:
            access = PAGE_READ | PAGE_WRITE;
            break;
        }
    }
    if (nx == 0) {
        access |= PAGE_EXEC;
    }

    return access;
}

static int check_prot(int prot, MMUAccessType access_type)
{
    return prot & prot_for_access_type(access_type) ? 0 : -2;
}
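
/*
 * A compact restatement of pp_check() above, kept as a reading aid
 * (derived from the code rather than quoted from a manual):
 *
 *   key == 0:  PP = 0, 1, 2 -> read/write    PP = 3 -> read-only
 *   key == 1:  PP = 0 -> no access           PP = 1, 3 -> read-only
 *              PP = 2 -> read/write
 *
 * PAGE_EXEC is then added whenever the no-execute bit is clear
 * (nx == 0).
 */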

int ppc6xx_tlb_getnum(CPUPPCState *env, target_ulong eaddr,
                      int way, int is_code)
{
    int nr;

    /* Select TLB num in a way from address */
    nr = (eaddr >> TARGET_PAGE_BITS) & (env->tlb_per_way - 1);
    /* Select TLB way */
    nr += env->tlb_per_way * way;
    /* 6xx have separate TLBs for instructions and data */
    if (is_code && env->id_tlbs == 1) {
        nr += env->nb_tlb;
    }

    return nr;
}

static int ppc6xx_tlb_pte_check(mmu_ctx_t *ctx, target_ulong pte0,
                                target_ulong pte1, int h,
                                MMUAccessType access_type)
{
    target_ulong ptem, mmask;
    int access, ret, pteh, ptev, pp;

    ret = -1;
    /* Check validity and table match */
    ptev = pte_is_valid(pte0);
    pteh = (pte0 >> 6) & 1;
    if (ptev && h == pteh) {
        /* Check vsid & api */
        ptem = pte0 & PTE_PTEM_MASK;
        mmask = PTE_CHECK_MASK;
        pp = pte1 & 0x00000003;
        if (ptem == ctx->ptem) {
            if (ctx->raddr != (hwaddr)-1ULL) {
                /* all matches should have equal RPN, WIMG & PP */
                if ((ctx->raddr & mmask) != (pte1 & mmask)) {
                    qemu_log_mask(CPU_LOG_MMU, "Bad RPN/WIMG/PP\n");
                    return -3;
                }
            }
            /* Compute access rights */
            access = pp_check(ctx->key, pp, ctx->nx);
            /* Keep the matching PTE information */
            ctx->raddr = pte1;
            ctx->prot = access;
            ret = check_prot(ctx->prot, access_type);
            if (ret == 0) {
                /* Access granted */
                qemu_log_mask(CPU_LOG_MMU, "PTE access granted !\n");
            } else {
                /* Access right violation */
                qemu_log_mask(CPU_LOG_MMU, "PTE access rejected\n");
            }
        }
    }

    return ret;
}

static int pte_update_flags(mmu_ctx_t *ctx, target_ulong *pte1p,
                            int ret, MMUAccessType access_type)
{
    int store = 0;

    /* Update page flags */
    if (!(*pte1p & 0x00000100)) {
        /* Update accessed flag */
        *pte1p |= 0x00000100;
        store = 1;
    }
    if (!(*pte1p & 0x00000080)) {
        if (access_type == MMU_DATA_STORE && ret == 0) {
            /* Update changed flag */
            *pte1p |= 0x00000080;
            store = 1;
        } else {
            /* Force page fault for first write access */
            ctx->prot &= ~PAGE_WRITE;
        }
    }

    return store;
}
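
/*
 * A note on the magic numbers used by pte_update_flags() above: in the
 * second word of a 32-bit hash PTE (pte1), 0x00000100 is the Referenced
 * (R) bit and 0x00000080 is the Changed (C) bit.  The helper sets R on
 * any access and C only on a store that was actually permitted; a store
 * that finds C clear instead loses PAGE_WRITE, so the first write still
 * faults and C can be set then.  A non-zero return value tells the
 * caller that the updated PTE needs to be written back.
 */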

/* Software driven TLB helpers */

static int ppc6xx_tlb_check(CPUPPCState *env, mmu_ctx_t *ctx,
                            target_ulong eaddr, MMUAccessType access_type)
{
    ppc6xx_tlb_t *tlb;
    int nr, best, way;
    int ret;

    best = -1;
    ret = -1; /* No TLB found */
    for (way = 0; way < env->nb_ways; way++) {
        nr = ppc6xx_tlb_getnum(env, eaddr, way, access_type == MMU_INST_FETCH);
        tlb = &env->tlb.tlb6[nr];
        /* This test "emulates" the PTE index match for hardware TLBs */
        if ((eaddr & TARGET_PAGE_MASK) != tlb->EPN) {
            LOG_SWTLB("TLB %d/%d %s [" TARGET_FMT_lx " " TARGET_FMT_lx
                      "] <> " TARGET_FMT_lx "\n", nr, env->nb_tlb,
                      pte_is_valid(tlb->pte0) ? "valid" : "inval",
                      tlb->EPN, tlb->EPN + TARGET_PAGE_SIZE, eaddr);
            continue;
        }
        LOG_SWTLB("TLB %d/%d %s " TARGET_FMT_lx " <> " TARGET_FMT_lx " "
                  TARGET_FMT_lx " %c %c\n", nr, env->nb_tlb,
                  pte_is_valid(tlb->pte0) ? "valid" : "inval",
                  tlb->EPN, eaddr, tlb->pte1,
                  access_type == MMU_DATA_STORE ? 'S' : 'L',
                  access_type == MMU_INST_FETCH ? 'I' : 'D');
        switch (ppc6xx_tlb_pte_check(ctx, tlb->pte0, tlb->pte1,
                                     0, access_type)) {
        case -3:
            /* TLB inconsistency */
            return -1;
        case -2:
            /* Access violation */
            ret = -2;
            best = nr;
            break;
        case -1:
        default:
            /* No match */
            break;
        case 0:
            /* access granted */
            /*
             * XXX: we should keep looping to check all TLBs for
             *      consistency, but we can stop early since the result
             *      would be undefined if the TLBs are not consistent.
             */
            ret = 0;
            best = nr;
            goto done;
        }
    }
    if (best != -1) {
    done:
        LOG_SWTLB("found TLB at addr " TARGET_FMT_plx " prot=%01x ret=%d\n",
                  ctx->raddr & TARGET_PAGE_MASK, ctx->prot, ret);
        /* Update page flags */
        pte_update_flags(ctx, &env->tlb.tlb6[best].pte1, ret, access_type);
    }

    return ret;
}

/* Perform BAT hit & translation */
static inline void bat_size_prot(CPUPPCState *env, target_ulong *blp,
                                 int *validp, int *protp, target_ulong *BATu,
                                 target_ulong *BATl)
{
    target_ulong bl;
    int pp, valid, prot;

    bl = (*BATu & 0x00001FFC) << 15;
    valid = 0;
    prot = 0;
    if (((msr_pr == 0) && (*BATu & 0x00000002)) ||
        ((msr_pr != 0) && (*BATu & 0x00000001))) {
        valid = 1;
        pp = *BATl & 0x00000003;
        if (pp != 0) {
            prot = PAGE_READ | PAGE_EXEC;
            if (pp == 0x2) {
                prot |= PAGE_WRITE;
            }
        }
    }
    *blp = bl;
    *validp = valid;
    *protp = prot;
}
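
/*
 * How bat_size_prot() above decodes a BAT pair, restated from the code
 * (the 128 KiB .. 256 MiB range is what the 11-bit BL field implies,
 * not something checked against a manual here):
 *
 *   BATu & 0x00001FFC  (BL)  block length; shifted left by 15 it forms
 *                            the address mask for blocks of 128 KiB up
 *                            to 256 MiB
 *   BATu & 0x00000002  (Vs)  BAT valid for supervisor mode (MSR_PR=0)
 *   BATu & 0x00000001  (Vp)  BAT valid for user/problem mode (MSR_PR=1)
 *   BATl & 0x00000003  (PP)  0 -> no access, 2 -> read/write,
 *                            1 or 3 -> read-only; execute permission is
 *                            granted whenever the block is accessible
 */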

static int get_bat_6xx_tlb(CPUPPCState *env, mmu_ctx_t *ctx,
                           target_ulong virtual, MMUAccessType access_type)
{
    target_ulong *BATlt, *BATut, *BATu, *BATl;
    target_ulong BEPIl, BEPIu, bl;
    int i, valid, prot;
    int ret = -1;
    bool ifetch = access_type == MMU_INST_FETCH;

    LOG_BATS("%s: %cBAT v " TARGET_FMT_lx "\n", __func__,
             ifetch ? 'I' : 'D', virtual);
    if (ifetch) {
        BATlt = env->IBAT[1];
        BATut = env->IBAT[0];
    } else {
        BATlt = env->DBAT[1];
        BATut = env->DBAT[0];
    }
    for (i = 0; i < env->nb_BATs; i++) {
        BATu = &BATut[i];
        BATl = &BATlt[i];
        BEPIu = *BATu & 0xF0000000;
        BEPIl = *BATu & 0x0FFE0000;
        bat_size_prot(env, &bl, &valid, &prot, BATu, BATl);
        LOG_BATS("%s: %cBAT%d v " TARGET_FMT_lx " BATu " TARGET_FMT_lx
                 " BATl " TARGET_FMT_lx "\n", __func__,
                 ifetch ? 'I' : 'D', i, virtual, *BATu, *BATl);
        if ((virtual & 0xF0000000) == BEPIu &&
            ((virtual & 0x0FFE0000) & ~bl) == BEPIl) {
            /* BAT matches */
            if (valid != 0) {
                /* Get physical address */
                ctx->raddr = (*BATl & 0xF0000000) |
                    ((virtual & 0x0FFE0000 & bl) | (*BATl & 0x0FFE0000)) |
                    (virtual & 0x0001F000);
                /* Compute access rights */
                ctx->prot = prot;
                ret = check_prot(ctx->prot, access_type);
                if (ret == 0) {
                    LOG_BATS("BAT %d match: r " TARGET_FMT_plx " prot=%c%c\n",
                             i, ctx->raddr, ctx->prot & PAGE_READ ? 'R' : '-',
                             ctx->prot & PAGE_WRITE ? 'W' : '-');
                }
                break;
            }
        }
    }
    if (ret < 0) {
#if defined(DEBUG_BATS)
        if (qemu_log_enabled()) {
            LOG_BATS("no BAT match for " TARGET_FMT_lx ":\n", virtual);
            for (i = 0; i < 4; i++) {
                BATu = &BATut[i];
                BATl = &BATlt[i];
                BEPIu = *BATu & 0xF0000000;
                BEPIl = *BATu & 0x0FFE0000;
                bl = (*BATu & 0x00001FFC) << 15;
                LOG_BATS("%s: %cBAT%d v " TARGET_FMT_lx " BATu " TARGET_FMT_lx
                         " BATl " TARGET_FMT_lx "\n\t" TARGET_FMT_lx " "
                         TARGET_FMT_lx " " TARGET_FMT_lx "\n",
                         __func__, ifetch ? 'I' : 'D', i, virtual,
                         *BATu, *BATl, BEPIu, BEPIl, bl);
            }
        }
#endif
    }
    /* No hit */
    return ret;
}

/* Perform segment based translation */
static int get_segment_6xx_tlb(CPUPPCState *env, mmu_ctx_t *ctx,
                               target_ulong eaddr, MMUAccessType access_type,
                               int type)
{
    PowerPCCPU *cpu = env_archcpu(env);
    hwaddr hash;
    target_ulong vsid;
    int ds, pr, target_page_bits;
    int ret;
    target_ulong sr, pgidx;

    pr = msr_pr;
    ctx->eaddr = eaddr;

    sr = env->sr[eaddr >> 28];
    ctx->key = (((sr & 0x20000000) && (pr != 0)) ||
                ((sr & 0x40000000) && (pr == 0))) ? 1 : 0;
    ds = sr & 0x80000000 ? 1 : 0;
    ctx->nx = sr & 0x10000000 ? 1 : 0;
    vsid = sr & 0x00FFFFFF;
    target_page_bits = TARGET_PAGE_BITS;
    qemu_log_mask(CPU_LOG_MMU,
                  "Check segment v=" TARGET_FMT_lx " %d " TARGET_FMT_lx
                  " nip=" TARGET_FMT_lx " lr=" TARGET_FMT_lx
                  " ir=%d dr=%d pr=%d %d t=%d\n",
                  eaddr, (int)(eaddr >> 28), sr, env->nip, env->lr,
                  (int)msr_ir, (int)msr_dr, pr != 0 ? 1 : 0,
                  access_type == MMU_DATA_STORE, type);
    pgidx = (eaddr & ~SEGMENT_MASK_256M) >> target_page_bits;
    hash = vsid ^ pgidx;
    ctx->ptem = (vsid << 7) | (pgidx >> 10);

    qemu_log_mask(CPU_LOG_MMU,
                  "pte segment: key=%d ds %d nx %d vsid " TARGET_FMT_lx "\n",
                  ctx->key, ds, ctx->nx, vsid);
    ret = -1;
    if (!ds) {
        /* Check if instruction fetch is allowed, if needed */
        if (type != ACCESS_CODE || ctx->nx == 0) {
            /* Page address translation */
            qemu_log_mask(CPU_LOG_MMU, "htab_base " TARGET_FMT_plx
                          " htab_mask " TARGET_FMT_plx
                          " hash " TARGET_FMT_plx "\n",
                          ppc_hash32_hpt_base(cpu), ppc_hash32_hpt_mask(cpu),
                          hash);
            ctx->hash[0] = hash;
            ctx->hash[1] = ~hash;

            /* Initialize real address with an invalid value */
            ctx->raddr = (hwaddr)-1ULL;
            /* Software TLB search */
            ret = ppc6xx_tlb_check(env, ctx, eaddr, access_type);
#if defined(DUMP_PAGE_TABLES)
            if (qemu_loglevel_mask(CPU_LOG_MMU)) {
                CPUState *cs = env_cpu(env);
                hwaddr curaddr;
                uint32_t a0, a1, a2, a3;

                qemu_log("Page table: " TARGET_FMT_plx " len " TARGET_FMT_plx
                         "\n", ppc_hash32_hpt_base(cpu),
                         ppc_hash32_hpt_mask(cpu) + 0x80);
                for (curaddr = ppc_hash32_hpt_base(cpu);
                     curaddr < (ppc_hash32_hpt_base(cpu)
                                + ppc_hash32_hpt_mask(cpu) + 0x80);
                     curaddr += 16) {
                    a0 = ldl_phys(cs->as, curaddr);
                    a1 = ldl_phys(cs->as, curaddr + 4);
                    a2 = ldl_phys(cs->as, curaddr + 8);
                    a3 = ldl_phys(cs->as, curaddr + 12);
                    if (a0 != 0 || a1 != 0 || a2 != 0 || a3 != 0) {
                        qemu_log(TARGET_FMT_plx ": %08x %08x %08x %08x\n",
                                 curaddr, a0, a1, a2, a3);
                    }
                }
            }
#endif
        } else {
            qemu_log_mask(CPU_LOG_MMU, "No access allowed\n");
            ret = -3;
        }
    } else {
        target_ulong sr;

        qemu_log_mask(CPU_LOG_MMU, "direct store...\n");
        /* Direct-store segment : absolutely *BUGGY* for now */

        /*
         * Direct-store implies a 32-bit MMU.
         * Check the Segment Register's bus unit ID (BUID).
         */
        sr = env->sr[eaddr >> 28];
        if ((sr & 0x1FF00000) >> 20 == 0x07f) {
            /*
             * Memory-forced I/O controller interface access
             *
             * If T=1 and BUID=x'07F', the 601 performs a memory
             * access to SR[28-31] LA[4-31], bypassing all protection
             * mechanisms.
             */
            ctx->raddr = ((sr & 0xF) << 28) | (eaddr & 0x0FFFFFFF);
            ctx->prot = PAGE_READ | PAGE_WRITE | PAGE_EXEC;
            return 0;
        }

        switch (type) {
        case ACCESS_INT:
            /* Integer load/store : only access allowed */
            break;
        case ACCESS_CODE:
            /* No code fetch is allowed in direct-store areas */
            return -4;
        case ACCESS_FLOAT:
            /* Floating point load/store */
            return -4;
        case ACCESS_RES:
            /* lwarx, ldarx or stwcx. */
            return -4;
        case ACCESS_CACHE:
            /*
             * dcba, dcbt, dcbtst, dcbf, dcbi, dcbst, dcbz, or icbi
             *
             * Should make the instruction do a no-op.  As it already
             * does a no-op, it's quite easy :-)
             */
            ctx->raddr = eaddr;
            return 0;
        case ACCESS_EXT:
            /* eciwx or ecowx */
            return -4;
        default:
            qemu_log_mask(CPU_LOG_MMU, "ERROR: instruction should not need "
                          "address translation\n");
            return -4;
        }
        if ((access_type == MMU_DATA_STORE || ctx->key != 1) &&
            (access_type == MMU_DATA_LOAD || ctx->key != 0)) {
            ctx->raddr = eaddr;
            ret = 2;
        } else {
            ret = -2;
        }
    }

    return ret;
}

/* Generic TLB check function for embedded PowerPC implementations */
int ppcemb_tlb_check(CPUPPCState *env, ppcemb_tlb_t *tlb,
                     hwaddr *raddrp,
                     target_ulong address, uint32_t pid, int ext,
                     int i)
{
    target_ulong mask;

    /* Check valid flag */
    if (!(tlb->prot & PAGE_VALID)) {
        return -1;
    }
    mask = ~(tlb->size - 1);
    LOG_SWTLB("%s: TLB %d address " TARGET_FMT_lx " PID %u <=> " TARGET_FMT_lx
              " " TARGET_FMT_lx " %u %x\n", __func__, i, address, pid,
              tlb->EPN, mask, (uint32_t)tlb->PID, tlb->prot);
    /* Check PID */
    if (tlb->PID != 0 && tlb->PID != pid) {
        return -1;
    }
    /* Check effective address */
    if ((address & mask) != tlb->EPN) {
        return -1;
    }
    *raddrp = (tlb->RPN & mask) | (address & ~mask);
    if (ext) {
        /* Extend the physical address to 36 bits */
        *raddrp |= (uint64_t)(tlb->RPN & 0xF) << 32;
    }

    return 0;
}

static int mmu40x_get_physical_address(CPUPPCState *env, mmu_ctx_t *ctx,
                                       target_ulong address,
                                       MMUAccessType access_type)
{
    ppcemb_tlb_t *tlb;
    hwaddr raddr;
    int i, ret, zsel, zpr, pr;

    ret = -1;
    raddr = (hwaddr)-1ULL;
    pr = msr_pr;
    for (i = 0; i < env->nb_tlb; i++) {
        tlb = &env->tlb.tlbe[i];
        if (ppcemb_tlb_check(env, tlb, &raddr, address,
                             env->spr[SPR_40x_PID], 0, i) < 0) {
            continue;
        }
        zsel = (tlb->attr >> 4) & 0xF;
        zpr = (env->spr[SPR_40x_ZPR] >> (30 - (2 * zsel))) & 0x3;
        LOG_SWTLB("%s: TLB %d zsel %d zpr %d ty %d attr %08x\n",
                  __func__, i, zsel, zpr, access_type, tlb->attr);
        /* Check execute enable bit */
        switch (zpr) {
        case 0x2:
            if (pr != 0) {
                goto check_perms;
            }
            /* fall through */
        case 0x3:
            /* All accesses granted */
            ctx->prot = PAGE_READ | PAGE_WRITE | PAGE_EXEC;
            ret = 0;
            break;
        case 0x0:
            if (pr != 0) {
                /* Raise Zone protection fault. */
                env->spr[SPR_40x_ESR] = 1 << 22;
                ctx->prot = 0;
                ret = -2;
                break;
            }
            /* fall through */
        case 0x1:
        check_perms:
            /* Check from TLB entry */
            ctx->prot = tlb->prot;
            ret = check_prot(ctx->prot, access_type);
            if (ret == -2) {
                env->spr[SPR_40x_ESR] = 0;
            }
            break;
        }
        if (ret >= 0) {
            ctx->raddr = raddr;
            LOG_SWTLB("%s: access granted " TARGET_FMT_lx " => " TARGET_FMT_plx
                      " %d %d\n", __func__, address, ctx->raddr, ctx->prot,
                      ret);
            return 0;
        }
    }
    LOG_SWTLB("%s: access refused " TARGET_FMT_lx " => " TARGET_FMT_plx
              " %d %d\n", __func__, address, raddr, ctx->prot, ret);

    return ret;
}
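
/*
 * Summary of the 40x zone protection handling above (restating the
 * switch on zpr, not the manuals): each TLB entry selects one of 16
 * zones through its attribute field, and the two ZPR bits for that
 * zone decide how the entry's own permission bits are applied:
 *
 *   zpr == 0  user accesses raise a zone protection fault, supervisor
 *             accesses use the TLB entry's permissions
 *   zpr == 1  user and supervisor both use the TLB entry's permissions
 *   zpr == 2  user uses the TLB entry's permissions, supervisor gets
 *             full access
 *   zpr == 3  user and supervisor both get full access
 */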

void store_40x_sler(CPUPPCState *env, uint32_t val)
{
    /* XXX: TO BE FIXED */
    if (val != 0x00000000) {
        cpu_abort(env_cpu(env),
                  "Little-endian regions are not supported yet\n");
    }
    env->spr[SPR_405_SLER] = val;
}

static int mmubooke_check_tlb(CPUPPCState *env, ppcemb_tlb_t *tlb,
                              hwaddr *raddr, int *prot, target_ulong address,
                              MMUAccessType access_type, int i)
{
    int prot2;

    if (ppcemb_tlb_check(env, tlb, raddr, address,
                         env->spr[SPR_BOOKE_PID],
                         !env->nb_pids, i) >= 0) {
        goto found_tlb;
    }

    if (env->spr[SPR_BOOKE_PID1] &&
        ppcemb_tlb_check(env, tlb, raddr, address,
                         env->spr[SPR_BOOKE_PID1], 0, i) >= 0) {
        goto found_tlb;
    }

    if (env->spr[SPR_BOOKE_PID2] &&
        ppcemb_tlb_check(env, tlb, raddr, address,
                         env->spr[SPR_BOOKE_PID2], 0, i) >= 0) {
        goto found_tlb;
    }

    LOG_SWTLB("%s: TLB entry not found\n", __func__);
    return -1;

found_tlb:

    if (msr_pr != 0) {
        prot2 = tlb->prot & 0xF;
    } else {
        prot2 = (tlb->prot >> 4) & 0xF;
    }

    /* Check the address space */
    if ((access_type == MMU_INST_FETCH ? msr_ir : msr_dr) != (tlb->attr & 1)) {
        LOG_SWTLB("%s: AS doesn't match\n", __func__);
        return -1;
    }

    *prot = prot2;
    if (prot2 & prot_for_access_type(access_type)) {
        LOG_SWTLB("%s: good TLB!\n", __func__);
        return 0;
    }

    LOG_SWTLB("%s: no prot match: %x\n", __func__, prot2);
    return access_type == MMU_INST_FETCH ? -3 : -2;
}

static int mmubooke_get_physical_address(CPUPPCState *env, mmu_ctx_t *ctx,
                                         target_ulong address,
                                         MMUAccessType access_type)
{
    ppcemb_tlb_t *tlb;
    hwaddr raddr;
    int i, ret;

    ret = -1;
    raddr = (hwaddr)-1ULL;
    for (i = 0; i < env->nb_tlb; i++) {
        tlb = &env->tlb.tlbe[i];
        ret = mmubooke_check_tlb(env, tlb, &raddr, &ctx->prot, address,
                                 access_type, i);
        if (ret != -1) {
            break;
        }
    }

    if (ret >= 0) {
        ctx->raddr = raddr;
        LOG_SWTLB("%s: access granted " TARGET_FMT_lx " => " TARGET_FMT_plx
                  " %d %d\n", __func__, address, ctx->raddr, ctx->prot,
                  ret);
    } else {
        LOG_SWTLB("%s: access refused " TARGET_FMT_lx " => " TARGET_FMT_plx
                  " %d %d\n", __func__, address, raddr, ctx->prot, ret);
    }

    return ret;
}

hwaddr booke206_tlb_to_page_size(CPUPPCState *env,
                                 ppcmas_tlb_t *tlb)
{
    int tlbm_size;

    tlbm_size = (tlb->mas1 & MAS1_TSIZE_MASK) >> MAS1_TSIZE_SHIFT;

    return 1024ULL << tlbm_size;
}

/* TLB check function for MAS based SoftTLBs */
int ppcmas_tlb_check(CPUPPCState *env, ppcmas_tlb_t *tlb,
                     hwaddr *raddrp, target_ulong address,
                     uint32_t pid)
{
    hwaddr mask;
    uint32_t tlb_pid;

    if (!msr_cm) {
        /* In 32bit mode we can only address 32bit EAs */
        address = (uint32_t)address;
    }

    /* Check valid flag */
    if (!(tlb->mas1 & MAS1_VALID)) {
        return -1;
    }

    mask = ~(booke206_tlb_to_page_size(env, tlb) - 1);
    LOG_SWTLB("%s: TLB ADDR=0x" TARGET_FMT_lx " PID=0x%x MAS1=0x%x MAS2=0x%"
              PRIx64 " mask=0x%" HWADDR_PRIx " MAS7_3=0x%" PRIx64 " MAS8=0x%"
              PRIx32 "\n", __func__, address, pid, tlb->mas1, tlb->mas2, mask,
              tlb->mas7_3, tlb->mas8);

    /* Check PID */
    tlb_pid = (tlb->mas1 & MAS1_TID_MASK) >> MAS1_TID_SHIFT;
    if (tlb_pid != 0 && tlb_pid != pid) {
        return -1;
    }

    /* Check effective address */
    if ((address & mask) != (tlb->mas2 & MAS2_EPN_MASK)) {
        return -1;
    }

    if (raddrp) {
        *raddrp = (tlb->mas7_3 & mask) | (address & ~mask);
    }

    return 0;
}

static bool is_epid_mmu(int mmu_idx)
{
    return mmu_idx == PPC_TLB_EPID_STORE || mmu_idx == PPC_TLB_EPID_LOAD;
}

static uint32_t mmubooke206_esr(int mmu_idx, MMUAccessType access_type)
{
    uint32_t esr = 0;
    if (access_type == MMU_DATA_STORE) {
        esr |= ESR_ST;
    }
    if (is_epid_mmu(mmu_idx)) {
        esr |= ESR_EPID;
    }
    return esr;
}

/*
 * Get EPID register given the mmu_idx.  If this is a regular load,
 * construct the EPID access bits from the current processor state.
 *
 * Get the effective AS and PR bits and the PID.  The PID is returned
 * only if EPID load is requested, otherwise the caller must detect
 * the correct EPID.  Return true if valid EPID is returned.
 */
static bool mmubooke206_get_as(CPUPPCState *env,
                               int mmu_idx, uint32_t *epid_out,
                               bool *as_out, bool *pr_out)
{
    if (is_epid_mmu(mmu_idx)) {
        uint32_t epidr;
        if (mmu_idx == PPC_TLB_EPID_STORE) {
            epidr = env->spr[SPR_BOOKE_EPSC];
        } else {
            epidr = env->spr[SPR_BOOKE_EPLC];
        }
        *epid_out = (epidr & EPID_EPID) >> EPID_EPID_SHIFT;
        *as_out = !!(epidr & EPID_EAS);
        *pr_out = !!(epidr & EPID_EPR);
        return true;
    } else {
        *as_out = msr_ds;
        *pr_out = msr_pr;
        return false;
    }
}

/* Check if the tlb found by hashing really matches */
static int mmubooke206_check_tlb(CPUPPCState *env, ppcmas_tlb_t *tlb,
                                 hwaddr *raddr, int *prot,
                                 target_ulong address,
                                 MMUAccessType access_type, int mmu_idx)
{
    int prot2 = 0;
    uint32_t epid;
    bool as, pr;
    bool use_epid = mmubooke206_get_as(env, mmu_idx, &epid, &as, &pr);

    if (!use_epid) {
        if (ppcmas_tlb_check(env, tlb, raddr, address,
                             env->spr[SPR_BOOKE_PID]) >= 0) {
            goto found_tlb;
        }

        if (env->spr[SPR_BOOKE_PID1] &&
            ppcmas_tlb_check(env, tlb, raddr, address,
                             env->spr[SPR_BOOKE_PID1]) >= 0) {
            goto found_tlb;
        }

        if (env->spr[SPR_BOOKE_PID2] &&
            ppcmas_tlb_check(env, tlb, raddr, address,
                             env->spr[SPR_BOOKE_PID2]) >= 0) {
            goto found_tlb;
        }
    } else {
        if (ppcmas_tlb_check(env, tlb, raddr, address, epid) >= 0) {
            goto found_tlb;
        }
    }

    LOG_SWTLB("%s: TLB entry not found\n", __func__);
    return -1;

found_tlb:

    if (pr) {
        if (tlb->mas7_3 & MAS3_UR) {
            prot2 |= PAGE_READ;
        }
        if (tlb->mas7_3 & MAS3_UW) {
            prot2 |= PAGE_WRITE;
        }
        if (tlb->mas7_3 & MAS3_UX) {
            prot2 |= PAGE_EXEC;
        }
    } else {
        if (tlb->mas7_3 & MAS3_SR) {
            prot2 |= PAGE_READ;
        }
        if (tlb->mas7_3 & MAS3_SW) {
            prot2 |= PAGE_WRITE;
        }
        if (tlb->mas7_3 & MAS3_SX) {
            prot2 |= PAGE_EXEC;
        }
    }

    /* Check the address space and permissions */
    if (access_type == MMU_INST_FETCH) {
        /* There is no way to fetch code using epid load */
        assert(!use_epid);
        as = msr_ir;
    }

    if (as != ((tlb->mas1 & MAS1_TS) >> MAS1_TS_SHIFT)) {
        LOG_SWTLB("%s: AS doesn't match\n", __func__);
        return -1;
    }

    *prot = prot2;
    if (prot2 & prot_for_access_type(access_type)) {
        LOG_SWTLB("%s: good TLB!\n", __func__);
        return 0;
    }

    LOG_SWTLB("%s: no prot match: %x\n", __func__, prot2);
    return access_type == MMU_INST_FETCH ? -3 : -2;
}

static int mmubooke206_get_physical_address(CPUPPCState *env, mmu_ctx_t *ctx,
                                            target_ulong address,
                                            MMUAccessType access_type,
                                            int mmu_idx)
{
    ppcmas_tlb_t *tlb;
    hwaddr raddr;
    int i, j, ret;

    ret = -1;
    raddr = (hwaddr)-1ULL;

    for (i = 0; i < BOOKE206_MAX_TLBN; i++) {
        int ways = booke206_tlb_ways(env, i);

        for (j = 0; j < ways; j++) {
            tlb = booke206_get_tlbm(env, i, address, j);
            if (!tlb) {
                continue;
            }
            ret = mmubooke206_check_tlb(env, tlb, &raddr, &ctx->prot, address,
                                        access_type, mmu_idx);
            if (ret != -1) {
                goto found_tlb;
            }
        }
    }

found_tlb:

    if (ret >= 0) {
        ctx->raddr = raddr;
        LOG_SWTLB("%s: access granted " TARGET_FMT_lx " => " TARGET_FMT_plx
                  " %d %d\n", __func__, address, ctx->raddr, ctx->prot,
                  ret);
    } else {
        LOG_SWTLB("%s: access refused " TARGET_FMT_lx " => " TARGET_FMT_plx
                  " %d %d\n", __func__, address, raddr, ctx->prot, ret);
    }

    return ret;
}

static const char *book3e_tsize_to_str[32] = {
    "1K", "2K", "4K", "8K", "16K", "32K", "64K", "128K", "256K", "512K",
    "1M", "2M", "4M", "8M", "16M", "32M", "64M", "128M", "256M", "512M",
    "1G", "2G", "4G", "8G", "16G", "32G", "64G", "128G", "256G", "512G",
    "1T", "2T"
};
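
/*
 * The strings above line up with booke206_tlb_to_page_size(), which
 * computes 1024ULL << TSIZE, i.e. page size = 1 KiB * 2^TSIZE.  As a
 * worked example, TSIZE = 10 gives 1024 << 10 = 1 MiB, which matches
 * the "1M" entry at index 10 of this table.
 */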

static void mmubooke_dump_mmu(CPUPPCState *env)
{
    ppcemb_tlb_t *entry;
    int i;

    if (kvm_enabled() && !env->kvm_sw_tlb) {
        qemu_printf("Cannot access KVM TLB\n");
        return;
    }

    qemu_printf("\nTLB:\n");
    qemu_printf("Effective Physical Size PID Prot "
                "Attr\n");

    entry = &env->tlb.tlbe[0];
    for (i = 0; i < env->nb_tlb; i++, entry++) {
        hwaddr ea, pa;
        target_ulong mask;
        uint64_t size = (uint64_t)entry->size;
        char size_buf[20];

        /* Check valid flag */
        if (!(entry->prot & PAGE_VALID)) {
            continue;
        }

        mask = ~(entry->size - 1);
        ea = entry->EPN & mask;
        pa = entry->RPN & mask;
        /* Extend the physical address to 36 bits */
        pa |= (hwaddr)(entry->RPN & 0xF) << 32;
        if (size >= 1 * MiB) {
            snprintf(size_buf, sizeof(size_buf), "%3" PRId64 "M", size / MiB);
        } else {
            snprintf(size_buf, sizeof(size_buf), "%3" PRId64 "k", size / KiB);
        }
        qemu_printf("0x%016" PRIx64 " 0x%016" PRIx64 " %s %-5u %08x %08x\n",
                    (uint64_t)ea, (uint64_t)pa, size_buf, (uint32_t)entry->PID,
                    entry->prot, entry->attr);
    }

}

static void mmubooke206_dump_one_tlb(CPUPPCState *env, int tlbn, int offset,
                                     int tlbsize)
{
    ppcmas_tlb_t *entry;
    int i;

    qemu_printf("\nTLB%d:\n", tlbn);
    qemu_printf("Effective Physical Size TID TS SRWX"
                " URWX WIMGE U0123\n");

    entry = &env->tlb.tlbm[offset];
    for (i = 0; i < tlbsize; i++, entry++) {
        hwaddr ea, pa, size;
        int tsize;

        if (!(entry->mas1 & MAS1_VALID)) {
            continue;
        }

        tsize = (entry->mas1 & MAS1_TSIZE_MASK) >> MAS1_TSIZE_SHIFT;
        size = 1024ULL << tsize;
        ea = entry->mas2 & ~(size - 1);
        pa = entry->mas7_3 & ~(size - 1);

        qemu_printf("0x%016" PRIx64 " 0x%016" PRIx64 " %4s %-5u %1u S%c%c%c"
                    "U%c%c%c %c%c%c%c%c U%c%c%c%c\n",
                    (uint64_t)ea, (uint64_t)pa,
                    book3e_tsize_to_str[tsize],
                    (entry->mas1 & MAS1_TID_MASK) >> MAS1_TID_SHIFT,
                    (entry->mas1 & MAS1_TS) >> MAS1_TS_SHIFT,
                    entry->mas7_3 & MAS3_SR ? 'R' : '-',
                    entry->mas7_3 & MAS3_SW ? 'W' : '-',
                    entry->mas7_3 & MAS3_SX ? 'X' : '-',
                    entry->mas7_3 & MAS3_UR ? 'R' : '-',
                    entry->mas7_3 & MAS3_UW ? 'W' : '-',
                    entry->mas7_3 & MAS3_UX ? 'X' : '-',
                    entry->mas2 & MAS2_W ? 'W' : '-',
                    entry->mas2 & MAS2_I ? 'I' : '-',
                    entry->mas2 & MAS2_M ? 'M' : '-',
                    entry->mas2 & MAS2_G ? 'G' : '-',
                    entry->mas2 & MAS2_E ? 'E' : '-',
                    entry->mas7_3 & MAS3_U0 ? '0' : '-',
                    entry->mas7_3 & MAS3_U1 ? '1' : '-',
                    entry->mas7_3 & MAS3_U2 ? '2' : '-',
                    entry->mas7_3 & MAS3_U3 ? '3' : '-');
    }
}

static void mmubooke206_dump_mmu(CPUPPCState *env)
{
    int offset = 0;
    int i;

    if (kvm_enabled() && !env->kvm_sw_tlb) {
        qemu_printf("Cannot access KVM TLB\n");
        return;
    }

    for (i = 0; i < BOOKE206_MAX_TLBN; i++) {
        int size = booke206_tlb_size(env, i);

        if (size == 0) {
            continue;
        }

        mmubooke206_dump_one_tlb(env, i, offset, size);
        offset += size;
    }
}

static void mmu6xx_dump_BATs(CPUPPCState *env, int type)
{
    target_ulong *BATlt, *BATut, *BATu, *BATl;
    target_ulong BEPIl, BEPIu, bl;
    int i;

    switch (type) {
    case ACCESS_CODE:
        BATlt = env->IBAT[1];
        BATut = env->IBAT[0];
        break;
    default:
        BATlt = env->DBAT[1];
        BATut = env->DBAT[0];
        break;
    }

    for (i = 0; i < env->nb_BATs; i++) {
        BATu = &BATut[i];
        BATl = &BATlt[i];
        BEPIu = *BATu & 0xF0000000;
        BEPIl = *BATu & 0x0FFE0000;
        bl = (*BATu & 0x00001FFC) << 15;
        qemu_printf("%s BAT%d BATu " TARGET_FMT_lx
                    " BATl " TARGET_FMT_lx "\n\t" TARGET_FMT_lx " "
                    TARGET_FMT_lx " " TARGET_FMT_lx "\n",
                    type == ACCESS_CODE ? "code" : "data", i,
                    *BATu, *BATl, BEPIu, BEPIl, bl);
    }
}

static void mmu6xx_dump_mmu(CPUPPCState *env)
{
    PowerPCCPU *cpu = env_archcpu(env);
    ppc6xx_tlb_t *tlb;
    target_ulong sr;
    int type, way, entry, i;

    qemu_printf("HTAB base = 0x%"HWADDR_PRIx"\n", ppc_hash32_hpt_base(cpu));
    qemu_printf("HTAB mask = 0x%"HWADDR_PRIx"\n", ppc_hash32_hpt_mask(cpu));

    qemu_printf("\nSegment registers:\n");
    for (i = 0; i < 32; i++) {
        sr = env->sr[i];
        if (sr & 0x80000000) {
            qemu_printf("%02d T=%d Ks=%d Kp=%d BUID=0x%03x "
                        "CNTLR_SPEC=0x%05x\n", i,
                        sr & 0x80000000 ? 1 : 0, sr & 0x40000000 ? 1 : 0,
                        sr & 0x20000000 ? 1 : 0, (uint32_t)((sr >> 20) & 0x1FF),
                        (uint32_t)(sr & 0xFFFFF));
        } else {
            qemu_printf("%02d T=%d Ks=%d Kp=%d N=%d VSID=0x%06x\n", i,
                        sr & 0x80000000 ? 1 : 0, sr & 0x40000000 ? 1 : 0,
                        sr & 0x20000000 ? 1 : 0, sr & 0x10000000 ? 1 : 0,
                        (uint32_t)(sr & 0x00FFFFFF));
        }
    }

    qemu_printf("\nBATs:\n");
    mmu6xx_dump_BATs(env, ACCESS_INT);
    mmu6xx_dump_BATs(env, ACCESS_CODE);

    if (env->id_tlbs != 1) {
        qemu_printf("ERROR: 6xx MMU should have separate TLBs"
                    " for code and data\n");
    }

    qemu_printf("\nTLBs [EPN EPN + SIZE]\n");

    for (type = 0; type < 2; type++) {
        for (way = 0; way < env->nb_ways; way++) {
            for (entry = env->nb_tlb * type + env->tlb_per_way * way;
                 entry < (env->nb_tlb * type + env->tlb_per_way * (way + 1));
                 entry++) {

                tlb = &env->tlb.tlb6[entry];
                qemu_printf("%s TLB %02d/%02d way:%d %s ["
                            TARGET_FMT_lx " " TARGET_FMT_lx "]\n",
                            type ? "code" : "data", entry % env->nb_tlb,
                            env->nb_tlb, way,
                            pte_is_valid(tlb->pte0) ? "valid" : "inval",
                            tlb->EPN, tlb->EPN + TARGET_PAGE_SIZE);
            }
        }
    }
}

void dump_mmu(CPUPPCState *env)
{
    switch (env->mmu_model) {
    case POWERPC_MMU_BOOKE:
        mmubooke_dump_mmu(env);
        break;
    case POWERPC_MMU_BOOKE206:
        mmubooke206_dump_mmu(env);
        break;
    case POWERPC_MMU_SOFT_6xx:
    case POWERPC_MMU_SOFT_74xx:
        mmu6xx_dump_mmu(env);
        break;
#if defined(TARGET_PPC64)
    case POWERPC_MMU_64B:
    case POWERPC_MMU_2_03:
    case POWERPC_MMU_2_06:
    case POWERPC_MMU_2_07:
        dump_slb(env_archcpu(env));
        break;
    case POWERPC_MMU_3_00:
        if (ppc64_v3_radix(env_archcpu(env))) {
            qemu_log_mask(LOG_UNIMP, "%s: the PPC64 MMU is unsupported\n",
                          __func__);
        } else {
            dump_slb(env_archcpu(env));
        }
        break;
#endif
    default:
        qemu_log_mask(LOG_UNIMP, "%s: unimplemented\n", __func__);
    }
}

static int check_physical(CPUPPCState *env, mmu_ctx_t *ctx, target_ulong eaddr,
                          MMUAccessType access_type)
{
    int in_plb, ret;

    ctx->raddr = eaddr;
    ctx->prot = PAGE_READ | PAGE_EXEC;
    ret = 0;
    switch (env->mmu_model) {
    case POWERPC_MMU_SOFT_6xx:
    case POWERPC_MMU_SOFT_74xx:
    case POWERPC_MMU_SOFT_4xx:
    case POWERPC_MMU_REAL:
    case POWERPC_MMU_BOOKE:
        ctx->prot |= PAGE_WRITE;
        break;

    case POWERPC_MMU_SOFT_4xx_Z:
        if (unlikely(msr_pe != 0)) {
            /*
             * 403 family adds some particular protections, using
             * PBL/PBU registers for accesses with no translation.
             */
            in_plb =
                /* Check PLB validity */
                (env->pb[0] < env->pb[1] &&
                 /* and address in plb area */
                 eaddr >= env->pb[0] && eaddr < env->pb[1]) ||
                (env->pb[2] < env->pb[3] &&
                 eaddr >= env->pb[2] && eaddr < env->pb[3]) ? 1 : 0;
            if (in_plb ^ msr_px) {
                /* Access in protected area */
                if (access_type == MMU_DATA_STORE) {
                    /* Access is not allowed */
                    ret = -2;
                }
            } else {
                /* Read-write access is allowed */
                ctx->prot |= PAGE_WRITE;
            }
        }
        break;

    default:
        /* Caller's checks mean we should never get here for other models */
        abort();
        return -1;
    }

    return ret;
}

int get_physical_address_wtlb(CPUPPCState *env, mmu_ctx_t *ctx,
                              target_ulong eaddr,
                              MMUAccessType access_type, int type,
                              int mmu_idx)
{
    int ret = -1;
    bool real_mode = (type == ACCESS_CODE && msr_ir == 0)
        || (type != ACCESS_CODE && msr_dr == 0);

    switch (env->mmu_model) {
    case POWERPC_MMU_SOFT_6xx:
    case POWERPC_MMU_SOFT_74xx:
        if (real_mode) {
            ret = check_physical(env, ctx, eaddr, access_type);
        } else {
            /* Try to find a BAT */
            if (env->nb_BATs != 0) {
                ret = get_bat_6xx_tlb(env, ctx, eaddr, access_type);
            }
            if (ret < 0) {
                /* We didn't match any BAT entry or don't have BATs */
                ret = get_segment_6xx_tlb(env, ctx, eaddr, access_type, type);
            }
        }
        break;

    case POWERPC_MMU_SOFT_4xx:
    case POWERPC_MMU_SOFT_4xx_Z:
        if (real_mode) {
            ret = check_physical(env, ctx, eaddr, access_type);
        } else {
            ret = mmu40x_get_physical_address(env, ctx, eaddr, access_type);
        }
        break;
    case POWERPC_MMU_BOOKE:
        ret = mmubooke_get_physical_address(env, ctx, eaddr, access_type);
        break;
    case POWERPC_MMU_BOOKE206:
        ret = mmubooke206_get_physical_address(env, ctx, eaddr, access_type,
                                               mmu_idx);
        break;
    case POWERPC_MMU_MPC8xx:
        /* XXX: TODO */
        cpu_abort(env_cpu(env), "MPC8xx MMU model is not implemented\n");
        break;
    case POWERPC_MMU_REAL:
        if (real_mode) {
            ret = check_physical(env, ctx, eaddr, access_type);
        } else {
            cpu_abort(env_cpu(env),
                      "PowerPC in real mode does not do any translation\n");
        }
        return -1;
    default:
        cpu_abort(env_cpu(env), "Unknown or invalid MMU model\n");
        return -1;
    }

    return ret;
}
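
/*
 * The helper below pre-seeds the MAS registers on a BookE 2.06 TLB
 * miss: MAS0-MAS2 take their defaults from MAS4 (TLB select, page
 * size, WIMGE), the faulting EPN, address space and PID/EPID are
 * filled in, and a round-robin victim way is chosen for MAS0, so the
 * guest miss handler only has to add the RPN and permissions before
 * writing the entry.  This is a summary of the code below, not of the
 * architecture documents.
 */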

static void booke206_update_mas_tlb_miss(CPUPPCState *env,
                                         target_ulong address,
                                         MMUAccessType access_type,
                                         int mmu_idx)
{
    uint32_t epid;
    bool as, pr;
    uint32_t missed_tid = 0;
    bool use_epid = mmubooke206_get_as(env, mmu_idx, &epid, &as, &pr);

    if (access_type == MMU_INST_FETCH) {
        as = msr_ir;
    }
    env->spr[SPR_BOOKE_MAS0] = env->spr[SPR_BOOKE_MAS4] & MAS4_TLBSELD_MASK;
    env->spr[SPR_BOOKE_MAS1] = env->spr[SPR_BOOKE_MAS4] & MAS4_TSIZED_MASK;
    env->spr[SPR_BOOKE_MAS2] = env->spr[SPR_BOOKE_MAS4] & MAS4_WIMGED_MASK;
    env->spr[SPR_BOOKE_MAS3] = 0;
    env->spr[SPR_BOOKE_MAS6] = 0;
    env->spr[SPR_BOOKE_MAS7] = 0;

    /* AS */
    if (as) {
        env->spr[SPR_BOOKE_MAS1] |= MAS1_TS;
        env->spr[SPR_BOOKE_MAS6] |= MAS6_SAS;
    }

    env->spr[SPR_BOOKE_MAS1] |= MAS1_VALID;
    env->spr[SPR_BOOKE_MAS2] |= address & MAS2_EPN_MASK;

    if (!use_epid) {
        switch (env->spr[SPR_BOOKE_MAS4] & MAS4_TIDSELD_PIDZ) {
        case MAS4_TIDSELD_PID0:
            missed_tid = env->spr[SPR_BOOKE_PID];
            break;
        case MAS4_TIDSELD_PID1:
            missed_tid = env->spr[SPR_BOOKE_PID1];
            break;
        case MAS4_TIDSELD_PID2:
            missed_tid = env->spr[SPR_BOOKE_PID2];
            break;
        }
        env->spr[SPR_BOOKE_MAS6] |= env->spr[SPR_BOOKE_PID] << 16;
    } else {
        missed_tid = epid;
        env->spr[SPR_BOOKE_MAS6] |= missed_tid << 16;
    }
    env->spr[SPR_BOOKE_MAS1] |= (missed_tid << MAS1_TID_SHIFT);

    /* next victim logic */
    env->spr[SPR_BOOKE_MAS0] |= env->last_way << MAS0_ESEL_SHIFT;
    env->last_way++;
    env->last_way &= booke206_tlb_ways(env, 0) - 1;
    env->spr[SPR_BOOKE_MAS0] |= env->last_way << MAS0_NV_SHIFT;
}
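
/*
 * Return codes used by the translation helpers above and decoded by
 * ppc_jumbo_xlate() below (a summary of this file's conventions, not
 * an architectural classification):
 *
 *    0  translation succeeded and ctx->raddr/ctx->prot are valid
 *   -1  no matching BAT/TLB/PTE entry was found
 *   -2  a matching entry was found but the access rights forbid it
 *   -3  instruction fetch refused by no-execute/execute permissions
 *   -4  access to a direct-store segment that cannot be performed and
 *       must fault
 */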

/* Perform address translation */
/* TODO: Split this by mmu_model. */
static bool ppc_jumbo_xlate(PowerPCCPU *cpu, vaddr eaddr,
                            MMUAccessType access_type,
                            hwaddr *raddrp, int *psizep, int *protp,
                            int mmu_idx, bool guest_visible)
{
    CPUState *cs = CPU(cpu);
    CPUPPCState *env = &cpu->env;
    mmu_ctx_t ctx;
    int type;
    int ret;

    if (access_type == MMU_INST_FETCH) {
        /* code access */
        type = ACCESS_CODE;
    } else if (guest_visible) {
        /* data access */
        type = env->access_type;
    } else {
        type = ACCESS_INT;
    }

    ret = get_physical_address_wtlb(env, &ctx, eaddr, access_type,
                                    type, mmu_idx);
    if (ret == 0) {
        *raddrp = ctx.raddr;
        *protp = ctx.prot;
        *psizep = TARGET_PAGE_BITS;
        return true;
    }

    if (guest_visible) {
        LOG_MMU_STATE(cs);
        if (type == ACCESS_CODE) {
            switch (ret) {
            case -1:
                /* No matches in page tables or TLB */
                switch (env->mmu_model) {
                case POWERPC_MMU_SOFT_6xx:
                    cs->exception_index = POWERPC_EXCP_IFTLB;
                    env->error_code = 1 << 18;
                    env->spr[SPR_IMISS] = eaddr;
                    env->spr[SPR_ICMP] = 0x80000000 | ctx.ptem;
                    goto tlb_miss;
                case POWERPC_MMU_SOFT_74xx:
                    cs->exception_index = POWERPC_EXCP_IFTLB;
                    goto tlb_miss_74xx;
                case POWERPC_MMU_SOFT_4xx:
                case POWERPC_MMU_SOFT_4xx_Z:
                    cs->exception_index = POWERPC_EXCP_ITLB;
                    env->error_code = 0;
                    env->spr[SPR_40x_DEAR] = eaddr;
                    env->spr[SPR_40x_ESR] = 0x00000000;
                    break;
                case POWERPC_MMU_BOOKE206:
                    booke206_update_mas_tlb_miss(env, eaddr, 2, mmu_idx);
                    /* fall through */
                case POWERPC_MMU_BOOKE:
                    cs->exception_index = POWERPC_EXCP_ITLB;
                    env->error_code = 0;
                    env->spr[SPR_BOOKE_DEAR] = eaddr;
                    env->spr[SPR_BOOKE_ESR] = mmubooke206_esr(mmu_idx,
                                                              MMU_DATA_LOAD);
                    break;
                case POWERPC_MMU_MPC8xx:
                    cpu_abort(cs, "MPC8xx MMU model is not implemented\n");
                case POWERPC_MMU_REAL:
                    cpu_abort(cs, "PowerPC in real mode should never raise "
                              "any MMU exceptions\n");
                default:
                    cpu_abort(cs, "Unknown or invalid MMU model\n");
                }
                break;
            case -2:
                /* Access rights violation */
                cs->exception_index = POWERPC_EXCP_ISI;
                env->error_code = 0x08000000;
                break;
            case -3:
                /* No execute protection violation */
                if ((env->mmu_model == POWERPC_MMU_BOOKE) ||
                    (env->mmu_model == POWERPC_MMU_BOOKE206)) {
                    env->spr[SPR_BOOKE_ESR] = 0x00000000;
                }
                cs->exception_index = POWERPC_EXCP_ISI;
                env->error_code = 0x10000000;
                break;
            case -4:
                /* Direct store exception */
                /* No code fetch is allowed in direct-store areas */
                cs->exception_index = POWERPC_EXCP_ISI;
                env->error_code = 0x10000000;
                break;
            }
        } else {
            switch (ret) {
            case -1:
                /* No matches in page tables or TLB */
                switch (env->mmu_model) {
                case POWERPC_MMU_SOFT_6xx:
                    if (access_type == MMU_DATA_STORE) {
                        cs->exception_index = POWERPC_EXCP_DSTLB;
                        env->error_code = 1 << 16;
                    } else {
                        cs->exception_index = POWERPC_EXCP_DLTLB;
                        env->error_code = 0;
                    }
                    env->spr[SPR_DMISS] = eaddr;
                    env->spr[SPR_DCMP] = 0x80000000 | ctx.ptem;
                tlb_miss:
                    env->error_code |= ctx.key << 19;
                    env->spr[SPR_HASH1] = ppc_hash32_hpt_base(cpu) +
                        get_pteg_offset32(cpu, ctx.hash[0]);
                    env->spr[SPR_HASH2] = ppc_hash32_hpt_base(cpu) +
                        get_pteg_offset32(cpu, ctx.hash[1]);
                    break;
                case POWERPC_MMU_SOFT_74xx:
                    if (access_type == MMU_DATA_STORE) {
                        cs->exception_index = POWERPC_EXCP_DSTLB;
                    } else {
                        cs->exception_index = POWERPC_EXCP_DLTLB;
                    }
                tlb_miss_74xx:
                    /* Implement LRU algorithm */
                    env->error_code = ctx.key << 19;
                    env->spr[SPR_TLBMISS] = (eaddr & ~((target_ulong)0x3)) |
                        ((env->last_way + 1) & (env->nb_ways - 1));
                    env->spr[SPR_PTEHI] = 0x80000000 | ctx.ptem;
                    break;
                case POWERPC_MMU_SOFT_4xx:
                case POWERPC_MMU_SOFT_4xx_Z:
                    cs->exception_index = POWERPC_EXCP_DTLB;
                    env->error_code = 0;
                    env->spr[SPR_40x_DEAR] = eaddr;
                    if (access_type == MMU_DATA_STORE) {
                        env->spr[SPR_40x_ESR] = 0x00800000;
                    } else {
                        env->spr[SPR_40x_ESR] = 0x00000000;
                    }
                    break;
                case POWERPC_MMU_MPC8xx:
                    /* XXX: TODO */
                    cpu_abort(cs, "MPC8xx MMU model is not implemented\n");
                case POWERPC_MMU_BOOKE206:
                    booke206_update_mas_tlb_miss(env, eaddr, access_type,
                                                 mmu_idx);
                    /* fall through */
                case POWERPC_MMU_BOOKE:
                    cs->exception_index = POWERPC_EXCP_DTLB;
                    env->error_code = 0;
                    env->spr[SPR_BOOKE_DEAR] = eaddr;
                    env->spr[SPR_BOOKE_ESR] = mmubooke206_esr(mmu_idx,
                                                              access_type);
                    break;
                case POWERPC_MMU_REAL:
                    cpu_abort(cs, "PowerPC in real mode should never raise "
                              "any MMU exceptions\n");
                default:
                    cpu_abort(cs, "Unknown or invalid MMU model\n");
                }
                break;
            case -2:
                /* Access rights violation */
                cs->exception_index = POWERPC_EXCP_DSI;
                env->error_code = 0;
                if (env->mmu_model == POWERPC_MMU_SOFT_4xx
                    || env->mmu_model == POWERPC_MMU_SOFT_4xx_Z) {
                    env->spr[SPR_40x_DEAR] = eaddr;
                    if (access_type == MMU_DATA_STORE) {
                        env->spr[SPR_40x_ESR] |= 0x00800000;
                    }
                } else if ((env->mmu_model == POWERPC_MMU_BOOKE) ||
                           (env->mmu_model == POWERPC_MMU_BOOKE206)) {
                    env->spr[SPR_BOOKE_DEAR] = eaddr;
                    env->spr[SPR_BOOKE_ESR] = mmubooke206_esr(mmu_idx,
                                                              access_type);
                } else {
                    env->spr[SPR_DAR] = eaddr;
                    if (access_type == MMU_DATA_STORE) {
                        env->spr[SPR_DSISR] = 0x0A000000;
                    } else {
                        env->spr[SPR_DSISR] = 0x08000000;
                    }
                }
                break;
            case -4:
                /* Direct store exception */
                switch (type) {
                case ACCESS_FLOAT:
                    /* Floating point load/store */
                    cs->exception_index = POWERPC_EXCP_ALIGN;
                    env->error_code = POWERPC_EXCP_ALIGN_FP;
                    env->spr[SPR_DAR] = eaddr;
                    break;
                case ACCESS_RES:
                    /* lwarx, ldarx or stwcx. */
                    cs->exception_index = POWERPC_EXCP_DSI;
                    env->error_code = 0;
                    env->spr[SPR_DAR] = eaddr;
                    if (access_type == MMU_DATA_STORE) {
                        env->spr[SPR_DSISR] = 0x06000000;
                    } else {
                        env->spr[SPR_DSISR] = 0x04000000;
                    }
                    break;
                case ACCESS_EXT:
                    /* eciwx or ecowx */
                    cs->exception_index = POWERPC_EXCP_DSI;
                    env->error_code = 0;
                    env->spr[SPR_DAR] = eaddr;
                    if (access_type == MMU_DATA_STORE) {
                        env->spr[SPR_DSISR] = 0x06100000;
                    } else {
                        env->spr[SPR_DSISR] = 0x04100000;
                    }
                    break;
                default:
                    printf("DSI: invalid exception (%d)\n", ret);
                    cs->exception_index = POWERPC_EXCP_PROGRAM;
                    env->error_code =
                        POWERPC_EXCP_INVAL | POWERPC_EXCP_INVAL_INVAL;
                    env->spr[SPR_DAR] = eaddr;
                    break;
                }
                break;
            }
        }
    }
    return false;
}

/*****************************************************************************/

bool ppc_xlate(PowerPCCPU *cpu, vaddr eaddr, MMUAccessType access_type,
               hwaddr *raddrp, int *psizep, int *protp,
               int mmu_idx, bool guest_visible)
{
    switch (cpu->env.mmu_model) {
#if defined(TARGET_PPC64)
    case POWERPC_MMU_3_00:
        if (ppc64_v3_radix(cpu)) {
            return ppc_radix64_xlate(cpu, eaddr, access_type, raddrp,
                                     psizep, protp, mmu_idx, guest_visible);
        }
        /* fall through */
    case POWERPC_MMU_64B:
    case POWERPC_MMU_2_03:
    case POWERPC_MMU_2_06:
    case POWERPC_MMU_2_07:
        return ppc_hash64_xlate(cpu, eaddr, access_type,
                                raddrp, psizep, protp, mmu_idx, guest_visible);
#endif

    case POWERPC_MMU_32B:
    case POWERPC_MMU_601:
        return ppc_hash32_xlate(cpu, eaddr, access_type, raddrp,
                                psizep, protp, mmu_idx, guest_visible);

    default:
        return ppc_jumbo_xlate(cpu, eaddr, access_type, raddrp,
                               psizep, protp, mmu_idx, guest_visible);
    }
}

hwaddr ppc_cpu_get_phys_page_debug(CPUState *cs, vaddr addr)
{
    PowerPCCPU *cpu = POWERPC_CPU(cs);
    hwaddr raddr;
    int s, p;

    /*
     * Some MMUs have separate TLBs for code and data. If we only
     * try an MMU_DATA_LOAD, we may not be able to read instructions
     * mapped by code TLBs, so we also try a MMU_INST_FETCH.
     */
    if (ppc_xlate(cpu, addr, MMU_DATA_LOAD, &raddr, &s, &p,
                  cpu_mmu_index(&cpu->env, false), false) ||
        ppc_xlate(cpu, addr, MMU_INST_FETCH, &raddr, &s, &p,
                  cpu_mmu_index(&cpu->env, true), false)) {
        return raddr & TARGET_PAGE_MASK;
    }
    return -1;
}