1 /* 2 * PowerPC MMU, TLB, SLB and BAT emulation helpers for QEMU. 3 * 4 * Copyright (c) 2003-2007 Jocelyn Mayer 5 * 6 * This library is free software; you can redistribute it and/or 7 * modify it under the terms of the GNU Lesser General Public 8 * License as published by the Free Software Foundation; either 9 * version 2.1 of the License, or (at your option) any later version. 10 * 11 * This library is distributed in the hope that it will be useful, 12 * but WITHOUT ANY WARRANTY; without even the implied warranty of 13 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU 14 * Lesser General Public License for more details. 15 * 16 * You should have received a copy of the GNU Lesser General Public 17 * License along with this library; if not, see <http://www.gnu.org/licenses/>. 18 */ 19 20 #include "qemu/osdep.h" 21 #include "qemu/units.h" 22 #include "cpu.h" 23 #include "sysemu/kvm.h" 24 #include "kvm_ppc.h" 25 #include "mmu-hash64.h" 26 #include "mmu-hash32.h" 27 #include "exec/exec-all.h" 28 #include "exec/log.h" 29 #include "helper_regs.h" 30 #include "qemu/error-report.h" 31 #include "qemu/main-loop.h" 32 #include "qemu/qemu-print.h" 33 #include "internal.h" 34 #include "mmu-book3s-v3.h" 35 #include "mmu-radix64.h" 36 37 /* #define DUMP_PAGE_TABLES */ 38 39 void ppc_store_sdr1(CPUPPCState *env, target_ulong value) 40 { 41 PowerPCCPU *cpu = env_archcpu(env); 42 qemu_log_mask(CPU_LOG_MMU, "%s: " TARGET_FMT_lx "\n", __func__, value); 43 assert(!cpu->env.has_hv_mode || !cpu->vhyp); 44 #if defined(TARGET_PPC64) 45 if (mmu_is_64bit(env->mmu_model)) { 46 target_ulong sdr_mask = SDR_64_HTABORG | SDR_64_HTABSIZE; 47 target_ulong htabsize = value & SDR_64_HTABSIZE; 48 49 if (value & ~sdr_mask) { 50 qemu_log_mask(LOG_GUEST_ERROR, "Invalid bits 0x"TARGET_FMT_lx 51 " set in SDR1", value & ~sdr_mask); 52 value &= sdr_mask; 53 } 54 if (htabsize > 28) { 55 qemu_log_mask(LOG_GUEST_ERROR, "Invalid HTABSIZE 0x" TARGET_FMT_lx 56 " stored in SDR1", htabsize); 57 return; 58 } 59 } 60 #endif /* defined(TARGET_PPC64) */ 61 /* FIXME: Should check for valid HTABMASK values in 32-bit case */ 62 env->spr[SPR_SDR1] = value; 63 } 64 65 /*****************************************************************************/ 66 /* PowerPC MMU emulation */ 67 68 static int pp_check(int key, int pp, int nx) 69 { 70 int access; 71 72 /* Compute access rights */ 73 access = 0; 74 if (key == 0) { 75 switch (pp) { 76 case 0x0: 77 case 0x1: 78 case 0x2: 79 access |= PAGE_WRITE; 80 /* fall through */ 81 case 0x3: 82 access |= PAGE_READ; 83 break; 84 } 85 } else { 86 switch (pp) { 87 case 0x0: 88 access = 0; 89 break; 90 case 0x1: 91 case 0x3: 92 access = PAGE_READ; 93 break; 94 case 0x2: 95 access = PAGE_READ | PAGE_WRITE; 96 break; 97 } 98 } 99 if (nx == 0) { 100 access |= PAGE_EXEC; 101 } 102 103 return access; 104 } 105 106 static int check_prot(int prot, MMUAccessType access_type) 107 { 108 return prot & prot_for_access_type(access_type) ? 0 : -2; 109 } 110 111 int ppc6xx_tlb_getnum(CPUPPCState *env, target_ulong eaddr, 112 int way, int is_code) 113 { 114 int nr; 115 116 /* Select TLB num in a way from address */ 117 nr = (eaddr >> TARGET_PAGE_BITS) & (env->tlb_per_way - 1); 118 /* Select TLB way */ 119 nr += env->tlb_per_way * way; 120 /* 6xx have separate TLBs for instructions and data */ 121 if (is_code && env->id_tlbs == 1) { 122 nr += env->nb_tlb; 123 } 124 125 return nr; 126 } 127 128 static int ppc6xx_tlb_pte_check(mmu_ctx_t *ctx, target_ulong pte0, 129 target_ulong pte1, int h, 130 MMUAccessType access_type) 131 { 132 target_ulong ptem, mmask; 133 int access, ret, pteh, ptev, pp; 134 135 ret = -1; 136 /* Check validity and table match */ 137 ptev = pte_is_valid(pte0); 138 pteh = (pte0 >> 6) & 1; 139 if (ptev && h == pteh) { 140 /* Check vsid & api */ 141 ptem = pte0 & PTE_PTEM_MASK; 142 mmask = PTE_CHECK_MASK; 143 pp = pte1 & 0x00000003; 144 if (ptem == ctx->ptem) { 145 if (ctx->raddr != (hwaddr)-1ULL) { 146 /* all matches should have equal RPN, WIMG & PP */ 147 if ((ctx->raddr & mmask) != (pte1 & mmask)) { 148 qemu_log_mask(CPU_LOG_MMU, "Bad RPN/WIMG/PP\n"); 149 return -3; 150 } 151 } 152 /* Compute access rights */ 153 access = pp_check(ctx->key, pp, ctx->nx); 154 /* Keep the matching PTE information */ 155 ctx->raddr = pte1; 156 ctx->prot = access; 157 ret = check_prot(ctx->prot, access_type); 158 if (ret == 0) { 159 /* Access granted */ 160 qemu_log_mask(CPU_LOG_MMU, "PTE access granted !\n"); 161 } else { 162 /* Access right violation */ 163 qemu_log_mask(CPU_LOG_MMU, "PTE access rejected\n"); 164 } 165 } 166 } 167 168 return ret; 169 } 170 171 static int pte_update_flags(mmu_ctx_t *ctx, target_ulong *pte1p, 172 int ret, MMUAccessType access_type) 173 { 174 int store = 0; 175 176 /* Update page flags */ 177 if (!(*pte1p & 0x00000100)) { 178 /* Update accessed flag */ 179 *pte1p |= 0x00000100; 180 store = 1; 181 } 182 if (!(*pte1p & 0x00000080)) { 183 if (access_type == MMU_DATA_STORE && ret == 0) { 184 /* Update changed flag */ 185 *pte1p |= 0x00000080; 186 store = 1; 187 } else { 188 /* Force page fault for first write access */ 189 ctx->prot &= ~PAGE_WRITE; 190 } 191 } 192 193 return store; 194 } 195 196 /* Software driven TLB helpers */ 197 198 static int ppc6xx_tlb_check(CPUPPCState *env, mmu_ctx_t *ctx, 199 target_ulong eaddr, MMUAccessType access_type) 200 { 201 ppc6xx_tlb_t *tlb; 202 int nr, best, way; 203 int ret; 204 205 best = -1; 206 ret = -1; /* No TLB found */ 207 for (way = 0; way < env->nb_ways; way++) { 208 nr = ppc6xx_tlb_getnum(env, eaddr, way, access_type == MMU_INST_FETCH); 209 tlb = &env->tlb.tlb6[nr]; 210 /* This test "emulates" the PTE index match for hardware TLBs */ 211 if ((eaddr & TARGET_PAGE_MASK) != tlb->EPN) { 212 qemu_log_mask(CPU_LOG_MMU, "TLB %d/%d %s [" TARGET_FMT_lx 213 " " TARGET_FMT_lx "] <> " TARGET_FMT_lx "\n", 214 nr, env->nb_tlb, 215 pte_is_valid(tlb->pte0) ? "valid" : "inval", 216 tlb->EPN, tlb->EPN + TARGET_PAGE_SIZE, eaddr); 217 continue; 218 } 219 qemu_log_mask(CPU_LOG_MMU, "TLB %d/%d %s " TARGET_FMT_lx " <> " 220 TARGET_FMT_lx " " TARGET_FMT_lx " %c %c\n", 221 nr, env->nb_tlb, 222 pte_is_valid(tlb->pte0) ? "valid" : "inval", 223 tlb->EPN, eaddr, tlb->pte1, 224 access_type == MMU_DATA_STORE ? 'S' : 'L', 225 access_type == MMU_INST_FETCH ? 'I' : 'D'); 226 switch (ppc6xx_tlb_pte_check(ctx, tlb->pte0, tlb->pte1, 227 0, access_type)) { 228 case -3: 229 /* TLB inconsistency */ 230 return -1; 231 case -2: 232 /* Access violation */ 233 ret = -2; 234 best = nr; 235 break; 236 case -1: 237 default: 238 /* No match */ 239 break; 240 case 0: 241 /* access granted */ 242 /* 243 * XXX: we should go on looping to check all TLBs 244 * consistency but we can speed-up the whole thing as 245 * the result would be undefined if TLBs are not 246 * consistent. 247 */ 248 ret = 0; 249 best = nr; 250 goto done; 251 } 252 } 253 if (best != -1) { 254 done: 255 qemu_log_mask(CPU_LOG_MMU, "found TLB at addr " HWADDR_FMT_plx 256 " prot=%01x ret=%d\n", 257 ctx->raddr & TARGET_PAGE_MASK, ctx->prot, ret); 258 /* Update page flags */ 259 pte_update_flags(ctx, &env->tlb.tlb6[best].pte1, ret, access_type); 260 } 261 262 return ret; 263 } 264 265 /* Perform BAT hit & translation */ 266 static inline void bat_size_prot(CPUPPCState *env, target_ulong *blp, 267 int *validp, int *protp, target_ulong *BATu, 268 target_ulong *BATl) 269 { 270 target_ulong bl; 271 int pp, valid, prot; 272 273 bl = (*BATu & 0x00001FFC) << 15; 274 valid = 0; 275 prot = 0; 276 if ((!FIELD_EX64(env->msr, MSR, PR) && (*BATu & 0x00000002)) || 277 (FIELD_EX64(env->msr, MSR, PR) && (*BATu & 0x00000001))) { 278 valid = 1; 279 pp = *BATl & 0x00000003; 280 if (pp != 0) { 281 prot = PAGE_READ | PAGE_EXEC; 282 if (pp == 0x2) { 283 prot |= PAGE_WRITE; 284 } 285 } 286 } 287 *blp = bl; 288 *validp = valid; 289 *protp = prot; 290 } 291 292 static int get_bat_6xx_tlb(CPUPPCState *env, mmu_ctx_t *ctx, 293 target_ulong virtual, MMUAccessType access_type) 294 { 295 target_ulong *BATlt, *BATut, *BATu, *BATl; 296 target_ulong BEPIl, BEPIu, bl; 297 int i, valid, prot; 298 int ret = -1; 299 bool ifetch = access_type == MMU_INST_FETCH; 300 301 qemu_log_mask(CPU_LOG_MMU, "%s: %cBAT v " TARGET_FMT_lx "\n", __func__, 302 ifetch ? 'I' : 'D', virtual); 303 if (ifetch) { 304 BATlt = env->IBAT[1]; 305 BATut = env->IBAT[0]; 306 } else { 307 BATlt = env->DBAT[1]; 308 BATut = env->DBAT[0]; 309 } 310 for (i = 0; i < env->nb_BATs; i++) { 311 BATu = &BATut[i]; 312 BATl = &BATlt[i]; 313 BEPIu = *BATu & 0xF0000000; 314 BEPIl = *BATu & 0x0FFE0000; 315 bat_size_prot(env, &bl, &valid, &prot, BATu, BATl); 316 qemu_log_mask(CPU_LOG_MMU, "%s: %cBAT%d v " TARGET_FMT_lx " BATu " 317 TARGET_FMT_lx " BATl " TARGET_FMT_lx "\n", __func__, 318 ifetch ? 'I' : 'D', i, virtual, *BATu, *BATl); 319 if ((virtual & 0xF0000000) == BEPIu && 320 ((virtual & 0x0FFE0000) & ~bl) == BEPIl) { 321 /* BAT matches */ 322 if (valid != 0) { 323 /* Get physical address */ 324 ctx->raddr = (*BATl & 0xF0000000) | 325 ((virtual & 0x0FFE0000 & bl) | (*BATl & 0x0FFE0000)) | 326 (virtual & 0x0001F000); 327 /* Compute access rights */ 328 ctx->prot = prot; 329 ret = check_prot(ctx->prot, access_type); 330 if (ret == 0) { 331 qemu_log_mask(CPU_LOG_MMU, "BAT %d match: r " HWADDR_FMT_plx 332 " prot=%c%c\n", i, ctx->raddr, 333 ctx->prot & PAGE_READ ? 'R' : '-', 334 ctx->prot & PAGE_WRITE ? 'W' : '-'); 335 } 336 break; 337 } 338 } 339 } 340 if (ret < 0) { 341 if (qemu_log_enabled()) { 342 qemu_log_mask(CPU_LOG_MMU, "no BAT match for " 343 TARGET_FMT_lx ":\n", virtual); 344 for (i = 0; i < 4; i++) { 345 BATu = &BATut[i]; 346 BATl = &BATlt[i]; 347 BEPIu = *BATu & 0xF0000000; 348 BEPIl = *BATu & 0x0FFE0000; 349 bl = (*BATu & 0x00001FFC) << 15; 350 qemu_log_mask(CPU_LOG_MMU, "%s: %cBAT%d v " 351 TARGET_FMT_lx " BATu " TARGET_FMT_lx 352 " BATl " TARGET_FMT_lx "\n\t" TARGET_FMT_lx " " 353 TARGET_FMT_lx " " TARGET_FMT_lx "\n", 354 __func__, ifetch ? 'I' : 'D', i, virtual, 355 *BATu, *BATl, BEPIu, BEPIl, bl); 356 } 357 } 358 } 359 /* No hit */ 360 return ret; 361 } 362 363 /* Perform segment based translation */ 364 static int get_segment_6xx_tlb(CPUPPCState *env, mmu_ctx_t *ctx, 365 target_ulong eaddr, MMUAccessType access_type, 366 int type) 367 { 368 PowerPCCPU *cpu = env_archcpu(env); 369 hwaddr hash; 370 target_ulong vsid; 371 int ds, target_page_bits; 372 bool pr; 373 int ret; 374 target_ulong sr, pgidx; 375 376 pr = FIELD_EX64(env->msr, MSR, PR); 377 ctx->eaddr = eaddr; 378 379 sr = env->sr[eaddr >> 28]; 380 ctx->key = (((sr & 0x20000000) && pr) || 381 ((sr & 0x40000000) && !pr)) ? 1 : 0; 382 ds = sr & 0x80000000 ? 1 : 0; 383 ctx->nx = sr & 0x10000000 ? 1 : 0; 384 vsid = sr & 0x00FFFFFF; 385 target_page_bits = TARGET_PAGE_BITS; 386 qemu_log_mask(CPU_LOG_MMU, 387 "Check segment v=" TARGET_FMT_lx " %d " TARGET_FMT_lx 388 " nip=" TARGET_FMT_lx " lr=" TARGET_FMT_lx 389 " ir=%d dr=%d pr=%d %d t=%d\n", 390 eaddr, (int)(eaddr >> 28), sr, env->nip, env->lr, 391 (int)FIELD_EX64(env->msr, MSR, IR), 392 (int)FIELD_EX64(env->msr, MSR, DR), pr ? 1 : 0, 393 access_type == MMU_DATA_STORE, type); 394 pgidx = (eaddr & ~SEGMENT_MASK_256M) >> target_page_bits; 395 hash = vsid ^ pgidx; 396 ctx->ptem = (vsid << 7) | (pgidx >> 10); 397 398 qemu_log_mask(CPU_LOG_MMU, 399 "pte segment: key=%d ds %d nx %d vsid " TARGET_FMT_lx "\n", 400 ctx->key, ds, ctx->nx, vsid); 401 ret = -1; 402 if (!ds) { 403 /* Check if instruction fetch is allowed, if needed */ 404 if (type != ACCESS_CODE || ctx->nx == 0) { 405 /* Page address translation */ 406 qemu_log_mask(CPU_LOG_MMU, "htab_base " HWADDR_FMT_plx 407 " htab_mask " HWADDR_FMT_plx 408 " hash " HWADDR_FMT_plx "\n", 409 ppc_hash32_hpt_base(cpu), ppc_hash32_hpt_mask(cpu), hash); 410 ctx->hash[0] = hash; 411 ctx->hash[1] = ~hash; 412 413 /* Initialize real address with an invalid value */ 414 ctx->raddr = (hwaddr)-1ULL; 415 /* Software TLB search */ 416 ret = ppc6xx_tlb_check(env, ctx, eaddr, access_type); 417 #if defined(DUMP_PAGE_TABLES) 418 if (qemu_loglevel_mask(CPU_LOG_MMU)) { 419 CPUState *cs = env_cpu(env); 420 hwaddr curaddr; 421 uint32_t a0, a1, a2, a3; 422 423 qemu_log("Page table: " HWADDR_FMT_plx " len " HWADDR_FMT_plx 424 "\n", ppc_hash32_hpt_base(cpu), 425 ppc_hash32_hpt_mask(cpu) + 0x80); 426 for (curaddr = ppc_hash32_hpt_base(cpu); 427 curaddr < (ppc_hash32_hpt_base(cpu) 428 + ppc_hash32_hpt_mask(cpu) + 0x80); 429 curaddr += 16) { 430 a0 = ldl_phys(cs->as, curaddr); 431 a1 = ldl_phys(cs->as, curaddr + 4); 432 a2 = ldl_phys(cs->as, curaddr + 8); 433 a3 = ldl_phys(cs->as, curaddr + 12); 434 if (a0 != 0 || a1 != 0 || a2 != 0 || a3 != 0) { 435 qemu_log(HWADDR_FMT_plx ": %08x %08x %08x %08x\n", 436 curaddr, a0, a1, a2, a3); 437 } 438 } 439 } 440 #endif 441 } else { 442 qemu_log_mask(CPU_LOG_MMU, "No access allowed\n"); 443 ret = -3; 444 } 445 } else { 446 qemu_log_mask(CPU_LOG_MMU, "direct store...\n"); 447 /* Direct-store segment : absolutely *BUGGY* for now */ 448 449 switch (type) { 450 case ACCESS_INT: 451 /* Integer load/store : only access allowed */ 452 break; 453 case ACCESS_CODE: 454 /* No code fetch is allowed in direct-store areas */ 455 return -4; 456 case ACCESS_FLOAT: 457 /* Floating point load/store */ 458 return -4; 459 case ACCESS_RES: 460 /* lwarx, ldarx or srwcx. */ 461 return -4; 462 case ACCESS_CACHE: 463 /* 464 * dcba, dcbt, dcbtst, dcbf, dcbi, dcbst, dcbz, or icbi 465 * 466 * Should make the instruction do no-op. As it already do 467 * no-op, it's quite easy :-) 468 */ 469 ctx->raddr = eaddr; 470 return 0; 471 case ACCESS_EXT: 472 /* eciwx or ecowx */ 473 return -4; 474 default: 475 qemu_log_mask(CPU_LOG_MMU, "ERROR: instruction should not need " 476 "address translation\n"); 477 return -4; 478 } 479 if ((access_type == MMU_DATA_STORE || ctx->key != 1) && 480 (access_type == MMU_DATA_LOAD || ctx->key != 0)) { 481 ctx->raddr = eaddr; 482 ret = 2; 483 } else { 484 ret = -2; 485 } 486 } 487 488 return ret; 489 } 490 491 /* Generic TLB check function for embedded PowerPC implementations */ 492 static bool ppcemb_tlb_check(CPUPPCState *env, ppcemb_tlb_t *tlb, 493 hwaddr *raddrp, 494 target_ulong address, uint32_t pid, int i) 495 { 496 target_ulong mask; 497 498 /* Check valid flag */ 499 if (!(tlb->prot & PAGE_VALID)) { 500 return false; 501 } 502 mask = ~(tlb->size - 1); 503 qemu_log_mask(CPU_LOG_MMU, "%s: TLB %d address " TARGET_FMT_lx 504 " PID %u <=> " TARGET_FMT_lx " " TARGET_FMT_lx " %u %x\n", 505 __func__, i, address, pid, tlb->EPN, 506 mask, (uint32_t)tlb->PID, tlb->prot); 507 /* Check PID */ 508 if (tlb->PID != 0 && tlb->PID != pid) { 509 return false; 510 } 511 /* Check effective address */ 512 if ((address & mask) != tlb->EPN) { 513 return false; 514 } 515 *raddrp = (tlb->RPN & mask) | (address & ~mask); 516 return true; 517 } 518 519 /* Generic TLB search function for PowerPC embedded implementations */ 520 int ppcemb_tlb_search(CPUPPCState *env, target_ulong address, uint32_t pid) 521 { 522 ppcemb_tlb_t *tlb; 523 hwaddr raddr; 524 int i; 525 526 for (i = 0; i < env->nb_tlb; i++) { 527 tlb = &env->tlb.tlbe[i]; 528 if (ppcemb_tlb_check(env, tlb, &raddr, address, pid, i)) { 529 return i; 530 } 531 } 532 return -1; 533 } 534 535 static int mmu40x_get_physical_address(CPUPPCState *env, mmu_ctx_t *ctx, 536 target_ulong address, 537 MMUAccessType access_type) 538 { 539 ppcemb_tlb_t *tlb; 540 hwaddr raddr; 541 int i, ret, zsel, zpr, pr; 542 543 ret = -1; 544 raddr = (hwaddr)-1ULL; 545 pr = FIELD_EX64(env->msr, MSR, PR); 546 for (i = 0; i < env->nb_tlb; i++) { 547 tlb = &env->tlb.tlbe[i]; 548 if (!ppcemb_tlb_check(env, tlb, &raddr, address, 549 env->spr[SPR_40x_PID], i)) { 550 continue; 551 } 552 zsel = (tlb->attr >> 4) & 0xF; 553 zpr = (env->spr[SPR_40x_ZPR] >> (30 - (2 * zsel))) & 0x3; 554 qemu_log_mask(CPU_LOG_MMU, 555 "%s: TLB %d zsel %d zpr %d ty %d attr %08x\n", 556 __func__, i, zsel, zpr, access_type, tlb->attr); 557 /* Check execute enable bit */ 558 switch (zpr) { 559 case 0x2: 560 if (pr != 0) { 561 goto check_perms; 562 } 563 /* fall through */ 564 case 0x3: 565 /* All accesses granted */ 566 ctx->prot = PAGE_READ | PAGE_WRITE | PAGE_EXEC; 567 ret = 0; 568 break; 569 case 0x0: 570 if (pr != 0) { 571 /* Raise Zone protection fault. */ 572 env->spr[SPR_40x_ESR] = 1 << 22; 573 ctx->prot = 0; 574 ret = -2; 575 break; 576 } 577 /* fall through */ 578 case 0x1: 579 check_perms: 580 /* Check from TLB entry */ 581 ctx->prot = tlb->prot; 582 ret = check_prot(ctx->prot, access_type); 583 if (ret == -2) { 584 env->spr[SPR_40x_ESR] = 0; 585 } 586 break; 587 } 588 if (ret >= 0) { 589 ctx->raddr = raddr; 590 qemu_log_mask(CPU_LOG_MMU, "%s: access granted " TARGET_FMT_lx 591 " => " HWADDR_FMT_plx 592 " %d %d\n", __func__, address, ctx->raddr, ctx->prot, 593 ret); 594 return 0; 595 } 596 } 597 qemu_log_mask(CPU_LOG_MMU, "%s: access refused " TARGET_FMT_lx 598 " => " HWADDR_FMT_plx 599 " %d %d\n", __func__, address, raddr, ctx->prot, ret); 600 601 return ret; 602 } 603 604 static bool mmubooke_check_pid(CPUPPCState *env, ppcemb_tlb_t *tlb, 605 hwaddr *raddr, target_ulong addr, int i) 606 { 607 if (ppcemb_tlb_check(env, tlb, raddr, addr, env->spr[SPR_BOOKE_PID], i)) { 608 if (!env->nb_pids) { 609 /* Extend the physical address to 36 bits */ 610 *raddr |= (uint64_t)(tlb->RPN & 0xF) << 32; 611 } 612 return true; 613 } else if (!env->nb_pids) { 614 return false; 615 } 616 if (env->spr[SPR_BOOKE_PID1] && 617 ppcemb_tlb_check(env, tlb, raddr, addr, env->spr[SPR_BOOKE_PID1], i)) { 618 return true; 619 } 620 if (env->spr[SPR_BOOKE_PID2] && 621 ppcemb_tlb_check(env, tlb, raddr, addr, env->spr[SPR_BOOKE_PID2], i)) { 622 return true; 623 } 624 return false; 625 } 626 627 static int mmubooke_check_tlb(CPUPPCState *env, ppcemb_tlb_t *tlb, 628 hwaddr *raddr, int *prot, target_ulong address, 629 MMUAccessType access_type, int i) 630 { 631 int prot2; 632 633 if (!mmubooke_check_pid(env, tlb, raddr, address, i)) { 634 qemu_log_mask(CPU_LOG_MMU, "%s: TLB entry not found\n", __func__); 635 return -1; 636 } 637 638 if (FIELD_EX64(env->msr, MSR, PR)) { 639 prot2 = tlb->prot & 0xF; 640 } else { 641 prot2 = (tlb->prot >> 4) & 0xF; 642 } 643 644 /* Check the address space */ 645 if ((access_type == MMU_INST_FETCH ? 646 FIELD_EX64(env->msr, MSR, IR) : 647 FIELD_EX64(env->msr, MSR, DR)) != (tlb->attr & 1)) { 648 qemu_log_mask(CPU_LOG_MMU, "%s: AS doesn't match\n", __func__); 649 return -1; 650 } 651 652 *prot = prot2; 653 if (prot2 & prot_for_access_type(access_type)) { 654 qemu_log_mask(CPU_LOG_MMU, "%s: good TLB!\n", __func__); 655 return 0; 656 } 657 658 qemu_log_mask(CPU_LOG_MMU, "%s: no prot match: %x\n", __func__, prot2); 659 return access_type == MMU_INST_FETCH ? -3 : -2; 660 } 661 662 static int mmubooke_get_physical_address(CPUPPCState *env, mmu_ctx_t *ctx, 663 target_ulong address, 664 MMUAccessType access_type) 665 { 666 ppcemb_tlb_t *tlb; 667 hwaddr raddr; 668 int i, ret; 669 670 ret = -1; 671 raddr = (hwaddr)-1ULL; 672 for (i = 0; i < env->nb_tlb; i++) { 673 tlb = &env->tlb.tlbe[i]; 674 ret = mmubooke_check_tlb(env, tlb, &raddr, &ctx->prot, address, 675 access_type, i); 676 if (ret != -1) { 677 break; 678 } 679 } 680 681 if (ret >= 0) { 682 ctx->raddr = raddr; 683 qemu_log_mask(CPU_LOG_MMU, "%s: access granted " TARGET_FMT_lx 684 " => " HWADDR_FMT_plx " %d %d\n", __func__, 685 address, ctx->raddr, ctx->prot, ret); 686 } else { 687 qemu_log_mask(CPU_LOG_MMU, "%s: access refused " TARGET_FMT_lx 688 " => " HWADDR_FMT_plx " %d %d\n", __func__, 689 address, raddr, ctx->prot, ret); 690 } 691 692 return ret; 693 } 694 695 hwaddr booke206_tlb_to_page_size(CPUPPCState *env, ppcmas_tlb_t *tlb) 696 { 697 int tlbm_size; 698 699 tlbm_size = (tlb->mas1 & MAS1_TSIZE_MASK) >> MAS1_TSIZE_SHIFT; 700 701 return 1024ULL << tlbm_size; 702 } 703 704 /* TLB check function for MAS based SoftTLBs */ 705 int ppcmas_tlb_check(CPUPPCState *env, ppcmas_tlb_t *tlb, hwaddr *raddrp, 706 target_ulong address, uint32_t pid) 707 { 708 hwaddr mask; 709 uint32_t tlb_pid; 710 711 if (!FIELD_EX64(env->msr, MSR, CM)) { 712 /* In 32bit mode we can only address 32bit EAs */ 713 address = (uint32_t)address; 714 } 715 716 /* Check valid flag */ 717 if (!(tlb->mas1 & MAS1_VALID)) { 718 return -1; 719 } 720 721 mask = ~(booke206_tlb_to_page_size(env, tlb) - 1); 722 qemu_log_mask(CPU_LOG_MMU, "%s: TLB ADDR=0x" TARGET_FMT_lx 723 " PID=0x%x MAS1=0x%x MAS2=0x%" PRIx64 " mask=0x%" 724 HWADDR_PRIx " MAS7_3=0x%" PRIx64 " MAS8=0x%" PRIx32 "\n", 725 __func__, address, pid, tlb->mas1, tlb->mas2, mask, 726 tlb->mas7_3, tlb->mas8); 727 728 /* Check PID */ 729 tlb_pid = (tlb->mas1 & MAS1_TID_MASK) >> MAS1_TID_SHIFT; 730 if (tlb_pid != 0 && tlb_pid != pid) { 731 return -1; 732 } 733 734 /* Check effective address */ 735 if ((address & mask) != (tlb->mas2 & MAS2_EPN_MASK)) { 736 return -1; 737 } 738 739 if (raddrp) { 740 *raddrp = (tlb->mas7_3 & mask) | (address & ~mask); 741 } 742 743 return 0; 744 } 745 746 static bool is_epid_mmu(int mmu_idx) 747 { 748 return mmu_idx == PPC_TLB_EPID_STORE || mmu_idx == PPC_TLB_EPID_LOAD; 749 } 750 751 static uint32_t mmubooke206_esr(int mmu_idx, MMUAccessType access_type) 752 { 753 uint32_t esr = 0; 754 if (access_type == MMU_DATA_STORE) { 755 esr |= ESR_ST; 756 } 757 if (is_epid_mmu(mmu_idx)) { 758 esr |= ESR_EPID; 759 } 760 return esr; 761 } 762 763 /* 764 * Get EPID register given the mmu_idx. If this is regular load, 765 * construct the EPID access bits from current processor state 766 * 767 * Get the effective AS and PR bits and the PID. The PID is returned 768 * only if EPID load is requested, otherwise the caller must detect 769 * the correct EPID. Return true if valid EPID is returned. 770 */ 771 static bool mmubooke206_get_as(CPUPPCState *env, 772 int mmu_idx, uint32_t *epid_out, 773 bool *as_out, bool *pr_out) 774 { 775 if (is_epid_mmu(mmu_idx)) { 776 uint32_t epidr; 777 if (mmu_idx == PPC_TLB_EPID_STORE) { 778 epidr = env->spr[SPR_BOOKE_EPSC]; 779 } else { 780 epidr = env->spr[SPR_BOOKE_EPLC]; 781 } 782 *epid_out = (epidr & EPID_EPID) >> EPID_EPID_SHIFT; 783 *as_out = !!(epidr & EPID_EAS); 784 *pr_out = !!(epidr & EPID_EPR); 785 return true; 786 } else { 787 *as_out = FIELD_EX64(env->msr, MSR, DS); 788 *pr_out = FIELD_EX64(env->msr, MSR, PR); 789 return false; 790 } 791 } 792 793 /* Check if the tlb found by hashing really matches */ 794 static int mmubooke206_check_tlb(CPUPPCState *env, ppcmas_tlb_t *tlb, 795 hwaddr *raddr, int *prot, 796 target_ulong address, 797 MMUAccessType access_type, int mmu_idx) 798 { 799 int prot2 = 0; 800 uint32_t epid; 801 bool as, pr; 802 bool use_epid = mmubooke206_get_as(env, mmu_idx, &epid, &as, &pr); 803 804 if (!use_epid) { 805 if (ppcmas_tlb_check(env, tlb, raddr, address, 806 env->spr[SPR_BOOKE_PID]) >= 0) { 807 goto found_tlb; 808 } 809 810 if (env->spr[SPR_BOOKE_PID1] && 811 ppcmas_tlb_check(env, tlb, raddr, address, 812 env->spr[SPR_BOOKE_PID1]) >= 0) { 813 goto found_tlb; 814 } 815 816 if (env->spr[SPR_BOOKE_PID2] && 817 ppcmas_tlb_check(env, tlb, raddr, address, 818 env->spr[SPR_BOOKE_PID2]) >= 0) { 819 goto found_tlb; 820 } 821 } else { 822 if (ppcmas_tlb_check(env, tlb, raddr, address, epid) >= 0) { 823 goto found_tlb; 824 } 825 } 826 827 qemu_log_mask(CPU_LOG_MMU, "%s: No TLB entry found for effective address " 828 "0x" TARGET_FMT_lx "\n", __func__, address); 829 return -1; 830 831 found_tlb: 832 833 if (pr) { 834 if (tlb->mas7_3 & MAS3_UR) { 835 prot2 |= PAGE_READ; 836 } 837 if (tlb->mas7_3 & MAS3_UW) { 838 prot2 |= PAGE_WRITE; 839 } 840 if (tlb->mas7_3 & MAS3_UX) { 841 prot2 |= PAGE_EXEC; 842 } 843 } else { 844 if (tlb->mas7_3 & MAS3_SR) { 845 prot2 |= PAGE_READ; 846 } 847 if (tlb->mas7_3 & MAS3_SW) { 848 prot2 |= PAGE_WRITE; 849 } 850 if (tlb->mas7_3 & MAS3_SX) { 851 prot2 |= PAGE_EXEC; 852 } 853 } 854 855 /* Check the address space and permissions */ 856 if (access_type == MMU_INST_FETCH) { 857 /* There is no way to fetch code using epid load */ 858 assert(!use_epid); 859 as = FIELD_EX64(env->msr, MSR, IR); 860 } 861 862 if (as != ((tlb->mas1 & MAS1_TS) >> MAS1_TS_SHIFT)) { 863 qemu_log_mask(CPU_LOG_MMU, "%s: AS doesn't match\n", __func__); 864 return -1; 865 } 866 867 *prot = prot2; 868 if (prot2 & prot_for_access_type(access_type)) { 869 qemu_log_mask(CPU_LOG_MMU, "%s: good TLB!\n", __func__); 870 return 0; 871 } 872 873 qemu_log_mask(CPU_LOG_MMU, "%s: no prot match: %x\n", __func__, prot2); 874 return access_type == MMU_INST_FETCH ? -3 : -2; 875 } 876 877 static int mmubooke206_get_physical_address(CPUPPCState *env, mmu_ctx_t *ctx, 878 target_ulong address, 879 MMUAccessType access_type, 880 int mmu_idx) 881 { 882 ppcmas_tlb_t *tlb; 883 hwaddr raddr; 884 int i, j, ret; 885 886 ret = -1; 887 raddr = (hwaddr)-1ULL; 888 889 for (i = 0; i < BOOKE206_MAX_TLBN; i++) { 890 int ways = booke206_tlb_ways(env, i); 891 892 for (j = 0; j < ways; j++) { 893 tlb = booke206_get_tlbm(env, i, address, j); 894 if (!tlb) { 895 continue; 896 } 897 ret = mmubooke206_check_tlb(env, tlb, &raddr, &ctx->prot, address, 898 access_type, mmu_idx); 899 if (ret != -1) { 900 goto found_tlb; 901 } 902 } 903 } 904 905 found_tlb: 906 907 if (ret >= 0) { 908 ctx->raddr = raddr; 909 qemu_log_mask(CPU_LOG_MMU, "%s: access granted " TARGET_FMT_lx 910 " => " HWADDR_FMT_plx " %d %d\n", __func__, address, 911 ctx->raddr, ctx->prot, ret); 912 } else { 913 qemu_log_mask(CPU_LOG_MMU, "%s: access refused " TARGET_FMT_lx 914 " => " HWADDR_FMT_plx " %d %d\n", __func__, address, 915 raddr, ctx->prot, ret); 916 } 917 918 return ret; 919 } 920 921 static const char *book3e_tsize_to_str[32] = { 922 "1K", "2K", "4K", "8K", "16K", "32K", "64K", "128K", "256K", "512K", 923 "1M", "2M", "4M", "8M", "16M", "32M", "64M", "128M", "256M", "512M", 924 "1G", "2G", "4G", "8G", "16G", "32G", "64G", "128G", "256G", "512G", 925 "1T", "2T" 926 }; 927 928 static void mmubooke_dump_mmu(CPUPPCState *env) 929 { 930 ppcemb_tlb_t *entry; 931 int i; 932 933 if (kvm_enabled() && !env->kvm_sw_tlb) { 934 qemu_printf("Cannot access KVM TLB\n"); 935 return; 936 } 937 938 qemu_printf("\nTLB:\n"); 939 qemu_printf("Effective Physical Size PID Prot " 940 "Attr\n"); 941 942 entry = &env->tlb.tlbe[0]; 943 for (i = 0; i < env->nb_tlb; i++, entry++) { 944 hwaddr ea, pa; 945 target_ulong mask; 946 uint64_t size = (uint64_t)entry->size; 947 char size_buf[20]; 948 949 /* Check valid flag */ 950 if (!(entry->prot & PAGE_VALID)) { 951 continue; 952 } 953 954 mask = ~(entry->size - 1); 955 ea = entry->EPN & mask; 956 pa = entry->RPN & mask; 957 /* Extend the physical address to 36 bits */ 958 pa |= (hwaddr)(entry->RPN & 0xF) << 32; 959 if (size >= 1 * MiB) { 960 snprintf(size_buf, sizeof(size_buf), "%3" PRId64 "M", size / MiB); 961 } else { 962 snprintf(size_buf, sizeof(size_buf), "%3" PRId64 "k", size / KiB); 963 } 964 qemu_printf("0x%016" PRIx64 " 0x%016" PRIx64 " %s %-5u %08x %08x\n", 965 (uint64_t)ea, (uint64_t)pa, size_buf, (uint32_t)entry->PID, 966 entry->prot, entry->attr); 967 } 968 969 } 970 971 static void mmubooke206_dump_one_tlb(CPUPPCState *env, int tlbn, int offset, 972 int tlbsize) 973 { 974 ppcmas_tlb_t *entry; 975 int i; 976 977 qemu_printf("\nTLB%d:\n", tlbn); 978 qemu_printf("Effective Physical Size TID TS SRWX" 979 " URWX WIMGE U0123\n"); 980 981 entry = &env->tlb.tlbm[offset]; 982 for (i = 0; i < tlbsize; i++, entry++) { 983 hwaddr ea, pa, size; 984 int tsize; 985 986 if (!(entry->mas1 & MAS1_VALID)) { 987 continue; 988 } 989 990 tsize = (entry->mas1 & MAS1_TSIZE_MASK) >> MAS1_TSIZE_SHIFT; 991 size = 1024ULL << tsize; 992 ea = entry->mas2 & ~(size - 1); 993 pa = entry->mas7_3 & ~(size - 1); 994 995 qemu_printf("0x%016" PRIx64 " 0x%016" PRIx64 " %4s %-5u %1u S%c%c%c" 996 " U%c%c%c %c%c%c%c%c U%c%c%c%c\n", 997 (uint64_t)ea, (uint64_t)pa, 998 book3e_tsize_to_str[tsize], 999 (entry->mas1 & MAS1_TID_MASK) >> MAS1_TID_SHIFT, 1000 (entry->mas1 & MAS1_TS) >> MAS1_TS_SHIFT, 1001 entry->mas7_3 & MAS3_SR ? 'R' : '-', 1002 entry->mas7_3 & MAS3_SW ? 'W' : '-', 1003 entry->mas7_3 & MAS3_SX ? 'X' : '-', 1004 entry->mas7_3 & MAS3_UR ? 'R' : '-', 1005 entry->mas7_3 & MAS3_UW ? 'W' : '-', 1006 entry->mas7_3 & MAS3_UX ? 'X' : '-', 1007 entry->mas2 & MAS2_W ? 'W' : '-', 1008 entry->mas2 & MAS2_I ? 'I' : '-', 1009 entry->mas2 & MAS2_M ? 'M' : '-', 1010 entry->mas2 & MAS2_G ? 'G' : '-', 1011 entry->mas2 & MAS2_E ? 'E' : '-', 1012 entry->mas7_3 & MAS3_U0 ? '0' : '-', 1013 entry->mas7_3 & MAS3_U1 ? '1' : '-', 1014 entry->mas7_3 & MAS3_U2 ? '2' : '-', 1015 entry->mas7_3 & MAS3_U3 ? '3' : '-'); 1016 } 1017 } 1018 1019 static void mmubooke206_dump_mmu(CPUPPCState *env) 1020 { 1021 int offset = 0; 1022 int i; 1023 1024 if (kvm_enabled() && !env->kvm_sw_tlb) { 1025 qemu_printf("Cannot access KVM TLB\n"); 1026 return; 1027 } 1028 1029 for (i = 0; i < BOOKE206_MAX_TLBN; i++) { 1030 int size = booke206_tlb_size(env, i); 1031 1032 if (size == 0) { 1033 continue; 1034 } 1035 1036 mmubooke206_dump_one_tlb(env, i, offset, size); 1037 offset += size; 1038 } 1039 } 1040 1041 static void mmu6xx_dump_BATs(CPUPPCState *env, int type) 1042 { 1043 target_ulong *BATlt, *BATut, *BATu, *BATl; 1044 target_ulong BEPIl, BEPIu, bl; 1045 int i; 1046 1047 switch (type) { 1048 case ACCESS_CODE: 1049 BATlt = env->IBAT[1]; 1050 BATut = env->IBAT[0]; 1051 break; 1052 default: 1053 BATlt = env->DBAT[1]; 1054 BATut = env->DBAT[0]; 1055 break; 1056 } 1057 1058 for (i = 0; i < env->nb_BATs; i++) { 1059 BATu = &BATut[i]; 1060 BATl = &BATlt[i]; 1061 BEPIu = *BATu & 0xF0000000; 1062 BEPIl = *BATu & 0x0FFE0000; 1063 bl = (*BATu & 0x00001FFC) << 15; 1064 qemu_printf("%s BAT%d BATu " TARGET_FMT_lx 1065 " BATl " TARGET_FMT_lx "\n\t" TARGET_FMT_lx " " 1066 TARGET_FMT_lx " " TARGET_FMT_lx "\n", 1067 type == ACCESS_CODE ? "code" : "data", i, 1068 *BATu, *BATl, BEPIu, BEPIl, bl); 1069 } 1070 } 1071 1072 static void mmu6xx_dump_mmu(CPUPPCState *env) 1073 { 1074 PowerPCCPU *cpu = env_archcpu(env); 1075 ppc6xx_tlb_t *tlb; 1076 target_ulong sr; 1077 int type, way, entry, i; 1078 1079 qemu_printf("HTAB base = 0x%"HWADDR_PRIx"\n", ppc_hash32_hpt_base(cpu)); 1080 qemu_printf("HTAB mask = 0x%"HWADDR_PRIx"\n", ppc_hash32_hpt_mask(cpu)); 1081 1082 qemu_printf("\nSegment registers:\n"); 1083 for (i = 0; i < 32; i++) { 1084 sr = env->sr[i]; 1085 if (sr & 0x80000000) { 1086 qemu_printf("%02d T=%d Ks=%d Kp=%d BUID=0x%03x " 1087 "CNTLR_SPEC=0x%05x\n", i, 1088 sr & 0x80000000 ? 1 : 0, sr & 0x40000000 ? 1 : 0, 1089 sr & 0x20000000 ? 1 : 0, (uint32_t)((sr >> 20) & 0x1FF), 1090 (uint32_t)(sr & 0xFFFFF)); 1091 } else { 1092 qemu_printf("%02d T=%d Ks=%d Kp=%d N=%d VSID=0x%06x\n", i, 1093 sr & 0x80000000 ? 1 : 0, sr & 0x40000000 ? 1 : 0, 1094 sr & 0x20000000 ? 1 : 0, sr & 0x10000000 ? 1 : 0, 1095 (uint32_t)(sr & 0x00FFFFFF)); 1096 } 1097 } 1098 1099 qemu_printf("\nBATs:\n"); 1100 mmu6xx_dump_BATs(env, ACCESS_INT); 1101 mmu6xx_dump_BATs(env, ACCESS_CODE); 1102 1103 if (env->id_tlbs != 1) { 1104 qemu_printf("ERROR: 6xx MMU should have separated TLB" 1105 " for code and data\n"); 1106 } 1107 1108 qemu_printf("\nTLBs [EPN EPN + SIZE]\n"); 1109 1110 for (type = 0; type < 2; type++) { 1111 for (way = 0; way < env->nb_ways; way++) { 1112 for (entry = env->nb_tlb * type + env->tlb_per_way * way; 1113 entry < (env->nb_tlb * type + env->tlb_per_way * (way + 1)); 1114 entry++) { 1115 1116 tlb = &env->tlb.tlb6[entry]; 1117 qemu_printf("%s TLB %02d/%02d way:%d %s [" 1118 TARGET_FMT_lx " " TARGET_FMT_lx "]\n", 1119 type ? "code" : "data", entry % env->nb_tlb, 1120 env->nb_tlb, way, 1121 pte_is_valid(tlb->pte0) ? "valid" : "inval", 1122 tlb->EPN, tlb->EPN + TARGET_PAGE_SIZE); 1123 } 1124 } 1125 } 1126 } 1127 1128 void dump_mmu(CPUPPCState *env) 1129 { 1130 switch (env->mmu_model) { 1131 case POWERPC_MMU_BOOKE: 1132 mmubooke_dump_mmu(env); 1133 break; 1134 case POWERPC_MMU_BOOKE206: 1135 mmubooke206_dump_mmu(env); 1136 break; 1137 case POWERPC_MMU_SOFT_6xx: 1138 mmu6xx_dump_mmu(env); 1139 break; 1140 #if defined(TARGET_PPC64) 1141 case POWERPC_MMU_64B: 1142 case POWERPC_MMU_2_03: 1143 case POWERPC_MMU_2_06: 1144 case POWERPC_MMU_2_07: 1145 dump_slb(env_archcpu(env)); 1146 break; 1147 case POWERPC_MMU_3_00: 1148 if (ppc64_v3_radix(env_archcpu(env))) { 1149 qemu_log_mask(LOG_UNIMP, "%s: the PPC64 MMU is unsupported\n", 1150 __func__); 1151 } else { 1152 dump_slb(env_archcpu(env)); 1153 } 1154 break; 1155 #endif 1156 default: 1157 qemu_log_mask(LOG_UNIMP, "%s: unimplemented\n", __func__); 1158 } 1159 } 1160 1161 static int check_physical(CPUPPCState *env, mmu_ctx_t *ctx, target_ulong eaddr, 1162 MMUAccessType access_type) 1163 { 1164 ctx->raddr = eaddr; 1165 ctx->prot = PAGE_READ | PAGE_EXEC; 1166 1167 switch (env->mmu_model) { 1168 case POWERPC_MMU_SOFT_6xx: 1169 case POWERPC_MMU_SOFT_4xx: 1170 case POWERPC_MMU_REAL: 1171 case POWERPC_MMU_BOOKE: 1172 ctx->prot |= PAGE_WRITE; 1173 break; 1174 1175 default: 1176 /* Caller's checks mean we should never get here for other models */ 1177 g_assert_not_reached(); 1178 } 1179 1180 return 0; 1181 } 1182 1183 int get_physical_address_wtlb(CPUPPCState *env, mmu_ctx_t *ctx, 1184 target_ulong eaddr, 1185 MMUAccessType access_type, int type, 1186 int mmu_idx) 1187 { 1188 int ret = -1; 1189 bool real_mode = (type == ACCESS_CODE && !FIELD_EX64(env->msr, MSR, IR)) || 1190 (type != ACCESS_CODE && !FIELD_EX64(env->msr, MSR, DR)); 1191 1192 switch (env->mmu_model) { 1193 case POWERPC_MMU_SOFT_6xx: 1194 if (real_mode) { 1195 ret = check_physical(env, ctx, eaddr, access_type); 1196 } else { 1197 /* Try to find a BAT */ 1198 if (env->nb_BATs != 0) { 1199 ret = get_bat_6xx_tlb(env, ctx, eaddr, access_type); 1200 } 1201 if (ret < 0) { 1202 /* We didn't match any BAT entry or don't have BATs */ 1203 ret = get_segment_6xx_tlb(env, ctx, eaddr, access_type, type); 1204 } 1205 } 1206 break; 1207 1208 case POWERPC_MMU_SOFT_4xx: 1209 if (real_mode) { 1210 ret = check_physical(env, ctx, eaddr, access_type); 1211 } else { 1212 ret = mmu40x_get_physical_address(env, ctx, eaddr, access_type); 1213 } 1214 break; 1215 case POWERPC_MMU_BOOKE: 1216 ret = mmubooke_get_physical_address(env, ctx, eaddr, access_type); 1217 break; 1218 case POWERPC_MMU_BOOKE206: 1219 ret = mmubooke206_get_physical_address(env, ctx, eaddr, access_type, 1220 mmu_idx); 1221 break; 1222 case POWERPC_MMU_MPC8xx: 1223 /* XXX: TODO */ 1224 cpu_abort(env_cpu(env), "MPC8xx MMU model is not implemented\n"); 1225 break; 1226 case POWERPC_MMU_REAL: 1227 if (real_mode) { 1228 ret = check_physical(env, ctx, eaddr, access_type); 1229 } else { 1230 cpu_abort(env_cpu(env), 1231 "PowerPC in real mode do not do any translation\n"); 1232 } 1233 return -1; 1234 default: 1235 cpu_abort(env_cpu(env), "Unknown or invalid MMU model\n"); 1236 return -1; 1237 } 1238 1239 return ret; 1240 } 1241 1242 static void booke206_update_mas_tlb_miss(CPUPPCState *env, target_ulong address, 1243 MMUAccessType access_type, int mmu_idx) 1244 { 1245 uint32_t epid; 1246 bool as, pr; 1247 uint32_t missed_tid = 0; 1248 bool use_epid = mmubooke206_get_as(env, mmu_idx, &epid, &as, &pr); 1249 1250 if (access_type == MMU_INST_FETCH) { 1251 as = FIELD_EX64(env->msr, MSR, IR); 1252 } 1253 env->spr[SPR_BOOKE_MAS0] = env->spr[SPR_BOOKE_MAS4] & MAS4_TLBSELD_MASK; 1254 env->spr[SPR_BOOKE_MAS1] = env->spr[SPR_BOOKE_MAS4] & MAS4_TSIZED_MASK; 1255 env->spr[SPR_BOOKE_MAS2] = env->spr[SPR_BOOKE_MAS4] & MAS4_WIMGED_MASK; 1256 env->spr[SPR_BOOKE_MAS3] = 0; 1257 env->spr[SPR_BOOKE_MAS6] = 0; 1258 env->spr[SPR_BOOKE_MAS7] = 0; 1259 1260 /* AS */ 1261 if (as) { 1262 env->spr[SPR_BOOKE_MAS1] |= MAS1_TS; 1263 env->spr[SPR_BOOKE_MAS6] |= MAS6_SAS; 1264 } 1265 1266 env->spr[SPR_BOOKE_MAS1] |= MAS1_VALID; 1267 env->spr[SPR_BOOKE_MAS2] |= address & MAS2_EPN_MASK; 1268 1269 if (!use_epid) { 1270 switch (env->spr[SPR_BOOKE_MAS4] & MAS4_TIDSELD_PIDZ) { 1271 case MAS4_TIDSELD_PID0: 1272 missed_tid = env->spr[SPR_BOOKE_PID]; 1273 break; 1274 case MAS4_TIDSELD_PID1: 1275 missed_tid = env->spr[SPR_BOOKE_PID1]; 1276 break; 1277 case MAS4_TIDSELD_PID2: 1278 missed_tid = env->spr[SPR_BOOKE_PID2]; 1279 break; 1280 } 1281 env->spr[SPR_BOOKE_MAS6] |= env->spr[SPR_BOOKE_PID] << 16; 1282 } else { 1283 missed_tid = epid; 1284 env->spr[SPR_BOOKE_MAS6] |= missed_tid << 16; 1285 } 1286 env->spr[SPR_BOOKE_MAS1] |= (missed_tid << MAS1_TID_SHIFT); 1287 1288 1289 /* next victim logic */ 1290 env->spr[SPR_BOOKE_MAS0] |= env->last_way << MAS0_ESEL_SHIFT; 1291 env->last_way++; 1292 env->last_way &= booke206_tlb_ways(env, 0) - 1; 1293 env->spr[SPR_BOOKE_MAS0] |= env->last_way << MAS0_NV_SHIFT; 1294 } 1295 1296 /* Perform address translation */ 1297 /* TODO: Split this by mmu_model. */ 1298 static bool ppc_jumbo_xlate(PowerPCCPU *cpu, vaddr eaddr, 1299 MMUAccessType access_type, 1300 hwaddr *raddrp, int *psizep, int *protp, 1301 int mmu_idx, bool guest_visible) 1302 { 1303 CPUState *cs = CPU(cpu); 1304 CPUPPCState *env = &cpu->env; 1305 mmu_ctx_t ctx; 1306 int type; 1307 int ret; 1308 1309 if (access_type == MMU_INST_FETCH) { 1310 /* code access */ 1311 type = ACCESS_CODE; 1312 } else if (guest_visible) { 1313 /* data access */ 1314 type = env->access_type; 1315 } else { 1316 type = ACCESS_INT; 1317 } 1318 1319 ret = get_physical_address_wtlb(env, &ctx, eaddr, access_type, 1320 type, mmu_idx); 1321 if (ret == 0) { 1322 *raddrp = ctx.raddr; 1323 *protp = ctx.prot; 1324 *psizep = TARGET_PAGE_BITS; 1325 return true; 1326 } 1327 1328 if (guest_visible) { 1329 log_cpu_state_mask(CPU_LOG_MMU, cs, 0); 1330 if (type == ACCESS_CODE) { 1331 switch (ret) { 1332 case -1: 1333 /* No matches in page tables or TLB */ 1334 switch (env->mmu_model) { 1335 case POWERPC_MMU_SOFT_6xx: 1336 cs->exception_index = POWERPC_EXCP_IFTLB; 1337 env->error_code = 1 << 18; 1338 env->spr[SPR_IMISS] = eaddr; 1339 env->spr[SPR_ICMP] = 0x80000000 | ctx.ptem; 1340 goto tlb_miss; 1341 case POWERPC_MMU_SOFT_4xx: 1342 cs->exception_index = POWERPC_EXCP_ITLB; 1343 env->error_code = 0; 1344 env->spr[SPR_40x_DEAR] = eaddr; 1345 env->spr[SPR_40x_ESR] = 0x00000000; 1346 break; 1347 case POWERPC_MMU_BOOKE206: 1348 booke206_update_mas_tlb_miss(env, eaddr, 2, mmu_idx); 1349 /* fall through */ 1350 case POWERPC_MMU_BOOKE: 1351 cs->exception_index = POWERPC_EXCP_ITLB; 1352 env->error_code = 0; 1353 env->spr[SPR_BOOKE_DEAR] = eaddr; 1354 env->spr[SPR_BOOKE_ESR] = mmubooke206_esr(mmu_idx, MMU_DATA_LOAD); 1355 break; 1356 case POWERPC_MMU_MPC8xx: 1357 cpu_abort(cs, "MPC8xx MMU model is not implemented\n"); 1358 case POWERPC_MMU_REAL: 1359 cpu_abort(cs, "PowerPC in real mode should never raise " 1360 "any MMU exceptions\n"); 1361 default: 1362 cpu_abort(cs, "Unknown or invalid MMU model\n"); 1363 } 1364 break; 1365 case -2: 1366 /* Access rights violation */ 1367 cs->exception_index = POWERPC_EXCP_ISI; 1368 if ((env->mmu_model == POWERPC_MMU_BOOKE) || 1369 (env->mmu_model == POWERPC_MMU_BOOKE206)) { 1370 env->error_code = 0; 1371 } else { 1372 env->error_code = 0x08000000; 1373 } 1374 break; 1375 case -3: 1376 /* No execute protection violation */ 1377 if ((env->mmu_model == POWERPC_MMU_BOOKE) || 1378 (env->mmu_model == POWERPC_MMU_BOOKE206)) { 1379 env->spr[SPR_BOOKE_ESR] = 0x00000000; 1380 env->error_code = 0; 1381 } else { 1382 env->error_code = 0x10000000; 1383 } 1384 cs->exception_index = POWERPC_EXCP_ISI; 1385 break; 1386 case -4: 1387 /* Direct store exception */ 1388 /* No code fetch is allowed in direct-store areas */ 1389 cs->exception_index = POWERPC_EXCP_ISI; 1390 if ((env->mmu_model == POWERPC_MMU_BOOKE) || 1391 (env->mmu_model == POWERPC_MMU_BOOKE206)) { 1392 env->error_code = 0; 1393 } else { 1394 env->error_code = 0x10000000; 1395 } 1396 break; 1397 } 1398 } else { 1399 switch (ret) { 1400 case -1: 1401 /* No matches in page tables or TLB */ 1402 switch (env->mmu_model) { 1403 case POWERPC_MMU_SOFT_6xx: 1404 if (access_type == MMU_DATA_STORE) { 1405 cs->exception_index = POWERPC_EXCP_DSTLB; 1406 env->error_code = 1 << 16; 1407 } else { 1408 cs->exception_index = POWERPC_EXCP_DLTLB; 1409 env->error_code = 0; 1410 } 1411 env->spr[SPR_DMISS] = eaddr; 1412 env->spr[SPR_DCMP] = 0x80000000 | ctx.ptem; 1413 tlb_miss: 1414 env->error_code |= ctx.key << 19; 1415 env->spr[SPR_HASH1] = ppc_hash32_hpt_base(cpu) + 1416 get_pteg_offset32(cpu, ctx.hash[0]); 1417 env->spr[SPR_HASH2] = ppc_hash32_hpt_base(cpu) + 1418 get_pteg_offset32(cpu, ctx.hash[1]); 1419 break; 1420 case POWERPC_MMU_SOFT_4xx: 1421 cs->exception_index = POWERPC_EXCP_DTLB; 1422 env->error_code = 0; 1423 env->spr[SPR_40x_DEAR] = eaddr; 1424 if (access_type == MMU_DATA_STORE) { 1425 env->spr[SPR_40x_ESR] = 0x00800000; 1426 } else { 1427 env->spr[SPR_40x_ESR] = 0x00000000; 1428 } 1429 break; 1430 case POWERPC_MMU_MPC8xx: 1431 /* XXX: TODO */ 1432 cpu_abort(cs, "MPC8xx MMU model is not implemented\n"); 1433 case POWERPC_MMU_BOOKE206: 1434 booke206_update_mas_tlb_miss(env, eaddr, access_type, mmu_idx); 1435 /* fall through */ 1436 case POWERPC_MMU_BOOKE: 1437 cs->exception_index = POWERPC_EXCP_DTLB; 1438 env->error_code = 0; 1439 env->spr[SPR_BOOKE_DEAR] = eaddr; 1440 env->spr[SPR_BOOKE_ESR] = mmubooke206_esr(mmu_idx, access_type); 1441 break; 1442 case POWERPC_MMU_REAL: 1443 cpu_abort(cs, "PowerPC in real mode should never raise " 1444 "any MMU exceptions\n"); 1445 default: 1446 cpu_abort(cs, "Unknown or invalid MMU model\n"); 1447 } 1448 break; 1449 case -2: 1450 /* Access rights violation */ 1451 cs->exception_index = POWERPC_EXCP_DSI; 1452 env->error_code = 0; 1453 if (env->mmu_model == POWERPC_MMU_SOFT_4xx) { 1454 env->spr[SPR_40x_DEAR] = eaddr; 1455 if (access_type == MMU_DATA_STORE) { 1456 env->spr[SPR_40x_ESR] |= 0x00800000; 1457 } 1458 } else if ((env->mmu_model == POWERPC_MMU_BOOKE) || 1459 (env->mmu_model == POWERPC_MMU_BOOKE206)) { 1460 env->spr[SPR_BOOKE_DEAR] = eaddr; 1461 env->spr[SPR_BOOKE_ESR] = mmubooke206_esr(mmu_idx, access_type); 1462 } else { 1463 env->spr[SPR_DAR] = eaddr; 1464 if (access_type == MMU_DATA_STORE) { 1465 env->spr[SPR_DSISR] = 0x0A000000; 1466 } else { 1467 env->spr[SPR_DSISR] = 0x08000000; 1468 } 1469 } 1470 break; 1471 case -4: 1472 /* Direct store exception */ 1473 switch (type) { 1474 case ACCESS_FLOAT: 1475 /* Floating point load/store */ 1476 cs->exception_index = POWERPC_EXCP_ALIGN; 1477 env->error_code = POWERPC_EXCP_ALIGN_FP; 1478 env->spr[SPR_DAR] = eaddr; 1479 break; 1480 case ACCESS_RES: 1481 /* lwarx, ldarx or stwcx. */ 1482 cs->exception_index = POWERPC_EXCP_DSI; 1483 env->error_code = 0; 1484 env->spr[SPR_DAR] = eaddr; 1485 if (access_type == MMU_DATA_STORE) { 1486 env->spr[SPR_DSISR] = 0x06000000; 1487 } else { 1488 env->spr[SPR_DSISR] = 0x04000000; 1489 } 1490 break; 1491 case ACCESS_EXT: 1492 /* eciwx or ecowx */ 1493 cs->exception_index = POWERPC_EXCP_DSI; 1494 env->error_code = 0; 1495 env->spr[SPR_DAR] = eaddr; 1496 if (access_type == MMU_DATA_STORE) { 1497 env->spr[SPR_DSISR] = 0x06100000; 1498 } else { 1499 env->spr[SPR_DSISR] = 0x04100000; 1500 } 1501 break; 1502 default: 1503 printf("DSI: invalid exception (%d)\n", ret); 1504 cs->exception_index = POWERPC_EXCP_PROGRAM; 1505 env->error_code = 1506 POWERPC_EXCP_INVAL | POWERPC_EXCP_INVAL_INVAL; 1507 env->spr[SPR_DAR] = eaddr; 1508 break; 1509 } 1510 break; 1511 } 1512 } 1513 } 1514 return false; 1515 } 1516 1517 /*****************************************************************************/ 1518 1519 bool ppc_xlate(PowerPCCPU *cpu, vaddr eaddr, MMUAccessType access_type, 1520 hwaddr *raddrp, int *psizep, int *protp, 1521 int mmu_idx, bool guest_visible) 1522 { 1523 switch (cpu->env.mmu_model) { 1524 #if defined(TARGET_PPC64) 1525 case POWERPC_MMU_3_00: 1526 if (ppc64_v3_radix(cpu)) { 1527 return ppc_radix64_xlate(cpu, eaddr, access_type, raddrp, 1528 psizep, protp, mmu_idx, guest_visible); 1529 } 1530 /* fall through */ 1531 case POWERPC_MMU_64B: 1532 case POWERPC_MMU_2_03: 1533 case POWERPC_MMU_2_06: 1534 case POWERPC_MMU_2_07: 1535 return ppc_hash64_xlate(cpu, eaddr, access_type, 1536 raddrp, psizep, protp, mmu_idx, guest_visible); 1537 #endif 1538 1539 case POWERPC_MMU_32B: 1540 return ppc_hash32_xlate(cpu, eaddr, access_type, raddrp, 1541 psizep, protp, mmu_idx, guest_visible); 1542 1543 default: 1544 return ppc_jumbo_xlate(cpu, eaddr, access_type, raddrp, 1545 psizep, protp, mmu_idx, guest_visible); 1546 } 1547 } 1548 1549 hwaddr ppc_cpu_get_phys_page_debug(CPUState *cs, vaddr addr) 1550 { 1551 PowerPCCPU *cpu = POWERPC_CPU(cs); 1552 hwaddr raddr; 1553 int s, p; 1554 1555 /* 1556 * Some MMUs have separate TLBs for code and data. If we only 1557 * try an MMU_DATA_LOAD, we may not be able to read instructions 1558 * mapped by code TLBs, so we also try a MMU_INST_FETCH. 1559 */ 1560 if (ppc_xlate(cpu, addr, MMU_DATA_LOAD, &raddr, &s, &p, 1561 cpu_mmu_index(&cpu->env, false), false) || 1562 ppc_xlate(cpu, addr, MMU_INST_FETCH, &raddr, &s, &p, 1563 cpu_mmu_index(&cpu->env, true), false)) { 1564 return raddr & TARGET_PAGE_MASK; 1565 } 1566 return -1; 1567 } 1568