/*
 * PowerPC MMU, TLB, SLB and BAT emulation helpers for QEMU.
 *
 * Copyright (c) 2003-2007 Jocelyn Mayer
 *
 * This library is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License as published by the Free Software Foundation; either
 * version 2.1 of the License, or (at your option) any later version.
 *
 * This library is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with this library; if not, see <http://www.gnu.org/licenses/>.
 */

#include "qemu/osdep.h"
#include "qemu/units.h"
#include "cpu.h"
#include "sysemu/kvm.h"
#include "kvm_ppc.h"
#include "mmu-hash64.h"
#include "mmu-hash32.h"
#include "exec/exec-all.h"
#include "exec/log.h"
#include "helper_regs.h"
#include "qemu/error-report.h"
#include "qemu/main-loop.h"
#include "qemu/qemu-print.h"
#include "internal.h"
#include "mmu-book3s-v3.h"
#include "mmu-radix64.h"

/* #define DUMP_PAGE_TABLES */

void ppc_store_sdr1(CPUPPCState *env, target_ulong value)
{
    PowerPCCPU *cpu = env_archcpu(env);
    qemu_log_mask(CPU_LOG_MMU, "%s: " TARGET_FMT_lx "\n", __func__, value);
    assert(!cpu->env.has_hv_mode || !cpu->vhyp);
#if defined(TARGET_PPC64)
    if (mmu_is_64bit(env->mmu_model)) {
        target_ulong sdr_mask = SDR_64_HTABORG | SDR_64_HTABSIZE;
        target_ulong htabsize = value & SDR_64_HTABSIZE;

        if (value & ~sdr_mask) {
            qemu_log_mask(LOG_GUEST_ERROR, "Invalid bits 0x"TARGET_FMT_lx
                          " set in SDR1", value & ~sdr_mask);
            value &= sdr_mask;
        }
        if (htabsize > 28) {
            qemu_log_mask(LOG_GUEST_ERROR, "Invalid HTABSIZE 0x" TARGET_FMT_lx
                          " stored in SDR1", htabsize);
            return;
        }
    }
#endif /* defined(TARGET_PPC64) */
    /* FIXME: Should check for valid HTABMASK values in 32-bit case */
    env->spr[SPR_SDR1] = value;
}

/*****************************************************************************/
/* PowerPC MMU emulation */

static int pp_check(int key, int pp, int nx)
{
    int access;

    /* Compute access rights */
    access = 0;
    if (key == 0) {
        switch (pp) {
        case 0x0:
        case 0x1:
        case 0x2:
            access |= PAGE_WRITE;
            /* fall through */
        case 0x3:
            access |= PAGE_READ;
            break;
        }
    } else {
        switch (pp) {
        case 0x0:
            access = 0;
            break;
        case 0x1:
        case 0x3:
            access = PAGE_READ;
            break;
        case 0x2:
            access = PAGE_READ | PAGE_WRITE;
            break;
        }
    }
    if (nx == 0) {
        access |= PAGE_EXEC;
    }

    return access;
}

static int check_prot(int prot, MMUAccessType access_type)
{
    return prot & prot_for_access_type(access_type) ? 0 : -2;
}

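/*
 * Compute the index of the software TLB entry covering this effective
 * address in the given way; code TLB entries follow the data TLB entries
 * when the core has split instruction/data TLBs.
 */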
int ppc6xx_tlb_getnum(CPUPPCState *env, target_ulong eaddr,
                      int way, int is_code)
{
    int nr;

    /* Select TLB num in a way from address */
    nr = (eaddr >> TARGET_PAGE_BITS) & (env->tlb_per_way - 1);
    /* Select TLB way */
    nr += env->tlb_per_way * way;
    /* 6xx have separate TLBs for instructions and data */
    if (is_code && env->id_tlbs == 1) {
        nr += env->nb_tlb;
    }

    return nr;
}

static int ppc6xx_tlb_pte_check(mmu_ctx_t *ctx, target_ulong pte0,
                                target_ulong pte1, int h,
                                MMUAccessType access_type)
{
    target_ulong ptem, mmask;
    int access, ret, pteh, ptev, pp;

    ret = -1;
    /* Check validity and table match */
    ptev = pte_is_valid(pte0);
    pteh = (pte0 >> 6) & 1;
    if (ptev && h == pteh) {
        /* Check vsid & api */
        ptem = pte0 & PTE_PTEM_MASK;
        mmask = PTE_CHECK_MASK;
        pp = pte1 & 0x00000003;
        if (ptem == ctx->ptem) {
            if (ctx->raddr != (hwaddr)-1ULL) {
                /* all matches should have equal RPN, WIMG & PP */
                if ((ctx->raddr & mmask) != (pte1 & mmask)) {
                    qemu_log_mask(CPU_LOG_MMU, "Bad RPN/WIMG/PP\n");
                    return -3;
                }
            }
            /* Compute access rights */
            access = pp_check(ctx->key, pp, ctx->nx);
            /* Keep the matching PTE information */
            ctx->raddr = pte1;
            ctx->prot = access;
            ret = check_prot(ctx->prot, access_type);
            if (ret == 0) {
                /* Access granted */
                qemu_log_mask(CPU_LOG_MMU, "PTE access granted !\n");
            } else {
                /* Access right violation */
                qemu_log_mask(CPU_LOG_MMU, "PTE access rejected\n");
            }
        }
    }

    return ret;
}

static int pte_update_flags(mmu_ctx_t *ctx, target_ulong *pte1p,
                            int ret, MMUAccessType access_type)
{
    int store = 0;

    /* Update page flags */
    if (!(*pte1p & 0x00000100)) {
        /* Update accessed flag */
        *pte1p |= 0x00000100;
        store = 1;
    }
    if (!(*pte1p & 0x00000080)) {
        if (access_type == MMU_DATA_STORE && ret == 0) {
            /* Update changed flag */
            *pte1p |= 0x00000080;
            store = 1;
        } else {
            /* Force page fault for first write access */
            ctx->prot &= ~PAGE_WRITE;
        }
    }

    return store;
}

/* Software driven TLB helpers */

static int ppc6xx_tlb_check(CPUPPCState *env, mmu_ctx_t *ctx,
                            target_ulong eaddr, MMUAccessType access_type)
{
    ppc6xx_tlb_t *tlb;
    int nr, best, way;
    int ret;

    best = -1;
    ret = -1; /* No TLB found */
    for (way = 0; way < env->nb_ways; way++) {
        nr = ppc6xx_tlb_getnum(env, eaddr, way, access_type == MMU_INST_FETCH);
        tlb = &env->tlb.tlb6[nr];
        /* This test "emulates" the PTE index match for hardware TLBs */
        if ((eaddr & TARGET_PAGE_MASK) != tlb->EPN) {
            qemu_log_mask(CPU_LOG_MMU, "TLB %d/%d %s [" TARGET_FMT_lx
                          " " TARGET_FMT_lx "] <> " TARGET_FMT_lx "\n",
                          nr, env->nb_tlb,
                          pte_is_valid(tlb->pte0) ? "valid" : "inval",
                          tlb->EPN, tlb->EPN + TARGET_PAGE_SIZE, eaddr);
            continue;
        }
        qemu_log_mask(CPU_LOG_MMU, "TLB %d/%d %s " TARGET_FMT_lx " <> "
                      TARGET_FMT_lx " " TARGET_FMT_lx " %c %c\n",
                      nr, env->nb_tlb,
                      pte_is_valid(tlb->pte0) ? "valid" : "inval",
                      tlb->EPN, eaddr, tlb->pte1,
                      access_type == MMU_DATA_STORE ? 'S' : 'L',
                      access_type == MMU_INST_FETCH ? 'I' : 'D');
        switch (ppc6xx_tlb_pte_check(ctx, tlb->pte0, tlb->pte1,
                                     0, access_type)) {
        case -3:
            /* TLB inconsistency */
            return -1;
        case -2:
            /* Access violation */
            ret = -2;
            best = nr;
            break;
        case -1:
        default:
            /* No match */
            break;
        case 0:
            /* access granted */
            /*
             * XXX: we should go on looping to check all TLBs for
             *      consistency, but we can speed up the whole thing as
             *      the result would be undefined if TLBs are not
             *      consistent.
             */
            ret = 0;
            best = nr;
            goto done;
        }
    }
    if (best != -1) {
 done:
        qemu_log_mask(CPU_LOG_MMU, "found TLB at addr " HWADDR_FMT_plx
                      " prot=%01x ret=%d\n",
                      ctx->raddr & TARGET_PAGE_MASK, ctx->prot, ret);
        /* Update page flags */
        pte_update_flags(ctx, &env->tlb.tlb6[best].pte1, ret, access_type);
    }

    return ret;
}

/* Perform BAT hit & translation */
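/* Decode block length, validity and protection from a BAT register pair. */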
static inline void bat_size_prot(CPUPPCState *env, target_ulong *blp,
                                 int *validp, int *protp, target_ulong *BATu,
                                 target_ulong *BATl)
{
    target_ulong bl;
    int pp, valid, prot;

    bl = (*BATu & 0x00001FFC) << 15;
    valid = 0;
    prot = 0;
    if ((!FIELD_EX64(env->msr, MSR, PR) && (*BATu & 0x00000002)) ||
        (FIELD_EX64(env->msr, MSR, PR) && (*BATu & 0x00000001))) {
        valid = 1;
        pp = *BATl & 0x00000003;
        if (pp != 0) {
            prot = PAGE_READ | PAGE_EXEC;
            if (pp == 0x2) {
                prot |= PAGE_WRITE;
            }
        }
    }
    *blp = bl;
    *validp = valid;
    *protp = prot;
}

static int get_bat_6xx_tlb(CPUPPCState *env, mmu_ctx_t *ctx,
                           target_ulong virtual, MMUAccessType access_type)
{
    target_ulong *BATlt, *BATut, *BATu, *BATl;
    target_ulong BEPIl, BEPIu, bl;
    int i, valid, prot;
    int ret = -1;
    bool ifetch = access_type == MMU_INST_FETCH;

    qemu_log_mask(CPU_LOG_MMU, "%s: %cBAT v " TARGET_FMT_lx "\n", __func__,
                  ifetch ? 'I' : 'D', virtual);
    if (ifetch) {
        BATlt = env->IBAT[1];
        BATut = env->IBAT[0];
    } else {
        BATlt = env->DBAT[1];
        BATut = env->DBAT[0];
    }
    for (i = 0; i < env->nb_BATs; i++) {
        BATu = &BATut[i];
        BATl = &BATlt[i];
        BEPIu = *BATu & 0xF0000000;
        BEPIl = *BATu & 0x0FFE0000;
        bat_size_prot(env, &bl, &valid, &prot, BATu, BATl);
        qemu_log_mask(CPU_LOG_MMU, "%s: %cBAT%d v " TARGET_FMT_lx " BATu "
                      TARGET_FMT_lx " BATl " TARGET_FMT_lx "\n", __func__,
                      ifetch ? 'I' : 'D', i, virtual, *BATu, *BATl);
        if ((virtual & 0xF0000000) == BEPIu &&
            ((virtual & 0x0FFE0000) & ~bl) == BEPIl) {
            /* BAT matches */
            if (valid != 0) {
                /* Get physical address */
                ctx->raddr = (*BATl & 0xF0000000) |
                    ((virtual & 0x0FFE0000 & bl) | (*BATl & 0x0FFE0000)) |
                    (virtual & 0x0001F000);
                /* Compute access rights */
                ctx->prot = prot;
                ret = check_prot(ctx->prot, access_type);
                if (ret == 0) {
                    qemu_log_mask(CPU_LOG_MMU, "BAT %d match: r " HWADDR_FMT_plx
                                  " prot=%c%c\n", i, ctx->raddr,
                                  ctx->prot & PAGE_READ ? 'R' : '-',
                                  ctx->prot & PAGE_WRITE ? 'W' : '-');
                }
                break;
            }
        }
    }
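    /*
     * On failure, dump all four BAT register pairs when MMU logging is
     * enabled to help diagnose the miss.
     */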
    if (ret < 0) {
        if (qemu_log_enabled()) {
            qemu_log_mask(CPU_LOG_MMU, "no BAT match for "
                          TARGET_FMT_lx ":\n", virtual);
            for (i = 0; i < 4; i++) {
                BATu = &BATut[i];
                BATl = &BATlt[i];
                BEPIu = *BATu & 0xF0000000;
                BEPIl = *BATu & 0x0FFE0000;
                bl = (*BATu & 0x00001FFC) << 15;
                qemu_log_mask(CPU_LOG_MMU, "%s: %cBAT%d v "
                              TARGET_FMT_lx " BATu " TARGET_FMT_lx
                              " BATl " TARGET_FMT_lx "\n\t" TARGET_FMT_lx " "
                              TARGET_FMT_lx " " TARGET_FMT_lx "\n",
                              __func__, ifetch ? 'I' : 'D', i, virtual,
                              *BATu, *BATl, BEPIu, BEPIl, bl);
            }
        }
    }
    /* No hit */
    return ret;
}

/* Perform segment based translation */
static int get_segment_6xx_tlb(CPUPPCState *env, mmu_ctx_t *ctx,
                               target_ulong eaddr, MMUAccessType access_type,
                               int type)
{
    PowerPCCPU *cpu = env_archcpu(env);
    hwaddr hash;
    target_ulong vsid;
    int ds, target_page_bits;
    bool pr;
    int ret;
    target_ulong sr, pgidx;

    pr = FIELD_EX64(env->msr, MSR, PR);
    ctx->eaddr = eaddr;

    sr = env->sr[eaddr >> 28];
    ctx->key = (((sr & 0x20000000) && pr) ||
                ((sr & 0x40000000) && !pr)) ? 1 : 0;
    ds = sr & 0x80000000 ? 1 : 0;
    ctx->nx = sr & 0x10000000 ? 1 : 0;
    vsid = sr & 0x00FFFFFF;
    target_page_bits = TARGET_PAGE_BITS;
    qemu_log_mask(CPU_LOG_MMU,
                  "Check segment v=" TARGET_FMT_lx " %d " TARGET_FMT_lx
                  " nip=" TARGET_FMT_lx " lr=" TARGET_FMT_lx
                  " ir=%d dr=%d pr=%d %d t=%d\n",
                  eaddr, (int)(eaddr >> 28), sr, env->nip, env->lr,
                  (int)FIELD_EX64(env->msr, MSR, IR),
                  (int)FIELD_EX64(env->msr, MSR, DR), pr ? 1 : 0,
                  access_type == MMU_DATA_STORE, type);
    pgidx = (eaddr & ~SEGMENT_MASK_256M) >> target_page_bits;
    hash = vsid ^ pgidx;
    ctx->ptem = (vsid << 7) | (pgidx >> 10);

    qemu_log_mask(CPU_LOG_MMU,
                  "pte segment: key=%d ds %d nx %d vsid " TARGET_FMT_lx "\n",
                  ctx->key, ds, ctx->nx, vsid);
    ret = -1;
    if (!ds) {
        /* Check if instruction fetch is allowed, if needed */
        if (type != ACCESS_CODE || ctx->nx == 0) {
            /* Page address translation */
            qemu_log_mask(CPU_LOG_MMU, "htab_base " HWADDR_FMT_plx
                          " htab_mask " HWADDR_FMT_plx
                          " hash " HWADDR_FMT_plx "\n",
                          ppc_hash32_hpt_base(cpu), ppc_hash32_hpt_mask(cpu), hash);
            ctx->hash[0] = hash;
            ctx->hash[1] = ~hash;

            /* Initialize real address with an invalid value */
            ctx->raddr = (hwaddr)-1ULL;
            /* Software TLB search */
            ret = ppc6xx_tlb_check(env, ctx, eaddr, access_type);
#if defined(DUMP_PAGE_TABLES)
            if (qemu_loglevel_mask(CPU_LOG_MMU)) {
                CPUState *cs = env_cpu(env);
                hwaddr curaddr;
                uint32_t a0, a1, a2, a3;

                qemu_log("Page table: " HWADDR_FMT_plx " len " HWADDR_FMT_plx
                         "\n", ppc_hash32_hpt_base(cpu),
                         ppc_hash32_hpt_mask(cpu) + 0x80);
                for (curaddr = ppc_hash32_hpt_base(cpu);
                     curaddr < (ppc_hash32_hpt_base(cpu)
                                + ppc_hash32_hpt_mask(cpu) + 0x80);
                     curaddr += 16) {
                    a0 = ldl_phys(cs->as, curaddr);
                    a1 = ldl_phys(cs->as, curaddr + 4);
                    a2 = ldl_phys(cs->as, curaddr + 8);
                    a3 = ldl_phys(cs->as, curaddr + 12);
                    if (a0 != 0 || a1 != 0 || a2 != 0 || a3 != 0) {
                        qemu_log(HWADDR_FMT_plx ": %08x %08x %08x %08x\n",
                                 curaddr, a0, a1, a2, a3);
                    }
                }
            }
#endif
        } else {
            qemu_log_mask(CPU_LOG_MMU, "No access allowed\n");
            ret = -3;
        }
    } else {
        qemu_log_mask(CPU_LOG_MMU, "direct store...\n");
        /* Direct-store segment : absolutely *BUGGY* for now */

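        /*
         * Only integer accesses fall through to the key check below, and
         * cache management ops are treated as no-ops; every other access
         * type raises a direct-store exception (-4).
         */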
        switch (type) {
        case ACCESS_INT:
            /* Integer load/store : only access allowed */
            break;
        case ACCESS_CODE:
            /* No code fetch is allowed in direct-store areas */
            return -4;
        case ACCESS_FLOAT:
            /* Floating point load/store */
            return -4;
        case ACCESS_RES:
            /* lwarx, ldarx or stwcx. */
            return -4;
        case ACCESS_CACHE:
            /*
             * dcba, dcbt, dcbtst, dcbf, dcbi, dcbst, dcbz, or icbi
             *
             * Should make the instruction a no-op. As it already does
             * nothing, it's quite easy :-)
             */
            ctx->raddr = eaddr;
            return 0;
        case ACCESS_EXT:
            /* eciwx or ecowx */
            return -4;
        default:
            qemu_log_mask(CPU_LOG_MMU, "ERROR: instruction should not need "
                          "address translation\n");
            return -4;
        }
        if ((access_type == MMU_DATA_STORE || ctx->key != 1) &&
            (access_type == MMU_DATA_LOAD || ctx->key != 0)) {
            ctx->raddr = eaddr;
            ret = 2;
        } else {
            ret = -2;
        }
    }

    return ret;
}

/* Generic TLB check function for embedded PowerPC implementations */
static bool ppcemb_tlb_check(CPUPPCState *env, ppcemb_tlb_t *tlb,
                             hwaddr *raddrp,
                             target_ulong address, uint32_t pid, int i)
{
    target_ulong mask;

    /* Check valid flag */
    if (!(tlb->prot & PAGE_VALID)) {
        return false;
    }
    mask = ~(tlb->size - 1);
    qemu_log_mask(CPU_LOG_MMU, "%s: TLB %d address " TARGET_FMT_lx
                  " PID %u <=> " TARGET_FMT_lx " " TARGET_FMT_lx " %u %x\n",
                  __func__, i, address, pid, tlb->EPN,
                  mask, (uint32_t)tlb->PID, tlb->prot);
    /* Check PID */
    if (tlb->PID != 0 && tlb->PID != pid) {
        return false;
    }
    /* Check effective address */
    if ((address & mask) != tlb->EPN) {
        return false;
    }
    *raddrp = (tlb->RPN & mask) | (address & ~mask);
    return true;
}

/* Generic TLB search function for PowerPC embedded implementations */
int ppcemb_tlb_search(CPUPPCState *env, target_ulong address, uint32_t pid)
{
    ppcemb_tlb_t *tlb;
    hwaddr raddr;
    int i;

    for (i = 0; i < env->nb_tlb; i++) {
        tlb = &env->tlb.tlbe[i];
        if (ppcemb_tlb_check(env, tlb, &raddr, address, pid, i)) {
            return i;
        }
    }
    return -1;
}

static int mmu40x_get_physical_address(CPUPPCState *env, mmu_ctx_t *ctx,
                                       target_ulong address,
                                       MMUAccessType access_type)
{
    ppcemb_tlb_t *tlb;
    hwaddr raddr;
    int i, ret, zsel, zpr, pr;

    ret = -1;
    raddr = (hwaddr)-1ULL;
    pr = FIELD_EX64(env->msr, MSR, PR);
    for (i = 0; i < env->nb_tlb; i++) {
        tlb = &env->tlb.tlbe[i];
        if (!ppcemb_tlb_check(env, tlb, &raddr, address,
                              env->spr[SPR_40x_PID], i)) {
            continue;
        }
        zsel = (tlb->attr >> 4) & 0xF;
        zpr = (env->spr[SPR_40x_ZPR] >> (30 - (2 * zsel))) & 0x3;
        qemu_log_mask(CPU_LOG_MMU,
                      "%s: TLB %d zsel %d zpr %d ty %d attr %08x\n",
                      __func__, i, zsel, zpr, access_type, tlb->attr);
        /* Check execute enable bit */
        switch (zpr) {
        case 0x2:
            if (pr != 0) {
                goto check_perms;
            }
            /* fall through */
        case 0x3:
            /* All accesses granted */
            ctx->prot = PAGE_READ | PAGE_WRITE | PAGE_EXEC;
            ret = 0;
            break;
        case 0x0:
            if (pr != 0) {
                /* Raise Zone protection fault. */
                env->spr[SPR_40x_ESR] = 1 << 22;
                ctx->prot = 0;
                ret = -2;
                break;
            }
            /* fall through */
        case 0x1:
        check_perms:
            /* Check from TLB entry */
            ctx->prot = tlb->prot;
            ret = check_prot(ctx->prot, access_type);
            if (ret == -2) {
                env->spr[SPR_40x_ESR] = 0;
            }
            break;
        }
        if (ret >= 0) {
            ctx->raddr = raddr;
            qemu_log_mask(CPU_LOG_MMU, "%s: access granted " TARGET_FMT_lx
                          " => " HWADDR_FMT_plx
                          " %d %d\n", __func__, address, ctx->raddr, ctx->prot,
                          ret);
            return 0;
        }
    }
    qemu_log_mask(CPU_LOG_MMU, "%s: access refused " TARGET_FMT_lx
                  " => " HWADDR_FMT_plx
                  " %d %d\n", __func__, address, raddr, ctx->prot, ret);

    return ret;
}

static bool mmubooke_check_pid(CPUPPCState *env, ppcemb_tlb_t *tlb,
                               hwaddr *raddr, target_ulong addr, int i)
{
    if (ppcemb_tlb_check(env, tlb, raddr, addr, env->spr[SPR_BOOKE_PID], i)) {
        if (!env->nb_pids) {
            /* Extend the physical address to 36 bits */
            *raddr |= (uint64_t)(tlb->RPN & 0xF) << 32;
        }
        return true;
    } else if (!env->nb_pids) {
        return false;
    }
    if (env->spr[SPR_BOOKE_PID1] &&
        ppcemb_tlb_check(env, tlb, raddr, addr, env->spr[SPR_BOOKE_PID1], i)) {
        return true;
    }
    if (env->spr[SPR_BOOKE_PID2] &&
        ppcemb_tlb_check(env, tlb, raddr, addr, env->spr[SPR_BOOKE_PID2], i)) {
        return true;
    }
    return false;
}

static int mmubooke_check_tlb(CPUPPCState *env, ppcemb_tlb_t *tlb,
                              hwaddr *raddr, int *prot, target_ulong address,
                              MMUAccessType access_type, int i)
{
    int prot2;

    if (!mmubooke_check_pid(env, tlb, raddr, address, i)) {
        qemu_log_mask(CPU_LOG_MMU, "%s: TLB entry not found\n", __func__);
        return -1;
    }

    if (FIELD_EX64(env->msr, MSR, PR)) {
        prot2 = tlb->prot & 0xF;
    } else {
        prot2 = (tlb->prot >> 4) & 0xF;
    }

    /* Check the address space */
    if ((access_type == MMU_INST_FETCH ?
         FIELD_EX64(env->msr, MSR, IR) :
         FIELD_EX64(env->msr, MSR, DR)) != (tlb->attr & 1)) {
        qemu_log_mask(CPU_LOG_MMU, "%s: AS doesn't match\n", __func__);
        return -1;
    }

    *prot = prot2;
    if (prot2 & prot_for_access_type(access_type)) {
        qemu_log_mask(CPU_LOG_MMU, "%s: good TLB!\n", __func__);
        return 0;
    }

    qemu_log_mask(CPU_LOG_MMU, "%s: no prot match: %x\n", __func__, prot2);
    return access_type == MMU_INST_FETCH ? -3 : -2;
}

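/* Linearly search the BookE software TLB for a translation of 'address'. */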
static int mmubooke_get_physical_address(CPUPPCState *env, mmu_ctx_t *ctx,
                                         target_ulong address,
                                         MMUAccessType access_type)
{
    ppcemb_tlb_t *tlb;
    hwaddr raddr;
    int i, ret;

    ret = -1;
    raddr = (hwaddr)-1ULL;
    for (i = 0; i < env->nb_tlb; i++) {
        tlb = &env->tlb.tlbe[i];
        ret = mmubooke_check_tlb(env, tlb, &raddr, &ctx->prot, address,
                                 access_type, i);
        if (ret != -1) {
            break;
        }
    }

    if (ret >= 0) {
        ctx->raddr = raddr;
        qemu_log_mask(CPU_LOG_MMU, "%s: access granted " TARGET_FMT_lx
                      " => " HWADDR_FMT_plx " %d %d\n", __func__,
                      address, ctx->raddr, ctx->prot, ret);
    } else {
        qemu_log_mask(CPU_LOG_MMU, "%s: access refused " TARGET_FMT_lx
                      " => " HWADDR_FMT_plx " %d %d\n", __func__,
                      address, raddr, ctx->prot, ret);
    }

    return ret;
}

hwaddr booke206_tlb_to_page_size(CPUPPCState *env, ppcmas_tlb_t *tlb)
{
    int tlbm_size;

    tlbm_size = (tlb->mas1 & MAS1_TSIZE_MASK) >> MAS1_TSIZE_SHIFT;

    return 1024ULL << tlbm_size;
}

/* TLB check function for MAS based SoftTLBs */
int ppcmas_tlb_check(CPUPPCState *env, ppcmas_tlb_t *tlb, hwaddr *raddrp,
                     target_ulong address, uint32_t pid)
{
    hwaddr mask;
    uint32_t tlb_pid;

    if (!FIELD_EX64(env->msr, MSR, CM)) {
        /* In 32bit mode we can only address 32bit EAs */
        address = (uint32_t)address;
    }

    /* Check valid flag */
    if (!(tlb->mas1 & MAS1_VALID)) {
        return -1;
    }

    mask = ~(booke206_tlb_to_page_size(env, tlb) - 1);
    qemu_log_mask(CPU_LOG_MMU, "%s: TLB ADDR=0x" TARGET_FMT_lx
                  " PID=0x%x MAS1=0x%x MAS2=0x%" PRIx64 " mask=0x%"
                  HWADDR_PRIx " MAS7_3=0x%" PRIx64 " MAS8=0x%" PRIx32 "\n",
                  __func__, address, pid, tlb->mas1, tlb->mas2, mask,
                  tlb->mas7_3, tlb->mas8);

    /* Check PID */
    tlb_pid = (tlb->mas1 & MAS1_TID_MASK) >> MAS1_TID_SHIFT;
    if (tlb_pid != 0 && tlb_pid != pid) {
        return -1;
    }

    /* Check effective address */
    if ((address & mask) != (tlb->mas2 & MAS2_EPN_MASK)) {
        return -1;
    }

    if (raddrp) {
        *raddrp = (tlb->mas7_3 & mask) | (address & ~mask);
    }

    return 0;
}

static bool is_epid_mmu(int mmu_idx)
{
    return mmu_idx == PPC_TLB_EPID_STORE || mmu_idx == PPC_TLB_EPID_LOAD;
}

static uint32_t mmubooke206_esr(int mmu_idx, MMUAccessType access_type)
{
    uint32_t esr = 0;
    if (access_type == MMU_DATA_STORE) {
        esr |= ESR_ST;
    }
    if (is_epid_mmu(mmu_idx)) {
        esr |= ESR_EPID;
    }
    return esr;
}

/*
 * Get the EPID register given the mmu_idx. If this is a regular load or
 * store, construct the EPID access bits from the current processor state.
 *
 * Get the effective AS and PR bits and the PID. The PID is returned
 * only if EPID load is requested, otherwise the caller must detect
 * the correct EPID. Return true if a valid EPID is returned.
 */
static bool mmubooke206_get_as(CPUPPCState *env,
                               int mmu_idx, uint32_t *epid_out,
                               bool *as_out, bool *pr_out)
{
    if (is_epid_mmu(mmu_idx)) {
        uint32_t epidr;
        if (mmu_idx == PPC_TLB_EPID_STORE) {
            epidr = env->spr[SPR_BOOKE_EPSC];
        } else {
            epidr = env->spr[SPR_BOOKE_EPLC];
        }
        *epid_out = (epidr & EPID_EPID) >> EPID_EPID_SHIFT;
        *as_out = !!(epidr & EPID_EAS);
        *pr_out = !!(epidr & EPID_EPR);
        return true;
    } else {
        *as_out = FIELD_EX64(env->msr, MSR, DS);
        *pr_out = FIELD_EX64(env->msr, MSR, PR);
        return false;
    }
}

/* Check if the tlb found by hashing really matches */
static int mmubooke206_check_tlb(CPUPPCState *env, ppcmas_tlb_t *tlb,
                                 hwaddr *raddr, int *prot,
                                 target_ulong address,
                                 MMUAccessType access_type, int mmu_idx)
{
    int prot2 = 0;
    uint32_t epid;
    bool as, pr;
    bool use_epid = mmubooke206_get_as(env, mmu_idx, &epid, &as, &pr);

    if (!use_epid) {
        if (ppcmas_tlb_check(env, tlb, raddr, address,
                             env->spr[SPR_BOOKE_PID]) >= 0) {
            goto found_tlb;
        }

        if (env->spr[SPR_BOOKE_PID1] &&
            ppcmas_tlb_check(env, tlb, raddr, address,
                             env->spr[SPR_BOOKE_PID1]) >= 0) {
            goto found_tlb;
        }

        if (env->spr[SPR_BOOKE_PID2] &&
            ppcmas_tlb_check(env, tlb, raddr, address,
                             env->spr[SPR_BOOKE_PID2]) >= 0) {
            goto found_tlb;
        }
    } else {
        if (ppcmas_tlb_check(env, tlb, raddr, address, epid) >= 0) {
            goto found_tlb;
        }
    }

    qemu_log_mask(CPU_LOG_MMU, "%s: No TLB entry found for effective address "
                  "0x" TARGET_FMT_lx "\n", __func__, address);
    return -1;

found_tlb:

    if (pr) {
        if (tlb->mas7_3 & MAS3_UR) {
            prot2 |= PAGE_READ;
        }
        if (tlb->mas7_3 & MAS3_UW) {
            prot2 |= PAGE_WRITE;
        }
        if (tlb->mas7_3 & MAS3_UX) {
            prot2 |= PAGE_EXEC;
        }
    } else {
        if (tlb->mas7_3 & MAS3_SR) {
            prot2 |= PAGE_READ;
        }
        if (tlb->mas7_3 & MAS3_SW) {
            prot2 |= PAGE_WRITE;
        }
        if (tlb->mas7_3 & MAS3_SX) {
            prot2 |= PAGE_EXEC;
        }
    }

    /* Check the address space and permissions */
    if (access_type == MMU_INST_FETCH) {
        /* There is no way to fetch code using epid load */
        assert(!use_epid);
        as = FIELD_EX64(env->msr, MSR, IR);
    }

    if (as != ((tlb->mas1 & MAS1_TS) >> MAS1_TS_SHIFT)) {
        qemu_log_mask(CPU_LOG_MMU, "%s: AS doesn't match\n", __func__);
        return -1;
    }

    *prot = prot2;
    if (prot2 & prot_for_access_type(access_type)) {
        qemu_log_mask(CPU_LOG_MMU, "%s: good TLB!\n", __func__);
        return 0;
    }

    qemu_log_mask(CPU_LOG_MMU, "%s: no prot match: %x\n", __func__, prot2);
    return access_type == MMU_INST_FETCH ? -3 : -2;
}

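/* Search every BookE 2.06 TLB array and way for a matching entry. */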
static int mmubooke206_get_physical_address(CPUPPCState *env, mmu_ctx_t *ctx,
                                            target_ulong address,
                                            MMUAccessType access_type,
                                            int mmu_idx)
{
    ppcmas_tlb_t *tlb;
    hwaddr raddr;
    int i, j, ret;

    ret = -1;
    raddr = (hwaddr)-1ULL;

    for (i = 0; i < BOOKE206_MAX_TLBN; i++) {
        int ways = booke206_tlb_ways(env, i);

        for (j = 0; j < ways; j++) {
            tlb = booke206_get_tlbm(env, i, address, j);
            if (!tlb) {
                continue;
            }
            ret = mmubooke206_check_tlb(env, tlb, &raddr, &ctx->prot, address,
                                        access_type, mmu_idx);
            if (ret != -1) {
                goto found_tlb;
            }
        }
    }

found_tlb:

    if (ret >= 0) {
        ctx->raddr = raddr;
        qemu_log_mask(CPU_LOG_MMU, "%s: access granted " TARGET_FMT_lx
                      " => " HWADDR_FMT_plx " %d %d\n", __func__, address,
                      ctx->raddr, ctx->prot, ret);
    } else {
        qemu_log_mask(CPU_LOG_MMU, "%s: access refused " TARGET_FMT_lx
                      " => " HWADDR_FMT_plx " %d %d\n", __func__, address,
                      raddr, ctx->prot, ret);
    }

    return ret;
}

static const char *book3e_tsize_to_str[32] = {
    "1K", "2K", "4K", "8K", "16K", "32K", "64K", "128K", "256K", "512K",
    "1M", "2M", "4M", "8M", "16M", "32M", "64M", "128M", "256M", "512M",
    "1G", "2G", "4G", "8G", "16G", "32G", "64G", "128G", "256G", "512G",
    "1T", "2T"
};

static void mmubooke_dump_mmu(CPUPPCState *env)
{
    ppcemb_tlb_t *entry;
    int i;

#ifdef CONFIG_KVM
    if (kvm_enabled() && !env->kvm_sw_tlb) {
        qemu_printf("Cannot access KVM TLB\n");
        return;
    }
#endif

    qemu_printf("\nTLB:\n");
    qemu_printf("Effective Physical Size PID Prot "
                "Attr\n");

    entry = &env->tlb.tlbe[0];
    for (i = 0; i < env->nb_tlb; i++, entry++) {
        hwaddr ea, pa;
        target_ulong mask;
        uint64_t size = (uint64_t)entry->size;
        char size_buf[20];

        /* Check valid flag */
        if (!(entry->prot & PAGE_VALID)) {
            continue;
        }

        mask = ~(entry->size - 1);
        ea = entry->EPN & mask;
        pa = entry->RPN & mask;
        /* Extend the physical address to 36 bits */
        pa |= (hwaddr)(entry->RPN & 0xF) << 32;
        if (size >= 1 * MiB) {
            snprintf(size_buf, sizeof(size_buf), "%3" PRId64 "M", size / MiB);
        } else {
            snprintf(size_buf, sizeof(size_buf), "%3" PRId64 "k", size / KiB);
        }
        qemu_printf("0x%016" PRIx64 " 0x%016" PRIx64 " %s %-5u %08x %08x\n",
                    (uint64_t)ea, (uint64_t)pa, size_buf, (uint32_t)entry->PID,
                    entry->prot, entry->attr);
    }

}

static void mmubooke206_dump_one_tlb(CPUPPCState *env, int tlbn, int offset,
                                     int tlbsize)
{
    ppcmas_tlb_t *entry;
    int i;

    qemu_printf("\nTLB%d:\n", tlbn);
    qemu_printf("Effective Physical Size TID TS SRWX"
                " URWX WIMGE U0123\n");

    entry = &env->tlb.tlbm[offset];
    for (i = 0; i < tlbsize; i++, entry++) {
        hwaddr ea, pa, size;
        int tsize;

        if (!(entry->mas1 & MAS1_VALID)) {
            continue;
        }

        tsize = (entry->mas1 & MAS1_TSIZE_MASK) >> MAS1_TSIZE_SHIFT;
        size = 1024ULL << tsize;
        ea = entry->mas2 & ~(size - 1);
        pa = entry->mas7_3 & ~(size - 1);

        qemu_printf("0x%016" PRIx64 " 0x%016" PRIx64 " %4s %-5u %1u S%c%c%c"
                    " U%c%c%c %c%c%c%c%c U%c%c%c%c\n",
                    (uint64_t)ea, (uint64_t)pa,
                    book3e_tsize_to_str[tsize],
                    (entry->mas1 & MAS1_TID_MASK) >> MAS1_TID_SHIFT,
                    (entry->mas1 & MAS1_TS) >> MAS1_TS_SHIFT,
                    entry->mas7_3 & MAS3_SR ? 'R' : '-',
                    entry->mas7_3 & MAS3_SW ? 'W' : '-',
                    entry->mas7_3 & MAS3_SX ? 'X' : '-',
                    entry->mas7_3 & MAS3_UR ? 'R' : '-',
                    entry->mas7_3 & MAS3_UW ? 'W' : '-',
                    entry->mas7_3 & MAS3_UX ? 'X' : '-',
                    entry->mas2 & MAS2_W ? 'W' : '-',
                    entry->mas2 & MAS2_I ? 'I' : '-',
                    entry->mas2 & MAS2_M ? 'M' : '-',
                    entry->mas2 & MAS2_G ? 'G' : '-',
                    entry->mas2 & MAS2_E ? 'E' : '-',
                    entry->mas7_3 & MAS3_U0 ? '0' : '-',
                    entry->mas7_3 & MAS3_U1 ? '1' : '-',
                    entry->mas7_3 & MAS3_U2 ? '2' : '-',
                    entry->mas7_3 & MAS3_U3 ? '3' : '-');
    }
}

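/* Dump all non-empty TLB arrays of a BookE 2.06 (MAS-based) MMU. */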
static void mmubooke206_dump_mmu(CPUPPCState *env)
{
    int offset = 0;
    int i;

#ifdef CONFIG_KVM
    if (kvm_enabled() && !env->kvm_sw_tlb) {
        qemu_printf("Cannot access KVM TLB\n");
        return;
    }
#endif

    for (i = 0; i < BOOKE206_MAX_TLBN; i++) {
        int size = booke206_tlb_size(env, i);

        if (size == 0) {
            continue;
        }

        mmubooke206_dump_one_tlb(env, i, offset, size);
        offset += size;
    }
}

static void mmu6xx_dump_BATs(CPUPPCState *env, int type)
{
    target_ulong *BATlt, *BATut, *BATu, *BATl;
    target_ulong BEPIl, BEPIu, bl;
    int i;

    switch (type) {
    case ACCESS_CODE:
        BATlt = env->IBAT[1];
        BATut = env->IBAT[0];
        break;
    default:
        BATlt = env->DBAT[1];
        BATut = env->DBAT[0];
        break;
    }

    for (i = 0; i < env->nb_BATs; i++) {
        BATu = &BATut[i];
        BATl = &BATlt[i];
        BEPIu = *BATu & 0xF0000000;
        BEPIl = *BATu & 0x0FFE0000;
        bl = (*BATu & 0x00001FFC) << 15;
        qemu_printf("%s BAT%d BATu " TARGET_FMT_lx
                    " BATl " TARGET_FMT_lx "\n\t" TARGET_FMT_lx " "
                    TARGET_FMT_lx " " TARGET_FMT_lx "\n",
                    type == ACCESS_CODE ? "code" : "data", i,
                    *BATu, *BATl, BEPIu, BEPIl, bl);
    }
}

static void mmu6xx_dump_mmu(CPUPPCState *env)
{
    PowerPCCPU *cpu = env_archcpu(env);
    ppc6xx_tlb_t *tlb;
    target_ulong sr;
    int type, way, entry, i;

    qemu_printf("HTAB base = 0x%"HWADDR_PRIx"\n", ppc_hash32_hpt_base(cpu));
    qemu_printf("HTAB mask = 0x%"HWADDR_PRIx"\n", ppc_hash32_hpt_mask(cpu));

    qemu_printf("\nSegment registers:\n");
    for (i = 0; i < 32; i++) {
        sr = env->sr[i];
        if (sr & 0x80000000) {
            qemu_printf("%02d T=%d Ks=%d Kp=%d BUID=0x%03x "
                        "CNTLR_SPEC=0x%05x\n", i,
                        sr & 0x80000000 ? 1 : 0, sr & 0x40000000 ? 1 : 0,
                        sr & 0x20000000 ? 1 : 0, (uint32_t)((sr >> 20) & 0x1FF),
                        (uint32_t)(sr & 0xFFFFF));
        } else {
            qemu_printf("%02d T=%d Ks=%d Kp=%d N=%d VSID=0x%06x\n", i,
                        sr & 0x80000000 ? 1 : 0, sr & 0x40000000 ? 1 : 0,
                        sr & 0x20000000 ? 1 : 0, sr & 0x10000000 ? 1 : 0,
                        (uint32_t)(sr & 0x00FFFFFF));
        }
    }

    qemu_printf("\nBATs:\n");
    mmu6xx_dump_BATs(env, ACCESS_INT);
    mmu6xx_dump_BATs(env, ACCESS_CODE);

    if (env->id_tlbs != 1) {
        qemu_printf("ERROR: 6xx MMU should have separated TLB"
                    " for code and data\n");
    }

    qemu_printf("\nTLBs [EPN EPN + SIZE]\n");

    for (type = 0; type < 2; type++) {
        for (way = 0; way < env->nb_ways; way++) {
            for (entry = env->nb_tlb * type + env->tlb_per_way * way;
                 entry < (env->nb_tlb * type + env->tlb_per_way * (way + 1));
                 entry++) {

                tlb = &env->tlb.tlb6[entry];
                qemu_printf("%s TLB %02d/%02d way:%d %s ["
                            TARGET_FMT_lx " " TARGET_FMT_lx "]\n",
                            type ? "code" : "data", entry % env->nb_tlb,
                            env->nb_tlb, way,
                            pte_is_valid(tlb->pte0) ? "valid" : "inval",
                            tlb->EPN, tlb->EPN + TARGET_PAGE_SIZE);
            }
        }
    }
}

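/* Dispatch MMU state dumping to the routine matching the MMU model. */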
"code" : "data", entry % env->nb_tlb, 1124 env->nb_tlb, way, 1125 pte_is_valid(tlb->pte0) ? "valid" : "inval", 1126 tlb->EPN, tlb->EPN + TARGET_PAGE_SIZE); 1127 } 1128 } 1129 } 1130 } 1131 1132 void dump_mmu(CPUPPCState *env) 1133 { 1134 switch (env->mmu_model) { 1135 case POWERPC_MMU_BOOKE: 1136 mmubooke_dump_mmu(env); 1137 break; 1138 case POWERPC_MMU_BOOKE206: 1139 mmubooke206_dump_mmu(env); 1140 break; 1141 case POWERPC_MMU_SOFT_6xx: 1142 mmu6xx_dump_mmu(env); 1143 break; 1144 #if defined(TARGET_PPC64) 1145 case POWERPC_MMU_64B: 1146 case POWERPC_MMU_2_03: 1147 case POWERPC_MMU_2_06: 1148 case POWERPC_MMU_2_07: 1149 dump_slb(env_archcpu(env)); 1150 break; 1151 case POWERPC_MMU_3_00: 1152 if (ppc64_v3_radix(env_archcpu(env))) { 1153 qemu_log_mask(LOG_UNIMP, "%s: the PPC64 MMU is unsupported\n", 1154 __func__); 1155 } else { 1156 dump_slb(env_archcpu(env)); 1157 } 1158 break; 1159 #endif 1160 default: 1161 qemu_log_mask(LOG_UNIMP, "%s: unimplemented\n", __func__); 1162 } 1163 } 1164 1165 static int check_physical(CPUPPCState *env, mmu_ctx_t *ctx, target_ulong eaddr, 1166 MMUAccessType access_type) 1167 { 1168 ctx->raddr = eaddr; 1169 ctx->prot = PAGE_READ | PAGE_EXEC; 1170 1171 switch (env->mmu_model) { 1172 case POWERPC_MMU_SOFT_6xx: 1173 case POWERPC_MMU_SOFT_4xx: 1174 case POWERPC_MMU_REAL: 1175 case POWERPC_MMU_BOOKE: 1176 ctx->prot |= PAGE_WRITE; 1177 break; 1178 1179 default: 1180 /* Caller's checks mean we should never get here for other models */ 1181 g_assert_not_reached(); 1182 } 1183 1184 return 0; 1185 } 1186 1187 int get_physical_address_wtlb(CPUPPCState *env, mmu_ctx_t *ctx, 1188 target_ulong eaddr, 1189 MMUAccessType access_type, int type, 1190 int mmu_idx) 1191 { 1192 int ret = -1; 1193 bool real_mode = (type == ACCESS_CODE && !FIELD_EX64(env->msr, MSR, IR)) || 1194 (type != ACCESS_CODE && !FIELD_EX64(env->msr, MSR, DR)); 1195 1196 switch (env->mmu_model) { 1197 case POWERPC_MMU_SOFT_6xx: 1198 if (real_mode) { 1199 ret = check_physical(env, ctx, eaddr, access_type); 1200 } else { 1201 /* Try to find a BAT */ 1202 if (env->nb_BATs != 0) { 1203 ret = get_bat_6xx_tlb(env, ctx, eaddr, access_type); 1204 } 1205 if (ret < 0) { 1206 /* We didn't match any BAT entry or don't have BATs */ 1207 ret = get_segment_6xx_tlb(env, ctx, eaddr, access_type, type); 1208 } 1209 } 1210 break; 1211 1212 case POWERPC_MMU_SOFT_4xx: 1213 if (real_mode) { 1214 ret = check_physical(env, ctx, eaddr, access_type); 1215 } else { 1216 ret = mmu40x_get_physical_address(env, ctx, eaddr, access_type); 1217 } 1218 break; 1219 case POWERPC_MMU_BOOKE: 1220 ret = mmubooke_get_physical_address(env, ctx, eaddr, access_type); 1221 break; 1222 case POWERPC_MMU_BOOKE206: 1223 ret = mmubooke206_get_physical_address(env, ctx, eaddr, access_type, 1224 mmu_idx); 1225 break; 1226 case POWERPC_MMU_MPC8xx: 1227 /* XXX: TODO */ 1228 cpu_abort(env_cpu(env), "MPC8xx MMU model is not implemented\n"); 1229 break; 1230 case POWERPC_MMU_REAL: 1231 if (real_mode) { 1232 ret = check_physical(env, ctx, eaddr, access_type); 1233 } else { 1234 cpu_abort(env_cpu(env), 1235 "PowerPC in real mode do not do any translation\n"); 1236 } 1237 return -1; 1238 default: 1239 cpu_abort(env_cpu(env), "Unknown or invalid MMU model\n"); 1240 return -1; 1241 } 1242 1243 return ret; 1244 } 1245 1246 static void booke206_update_mas_tlb_miss(CPUPPCState *env, target_ulong address, 1247 MMUAccessType access_type, int mmu_idx) 1248 { 1249 uint32_t epid; 1250 bool as, pr; 1251 uint32_t missed_tid = 0; 1252 bool use_epid = mmubooke206_get_as(env, 
    bool use_epid = mmubooke206_get_as(env, mmu_idx, &epid, &as, &pr);

    if (access_type == MMU_INST_FETCH) {
        as = FIELD_EX64(env->msr, MSR, IR);
    }
    env->spr[SPR_BOOKE_MAS0] = env->spr[SPR_BOOKE_MAS4] & MAS4_TLBSELD_MASK;
    env->spr[SPR_BOOKE_MAS1] = env->spr[SPR_BOOKE_MAS4] & MAS4_TSIZED_MASK;
    env->spr[SPR_BOOKE_MAS2] = env->spr[SPR_BOOKE_MAS4] & MAS4_WIMGED_MASK;
    env->spr[SPR_BOOKE_MAS3] = 0;
    env->spr[SPR_BOOKE_MAS6] = 0;
    env->spr[SPR_BOOKE_MAS7] = 0;

    /* AS */
    if (as) {
        env->spr[SPR_BOOKE_MAS1] |= MAS1_TS;
        env->spr[SPR_BOOKE_MAS6] |= MAS6_SAS;
    }

    env->spr[SPR_BOOKE_MAS1] |= MAS1_VALID;
    env->spr[SPR_BOOKE_MAS2] |= address & MAS2_EPN_MASK;

    if (!use_epid) {
        switch (env->spr[SPR_BOOKE_MAS4] & MAS4_TIDSELD_PIDZ) {
        case MAS4_TIDSELD_PID0:
            missed_tid = env->spr[SPR_BOOKE_PID];
            break;
        case MAS4_TIDSELD_PID1:
            missed_tid = env->spr[SPR_BOOKE_PID1];
            break;
        case MAS4_TIDSELD_PID2:
            missed_tid = env->spr[SPR_BOOKE_PID2];
            break;
        }
        env->spr[SPR_BOOKE_MAS6] |= env->spr[SPR_BOOKE_PID] << 16;
    } else {
        missed_tid = epid;
        env->spr[SPR_BOOKE_MAS6] |= missed_tid << 16;
    }
    env->spr[SPR_BOOKE_MAS1] |= (missed_tid << MAS1_TID_SHIFT);

    /* next victim logic */
    env->spr[SPR_BOOKE_MAS0] |= env->last_way << MAS0_ESEL_SHIFT;
    env->last_way++;
    env->last_way &= booke206_tlb_ways(env, 0) - 1;
    env->spr[SPR_BOOKE_MAS0] |= env->last_way << MAS0_NV_SHIFT;
}

/* Perform address translation */
/* TODO: Split this by mmu_model. */
static bool ppc_jumbo_xlate(PowerPCCPU *cpu, vaddr eaddr,
                            MMUAccessType access_type,
                            hwaddr *raddrp, int *psizep, int *protp,
                            int mmu_idx, bool guest_visible)
{
    CPUState *cs = CPU(cpu);
    CPUPPCState *env = &cpu->env;
    mmu_ctx_t ctx;
    int type;
    int ret;

    if (access_type == MMU_INST_FETCH) {
        /* code access */
        type = ACCESS_CODE;
    } else if (guest_visible) {
        /* data access */
        type = env->access_type;
    } else {
        type = ACCESS_INT;
    }

    ret = get_physical_address_wtlb(env, &ctx, eaddr, access_type,
                                    type, mmu_idx);
    if (ret == 0) {
        *raddrp = ctx.raddr;
        *protp = ctx.prot;
        *psizep = TARGET_PAGE_BITS;
        return true;
    }

    if (guest_visible) {
        log_cpu_state_mask(CPU_LOG_MMU, cs, 0);
        if (type == ACCESS_CODE) {
            switch (ret) {
            case -1:
                /* No matches in page tables or TLB */
                switch (env->mmu_model) {
                case POWERPC_MMU_SOFT_6xx:
                    cs->exception_index = POWERPC_EXCP_IFTLB;
                    env->error_code = 1 << 18;
                    env->spr[SPR_IMISS] = eaddr;
                    env->spr[SPR_ICMP] = 0x80000000 | ctx.ptem;
                    goto tlb_miss;
                case POWERPC_MMU_SOFT_4xx:
                    cs->exception_index = POWERPC_EXCP_ITLB;
                    env->error_code = 0;
                    env->spr[SPR_40x_DEAR] = eaddr;
                    env->spr[SPR_40x_ESR] = 0x00000000;
                    break;
                case POWERPC_MMU_BOOKE206:
                    booke206_update_mas_tlb_miss(env, eaddr, 2, mmu_idx);
                    /* fall through */
                case POWERPC_MMU_BOOKE:
                    cs->exception_index = POWERPC_EXCP_ITLB;
                    env->error_code = 0;
                    env->spr[SPR_BOOKE_DEAR] = eaddr;
                    env->spr[SPR_BOOKE_ESR] = mmubooke206_esr(mmu_idx, MMU_DATA_LOAD);
                    break;
                case POWERPC_MMU_MPC8xx:
                    cpu_abort(cs, "MPC8xx MMU model is not implemented\n");
                case POWERPC_MMU_REAL:
                    cpu_abort(cs, "PowerPC in real mode should never raise "
                              "any MMU exceptions\n");
                default:
                    cpu_abort(cs, "Unknown or invalid MMU model\n");
                }
                break;
            case -2:
                /* Access rights violation */
                cs->exception_index = POWERPC_EXCP_ISI;
                if ((env->mmu_model == POWERPC_MMU_BOOKE) ||
                    (env->mmu_model == POWERPC_MMU_BOOKE206)) {
                    env->error_code = 0;
                } else {
                    env->error_code = 0x08000000;
                }
                break;
            case -3:
                /* No execute protection violation */
                if ((env->mmu_model == POWERPC_MMU_BOOKE) ||
                    (env->mmu_model == POWERPC_MMU_BOOKE206)) {
                    env->spr[SPR_BOOKE_ESR] = 0x00000000;
                    env->error_code = 0;
                } else {
                    env->error_code = 0x10000000;
                }
                cs->exception_index = POWERPC_EXCP_ISI;
                break;
            case -4:
                /* Direct store exception */
                /* No code fetch is allowed in direct-store areas */
                cs->exception_index = POWERPC_EXCP_ISI;
                if ((env->mmu_model == POWERPC_MMU_BOOKE) ||
                    (env->mmu_model == POWERPC_MMU_BOOKE206)) {
                    env->error_code = 0;
                } else {
                    env->error_code = 0x10000000;
                }
                break;
            }
        } else {
            switch (ret) {
            case -1:
                /* No matches in page tables or TLB */
                switch (env->mmu_model) {
                case POWERPC_MMU_SOFT_6xx:
                    if (access_type == MMU_DATA_STORE) {
                        cs->exception_index = POWERPC_EXCP_DSTLB;
                        env->error_code = 1 << 16;
                    } else {
                        cs->exception_index = POWERPC_EXCP_DLTLB;
                        env->error_code = 0;
                    }
                    env->spr[SPR_DMISS] = eaddr;
                    env->spr[SPR_DCMP] = 0x80000000 | ctx.ptem;
                tlb_miss:
                    env->error_code |= ctx.key << 19;
                    env->spr[SPR_HASH1] = ppc_hash32_hpt_base(cpu) +
                                          get_pteg_offset32(cpu, ctx.hash[0]);
                    env->spr[SPR_HASH2] = ppc_hash32_hpt_base(cpu) +
                                          get_pteg_offset32(cpu, ctx.hash[1]);
                    break;
                case POWERPC_MMU_SOFT_4xx:
                    cs->exception_index = POWERPC_EXCP_DTLB;
                    env->error_code = 0;
                    env->spr[SPR_40x_DEAR] = eaddr;
                    if (access_type == MMU_DATA_STORE) {
                        env->spr[SPR_40x_ESR] = 0x00800000;
                    } else {
                        env->spr[SPR_40x_ESR] = 0x00000000;
                    }
                    break;
                case POWERPC_MMU_MPC8xx:
                    /* XXX: TODO */
                    cpu_abort(cs, "MPC8xx MMU model is not implemented\n");
                case POWERPC_MMU_BOOKE206:
                    booke206_update_mas_tlb_miss(env, eaddr, access_type, mmu_idx);
                    /* fall through */
                case POWERPC_MMU_BOOKE:
                    cs->exception_index = POWERPC_EXCP_DTLB;
                    env->error_code = 0;
                    env->spr[SPR_BOOKE_DEAR] = eaddr;
                    env->spr[SPR_BOOKE_ESR] = mmubooke206_esr(mmu_idx, access_type);
                    break;
                case POWERPC_MMU_REAL:
                    cpu_abort(cs, "PowerPC in real mode should never raise "
                              "any MMU exceptions\n");
                default:
                    cpu_abort(cs, "Unknown or invalid MMU model\n");
                }
                break;
            case -2:
                /* Access rights violation */
                cs->exception_index = POWERPC_EXCP_DSI;
                env->error_code = 0;
                if (env->mmu_model == POWERPC_MMU_SOFT_4xx) {
                    env->spr[SPR_40x_DEAR] = eaddr;
                    if (access_type == MMU_DATA_STORE) {
                        env->spr[SPR_40x_ESR] |= 0x00800000;
                    }
                } else if ((env->mmu_model == POWERPC_MMU_BOOKE) ||
                           (env->mmu_model == POWERPC_MMU_BOOKE206)) {
                    env->spr[SPR_BOOKE_DEAR] = eaddr;
                    env->spr[SPR_BOOKE_ESR] = mmubooke206_esr(mmu_idx, access_type);
                } else {
                    env->spr[SPR_DAR] = eaddr;
                    if (access_type == MMU_DATA_STORE) {
                        env->spr[SPR_DSISR] = 0x0A000000;
                    } else {
                        env->spr[SPR_DSISR] = 0x08000000;
                    }
                }
                break;
            case -4:
                /* Direct store exception */
                switch (type) {
                case ACCESS_FLOAT:
                    /* Floating point load/store */
                    cs->exception_index = POWERPC_EXCP_ALIGN;
                    env->error_code = POWERPC_EXCP_ALIGN_FP;
                    env->spr[SPR_DAR] = eaddr;
                    break;
                case ACCESS_RES:
                    /* lwarx, ldarx or stwcx. */
                    cs->exception_index = POWERPC_EXCP_DSI;
                    env->error_code = 0;
                    env->spr[SPR_DAR] = eaddr;
                    if (access_type == MMU_DATA_STORE) {
                        env->spr[SPR_DSISR] = 0x06000000;
                    } else {
                        env->spr[SPR_DSISR] = 0x04000000;
                    }
                    break;
                case ACCESS_EXT:
                    /* eciwx or ecowx */
                    cs->exception_index = POWERPC_EXCP_DSI;
                    env->error_code = 0;
                    env->spr[SPR_DAR] = eaddr;
                    if (access_type == MMU_DATA_STORE) {
                        env->spr[SPR_DSISR] = 0x06100000;
                    } else {
                        env->spr[SPR_DSISR] = 0x04100000;
                    }
                    break;
                default:
                    printf("DSI: invalid exception (%d)\n", ret);
                    cs->exception_index = POWERPC_EXCP_PROGRAM;
                    env->error_code =
                        POWERPC_EXCP_INVAL | POWERPC_EXCP_INVAL_INVAL;
                    env->spr[SPR_DAR] = eaddr;
                    break;
                }
                break;
            }
        }
    }
    return false;
}

/*****************************************************************************/

bool ppc_xlate(PowerPCCPU *cpu, vaddr eaddr, MMUAccessType access_type,
               hwaddr *raddrp, int *psizep, int *protp,
               int mmu_idx, bool guest_visible)
{
    switch (cpu->env.mmu_model) {
#if defined(TARGET_PPC64)
    case POWERPC_MMU_3_00:
        if (ppc64_v3_radix(cpu)) {
            return ppc_radix64_xlate(cpu, eaddr, access_type, raddrp,
                                     psizep, protp, mmu_idx, guest_visible);
        }
        /* fall through */
    case POWERPC_MMU_64B:
    case POWERPC_MMU_2_03:
    case POWERPC_MMU_2_06:
    case POWERPC_MMU_2_07:
        return ppc_hash64_xlate(cpu, eaddr, access_type,
                                raddrp, psizep, protp, mmu_idx, guest_visible);
#endif

    case POWERPC_MMU_32B:
        return ppc_hash32_xlate(cpu, eaddr, access_type, raddrp,
                                psizep, protp, mmu_idx, guest_visible);

    default:
        return ppc_jumbo_xlate(cpu, eaddr, access_type, raddrp,
                               psizep, protp, mmu_idx, guest_visible);
    }
}

hwaddr ppc_cpu_get_phys_page_debug(CPUState *cs, vaddr addr)
{
    PowerPCCPU *cpu = POWERPC_CPU(cs);
    hwaddr raddr;
    int s, p;

    /*
     * Some MMUs have separate TLBs for code and data. If we only
     * try an MMU_DATA_LOAD, we may not be able to read instructions
     * mapped by code TLBs, so we also try a MMU_INST_FETCH.
     */
    if (ppc_xlate(cpu, addr, MMU_DATA_LOAD, &raddr, &s, &p,
                  cpu_mmu_index(&cpu->env, false), false) ||
        ppc_xlate(cpu, addr, MMU_INST_FETCH, &raddr, &s, &p,
                  cpu_mmu_index(&cpu->env, true), false)) {
        return raddr & TARGET_PAGE_MASK;
    }
    return -1;
}