/*
 * PowerPC MMU, TLB, SLB and BAT emulation helpers for QEMU.
 *
 * Copyright (c) 2003-2007 Jocelyn Mayer
 *
 * This library is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License as published by the Free Software Foundation; either
 * version 2.1 of the License, or (at your option) any later version.
 *
 * This library is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with this library; if not, see <http://www.gnu.org/licenses/>.
 */

#include "qemu/osdep.h"
#include "qemu/units.h"
#include "cpu.h"
#include "sysemu/kvm.h"
#include "kvm_ppc.h"
#include "mmu-hash64.h"
#include "mmu-hash32.h"
#include "exec/exec-all.h"
#include "exec/page-protection.h"
#include "exec/log.h"
#include "helper_regs.h"
#include "qemu/error-report.h"
#include "qemu/qemu-print.h"
#include "internal.h"
#include "mmu-book3s-v3.h"
#include "mmu-radix64.h"

/* #define DUMP_PAGE_TABLES */

void ppc_store_sdr1(CPUPPCState *env, target_ulong value)
{
    PowerPCCPU *cpu = env_archcpu(env);
    qemu_log_mask(CPU_LOG_MMU, "%s: " TARGET_FMT_lx "\n", __func__, value);
    assert(!cpu->env.has_hv_mode || !cpu->vhyp);
#if defined(TARGET_PPC64)
    if (mmu_is_64bit(env->mmu_model)) {
        target_ulong sdr_mask = SDR_64_HTABORG | SDR_64_HTABSIZE;
        target_ulong htabsize = value & SDR_64_HTABSIZE;

        if (value & ~sdr_mask) {
            qemu_log_mask(LOG_GUEST_ERROR, "Invalid bits 0x"TARGET_FMT_lx
                          " set in SDR1", value & ~sdr_mask);
            value &= sdr_mask;
        }
        if (htabsize > 28) {
            qemu_log_mask(LOG_GUEST_ERROR, "Invalid HTABSIZE 0x" TARGET_FMT_lx
                          " stored in SDR1", htabsize);
            return;
        }
    }
#endif /* defined(TARGET_PPC64) */
    /* FIXME: Should check for valid HTABMASK values in 32-bit case */
    env->spr[SPR_SDR1] = value;
}

/*****************************************************************************/
/* PowerPC MMU emulation */

/*
 * Hash-PTE protection check: with key 0, PP values 0-2 grant read/write and
 * PP 3 is read-only; with key 1, PP 0 grants nothing, PP 1/3 are read-only
 * and PP 2 is read/write.  Execute is allowed whenever the segment is not
 * marked no-execute.
 */
static int pp_check(int key, int pp, int nx)
{
    int access;

    /* Compute access rights */
    access = 0;
    if (key == 0) {
        switch (pp) {
        case 0x0:
        case 0x1:
        case 0x2:
            access |= PAGE_WRITE;
            /* fall through */
        case 0x3:
            access |= PAGE_READ;
            break;
        }
    } else {
        switch (pp) {
        case 0x0:
            access = 0;
            break;
        case 0x1:
        case 0x3:
            access = PAGE_READ;
            break;
        case 0x2:
            access = PAGE_READ | PAGE_WRITE;
            break;
        }
    }
    if (nx == 0) {
        access |= PAGE_EXEC;
    }

    return access;
}

static int check_prot(int prot, MMUAccessType access_type)
{
    return prot & prot_for_access_type(access_type) ? 0 : -2;
}

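/*
 * The 6xx software TLB is set-associative: each of the nb_ways ways holds
 * tlb_per_way entries indexed by the low bits of the effective page number.
 * When the instruction and data TLBs are split (id_tlbs == 1), the
 * instruction entries are stored after the nb_tlb data entries.
 */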
int ppc6xx_tlb_getnum(CPUPPCState *env, target_ulong eaddr,
                      int way, int is_code)
{
    int nr;

    /* Select TLB num in a way from address */
    nr = (eaddr >> TARGET_PAGE_BITS) & (env->tlb_per_way - 1);
    /* Select TLB way */
    nr += env->tlb_per_way * way;
    /* 6xx have separate TLBs for instructions and data */
    if (is_code && env->id_tlbs == 1) {
        nr += env->nb_tlb;
    }

    return nr;
}

static int ppc6xx_tlb_pte_check(mmu_ctx_t *ctx, target_ulong pte0,
                                target_ulong pte1, int h,
                                MMUAccessType access_type)
{
    target_ulong ptem, mmask;
    int access, ret, pteh, ptev, pp;

    ret = -1;
    /* Check validity and table match */
    ptev = pte_is_valid(pte0);
    pteh = (pte0 >> 6) & 1;
    if (ptev && h == pteh) {
        /* Check vsid & api */
        ptem = pte0 & PTE_PTEM_MASK;
        mmask = PTE_CHECK_MASK;
        pp = pte1 & 0x00000003;
        if (ptem == ctx->ptem) {
            if (ctx->raddr != (hwaddr)-1ULL) {
                /* all matches should have equal RPN, WIMG & PP */
                if ((ctx->raddr & mmask) != (pte1 & mmask)) {
                    qemu_log_mask(CPU_LOG_MMU, "Bad RPN/WIMG/PP\n");
                    return -3;
                }
            }
            /* Compute access rights */
            access = pp_check(ctx->key, pp, ctx->nx);
            /* Keep the matching PTE information */
            ctx->raddr = pte1;
            ctx->prot = access;
            ret = check_prot(ctx->prot, access_type);
            if (ret == 0) {
                /* Access granted */
                qemu_log_mask(CPU_LOG_MMU, "PTE access granted !\n");
            } else {
                /* Access right violation */
                qemu_log_mask(CPU_LOG_MMU, "PTE access rejected\n");
            }
        }
    }

    return ret;
}

static int pte_update_flags(mmu_ctx_t *ctx, target_ulong *pte1p,
                            int ret, MMUAccessType access_type)
{
    int store = 0;

    /* Update page flags */
    if (!(*pte1p & 0x00000100)) {
        /* Update accessed flag */
        *pte1p |= 0x00000100;
        store = 1;
    }
    if (!(*pte1p & 0x00000080)) {
        if (access_type == MMU_DATA_STORE && ret == 0) {
            /* Update changed flag */
            *pte1p |= 0x00000080;
            store = 1;
        } else {
            /* Force page fault for first write access */
            ctx->prot &= ~PAGE_WRITE;
        }
    }

    return store;
}

/* Software driven TLB helpers */

static int ppc6xx_tlb_check(CPUPPCState *env, mmu_ctx_t *ctx,
                            target_ulong eaddr, MMUAccessType access_type)
{
    ppc6xx_tlb_t *tlb;
    int nr, best, way;
    int ret;

    best = -1;
    ret = -1; /* No TLB found */
    for (way = 0; way < env->nb_ways; way++) {
        nr = ppc6xx_tlb_getnum(env, eaddr, way, access_type == MMU_INST_FETCH);
        tlb = &env->tlb.tlb6[nr];
        /* This test "emulates" the PTE index match for hardware TLBs */
        if ((eaddr & TARGET_PAGE_MASK) != tlb->EPN) {
            qemu_log_mask(CPU_LOG_MMU, "TLB %d/%d %s [" TARGET_FMT_lx
                          " " TARGET_FMT_lx "] <> " TARGET_FMT_lx "\n",
                          nr, env->nb_tlb,
                          pte_is_valid(tlb->pte0) ? "valid" : "inval",
                          tlb->EPN, tlb->EPN + TARGET_PAGE_SIZE, eaddr);
            continue;
        }
        qemu_log_mask(CPU_LOG_MMU, "TLB %d/%d %s " TARGET_FMT_lx " <> "
                      TARGET_FMT_lx " " TARGET_FMT_lx " %c %c\n",
                      nr, env->nb_tlb,
                      pte_is_valid(tlb->pte0) ? "valid" : "inval",
                      tlb->EPN, eaddr, tlb->pte1,
                      access_type == MMU_DATA_STORE ? 'S' : 'L',
                      access_type == MMU_INST_FETCH ? 'I' : 'D');
        switch (ppc6xx_tlb_pte_check(ctx, tlb->pte0, tlb->pte1,
                                     0, access_type)) {
        case -3:
            /* TLB inconsistency */
            return -1;
        case -2:
            /* Access violation */
            ret = -2;
            best = nr;
            break;
        case -1:
        default:
            /* No match */
            break;
        case 0:
            /* access granted */
            /*
             * XXX: we should go on looping to check all TLBs
             *      consistency but we can speed-up the whole thing as
             *      the result would be undefined if TLBs are not
             *      consistent.
             */
            ret = 0;
            best = nr;
            goto done;
        }
    }
    if (best != -1) {
    done:
        qemu_log_mask(CPU_LOG_MMU, "found TLB at addr " HWADDR_FMT_plx
                      " prot=%01x ret=%d\n",
                      ctx->raddr & TARGET_PAGE_MASK, ctx->prot, ret);
        /* Update page flags */
        pte_update_flags(ctx, &env->tlb.tlb6[best].pte1, ret, access_type);
    }

    return ret;
}

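/*
 * BAT translation as implemented below: the upper BAT register holds the
 * block effective page index (BEPI), the block length mask (BL) and the
 * Vs/Vp valid bits that are checked against MSR[PR]; the lower BAT register
 * holds the block real page number and the PP protection bits.
 */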
/* Perform BAT hit & translation */
static inline void bat_size_prot(CPUPPCState *env, target_ulong *blp,
                                 int *validp, int *protp, target_ulong *BATu,
                                 target_ulong *BATl)
{
    target_ulong bl;
    int pp, valid, prot;

    bl = (*BATu & 0x00001FFC) << 15;
    valid = 0;
    prot = 0;
    if ((!FIELD_EX64(env->msr, MSR, PR) && (*BATu & 0x00000002)) ||
        (FIELD_EX64(env->msr, MSR, PR) && (*BATu & 0x00000001))) {
        valid = 1;
        pp = *BATl & 0x00000003;
        if (pp != 0) {
            prot = PAGE_READ | PAGE_EXEC;
            if (pp == 0x2) {
                prot |= PAGE_WRITE;
            }
        }
    }
    *blp = bl;
    *validp = valid;
    *protp = prot;
}

static int get_bat_6xx_tlb(CPUPPCState *env, mmu_ctx_t *ctx,
                           target_ulong virtual, MMUAccessType access_type)
{
    target_ulong *BATlt, *BATut, *BATu, *BATl;
    target_ulong BEPIl, BEPIu, bl;
    int i, valid, prot;
    int ret = -1;
    bool ifetch = access_type == MMU_INST_FETCH;

    qemu_log_mask(CPU_LOG_MMU, "%s: %cBAT v " TARGET_FMT_lx "\n", __func__,
                  ifetch ? 'I' : 'D', virtual);
    if (ifetch) {
        BATlt = env->IBAT[1];
        BATut = env->IBAT[0];
    } else {
        BATlt = env->DBAT[1];
        BATut = env->DBAT[0];
    }
    for (i = 0; i < env->nb_BATs; i++) {
        BATu = &BATut[i];
        BATl = &BATlt[i];
        BEPIu = *BATu & 0xF0000000;
        BEPIl = *BATu & 0x0FFE0000;
        bat_size_prot(env, &bl, &valid, &prot, BATu, BATl);
        qemu_log_mask(CPU_LOG_MMU, "%s: %cBAT%d v " TARGET_FMT_lx " BATu "
                      TARGET_FMT_lx " BATl " TARGET_FMT_lx "\n", __func__,
                      ifetch ? 'I' : 'D', i, virtual, *BATu, *BATl);
        if ((virtual & 0xF0000000) == BEPIu &&
            ((virtual & 0x0FFE0000) & ~bl) == BEPIl) {
            /* BAT matches */
            if (valid != 0) {
                /* Get physical address */
                ctx->raddr = (*BATl & 0xF0000000) |
                    ((virtual & 0x0FFE0000 & bl) | (*BATl & 0x0FFE0000)) |
                    (virtual & 0x0001F000);
                /* Compute access rights */
                ctx->prot = prot;
                ret = check_prot(ctx->prot, access_type);
                if (ret == 0) {
                    qemu_log_mask(CPU_LOG_MMU, "BAT %d match: r " HWADDR_FMT_plx
                                  " prot=%c%c\n", i, ctx->raddr,
                                  ctx->prot & PAGE_READ ? 'R' : '-',
                                  ctx->prot & PAGE_WRITE ? 'W' : '-');
                }
                break;
            }
        }
    }
    if (ret < 0) {
        if (qemu_log_enabled()) {
            qemu_log_mask(CPU_LOG_MMU, "no BAT match for "
                          TARGET_FMT_lx ":\n", virtual);
            for (i = 0; i < 4; i++) {
                BATu = &BATut[i];
                BATl = &BATlt[i];
                BEPIu = *BATu & 0xF0000000;
                BEPIl = *BATu & 0x0FFE0000;
                bl = (*BATu & 0x00001FFC) << 15;
                qemu_log_mask(CPU_LOG_MMU, "%s: %cBAT%d v "
                              TARGET_FMT_lx " BATu " TARGET_FMT_lx
                              " BATl " TARGET_FMT_lx "\n\t" TARGET_FMT_lx " "
                              TARGET_FMT_lx " " TARGET_FMT_lx "\n",
                              __func__, ifetch ? 'I' : 'D', i, virtual,
                              *BATu, *BATl, BEPIu, BEPIl, bl);
            }
        }
    }
    /* No hit */
    return ret;
}

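/*
 * Full 6xx/7xx translation: try the BATs first, then fall back to segment
 * translation.  Ordinary segments (T=0) are looked up in the software TLB,
 * which mirrors the hashed page table; direct-store segments (T=1) are only
 * partially handled.
 */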
static int mmu6xx_get_physical_address(CPUPPCState *env, mmu_ctx_t *ctx,
                                       target_ulong eaddr,
                                       MMUAccessType access_type, int type)
{
    PowerPCCPU *cpu = env_archcpu(env);
    hwaddr hash;
    target_ulong vsid, sr, pgidx;
    int ds, target_page_bits;
    bool pr;
    int ret;

    /* First try to find a BAT entry if there are any */
    if (env->nb_BATs && get_bat_6xx_tlb(env, ctx, eaddr, access_type) == 0) {
        return 0;
    }

    /* Perform segment based translation when no BATs matched */
    pr = FIELD_EX64(env->msr, MSR, PR);
    ctx->eaddr = eaddr;

    sr = env->sr[eaddr >> 28];
    ctx->key = (((sr & 0x20000000) && pr) ||
                ((sr & 0x40000000) && !pr)) ? 1 : 0;
    ds = sr & 0x80000000 ? 1 : 0;
    ctx->nx = sr & 0x10000000 ? 1 : 0;
    vsid = sr & 0x00FFFFFF;
    target_page_bits = TARGET_PAGE_BITS;
    qemu_log_mask(CPU_LOG_MMU,
                  "Check segment v=" TARGET_FMT_lx " %d " TARGET_FMT_lx
                  " nip=" TARGET_FMT_lx " lr=" TARGET_FMT_lx
                  " ir=%d dr=%d pr=%d %d t=%d\n",
                  eaddr, (int)(eaddr >> 28), sr, env->nip, env->lr,
                  (int)FIELD_EX64(env->msr, MSR, IR),
                  (int)FIELD_EX64(env->msr, MSR, DR), pr ? 1 : 0,
                  access_type == MMU_DATA_STORE, type);
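    /*
     * The primary hash is the XOR of the VSID and the page index within the
     * 256MB segment; ctx->ptem holds the VSID/API compare value that a
     * matching PTE must carry in its first word.
     */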
    pgidx = (eaddr & ~SEGMENT_MASK_256M) >> target_page_bits;
    hash = vsid ^ pgidx;
    ctx->ptem = (vsid << 7) | (pgidx >> 10);

    qemu_log_mask(CPU_LOG_MMU,
                  "pte segment: key=%d ds %d nx %d vsid " TARGET_FMT_lx "\n",
                  ctx->key, ds, ctx->nx, vsid);
    ret = -1;
    if (!ds) {
        /* Check if instruction fetch is allowed, if needed */
        if (type == ACCESS_CODE && ctx->nx) {
            qemu_log_mask(CPU_LOG_MMU, "No access allowed\n");
            return -3;
        }
        /* Page address translation */
        qemu_log_mask(CPU_LOG_MMU, "htab_base " HWADDR_FMT_plx " htab_mask "
                      HWADDR_FMT_plx " hash " HWADDR_FMT_plx "\n",
                      ppc_hash32_hpt_base(cpu), ppc_hash32_hpt_mask(cpu), hash);
        ctx->hash[0] = hash;
        ctx->hash[1] = ~hash;

        /* Initialize real address with an invalid value */
        ctx->raddr = (hwaddr)-1ULL;
        /* Software TLB search */
        ret = ppc6xx_tlb_check(env, ctx, eaddr, access_type);
#if defined(DUMP_PAGE_TABLES)
        if (qemu_loglevel_mask(CPU_LOG_MMU)) {
            CPUState *cs = env_cpu(env);
            hwaddr curaddr;
            uint32_t a0, a1, a2, a3;

            qemu_log("Page table: " HWADDR_FMT_plx " len " HWADDR_FMT_plx "\n",
                     ppc_hash32_hpt_base(cpu), ppc_hash32_hpt_mask(cpu) + 0x80);
            for (curaddr = ppc_hash32_hpt_base(cpu);
                 curaddr < (ppc_hash32_hpt_base(cpu)
                            + ppc_hash32_hpt_mask(cpu) + 0x80);
                 curaddr += 16) {
                a0 = ldl_phys(cs->as, curaddr);
                a1 = ldl_phys(cs->as, curaddr + 4);
                a2 = ldl_phys(cs->as, curaddr + 8);
                a3 = ldl_phys(cs->as, curaddr + 12);
                if (a0 != 0 || a1 != 0 || a2 != 0 || a3 != 0) {
                    qemu_log(HWADDR_FMT_plx ": %08x %08x %08x %08x\n",
                             curaddr, a0, a1, a2, a3);
                }
            }
        }
#endif
    } else {
        qemu_log_mask(CPU_LOG_MMU, "direct store...\n");
        /* Direct-store segment : absolutely *BUGGY* for now */

        switch (type) {
        case ACCESS_INT:
            /* Integer load/store : only access allowed */
            break;
        case ACCESS_CODE:
            /* No code fetch is allowed in direct-store areas */
            return -4;
        case ACCESS_FLOAT:
            /* Floating point load/store */
            return -4;
        case ACCESS_RES:
            /* lwarx, ldarx or stwcx. */
            return -4;
        case ACCESS_CACHE:
            /*
             * dcba, dcbt, dcbtst, dcbf, dcbi, dcbst, dcbz, or icbi
             *
             * Should make the instruction do a no-op.  As it already
             * does a no-op, it's quite easy :-)
             */
            ctx->raddr = eaddr;
            return 0;
        case ACCESS_EXT:
            /* eciwx or ecowx */
            return -4;
        default:
            qemu_log_mask(CPU_LOG_MMU, "ERROR: instruction should not need "
                          "address translation\n");
            return -4;
        }
        if ((access_type == MMU_DATA_STORE || ctx->key != 1) &&
            (access_type == MMU_DATA_LOAD || ctx->key != 0)) {
            ctx->raddr = eaddr;
            ret = 2;
        } else {
            ret = -2;
        }
    }

    return ret;
}

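/*
 * An embedded TLB entry matches when it is valid, its PID is zero (global)
 * or equal to the current PID, and the effective address agrees with the
 * EPN under the entry's size mask; the real address is then formed from the
 * RPN and the page offset.
 */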
/* Generic TLB check function for embedded PowerPC implementations */
static bool ppcemb_tlb_check(CPUPPCState *env, ppcemb_tlb_t *tlb,
                             hwaddr *raddrp,
                             target_ulong address, uint32_t pid, int i)
{
    target_ulong mask;

    /* Check valid flag */
    if (!(tlb->prot & PAGE_VALID)) {
        return false;
    }
    mask = ~(tlb->size - 1);
    qemu_log_mask(CPU_LOG_MMU, "%s: TLB %d address " TARGET_FMT_lx
                  " PID %u <=> " TARGET_FMT_lx " " TARGET_FMT_lx " %u %x\n",
                  __func__, i, address, pid, tlb->EPN,
                  mask, (uint32_t)tlb->PID, tlb->prot);
    /* Check PID */
    if (tlb->PID != 0 && tlb->PID != pid) {
        return false;
    }
    /* Check effective address */
    if ((address & mask) != tlb->EPN) {
        return false;
    }
    *raddrp = (tlb->RPN & mask) | (address & ~mask);
    return true;
}

/* Generic TLB search function for PowerPC embedded implementations */
int ppcemb_tlb_search(CPUPPCState *env, target_ulong address, uint32_t pid)
{
    ppcemb_tlb_t *tlb;
    hwaddr raddr;
    int i;

    for (i = 0; i < env->nb_tlb; i++) {
        tlb = &env->tlb.tlbe[i];
        if (ppcemb_tlb_check(env, tlb, &raddr, address, pid, i)) {
            return i;
        }
    }
    return -1;
}

static int mmu40x_get_physical_address(CPUPPCState *env, mmu_ctx_t *ctx,
                                       target_ulong address,
                                       MMUAccessType access_type)
{
    ppcemb_tlb_t *tlb;
    hwaddr raddr;
    int i, ret, zsel, zpr, pr;

    ret = -1;
    raddr = (hwaddr)-1ULL;
    pr = FIELD_EX64(env->msr, MSR, PR);
    for (i = 0; i < env->nb_tlb; i++) {
        tlb = &env->tlb.tlbe[i];
        if (!ppcemb_tlb_check(env, tlb, &raddr, address,
                              env->spr[SPR_40x_PID], i)) {
            continue;
        }
        zsel = (tlb->attr >> 4) & 0xF;
        zpr = (env->spr[SPR_40x_ZPR] >> (30 - (2 * zsel))) & 0x3;
        qemu_log_mask(CPU_LOG_MMU,
                      "%s: TLB %d zsel %d zpr %d ty %d attr %08x\n",
                      __func__, i, zsel, zpr, access_type, tlb->attr);
        /* Check execute enable bit */
        switch (zpr) {
        case 0x2:
            if (pr != 0) {
                goto check_perms;
            }
            /* fall through */
        case 0x3:
            /* All accesses granted */
            ctx->prot = PAGE_READ | PAGE_WRITE | PAGE_EXEC;
            ret = 0;
            break;
        case 0x0:
            if (pr != 0) {
                /* Raise Zone protection fault. */
                env->spr[SPR_40x_ESR] = 1 << 22;
                ctx->prot = 0;
                ret = -2;
                break;
            }
            /* fall through */
        case 0x1:
        check_perms:
            /* Check from TLB entry */
            ctx->prot = tlb->prot;
            ret = check_prot(ctx->prot, access_type);
            if (ret == -2) {
                env->spr[SPR_40x_ESR] = 0;
            }
            break;
        }
        if (ret >= 0) {
            ctx->raddr = raddr;
            qemu_log_mask(CPU_LOG_MMU, "%s: access granted " TARGET_FMT_lx
                          " => " HWADDR_FMT_plx
                          " %d %d\n", __func__, address, ctx->raddr, ctx->prot,
                          ret);
            return 0;
        }
    }
    qemu_log_mask(CPU_LOG_MMU, "%s: access refused " TARGET_FMT_lx
                  " => " HWADDR_FMT_plx
                  " %d %d\n", __func__, address, raddr, ctx->prot, ret);

    return ret;
}

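/*
 * BookE PID matching: try the current PID first, then PID1/PID2 when the
 * core implements extra PID registers.  Cores without extra PIDs use the
 * 4xx-style entry layout, where four extra RPN bits extend the real address
 * to 36 bits.
 */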
static bool mmubooke_check_pid(CPUPPCState *env, ppcemb_tlb_t *tlb,
                               hwaddr *raddr, target_ulong addr, int i)
{
    if (ppcemb_tlb_check(env, tlb, raddr, addr, env->spr[SPR_BOOKE_PID], i)) {
        if (!env->nb_pids) {
            /* Extend the physical address to 36 bits */
            *raddr |= (uint64_t)(tlb->RPN & 0xF) << 32;
        }
        return true;
    } else if (!env->nb_pids) {
        return false;
    }
    if (env->spr[SPR_BOOKE_PID1] &&
        ppcemb_tlb_check(env, tlb, raddr, addr, env->spr[SPR_BOOKE_PID1], i)) {
        return true;
    }
    if (env->spr[SPR_BOOKE_PID2] &&
        ppcemb_tlb_check(env, tlb, raddr, addr, env->spr[SPR_BOOKE_PID2], i)) {
        return true;
    }
    return false;
}

static int mmubooke_check_tlb(CPUPPCState *env, ppcemb_tlb_t *tlb,
                              hwaddr *raddr, int *prot, target_ulong address,
                              MMUAccessType access_type, int i)
{
    if (!mmubooke_check_pid(env, tlb, raddr, address, i)) {
        qemu_log_mask(CPU_LOG_MMU, "%s: TLB entry not found\n", __func__);
        return -1;
    }

    /* Check the address space */
    if ((access_type == MMU_INST_FETCH ?
         FIELD_EX64(env->msr, MSR, IR) :
         FIELD_EX64(env->msr, MSR, DR)) != (tlb->attr & 1)) {
        qemu_log_mask(CPU_LOG_MMU, "%s: AS doesn't match\n", __func__);
        return -1;
    }

    if (FIELD_EX64(env->msr, MSR, PR)) {
        *prot = tlb->prot & 0xF;
    } else {
        *prot = (tlb->prot >> 4) & 0xF;
    }
    if (*prot & prot_for_access_type(access_type)) {
        qemu_log_mask(CPU_LOG_MMU, "%s: good TLB!\n", __func__);
        return 0;
    }

    qemu_log_mask(CPU_LOG_MMU, "%s: no prot match: %x\n", __func__, *prot);
    return access_type == MMU_INST_FETCH ? -3 : -2;
}

static int mmubooke_get_physical_address(CPUPPCState *env, mmu_ctx_t *ctx,
                                         target_ulong address,
                                         MMUAccessType access_type)
{
    ppcemb_tlb_t *tlb;
    hwaddr raddr;
    int i, ret;

    ret = -1;
    raddr = (hwaddr)-1ULL;
    for (i = 0; i < env->nb_tlb; i++) {
        tlb = &env->tlb.tlbe[i];
        ret = mmubooke_check_tlb(env, tlb, &raddr, &ctx->prot, address,
                                 access_type, i);
        if (ret != -1) {
            break;
        }
    }

    if (ret >= 0) {
        ctx->raddr = raddr;
        qemu_log_mask(CPU_LOG_MMU, "%s: access granted " TARGET_FMT_lx
                      " => " HWADDR_FMT_plx " %d %d\n", __func__,
                      address, ctx->raddr, ctx->prot, ret);
    } else {
        qemu_log_mask(CPU_LOG_MMU, "%s: access refused " TARGET_FMT_lx
                      " => " HWADDR_FMT_plx " %d %d\n", __func__,
                      address, raddr, ctx->prot, ret);
    }

    return ret;
}

hwaddr booke206_tlb_to_page_size(CPUPPCState *env, ppcmas_tlb_t *tlb)
{
    int tlbm_size;

    tlbm_size = (tlb->mas1 & MAS1_TSIZE_MASK) >> MAS1_TSIZE_SHIFT;

    return 1024ULL << tlbm_size;
}

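/*
 * A MAS-style entry matches when MAS1[V] is set, its TID is zero (global)
 * or equal to the requested PID, and the effective address matches
 * MAS2[EPN] under the page-size mask.  In 32-bit mode (MSR[CM] clear) the
 * effective address is first truncated to 32 bits.
 */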
/* TLB check function for MAS based SoftTLBs */
int ppcmas_tlb_check(CPUPPCState *env, ppcmas_tlb_t *tlb, hwaddr *raddrp,
                     target_ulong address, uint32_t pid)
{
    hwaddr mask;
    uint32_t tlb_pid;

    if (!FIELD_EX64(env->msr, MSR, CM)) {
        /* In 32bit mode we can only address 32bit EAs */
        address = (uint32_t)address;
    }

    /* Check valid flag */
    if (!(tlb->mas1 & MAS1_VALID)) {
        return -1;
    }

    mask = ~(booke206_tlb_to_page_size(env, tlb) - 1);
    qemu_log_mask(CPU_LOG_MMU, "%s: TLB ADDR=0x" TARGET_FMT_lx
                  " PID=0x%x MAS1=0x%x MAS2=0x%" PRIx64 " mask=0x%"
                  HWADDR_PRIx " MAS7_3=0x%" PRIx64 " MAS8=0x%" PRIx32 "\n",
                  __func__, address, pid, tlb->mas1, tlb->mas2, mask,
                  tlb->mas7_3, tlb->mas8);

    /* Check PID */
    tlb_pid = (tlb->mas1 & MAS1_TID_MASK) >> MAS1_TID_SHIFT;
    if (tlb_pid != 0 && tlb_pid != pid) {
        return -1;
    }

    /* Check effective address */
    if ((address & mask) != (tlb->mas2 & MAS2_EPN_MASK)) {
        return -1;
    }

    if (raddrp) {
        *raddrp = (tlb->mas7_3 & mask) | (address & ~mask);
    }

    return 0;
}

static bool is_epid_mmu(int mmu_idx)
{
    return mmu_idx == PPC_TLB_EPID_STORE || mmu_idx == PPC_TLB_EPID_LOAD;
}

static uint32_t mmubooke206_esr(int mmu_idx, MMUAccessType access_type)
{
    uint32_t esr = 0;
    if (access_type == MMU_DATA_STORE) {
        esr |= ESR_ST;
    }
    if (is_epid_mmu(mmu_idx)) {
        esr |= ESR_EPID;
    }
    return esr;
}

/*
 * Get EPID register given the mmu_idx. If this is a regular load,
 * construct the EPID access bits from current processor state.
 *
 * Get the effective AS and PR bits and the PID. The PID is returned
 * only if EPID load is requested, otherwise the caller must detect
 * the correct EPID.  Return true if valid EPID is returned.
 */
static bool mmubooke206_get_as(CPUPPCState *env,
                               int mmu_idx, uint32_t *epid_out,
                               bool *as_out, bool *pr_out)
{
    if (is_epid_mmu(mmu_idx)) {
        uint32_t epidr;
        if (mmu_idx == PPC_TLB_EPID_STORE) {
            epidr = env->spr[SPR_BOOKE_EPSC];
        } else {
            epidr = env->spr[SPR_BOOKE_EPLC];
        }
        *epid_out = (epidr & EPID_EPID) >> EPID_EPID_SHIFT;
        *as_out = !!(epidr & EPID_EAS);
        *pr_out = !!(epidr & EPID_EPR);
        return true;
    } else {
        *as_out = FIELD_EX64(env->msr, MSR, DS);
        *pr_out = FIELD_EX64(env->msr, MSR, PR);
        return false;
    }
}

/* Check if the tlb found by hashing really matches */
static int mmubooke206_check_tlb(CPUPPCState *env, ppcmas_tlb_t *tlb,
                                 hwaddr *raddr, int *prot,
                                 target_ulong address,
                                 MMUAccessType access_type, int mmu_idx)
{
    uint32_t epid;
    bool as, pr;
    bool use_epid = mmubooke206_get_as(env, mmu_idx, &epid, &as, &pr);

    if (!use_epid) {
        if (ppcmas_tlb_check(env, tlb, raddr, address,
                             env->spr[SPR_BOOKE_PID]) >= 0) {
            goto found_tlb;
        }

        if (env->spr[SPR_BOOKE_PID1] &&
            ppcmas_tlb_check(env, tlb, raddr, address,
                             env->spr[SPR_BOOKE_PID1]) >= 0) {
            goto found_tlb;
        }

        if (env->spr[SPR_BOOKE_PID2] &&
            ppcmas_tlb_check(env, tlb, raddr, address,
                             env->spr[SPR_BOOKE_PID2]) >= 0) {
            goto found_tlb;
        }
    } else {
        if (ppcmas_tlb_check(env, tlb, raddr, address, epid) >= 0) {
            goto found_tlb;
        }
    }

    qemu_log_mask(CPU_LOG_MMU, "%s: No TLB entry found for effective address "
                  "0x" TARGET_FMT_lx "\n", __func__, address);
    return -1;

found_tlb:

    /* Check the address space and permissions */
    if (access_type == MMU_INST_FETCH) {
        /* There is no way to fetch code using epid load */
        assert(!use_epid);
        as = FIELD_EX64(env->msr, MSR, IR);
    }

    if (as != ((tlb->mas1 & MAS1_TS) >> MAS1_TS_SHIFT)) {
        qemu_log_mask(CPU_LOG_MMU, "%s: AS doesn't match\n", __func__);
        return -1;
    }

    *prot = 0;
    if (pr) {
        if (tlb->mas7_3 & MAS3_UR) {
            *prot |= PAGE_READ;
        }
        if (tlb->mas7_3 & MAS3_UW) {
            *prot |= PAGE_WRITE;
        }
        if (tlb->mas7_3 & MAS3_UX) {
            *prot |= PAGE_EXEC;
        }
    } else {
        if (tlb->mas7_3 & MAS3_SR) {
            *prot |= PAGE_READ;
        }
        if (tlb->mas7_3 & MAS3_SW) {
            *prot |= PAGE_WRITE;
        }
        if (tlb->mas7_3 & MAS3_SX) {
            *prot |= PAGE_EXEC;
        }
    }
    if (*prot & prot_for_access_type(access_type)) {
        qemu_log_mask(CPU_LOG_MMU, "%s: good TLB!\n", __func__);
        return 0;
    }

    qemu_log_mask(CPU_LOG_MMU, "%s: no prot match: %x\n", __func__, *prot);
    return access_type == MMU_INST_FETCH ? -3 : -2;
}

static int mmubooke206_get_physical_address(CPUPPCState *env, mmu_ctx_t *ctx,
                                            target_ulong address,
                                            MMUAccessType access_type,
                                            int mmu_idx)
{
    ppcmas_tlb_t *tlb;
    hwaddr raddr;
    int i, j, ret;

    ret = -1;
    raddr = (hwaddr)-1ULL;

    for (i = 0; i < BOOKE206_MAX_TLBN; i++) {
        int ways = booke206_tlb_ways(env, i);

        for (j = 0; j < ways; j++) {
            tlb = booke206_get_tlbm(env, i, address, j);
            if (!tlb) {
                continue;
            }
            ret = mmubooke206_check_tlb(env, tlb, &raddr, &ctx->prot, address,
                                        access_type, mmu_idx);
            if (ret != -1) {
                goto found_tlb;
            }
        }
    }

found_tlb:

    if (ret >= 0) {
        ctx->raddr = raddr;
        qemu_log_mask(CPU_LOG_MMU, "%s: access granted " TARGET_FMT_lx
                      " => " HWADDR_FMT_plx " %d %d\n", __func__, address,
                      ctx->raddr, ctx->prot, ret);
    } else {
        qemu_log_mask(CPU_LOG_MMU, "%s: access refused " TARGET_FMT_lx
                      " => " HWADDR_FMT_plx " %d %d\n", __func__, address,
                      raddr, ctx->prot, ret);
    }

    return ret;
}

static const char *book3e_tsize_to_str[32] = {
    "1K", "2K", "4K", "8K", "16K", "32K", "64K", "128K", "256K", "512K",
    "1M", "2M", "4M", "8M", "16M", "32M", "64M", "128M", "256M", "512M",
    "1G", "2G", "4G", "8G", "16G", "32G", "64G", "128G", "256G", "512G",
    "1T", "2T"
};

static void mmubooke_dump_mmu(CPUPPCState *env)
{
    ppcemb_tlb_t *entry;
    int i;

#ifdef CONFIG_KVM
    if (kvm_enabled() && !env->kvm_sw_tlb) {
        qemu_printf("Cannot access KVM TLB\n");
        return;
    }
#endif

    qemu_printf("\nTLB:\n");
    qemu_printf("Effective Physical Size PID Prot "
                "Attr\n");

    entry = &env->tlb.tlbe[0];
    for (i = 0; i < env->nb_tlb; i++, entry++) {
        hwaddr ea, pa;
        target_ulong mask;
        uint64_t size = (uint64_t)entry->size;
        char size_buf[20];

        /* Check valid flag */
        if (!(entry->prot & PAGE_VALID)) {
            continue;
        }

        mask = ~(entry->size - 1);
        ea = entry->EPN & mask;
        pa = entry->RPN & mask;
        /* Extend the physical address to 36 bits */
        pa |= (hwaddr)(entry->RPN & 0xF) << 32;
        if (size >= 1 * MiB) {
            snprintf(size_buf, sizeof(size_buf), "%3" PRId64 "M", size / MiB);
        } else {
            snprintf(size_buf, sizeof(size_buf), "%3" PRId64 "k", size / KiB);
        }
        qemu_printf("0x%016" PRIx64 " 0x%016" PRIx64 " %s %-5u %08x %08x\n",
                    (uint64_t)ea, (uint64_t)pa, size_buf, (uint32_t)entry->PID,
                    entry->prot, entry->attr);
    }
}

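/*
 * Pretty-print one BookE 2.06 TLB array: decode MAS1[TSIZE] into a page
 * size and show the TID, TS, supervisor/user RWX permissions and the WIMGE
 * and U0-U3 attribute bits of each valid entry.
 */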
static void mmubooke206_dump_one_tlb(CPUPPCState *env, int tlbn, int offset,
                                     int tlbsize)
{
    ppcmas_tlb_t *entry;
    int i;

    qemu_printf("\nTLB%d:\n", tlbn);
    qemu_printf("Effective Physical Size TID TS SRWX"
                " URWX WIMGE U0123\n");

    entry = &env->tlb.tlbm[offset];
    for (i = 0; i < tlbsize; i++, entry++) {
        hwaddr ea, pa, size;
        int tsize;

        if (!(entry->mas1 & MAS1_VALID)) {
            continue;
        }

        tsize = (entry->mas1 & MAS1_TSIZE_MASK) >> MAS1_TSIZE_SHIFT;
        size = 1024ULL << tsize;
        ea = entry->mas2 & ~(size - 1);
        pa = entry->mas7_3 & ~(size - 1);

        qemu_printf("0x%016" PRIx64 " 0x%016" PRIx64 " %4s %-5u %1u S%c%c%c"
                    " U%c%c%c %c%c%c%c%c U%c%c%c%c\n",
                    (uint64_t)ea, (uint64_t)pa,
                    book3e_tsize_to_str[tsize],
                    (entry->mas1 & MAS1_TID_MASK) >> MAS1_TID_SHIFT,
                    (entry->mas1 & MAS1_TS) >> MAS1_TS_SHIFT,
                    entry->mas7_3 & MAS3_SR ? 'R' : '-',
                    entry->mas7_3 & MAS3_SW ? 'W' : '-',
                    entry->mas7_3 & MAS3_SX ? 'X' : '-',
                    entry->mas7_3 & MAS3_UR ? 'R' : '-',
                    entry->mas7_3 & MAS3_UW ? 'W' : '-',
                    entry->mas7_3 & MAS3_UX ? 'X' : '-',
                    entry->mas2 & MAS2_W ? 'W' : '-',
                    entry->mas2 & MAS2_I ? 'I' : '-',
                    entry->mas2 & MAS2_M ? 'M' : '-',
                    entry->mas2 & MAS2_G ? 'G' : '-',
                    entry->mas2 & MAS2_E ? 'E' : '-',
                    entry->mas7_3 & MAS3_U0 ? '0' : '-',
                    entry->mas7_3 & MAS3_U1 ? '1' : '-',
                    entry->mas7_3 & MAS3_U2 ? '2' : '-',
                    entry->mas7_3 & MAS3_U3 ? '3' : '-');
    }
}

static void mmubooke206_dump_mmu(CPUPPCState *env)
{
    int offset = 0;
    int i;

#ifdef CONFIG_KVM
    if (kvm_enabled() && !env->kvm_sw_tlb) {
        qemu_printf("Cannot access KVM TLB\n");
        return;
    }
#endif

    for (i = 0; i < BOOKE206_MAX_TLBN; i++) {
        int size = booke206_tlb_size(env, i);

        if (size == 0) {
            continue;
        }

        mmubooke206_dump_one_tlb(env, i, offset, size);
        offset += size;
    }
}

static void mmu6xx_dump_BATs(CPUPPCState *env, int type)
{
    target_ulong *BATlt, *BATut, *BATu, *BATl;
    target_ulong BEPIl, BEPIu, bl;
    int i;

    switch (type) {
    case ACCESS_CODE:
        BATlt = env->IBAT[1];
        BATut = env->IBAT[0];
        break;
    default:
        BATlt = env->DBAT[1];
        BATut = env->DBAT[0];
        break;
    }

    for (i = 0; i < env->nb_BATs; i++) {
        BATu = &BATut[i];
        BATl = &BATlt[i];
        BEPIu = *BATu & 0xF0000000;
        BEPIl = *BATu & 0x0FFE0000;
        bl = (*BATu & 0x00001FFC) << 15;
        qemu_printf("%s BAT%d BATu " TARGET_FMT_lx
                    " BATl " TARGET_FMT_lx "\n\t" TARGET_FMT_lx " "
                    TARGET_FMT_lx " " TARGET_FMT_lx "\n",
                    type == ACCESS_CODE ? "code" : "data", i,
                    *BATu, *BATl, BEPIu, BEPIl, bl);
    }
}

static void mmu6xx_dump_mmu(CPUPPCState *env)
{
    PowerPCCPU *cpu = env_archcpu(env);
    ppc6xx_tlb_t *tlb;
    target_ulong sr;
    int type, way, entry, i;

    qemu_printf("HTAB base = 0x%"HWADDR_PRIx"\n", ppc_hash32_hpt_base(cpu));
    qemu_printf("HTAB mask = 0x%"HWADDR_PRIx"\n", ppc_hash32_hpt_mask(cpu));

    qemu_printf("\nSegment registers:\n");
    for (i = 0; i < 32; i++) {
        sr = env->sr[i];
        if (sr & 0x80000000) {
            qemu_printf("%02d T=%d Ks=%d Kp=%d BUID=0x%03x "
                        "CNTLR_SPEC=0x%05x\n", i,
                        sr & 0x80000000 ? 1 : 0, sr & 0x40000000 ? 1 : 0,
                        sr & 0x20000000 ? 1 : 0, (uint32_t)((sr >> 20) & 0x1FF),
                        (uint32_t)(sr & 0xFFFFF));
        } else {
            qemu_printf("%02d T=%d Ks=%d Kp=%d N=%d VSID=0x%06x\n", i,
                        sr & 0x80000000 ? 1 : 0, sr & 0x40000000 ? 1 : 0,
                        sr & 0x20000000 ? 1 : 0, sr & 0x10000000 ? 1 : 0,
                        (uint32_t)(sr & 0x00FFFFFF));
        }
    }

    qemu_printf("\nBATs:\n");
    mmu6xx_dump_BATs(env, ACCESS_INT);
    mmu6xx_dump_BATs(env, ACCESS_CODE);

    if (env->id_tlbs != 1) {
        qemu_printf("ERROR: 6xx MMU should have separate TLBs"
                    " for code and data\n");
    }

    qemu_printf("\nTLBs [EPN EPN + SIZE]\n");

    for (type = 0; type < 2; type++) {
        for (way = 0; way < env->nb_ways; way++) {
            for (entry = env->nb_tlb * type + env->tlb_per_way * way;
                 entry < (env->nb_tlb * type + env->tlb_per_way * (way + 1));
                 entry++) {

                tlb = &env->tlb.tlb6[entry];
                qemu_printf("%s TLB %02d/%02d way:%d %s ["
                            TARGET_FMT_lx " " TARGET_FMT_lx "]\n",
                            type ? "code" : "data", entry % env->nb_tlb,
                            env->nb_tlb, way,
                            pte_is_valid(tlb->pte0) ? "valid" : "inval",
                            tlb->EPN, tlb->EPN + TARGET_PAGE_SIZE);
            }
        }
    }
}

void dump_mmu(CPUPPCState *env)
{
    switch (env->mmu_model) {
    case POWERPC_MMU_BOOKE:
        mmubooke_dump_mmu(env);
        break;
    case POWERPC_MMU_BOOKE206:
        mmubooke206_dump_mmu(env);
        break;
    case POWERPC_MMU_SOFT_6xx:
        mmu6xx_dump_mmu(env);
        break;
#if defined(TARGET_PPC64)
    case POWERPC_MMU_64B:
    case POWERPC_MMU_2_03:
    case POWERPC_MMU_2_06:
    case POWERPC_MMU_2_07:
        dump_slb(env_archcpu(env));
        break;
    case POWERPC_MMU_3_00:
        if (ppc64_v3_radix(env_archcpu(env))) {
            qemu_log_mask(LOG_UNIMP, "%s: the PPC64 MMU is unsupported\n",
                          __func__);
        } else {
            dump_slb(env_archcpu(env));
        }
        break;
#endif
    default:
        qemu_log_mask(LOG_UNIMP, "%s: unimplemented\n", __func__);
    }
}

/*
 * Translation disabled: the effective address is used as the real address
 * and full read/write/execute access is granted for every MMU model that
 * can still reach this helper.
 */
static int check_physical(CPUPPCState *env, mmu_ctx_t *ctx, target_ulong eaddr,
                          MMUAccessType access_type)
{
    ctx->raddr = eaddr;
    ctx->prot = PAGE_READ | PAGE_EXEC;

    switch (env->mmu_model) {
    case POWERPC_MMU_SOFT_6xx:
    case POWERPC_MMU_SOFT_4xx:
    case POWERPC_MMU_REAL:
    case POWERPC_MMU_BOOKE:
        ctx->prot |= PAGE_WRITE;
        break;

    default:
        /* Caller's checks mean we should never get here for other models */
        g_assert_not_reached();
    }

    return 0;
}

int get_physical_address_wtlb(CPUPPCState *env, mmu_ctx_t *ctx,
                              target_ulong eaddr,
                              MMUAccessType access_type, int type,
                              int mmu_idx)
{
    int ret = -1;
    bool real_mode;

    real_mode = (type == ACCESS_CODE) ? !FIELD_EX64(env->msr, MSR, IR)
                                      : !FIELD_EX64(env->msr, MSR, DR);

    switch (env->mmu_model) {
    case POWERPC_MMU_SOFT_6xx:
        if (real_mode) {
            ret = check_physical(env, ctx, eaddr, access_type);
        } else {
            ret = mmu6xx_get_physical_address(env, ctx, eaddr, access_type,
                                              type);
        }
        break;

    case POWERPC_MMU_SOFT_4xx:
        if (real_mode) {
            ret = check_physical(env, ctx, eaddr, access_type);
        } else {
            ret = mmu40x_get_physical_address(env, ctx, eaddr, access_type);
        }
        break;
    case POWERPC_MMU_BOOKE:
        ret = mmubooke_get_physical_address(env, ctx, eaddr, access_type);
        break;
    case POWERPC_MMU_BOOKE206:
        ret = mmubooke206_get_physical_address(env, ctx, eaddr, access_type,
                                               mmu_idx);
        break;
    case POWERPC_MMU_REAL:
        if (real_mode) {
            ret = check_physical(env, ctx, eaddr, access_type);
        } else {
            cpu_abort(env_cpu(env),
                      "PowerPC in real mode does not do any translation\n");
        }
        return -1;
    default:
        cpu_abort(env_cpu(env), "Unknown or invalid MMU model\n");
        return -1;
    }

    return ret;
}

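/*
 * On a BookE 2.06 TLB miss, preload the MAS registers with the defaults from
 * MAS4 plus the faulting EPN, address space and TID so that the guest miss
 * handler can fill in the remaining fields and execute tlbwe.  A simple
 * round-robin scheme selects the victim way reported in MAS0[ESEL]/[NV].
 */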
static void booke206_update_mas_tlb_miss(CPUPPCState *env, target_ulong address,
                                         MMUAccessType access_type, int mmu_idx)
{
    uint32_t epid;
    bool as, pr;
    uint32_t missed_tid = 0;
    bool use_epid = mmubooke206_get_as(env, mmu_idx, &epid, &as, &pr);

    if (access_type == MMU_INST_FETCH) {
        as = FIELD_EX64(env->msr, MSR, IR);
    }
    env->spr[SPR_BOOKE_MAS0] = env->spr[SPR_BOOKE_MAS4] & MAS4_TLBSELD_MASK;
    env->spr[SPR_BOOKE_MAS1] = env->spr[SPR_BOOKE_MAS4] & MAS4_TSIZED_MASK;
    env->spr[SPR_BOOKE_MAS2] = env->spr[SPR_BOOKE_MAS4] & MAS4_WIMGED_MASK;
    env->spr[SPR_BOOKE_MAS3] = 0;
    env->spr[SPR_BOOKE_MAS6] = 0;
    env->spr[SPR_BOOKE_MAS7] = 0;

    /* AS */
    if (as) {
        env->spr[SPR_BOOKE_MAS1] |= MAS1_TS;
        env->spr[SPR_BOOKE_MAS6] |= MAS6_SAS;
    }

    env->spr[SPR_BOOKE_MAS1] |= MAS1_VALID;
    env->spr[SPR_BOOKE_MAS2] |= address & MAS2_EPN_MASK;

    if (!use_epid) {
        switch (env->spr[SPR_BOOKE_MAS4] & MAS4_TIDSELD_PIDZ) {
        case MAS4_TIDSELD_PID0:
            missed_tid = env->spr[SPR_BOOKE_PID];
            break;
        case MAS4_TIDSELD_PID1:
            missed_tid = env->spr[SPR_BOOKE_PID1];
            break;
        case MAS4_TIDSELD_PID2:
            missed_tid = env->spr[SPR_BOOKE_PID2];
            break;
        }
        env->spr[SPR_BOOKE_MAS6] |= env->spr[SPR_BOOKE_PID] << 16;
    } else {
        missed_tid = epid;
        env->spr[SPR_BOOKE_MAS6] |= missed_tid << 16;
    }
    env->spr[SPR_BOOKE_MAS1] |= (missed_tid << MAS1_TID_SHIFT);

    /* next victim logic */
    env->spr[SPR_BOOKE_MAS0] |= env->last_way << MAS0_ESEL_SHIFT;
    env->last_way++;
    env->last_way &= booke206_tlb_ways(env, 0) - 1;
    env->spr[SPR_BOOKE_MAS0] |= env->last_way << MAS0_NV_SHIFT;
}

/* Perform address translation */
/* TODO: Split this by mmu_model. */
static bool ppc_jumbo_xlate(PowerPCCPU *cpu, vaddr eaddr,
                            MMUAccessType access_type,
                            hwaddr *raddrp, int *psizep, int *protp,
                            int mmu_idx, bool guest_visible)
{
    CPUState *cs = CPU(cpu);
    CPUPPCState *env = &cpu->env;
    mmu_ctx_t ctx;
    int type;
    int ret;

    if (access_type == MMU_INST_FETCH) {
        /* code access */
        type = ACCESS_CODE;
    } else if (guest_visible) {
        /* data access */
        type = env->access_type;
    } else {
        type = ACCESS_INT;
    }

    ret = get_physical_address_wtlb(env, &ctx, eaddr, access_type,
                                    type, mmu_idx);
    if (ret == 0) {
        *raddrp = ctx.raddr;
        *protp = ctx.prot;
        *psizep = TARGET_PAGE_BITS;
        return true;
    }

    if (guest_visible) {
        log_cpu_state_mask(CPU_LOG_MMU, cs, 0);
        if (type == ACCESS_CODE) {
            switch (ret) {
            case -1:
                /* No matches in page tables or TLB */
                switch (env->mmu_model) {
                case POWERPC_MMU_SOFT_6xx:
                    cs->exception_index = POWERPC_EXCP_IFTLB;
                    env->error_code = 1 << 18;
                    env->spr[SPR_IMISS] = eaddr;
                    env->spr[SPR_ICMP] = 0x80000000 | ctx.ptem;
                    goto tlb_miss;
                case POWERPC_MMU_SOFT_4xx:
                    cs->exception_index = POWERPC_EXCP_ITLB;
                    env->error_code = 0;
                    env->spr[SPR_40x_DEAR] = eaddr;
                    env->spr[SPR_40x_ESR] = 0x00000000;
                    break;
                case POWERPC_MMU_BOOKE206:
                    booke206_update_mas_tlb_miss(env, eaddr, 2, mmu_idx);
                    /* fall through */
                case POWERPC_MMU_BOOKE:
                    cs->exception_index = POWERPC_EXCP_ITLB;
                    env->error_code = 0;
                    env->spr[SPR_BOOKE_DEAR] = eaddr;
                    env->spr[SPR_BOOKE_ESR] = mmubooke206_esr(mmu_idx, MMU_DATA_LOAD);
                    break;
                case POWERPC_MMU_REAL:
                    cpu_abort(cs, "PowerPC in real mode should never raise "
                              "any MMU exceptions\n");
                default:
                    cpu_abort(cs, "Unknown or invalid MMU model\n");
                }
                break;
            case -2:
                /* Access rights violation */
                cs->exception_index = POWERPC_EXCP_ISI;
                if ((env->mmu_model == POWERPC_MMU_BOOKE) ||
                    (env->mmu_model == POWERPC_MMU_BOOKE206)) {
                    env->error_code = 0;
                } else {
                    env->error_code = 0x08000000;
                }
                break;
            case -3:
                /* No execute protection violation */
                if ((env->mmu_model == POWERPC_MMU_BOOKE) ||
                    (env->mmu_model == POWERPC_MMU_BOOKE206)) {
                    env->spr[SPR_BOOKE_ESR] = 0x00000000;
                    env->error_code = 0;
                } else {
                    env->error_code = 0x10000000;
                }
                cs->exception_index = POWERPC_EXCP_ISI;
                break;
            case -4:
                /* Direct store exception */
                /* No code fetch is allowed in direct-store areas */
                cs->exception_index = POWERPC_EXCP_ISI;
                if ((env->mmu_model == POWERPC_MMU_BOOKE) ||
                    (env->mmu_model == POWERPC_MMU_BOOKE206)) {
                    env->error_code = 0;
                } else {
                    env->error_code = 0x10000000;
                }
                break;
            }
        } else {
            switch (ret) {
            case -1:
                /* No matches in page tables or TLB */
                switch (env->mmu_model) {
                case POWERPC_MMU_SOFT_6xx:
                    if (access_type == MMU_DATA_STORE) {
                        cs->exception_index = POWERPC_EXCP_DSTLB;
                        env->error_code = 1 << 16;
                    } else {
                        cs->exception_index = POWERPC_EXCP_DLTLB;
                        env->error_code = 0;
                    }
                    env->spr[SPR_DMISS] = eaddr;
                    env->spr[SPR_DCMP] = 0x80000000 | ctx.ptem;
                tlb_miss:
                    env->error_code |= ctx.key << 19;
                    env->spr[SPR_HASH1] = ppc_hash32_hpt_base(cpu) +
                        get_pteg_offset32(cpu, ctx.hash[0]);
                    env->spr[SPR_HASH2] = ppc_hash32_hpt_base(cpu) +
                        get_pteg_offset32(cpu, ctx.hash[1]);
                    break;
                case POWERPC_MMU_SOFT_4xx:
                    cs->exception_index = POWERPC_EXCP_DTLB;
                    env->error_code = 0;
                    env->spr[SPR_40x_DEAR] = eaddr;
                    if (access_type == MMU_DATA_STORE) {
                        env->spr[SPR_40x_ESR] = 0x00800000;
                    } else {
                        env->spr[SPR_40x_ESR] = 0x00000000;
                    }
                    break;
                case POWERPC_MMU_BOOKE206:
                    booke206_update_mas_tlb_miss(env, eaddr, access_type, mmu_idx);
                    /* fall through */
                case POWERPC_MMU_BOOKE:
                    cs->exception_index = POWERPC_EXCP_DTLB;
                    env->error_code = 0;
                    env->spr[SPR_BOOKE_DEAR] = eaddr;
                    env->spr[SPR_BOOKE_ESR] = mmubooke206_esr(mmu_idx, access_type);
                    break;
                case POWERPC_MMU_REAL:
                    cpu_abort(cs, "PowerPC in real mode should never raise "
                              "any MMU exceptions\n");
                default:
                    cpu_abort(cs, "Unknown or invalid MMU model\n");
                }
                break;
            case -2:
                /* Access rights violation */
                cs->exception_index = POWERPC_EXCP_DSI;
                env->error_code = 0;
                if (env->mmu_model == POWERPC_MMU_SOFT_4xx) {
                    env->spr[SPR_40x_DEAR] = eaddr;
                    if (access_type == MMU_DATA_STORE) {
                        env->spr[SPR_40x_ESR] |= 0x00800000;
                    }
                } else if ((env->mmu_model == POWERPC_MMU_BOOKE) ||
                           (env->mmu_model == POWERPC_MMU_BOOKE206)) {
                    env->spr[SPR_BOOKE_DEAR] = eaddr;
                    env->spr[SPR_BOOKE_ESR] = mmubooke206_esr(mmu_idx, access_type);
                } else {
                    env->spr[SPR_DAR] = eaddr;
                    if (access_type == MMU_DATA_STORE) {
                        env->spr[SPR_DSISR] = 0x0A000000;
                    } else {
                        env->spr[SPR_DSISR] = 0x08000000;
                    }
                }
                break;
            case -4:
                /* Direct store exception */
                switch (type) {
                case ACCESS_FLOAT:
                    /* Floating point load/store */
                    cs->exception_index = POWERPC_EXCP_ALIGN;
                    env->error_code = POWERPC_EXCP_ALIGN_FP;
                    env->spr[SPR_DAR] = eaddr;
                    break;
                case ACCESS_RES:
                    /* lwarx, ldarx or stwcx. */
                    cs->exception_index = POWERPC_EXCP_DSI;
                    env->error_code = 0;
                    env->spr[SPR_DAR] = eaddr;
                    if (access_type == MMU_DATA_STORE) {
                        env->spr[SPR_DSISR] = 0x06000000;
                    } else {
                        env->spr[SPR_DSISR] = 0x04000000;
                    }
                    break;
                case ACCESS_EXT:
                    /* eciwx or ecowx */
                    cs->exception_index = POWERPC_EXCP_DSI;
                    env->error_code = 0;
                    env->spr[SPR_DAR] = eaddr;
                    if (access_type == MMU_DATA_STORE) {
                        env->spr[SPR_DSISR] = 0x06100000;
                    } else {
                        env->spr[SPR_DSISR] = 0x04100000;
                    }
                    break;
                default:
                    printf("DSI: invalid exception (%d)\n", ret);
                    cs->exception_index = POWERPC_EXCP_PROGRAM;
                    env->error_code =
                        POWERPC_EXCP_INVAL | POWERPC_EXCP_INVAL_INVAL;
                    env->spr[SPR_DAR] = eaddr;
                    break;
                }
                break;
            }
        }
    }
    return false;
}

/*****************************************************************************/

bool ppc_xlate(PowerPCCPU *cpu, vaddr eaddr, MMUAccessType access_type,
               hwaddr *raddrp, int *psizep, int *protp,
               int mmu_idx, bool guest_visible)
{
    switch (cpu->env.mmu_model) {
#if defined(TARGET_PPC64)
    case POWERPC_MMU_3_00:
        if (ppc64_v3_radix(cpu)) {
            return ppc_radix64_xlate(cpu, eaddr, access_type, raddrp,
                                     psizep, protp, mmu_idx, guest_visible);
        }
        /* fall through */
    case POWERPC_MMU_64B:
    case POWERPC_MMU_2_03:
    case POWERPC_MMU_2_06:
    case POWERPC_MMU_2_07:
        return ppc_hash64_xlate(cpu, eaddr, access_type,
                                raddrp, psizep, protp, mmu_idx, guest_visible);
#endif

    case POWERPC_MMU_32B:
        return ppc_hash32_xlate(cpu, eaddr, access_type, raddrp,
                                psizep, protp, mmu_idx, guest_visible);
    case POWERPC_MMU_MPC8xx:
        cpu_abort(env_cpu(&cpu->env), "MPC8xx MMU model is not implemented\n");
    default:
        return ppc_jumbo_xlate(cpu, eaddr, access_type, raddrp,
                               psizep, protp, mmu_idx, guest_visible);
    }
}

hwaddr ppc_cpu_get_phys_page_debug(CPUState *cs, vaddr addr)
{
    PowerPCCPU *cpu = POWERPC_CPU(cs);
    hwaddr raddr;
    int s, p;

    /*
     * Some MMUs have separate TLBs for code and data. If we only
     * try an MMU_DATA_LOAD, we may not be able to read instructions
     * mapped by code TLBs, so we also try a MMU_INST_FETCH.
     */
    if (ppc_xlate(cpu, addr, MMU_DATA_LOAD, &raddr, &s, &p,
                  ppc_env_mmu_index(&cpu->env, false), false) ||
        ppc_xlate(cpu, addr, MMU_INST_FETCH, &raddr, &s, &p,
                  ppc_env_mmu_index(&cpu->env, true), false)) {
        return raddr & TARGET_PAGE_MASK;
    }
    return -1;
}