1 /* 2 * PowerPC MMU, TLB, SLB and BAT emulation helpers for QEMU. 3 * 4 * Copyright (c) 2003-2007 Jocelyn Mayer 5 * 6 * This library is free software; you can redistribute it and/or 7 * modify it under the terms of the GNU Lesser General Public 8 * License as published by the Free Software Foundation; either 9 * version 2.1 of the License, or (at your option) any later version. 10 * 11 * This library is distributed in the hope that it will be useful, 12 * but WITHOUT ANY WARRANTY; without even the implied warranty of 13 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU 14 * Lesser General Public License for more details. 15 * 16 * You should have received a copy of the GNU Lesser General Public 17 * License along with this library; if not, see <http://www.gnu.org/licenses/>. 18 */ 19 20 #include "qemu/osdep.h" 21 #include "qemu/units.h" 22 #include "cpu.h" 23 #include "sysemu/kvm.h" 24 #include "kvm_ppc.h" 25 #include "mmu-hash64.h" 26 #include "mmu-hash32.h" 27 #include "exec/exec-all.h" 28 #include "exec/log.h" 29 #include "helper_regs.h" 30 #include "qemu/error-report.h" 31 #include "qemu/qemu-print.h" 32 #include "internal.h" 33 #include "mmu-book3s-v3.h" 34 #include "mmu-radix64.h" 35 36 /* #define DUMP_PAGE_TABLES */ 37 38 void ppc_store_sdr1(CPUPPCState *env, target_ulong value) 39 { 40 PowerPCCPU *cpu = env_archcpu(env); 41 qemu_log_mask(CPU_LOG_MMU, "%s: " TARGET_FMT_lx "\n", __func__, value); 42 assert(!cpu->env.has_hv_mode || !cpu->vhyp); 43 #if defined(TARGET_PPC64) 44 if (mmu_is_64bit(env->mmu_model)) { 45 target_ulong sdr_mask = SDR_64_HTABORG | SDR_64_HTABSIZE; 46 target_ulong htabsize = value & SDR_64_HTABSIZE; 47 48 if (value & ~sdr_mask) { 49 qemu_log_mask(LOG_GUEST_ERROR, "Invalid bits 0x"TARGET_FMT_lx 50 " set in SDR1", value & ~sdr_mask); 51 value &= sdr_mask; 52 } 53 if (htabsize > 28) { 54 qemu_log_mask(LOG_GUEST_ERROR, "Invalid HTABSIZE 0x" TARGET_FMT_lx 55 " stored in SDR1", htabsize); 56 return; 57 } 58 } 59 #endif /* defined(TARGET_PPC64) */ 60 /* FIXME: Should check for valid HTABMASK values in 32-bit case */ 61 env->spr[SPR_SDR1] = value; 62 } 63 64 /*****************************************************************************/ 65 /* PowerPC MMU emulation */ 66 67 static int pp_check(int key, int pp, int nx) 68 { 69 int access; 70 71 /* Compute access rights */ 72 access = 0; 73 if (key == 0) { 74 switch (pp) { 75 case 0x0: 76 case 0x1: 77 case 0x2: 78 access |= PAGE_WRITE; 79 /* fall through */ 80 case 0x3: 81 access |= PAGE_READ; 82 break; 83 } 84 } else { 85 switch (pp) { 86 case 0x0: 87 access = 0; 88 break; 89 case 0x1: 90 case 0x3: 91 access = PAGE_READ; 92 break; 93 case 0x2: 94 access = PAGE_READ | PAGE_WRITE; 95 break; 96 } 97 } 98 if (nx == 0) { 99 access |= PAGE_EXEC; 100 } 101 102 return access; 103 } 104 105 static int check_prot(int prot, MMUAccessType access_type) 106 { 107 return prot & prot_for_access_type(access_type) ? 0 : -2; 108 } 109 110 int ppc6xx_tlb_getnum(CPUPPCState *env, target_ulong eaddr, 111 int way, int is_code) 112 { 113 int nr; 114 115 /* Select TLB num in a way from address */ 116 nr = (eaddr >> TARGET_PAGE_BITS) & (env->tlb_per_way - 1); 117 /* Select TLB way */ 118 nr += env->tlb_per_way * way; 119 /* 6xx have separate TLBs for instructions and data */ 120 if (is_code && env->id_tlbs == 1) { 121 nr += env->nb_tlb; 122 } 123 124 return nr; 125 } 126 127 static int ppc6xx_tlb_pte_check(mmu_ctx_t *ctx, target_ulong pte0, 128 target_ulong pte1, int h, 129 MMUAccessType access_type) 130 { 131 target_ulong ptem, mmask; 132 int access, ret, pteh, ptev, pp; 133 134 ret = -1; 135 /* Check validity and table match */ 136 ptev = pte_is_valid(pte0); 137 pteh = (pte0 >> 6) & 1; 138 if (ptev && h == pteh) { 139 /* Check vsid & api */ 140 ptem = pte0 & PTE_PTEM_MASK; 141 mmask = PTE_CHECK_MASK; 142 pp = pte1 & 0x00000003; 143 if (ptem == ctx->ptem) { 144 if (ctx->raddr != (hwaddr)-1ULL) { 145 /* all matches should have equal RPN, WIMG & PP */ 146 if ((ctx->raddr & mmask) != (pte1 & mmask)) { 147 qemu_log_mask(CPU_LOG_MMU, "Bad RPN/WIMG/PP\n"); 148 return -3; 149 } 150 } 151 /* Compute access rights */ 152 access = pp_check(ctx->key, pp, ctx->nx); 153 /* Keep the matching PTE information */ 154 ctx->raddr = pte1; 155 ctx->prot = access; 156 ret = check_prot(ctx->prot, access_type); 157 if (ret == 0) { 158 /* Access granted */ 159 qemu_log_mask(CPU_LOG_MMU, "PTE access granted !\n"); 160 } else { 161 /* Access right violation */ 162 qemu_log_mask(CPU_LOG_MMU, "PTE access rejected\n"); 163 } 164 } 165 } 166 167 return ret; 168 } 169 170 static int pte_update_flags(mmu_ctx_t *ctx, target_ulong *pte1p, 171 int ret, MMUAccessType access_type) 172 { 173 int store = 0; 174 175 /* Update page flags */ 176 if (!(*pte1p & 0x00000100)) { 177 /* Update accessed flag */ 178 *pte1p |= 0x00000100; 179 store = 1; 180 } 181 if (!(*pte1p & 0x00000080)) { 182 if (access_type == MMU_DATA_STORE && ret == 0) { 183 /* Update changed flag */ 184 *pte1p |= 0x00000080; 185 store = 1; 186 } else { 187 /* Force page fault for first write access */ 188 ctx->prot &= ~PAGE_WRITE; 189 } 190 } 191 192 return store; 193 } 194 195 /* Software driven TLB helpers */ 196 197 static int ppc6xx_tlb_check(CPUPPCState *env, mmu_ctx_t *ctx, 198 target_ulong eaddr, MMUAccessType access_type) 199 { 200 ppc6xx_tlb_t *tlb; 201 int nr, best, way; 202 int ret; 203 204 best = -1; 205 ret = -1; /* No TLB found */ 206 for (way = 0; way < env->nb_ways; way++) { 207 nr = ppc6xx_tlb_getnum(env, eaddr, way, access_type == MMU_INST_FETCH); 208 tlb = &env->tlb.tlb6[nr]; 209 /* This test "emulates" the PTE index match for hardware TLBs */ 210 if ((eaddr & TARGET_PAGE_MASK) != tlb->EPN) { 211 qemu_log_mask(CPU_LOG_MMU, "TLB %d/%d %s [" TARGET_FMT_lx 212 " " TARGET_FMT_lx "] <> " TARGET_FMT_lx "\n", 213 nr, env->nb_tlb, 214 pte_is_valid(tlb->pte0) ? "valid" : "inval", 215 tlb->EPN, tlb->EPN + TARGET_PAGE_SIZE, eaddr); 216 continue; 217 } 218 qemu_log_mask(CPU_LOG_MMU, "TLB %d/%d %s " TARGET_FMT_lx " <> " 219 TARGET_FMT_lx " " TARGET_FMT_lx " %c %c\n", 220 nr, env->nb_tlb, 221 pte_is_valid(tlb->pte0) ? "valid" : "inval", 222 tlb->EPN, eaddr, tlb->pte1, 223 access_type == MMU_DATA_STORE ? 'S' : 'L', 224 access_type == MMU_INST_FETCH ? 'I' : 'D'); 225 switch (ppc6xx_tlb_pte_check(ctx, tlb->pte0, tlb->pte1, 226 0, access_type)) { 227 case -3: 228 /* TLB inconsistency */ 229 return -1; 230 case -2: 231 /* Access violation */ 232 ret = -2; 233 best = nr; 234 break; 235 case -1: 236 default: 237 /* No match */ 238 break; 239 case 0: 240 /* access granted */ 241 /* 242 * XXX: we should go on looping to check all TLBs 243 * consistency but we can speed-up the whole thing as 244 * the result would be undefined if TLBs are not 245 * consistent. 246 */ 247 ret = 0; 248 best = nr; 249 goto done; 250 } 251 } 252 if (best != -1) { 253 done: 254 qemu_log_mask(CPU_LOG_MMU, "found TLB at addr " HWADDR_FMT_plx 255 " prot=%01x ret=%d\n", 256 ctx->raddr & TARGET_PAGE_MASK, ctx->prot, ret); 257 /* Update page flags */ 258 pte_update_flags(ctx, &env->tlb.tlb6[best].pte1, ret, access_type); 259 } 260 261 return ret; 262 } 263 264 /* Perform BAT hit & translation */ 265 static inline void bat_size_prot(CPUPPCState *env, target_ulong *blp, 266 int *validp, int *protp, target_ulong *BATu, 267 target_ulong *BATl) 268 { 269 target_ulong bl; 270 int pp, valid, prot; 271 272 bl = (*BATu & 0x00001FFC) << 15; 273 valid = 0; 274 prot = 0; 275 if ((!FIELD_EX64(env->msr, MSR, PR) && (*BATu & 0x00000002)) || 276 (FIELD_EX64(env->msr, MSR, PR) && (*BATu & 0x00000001))) { 277 valid = 1; 278 pp = *BATl & 0x00000003; 279 if (pp != 0) { 280 prot = PAGE_READ | PAGE_EXEC; 281 if (pp == 0x2) { 282 prot |= PAGE_WRITE; 283 } 284 } 285 } 286 *blp = bl; 287 *validp = valid; 288 *protp = prot; 289 } 290 291 static int get_bat_6xx_tlb(CPUPPCState *env, mmu_ctx_t *ctx, 292 target_ulong virtual, MMUAccessType access_type) 293 { 294 target_ulong *BATlt, *BATut, *BATu, *BATl; 295 target_ulong BEPIl, BEPIu, bl; 296 int i, valid, prot; 297 int ret = -1; 298 bool ifetch = access_type == MMU_INST_FETCH; 299 300 qemu_log_mask(CPU_LOG_MMU, "%s: %cBAT v " TARGET_FMT_lx "\n", __func__, 301 ifetch ? 'I' : 'D', virtual); 302 if (ifetch) { 303 BATlt = env->IBAT[1]; 304 BATut = env->IBAT[0]; 305 } else { 306 BATlt = env->DBAT[1]; 307 BATut = env->DBAT[0]; 308 } 309 for (i = 0; i < env->nb_BATs; i++) { 310 BATu = &BATut[i]; 311 BATl = &BATlt[i]; 312 BEPIu = *BATu & 0xF0000000; 313 BEPIl = *BATu & 0x0FFE0000; 314 bat_size_prot(env, &bl, &valid, &prot, BATu, BATl); 315 qemu_log_mask(CPU_LOG_MMU, "%s: %cBAT%d v " TARGET_FMT_lx " BATu " 316 TARGET_FMT_lx " BATl " TARGET_FMT_lx "\n", __func__, 317 ifetch ? 'I' : 'D', i, virtual, *BATu, *BATl); 318 if ((virtual & 0xF0000000) == BEPIu && 319 ((virtual & 0x0FFE0000) & ~bl) == BEPIl) { 320 /* BAT matches */ 321 if (valid != 0) { 322 /* Get physical address */ 323 ctx->raddr = (*BATl & 0xF0000000) | 324 ((virtual & 0x0FFE0000 & bl) | (*BATl & 0x0FFE0000)) | 325 (virtual & 0x0001F000); 326 /* Compute access rights */ 327 ctx->prot = prot; 328 ret = check_prot(ctx->prot, access_type); 329 if (ret == 0) { 330 qemu_log_mask(CPU_LOG_MMU, "BAT %d match: r " HWADDR_FMT_plx 331 " prot=%c%c\n", i, ctx->raddr, 332 ctx->prot & PAGE_READ ? 'R' : '-', 333 ctx->prot & PAGE_WRITE ? 'W' : '-'); 334 } 335 break; 336 } 337 } 338 } 339 if (ret < 0) { 340 if (qemu_log_enabled()) { 341 qemu_log_mask(CPU_LOG_MMU, "no BAT match for " 342 TARGET_FMT_lx ":\n", virtual); 343 for (i = 0; i < 4; i++) { 344 BATu = &BATut[i]; 345 BATl = &BATlt[i]; 346 BEPIu = *BATu & 0xF0000000; 347 BEPIl = *BATu & 0x0FFE0000; 348 bl = (*BATu & 0x00001FFC) << 15; 349 qemu_log_mask(CPU_LOG_MMU, "%s: %cBAT%d v " 350 TARGET_FMT_lx " BATu " TARGET_FMT_lx 351 " BATl " TARGET_FMT_lx "\n\t" TARGET_FMT_lx " " 352 TARGET_FMT_lx " " TARGET_FMT_lx "\n", 353 __func__, ifetch ? 'I' : 'D', i, virtual, 354 *BATu, *BATl, BEPIu, BEPIl, bl); 355 } 356 } 357 } 358 /* No hit */ 359 return ret; 360 } 361 362 /* Perform segment based translation */ 363 static int get_segment_6xx_tlb(CPUPPCState *env, mmu_ctx_t *ctx, 364 target_ulong eaddr, MMUAccessType access_type, 365 int type) 366 { 367 PowerPCCPU *cpu = env_archcpu(env); 368 hwaddr hash; 369 target_ulong vsid; 370 int ds, target_page_bits; 371 bool pr; 372 int ret; 373 target_ulong sr, pgidx; 374 375 pr = FIELD_EX64(env->msr, MSR, PR); 376 ctx->eaddr = eaddr; 377 378 sr = env->sr[eaddr >> 28]; 379 ctx->key = (((sr & 0x20000000) && pr) || 380 ((sr & 0x40000000) && !pr)) ? 1 : 0; 381 ds = sr & 0x80000000 ? 1 : 0; 382 ctx->nx = sr & 0x10000000 ? 1 : 0; 383 vsid = sr & 0x00FFFFFF; 384 target_page_bits = TARGET_PAGE_BITS; 385 qemu_log_mask(CPU_LOG_MMU, 386 "Check segment v=" TARGET_FMT_lx " %d " TARGET_FMT_lx 387 " nip=" TARGET_FMT_lx " lr=" TARGET_FMT_lx 388 " ir=%d dr=%d pr=%d %d t=%d\n", 389 eaddr, (int)(eaddr >> 28), sr, env->nip, env->lr, 390 (int)FIELD_EX64(env->msr, MSR, IR), 391 (int)FIELD_EX64(env->msr, MSR, DR), pr ? 1 : 0, 392 access_type == MMU_DATA_STORE, type); 393 pgidx = (eaddr & ~SEGMENT_MASK_256M) >> target_page_bits; 394 hash = vsid ^ pgidx; 395 ctx->ptem = (vsid << 7) | (pgidx >> 10); 396 397 qemu_log_mask(CPU_LOG_MMU, 398 "pte segment: key=%d ds %d nx %d vsid " TARGET_FMT_lx "\n", 399 ctx->key, ds, ctx->nx, vsid); 400 ret = -1; 401 if (!ds) { 402 /* Check if instruction fetch is allowed, if needed */ 403 if (type != ACCESS_CODE || ctx->nx == 0) { 404 /* Page address translation */ 405 qemu_log_mask(CPU_LOG_MMU, "htab_base " HWADDR_FMT_plx 406 " htab_mask " HWADDR_FMT_plx 407 " hash " HWADDR_FMT_plx "\n", 408 ppc_hash32_hpt_base(cpu), ppc_hash32_hpt_mask(cpu), hash); 409 ctx->hash[0] = hash; 410 ctx->hash[1] = ~hash; 411 412 /* Initialize real address with an invalid value */ 413 ctx->raddr = (hwaddr)-1ULL; 414 /* Software TLB search */ 415 ret = ppc6xx_tlb_check(env, ctx, eaddr, access_type); 416 #if defined(DUMP_PAGE_TABLES) 417 if (qemu_loglevel_mask(CPU_LOG_MMU)) { 418 CPUState *cs = env_cpu(env); 419 hwaddr curaddr; 420 uint32_t a0, a1, a2, a3; 421 422 qemu_log("Page table: " HWADDR_FMT_plx " len " HWADDR_FMT_plx 423 "\n", ppc_hash32_hpt_base(cpu), 424 ppc_hash32_hpt_mask(cpu) + 0x80); 425 for (curaddr = ppc_hash32_hpt_base(cpu); 426 curaddr < (ppc_hash32_hpt_base(cpu) 427 + ppc_hash32_hpt_mask(cpu) + 0x80); 428 curaddr += 16) { 429 a0 = ldl_phys(cs->as, curaddr); 430 a1 = ldl_phys(cs->as, curaddr + 4); 431 a2 = ldl_phys(cs->as, curaddr + 8); 432 a3 = ldl_phys(cs->as, curaddr + 12); 433 if (a0 != 0 || a1 != 0 || a2 != 0 || a3 != 0) { 434 qemu_log(HWADDR_FMT_plx ": %08x %08x %08x %08x\n", 435 curaddr, a0, a1, a2, a3); 436 } 437 } 438 } 439 #endif 440 } else { 441 qemu_log_mask(CPU_LOG_MMU, "No access allowed\n"); 442 ret = -3; 443 } 444 } else { 445 qemu_log_mask(CPU_LOG_MMU, "direct store...\n"); 446 /* Direct-store segment : absolutely *BUGGY* for now */ 447 448 switch (type) { 449 case ACCESS_INT: 450 /* Integer load/store : only access allowed */ 451 break; 452 case ACCESS_CODE: 453 /* No code fetch is allowed in direct-store areas */ 454 return -4; 455 case ACCESS_FLOAT: 456 /* Floating point load/store */ 457 return -4; 458 case ACCESS_RES: 459 /* lwarx, ldarx or srwcx. */ 460 return -4; 461 case ACCESS_CACHE: 462 /* 463 * dcba, dcbt, dcbtst, dcbf, dcbi, dcbst, dcbz, or icbi 464 * 465 * Should make the instruction do no-op. As it already do 466 * no-op, it's quite easy :-) 467 */ 468 ctx->raddr = eaddr; 469 return 0; 470 case ACCESS_EXT: 471 /* eciwx or ecowx */ 472 return -4; 473 default: 474 qemu_log_mask(CPU_LOG_MMU, "ERROR: instruction should not need " 475 "address translation\n"); 476 return -4; 477 } 478 if ((access_type == MMU_DATA_STORE || ctx->key != 1) && 479 (access_type == MMU_DATA_LOAD || ctx->key != 0)) { 480 ctx->raddr = eaddr; 481 ret = 2; 482 } else { 483 ret = -2; 484 } 485 } 486 487 return ret; 488 } 489 490 /* Generic TLB check function for embedded PowerPC implementations */ 491 static bool ppcemb_tlb_check(CPUPPCState *env, ppcemb_tlb_t *tlb, 492 hwaddr *raddrp, 493 target_ulong address, uint32_t pid, int i) 494 { 495 target_ulong mask; 496 497 /* Check valid flag */ 498 if (!(tlb->prot & PAGE_VALID)) { 499 return false; 500 } 501 mask = ~(tlb->size - 1); 502 qemu_log_mask(CPU_LOG_MMU, "%s: TLB %d address " TARGET_FMT_lx 503 " PID %u <=> " TARGET_FMT_lx " " TARGET_FMT_lx " %u %x\n", 504 __func__, i, address, pid, tlb->EPN, 505 mask, (uint32_t)tlb->PID, tlb->prot); 506 /* Check PID */ 507 if (tlb->PID != 0 && tlb->PID != pid) { 508 return false; 509 } 510 /* Check effective address */ 511 if ((address & mask) != tlb->EPN) { 512 return false; 513 } 514 *raddrp = (tlb->RPN & mask) | (address & ~mask); 515 return true; 516 } 517 518 /* Generic TLB search function for PowerPC embedded implementations */ 519 int ppcemb_tlb_search(CPUPPCState *env, target_ulong address, uint32_t pid) 520 { 521 ppcemb_tlb_t *tlb; 522 hwaddr raddr; 523 int i; 524 525 for (i = 0; i < env->nb_tlb; i++) { 526 tlb = &env->tlb.tlbe[i]; 527 if (ppcemb_tlb_check(env, tlb, &raddr, address, pid, i)) { 528 return i; 529 } 530 } 531 return -1; 532 } 533 534 static int mmu40x_get_physical_address(CPUPPCState *env, mmu_ctx_t *ctx, 535 target_ulong address, 536 MMUAccessType access_type) 537 { 538 ppcemb_tlb_t *tlb; 539 hwaddr raddr; 540 int i, ret, zsel, zpr, pr; 541 542 ret = -1; 543 raddr = (hwaddr)-1ULL; 544 pr = FIELD_EX64(env->msr, MSR, PR); 545 for (i = 0; i < env->nb_tlb; i++) { 546 tlb = &env->tlb.tlbe[i]; 547 if (!ppcemb_tlb_check(env, tlb, &raddr, address, 548 env->spr[SPR_40x_PID], i)) { 549 continue; 550 } 551 zsel = (tlb->attr >> 4) & 0xF; 552 zpr = (env->spr[SPR_40x_ZPR] >> (30 - (2 * zsel))) & 0x3; 553 qemu_log_mask(CPU_LOG_MMU, 554 "%s: TLB %d zsel %d zpr %d ty %d attr %08x\n", 555 __func__, i, zsel, zpr, access_type, tlb->attr); 556 /* Check execute enable bit */ 557 switch (zpr) { 558 case 0x2: 559 if (pr != 0) { 560 goto check_perms; 561 } 562 /* fall through */ 563 case 0x3: 564 /* All accesses granted */ 565 ctx->prot = PAGE_READ | PAGE_WRITE | PAGE_EXEC; 566 ret = 0; 567 break; 568 case 0x0: 569 if (pr != 0) { 570 /* Raise Zone protection fault. */ 571 env->spr[SPR_40x_ESR] = 1 << 22; 572 ctx->prot = 0; 573 ret = -2; 574 break; 575 } 576 /* fall through */ 577 case 0x1: 578 check_perms: 579 /* Check from TLB entry */ 580 ctx->prot = tlb->prot; 581 ret = check_prot(ctx->prot, access_type); 582 if (ret == -2) { 583 env->spr[SPR_40x_ESR] = 0; 584 } 585 break; 586 } 587 if (ret >= 0) { 588 ctx->raddr = raddr; 589 qemu_log_mask(CPU_LOG_MMU, "%s: access granted " TARGET_FMT_lx 590 " => " HWADDR_FMT_plx 591 " %d %d\n", __func__, address, ctx->raddr, ctx->prot, 592 ret); 593 return 0; 594 } 595 } 596 qemu_log_mask(CPU_LOG_MMU, "%s: access refused " TARGET_FMT_lx 597 " => " HWADDR_FMT_plx 598 " %d %d\n", __func__, address, raddr, ctx->prot, ret); 599 600 return ret; 601 } 602 603 static bool mmubooke_check_pid(CPUPPCState *env, ppcemb_tlb_t *tlb, 604 hwaddr *raddr, target_ulong addr, int i) 605 { 606 if (ppcemb_tlb_check(env, tlb, raddr, addr, env->spr[SPR_BOOKE_PID], i)) { 607 if (!env->nb_pids) { 608 /* Extend the physical address to 36 bits */ 609 *raddr |= (uint64_t)(tlb->RPN & 0xF) << 32; 610 } 611 return true; 612 } else if (!env->nb_pids) { 613 return false; 614 } 615 if (env->spr[SPR_BOOKE_PID1] && 616 ppcemb_tlb_check(env, tlb, raddr, addr, env->spr[SPR_BOOKE_PID1], i)) { 617 return true; 618 } 619 if (env->spr[SPR_BOOKE_PID2] && 620 ppcemb_tlb_check(env, tlb, raddr, addr, env->spr[SPR_BOOKE_PID2], i)) { 621 return true; 622 } 623 return false; 624 } 625 626 static int mmubooke_check_tlb(CPUPPCState *env, ppcemb_tlb_t *tlb, 627 hwaddr *raddr, int *prot, target_ulong address, 628 MMUAccessType access_type, int i) 629 { 630 int prot2; 631 632 if (!mmubooke_check_pid(env, tlb, raddr, address, i)) { 633 qemu_log_mask(CPU_LOG_MMU, "%s: TLB entry not found\n", __func__); 634 return -1; 635 } 636 637 if (FIELD_EX64(env->msr, MSR, PR)) { 638 prot2 = tlb->prot & 0xF; 639 } else { 640 prot2 = (tlb->prot >> 4) & 0xF; 641 } 642 643 /* Check the address space */ 644 if ((access_type == MMU_INST_FETCH ? 645 FIELD_EX64(env->msr, MSR, IR) : 646 FIELD_EX64(env->msr, MSR, DR)) != (tlb->attr & 1)) { 647 qemu_log_mask(CPU_LOG_MMU, "%s: AS doesn't match\n", __func__); 648 return -1; 649 } 650 651 *prot = prot2; 652 if (prot2 & prot_for_access_type(access_type)) { 653 qemu_log_mask(CPU_LOG_MMU, "%s: good TLB!\n", __func__); 654 return 0; 655 } 656 657 qemu_log_mask(CPU_LOG_MMU, "%s: no prot match: %x\n", __func__, prot2); 658 return access_type == MMU_INST_FETCH ? -3 : -2; 659 } 660 661 static int mmubooke_get_physical_address(CPUPPCState *env, mmu_ctx_t *ctx, 662 target_ulong address, 663 MMUAccessType access_type) 664 { 665 ppcemb_tlb_t *tlb; 666 hwaddr raddr; 667 int i, ret; 668 669 ret = -1; 670 raddr = (hwaddr)-1ULL; 671 for (i = 0; i < env->nb_tlb; i++) { 672 tlb = &env->tlb.tlbe[i]; 673 ret = mmubooke_check_tlb(env, tlb, &raddr, &ctx->prot, address, 674 access_type, i); 675 if (ret != -1) { 676 break; 677 } 678 } 679 680 if (ret >= 0) { 681 ctx->raddr = raddr; 682 qemu_log_mask(CPU_LOG_MMU, "%s: access granted " TARGET_FMT_lx 683 " => " HWADDR_FMT_plx " %d %d\n", __func__, 684 address, ctx->raddr, ctx->prot, ret); 685 } else { 686 qemu_log_mask(CPU_LOG_MMU, "%s: access refused " TARGET_FMT_lx 687 " => " HWADDR_FMT_plx " %d %d\n", __func__, 688 address, raddr, ctx->prot, ret); 689 } 690 691 return ret; 692 } 693 694 hwaddr booke206_tlb_to_page_size(CPUPPCState *env, ppcmas_tlb_t *tlb) 695 { 696 int tlbm_size; 697 698 tlbm_size = (tlb->mas1 & MAS1_TSIZE_MASK) >> MAS1_TSIZE_SHIFT; 699 700 return 1024ULL << tlbm_size; 701 } 702 703 /* TLB check function for MAS based SoftTLBs */ 704 int ppcmas_tlb_check(CPUPPCState *env, ppcmas_tlb_t *tlb, hwaddr *raddrp, 705 target_ulong address, uint32_t pid) 706 { 707 hwaddr mask; 708 uint32_t tlb_pid; 709 710 if (!FIELD_EX64(env->msr, MSR, CM)) { 711 /* In 32bit mode we can only address 32bit EAs */ 712 address = (uint32_t)address; 713 } 714 715 /* Check valid flag */ 716 if (!(tlb->mas1 & MAS1_VALID)) { 717 return -1; 718 } 719 720 mask = ~(booke206_tlb_to_page_size(env, tlb) - 1); 721 qemu_log_mask(CPU_LOG_MMU, "%s: TLB ADDR=0x" TARGET_FMT_lx 722 " PID=0x%x MAS1=0x%x MAS2=0x%" PRIx64 " mask=0x%" 723 HWADDR_PRIx " MAS7_3=0x%" PRIx64 " MAS8=0x%" PRIx32 "\n", 724 __func__, address, pid, tlb->mas1, tlb->mas2, mask, 725 tlb->mas7_3, tlb->mas8); 726 727 /* Check PID */ 728 tlb_pid = (tlb->mas1 & MAS1_TID_MASK) >> MAS1_TID_SHIFT; 729 if (tlb_pid != 0 && tlb_pid != pid) { 730 return -1; 731 } 732 733 /* Check effective address */ 734 if ((address & mask) != (tlb->mas2 & MAS2_EPN_MASK)) { 735 return -1; 736 } 737 738 if (raddrp) { 739 *raddrp = (tlb->mas7_3 & mask) | (address & ~mask); 740 } 741 742 return 0; 743 } 744 745 static bool is_epid_mmu(int mmu_idx) 746 { 747 return mmu_idx == PPC_TLB_EPID_STORE || mmu_idx == PPC_TLB_EPID_LOAD; 748 } 749 750 static uint32_t mmubooke206_esr(int mmu_idx, MMUAccessType access_type) 751 { 752 uint32_t esr = 0; 753 if (access_type == MMU_DATA_STORE) { 754 esr |= ESR_ST; 755 } 756 if (is_epid_mmu(mmu_idx)) { 757 esr |= ESR_EPID; 758 } 759 return esr; 760 } 761 762 /* 763 * Get EPID register given the mmu_idx. If this is regular load, 764 * construct the EPID access bits from current processor state 765 * 766 * Get the effective AS and PR bits and the PID. The PID is returned 767 * only if EPID load is requested, otherwise the caller must detect 768 * the correct EPID. Return true if valid EPID is returned. 769 */ 770 static bool mmubooke206_get_as(CPUPPCState *env, 771 int mmu_idx, uint32_t *epid_out, 772 bool *as_out, bool *pr_out) 773 { 774 if (is_epid_mmu(mmu_idx)) { 775 uint32_t epidr; 776 if (mmu_idx == PPC_TLB_EPID_STORE) { 777 epidr = env->spr[SPR_BOOKE_EPSC]; 778 } else { 779 epidr = env->spr[SPR_BOOKE_EPLC]; 780 } 781 *epid_out = (epidr & EPID_EPID) >> EPID_EPID_SHIFT; 782 *as_out = !!(epidr & EPID_EAS); 783 *pr_out = !!(epidr & EPID_EPR); 784 return true; 785 } else { 786 *as_out = FIELD_EX64(env->msr, MSR, DS); 787 *pr_out = FIELD_EX64(env->msr, MSR, PR); 788 return false; 789 } 790 } 791 792 /* Check if the tlb found by hashing really matches */ 793 static int mmubooke206_check_tlb(CPUPPCState *env, ppcmas_tlb_t *tlb, 794 hwaddr *raddr, int *prot, 795 target_ulong address, 796 MMUAccessType access_type, int mmu_idx) 797 { 798 int prot2 = 0; 799 uint32_t epid; 800 bool as, pr; 801 bool use_epid = mmubooke206_get_as(env, mmu_idx, &epid, &as, &pr); 802 803 if (!use_epid) { 804 if (ppcmas_tlb_check(env, tlb, raddr, address, 805 env->spr[SPR_BOOKE_PID]) >= 0) { 806 goto found_tlb; 807 } 808 809 if (env->spr[SPR_BOOKE_PID1] && 810 ppcmas_tlb_check(env, tlb, raddr, address, 811 env->spr[SPR_BOOKE_PID1]) >= 0) { 812 goto found_tlb; 813 } 814 815 if (env->spr[SPR_BOOKE_PID2] && 816 ppcmas_tlb_check(env, tlb, raddr, address, 817 env->spr[SPR_BOOKE_PID2]) >= 0) { 818 goto found_tlb; 819 } 820 } else { 821 if (ppcmas_tlb_check(env, tlb, raddr, address, epid) >= 0) { 822 goto found_tlb; 823 } 824 } 825 826 qemu_log_mask(CPU_LOG_MMU, "%s: No TLB entry found for effective address " 827 "0x" TARGET_FMT_lx "\n", __func__, address); 828 return -1; 829 830 found_tlb: 831 832 if (pr) { 833 if (tlb->mas7_3 & MAS3_UR) { 834 prot2 |= PAGE_READ; 835 } 836 if (tlb->mas7_3 & MAS3_UW) { 837 prot2 |= PAGE_WRITE; 838 } 839 if (tlb->mas7_3 & MAS3_UX) { 840 prot2 |= PAGE_EXEC; 841 } 842 } else { 843 if (tlb->mas7_3 & MAS3_SR) { 844 prot2 |= PAGE_READ; 845 } 846 if (tlb->mas7_3 & MAS3_SW) { 847 prot2 |= PAGE_WRITE; 848 } 849 if (tlb->mas7_3 & MAS3_SX) { 850 prot2 |= PAGE_EXEC; 851 } 852 } 853 854 /* Check the address space and permissions */ 855 if (access_type == MMU_INST_FETCH) { 856 /* There is no way to fetch code using epid load */ 857 assert(!use_epid); 858 as = FIELD_EX64(env->msr, MSR, IR); 859 } 860 861 if (as != ((tlb->mas1 & MAS1_TS) >> MAS1_TS_SHIFT)) { 862 qemu_log_mask(CPU_LOG_MMU, "%s: AS doesn't match\n", __func__); 863 return -1; 864 } 865 866 *prot = prot2; 867 if (prot2 & prot_for_access_type(access_type)) { 868 qemu_log_mask(CPU_LOG_MMU, "%s: good TLB!\n", __func__); 869 return 0; 870 } 871 872 qemu_log_mask(CPU_LOG_MMU, "%s: no prot match: %x\n", __func__, prot2); 873 return access_type == MMU_INST_FETCH ? -3 : -2; 874 } 875 876 static int mmubooke206_get_physical_address(CPUPPCState *env, mmu_ctx_t *ctx, 877 target_ulong address, 878 MMUAccessType access_type, 879 int mmu_idx) 880 { 881 ppcmas_tlb_t *tlb; 882 hwaddr raddr; 883 int i, j, ret; 884 885 ret = -1; 886 raddr = (hwaddr)-1ULL; 887 888 for (i = 0; i < BOOKE206_MAX_TLBN; i++) { 889 int ways = booke206_tlb_ways(env, i); 890 891 for (j = 0; j < ways; j++) { 892 tlb = booke206_get_tlbm(env, i, address, j); 893 if (!tlb) { 894 continue; 895 } 896 ret = mmubooke206_check_tlb(env, tlb, &raddr, &ctx->prot, address, 897 access_type, mmu_idx); 898 if (ret != -1) { 899 goto found_tlb; 900 } 901 } 902 } 903 904 found_tlb: 905 906 if (ret >= 0) { 907 ctx->raddr = raddr; 908 qemu_log_mask(CPU_LOG_MMU, "%s: access granted " TARGET_FMT_lx 909 " => " HWADDR_FMT_plx " %d %d\n", __func__, address, 910 ctx->raddr, ctx->prot, ret); 911 } else { 912 qemu_log_mask(CPU_LOG_MMU, "%s: access refused " TARGET_FMT_lx 913 " => " HWADDR_FMT_plx " %d %d\n", __func__, address, 914 raddr, ctx->prot, ret); 915 } 916 917 return ret; 918 } 919 920 static const char *book3e_tsize_to_str[32] = { 921 "1K", "2K", "4K", "8K", "16K", "32K", "64K", "128K", "256K", "512K", 922 "1M", "2M", "4M", "8M", "16M", "32M", "64M", "128M", "256M", "512M", 923 "1G", "2G", "4G", "8G", "16G", "32G", "64G", "128G", "256G", "512G", 924 "1T", "2T" 925 }; 926 927 static void mmubooke_dump_mmu(CPUPPCState *env) 928 { 929 ppcemb_tlb_t *entry; 930 int i; 931 932 #ifdef CONFIG_KVM 933 if (kvm_enabled() && !env->kvm_sw_tlb) { 934 qemu_printf("Cannot access KVM TLB\n"); 935 return; 936 } 937 #endif 938 939 qemu_printf("\nTLB:\n"); 940 qemu_printf("Effective Physical Size PID Prot " 941 "Attr\n"); 942 943 entry = &env->tlb.tlbe[0]; 944 for (i = 0; i < env->nb_tlb; i++, entry++) { 945 hwaddr ea, pa; 946 target_ulong mask; 947 uint64_t size = (uint64_t)entry->size; 948 char size_buf[20]; 949 950 /* Check valid flag */ 951 if (!(entry->prot & PAGE_VALID)) { 952 continue; 953 } 954 955 mask = ~(entry->size - 1); 956 ea = entry->EPN & mask; 957 pa = entry->RPN & mask; 958 /* Extend the physical address to 36 bits */ 959 pa |= (hwaddr)(entry->RPN & 0xF) << 32; 960 if (size >= 1 * MiB) { 961 snprintf(size_buf, sizeof(size_buf), "%3" PRId64 "M", size / MiB); 962 } else { 963 snprintf(size_buf, sizeof(size_buf), "%3" PRId64 "k", size / KiB); 964 } 965 qemu_printf("0x%016" PRIx64 " 0x%016" PRIx64 " %s %-5u %08x %08x\n", 966 (uint64_t)ea, (uint64_t)pa, size_buf, (uint32_t)entry->PID, 967 entry->prot, entry->attr); 968 } 969 970 } 971 972 static void mmubooke206_dump_one_tlb(CPUPPCState *env, int tlbn, int offset, 973 int tlbsize) 974 { 975 ppcmas_tlb_t *entry; 976 int i; 977 978 qemu_printf("\nTLB%d:\n", tlbn); 979 qemu_printf("Effective Physical Size TID TS SRWX" 980 " URWX WIMGE U0123\n"); 981 982 entry = &env->tlb.tlbm[offset]; 983 for (i = 0; i < tlbsize; i++, entry++) { 984 hwaddr ea, pa, size; 985 int tsize; 986 987 if (!(entry->mas1 & MAS1_VALID)) { 988 continue; 989 } 990 991 tsize = (entry->mas1 & MAS1_TSIZE_MASK) >> MAS1_TSIZE_SHIFT; 992 size = 1024ULL << tsize; 993 ea = entry->mas2 & ~(size - 1); 994 pa = entry->mas7_3 & ~(size - 1); 995 996 qemu_printf("0x%016" PRIx64 " 0x%016" PRIx64 " %4s %-5u %1u S%c%c%c" 997 " U%c%c%c %c%c%c%c%c U%c%c%c%c\n", 998 (uint64_t)ea, (uint64_t)pa, 999 book3e_tsize_to_str[tsize], 1000 (entry->mas1 & MAS1_TID_MASK) >> MAS1_TID_SHIFT, 1001 (entry->mas1 & MAS1_TS) >> MAS1_TS_SHIFT, 1002 entry->mas7_3 & MAS3_SR ? 'R' : '-', 1003 entry->mas7_3 & MAS3_SW ? 'W' : '-', 1004 entry->mas7_3 & MAS3_SX ? 'X' : '-', 1005 entry->mas7_3 & MAS3_UR ? 'R' : '-', 1006 entry->mas7_3 & MAS3_UW ? 'W' : '-', 1007 entry->mas7_3 & MAS3_UX ? 'X' : '-', 1008 entry->mas2 & MAS2_W ? 'W' : '-', 1009 entry->mas2 & MAS2_I ? 'I' : '-', 1010 entry->mas2 & MAS2_M ? 'M' : '-', 1011 entry->mas2 & MAS2_G ? 'G' : '-', 1012 entry->mas2 & MAS2_E ? 'E' : '-', 1013 entry->mas7_3 & MAS3_U0 ? '0' : '-', 1014 entry->mas7_3 & MAS3_U1 ? '1' : '-', 1015 entry->mas7_3 & MAS3_U2 ? '2' : '-', 1016 entry->mas7_3 & MAS3_U3 ? '3' : '-'); 1017 } 1018 } 1019 1020 static void mmubooke206_dump_mmu(CPUPPCState *env) 1021 { 1022 int offset = 0; 1023 int i; 1024 1025 #ifdef CONFIG_KVM 1026 if (kvm_enabled() && !env->kvm_sw_tlb) { 1027 qemu_printf("Cannot access KVM TLB\n"); 1028 return; 1029 } 1030 #endif 1031 1032 for (i = 0; i < BOOKE206_MAX_TLBN; i++) { 1033 int size = booke206_tlb_size(env, i); 1034 1035 if (size == 0) { 1036 continue; 1037 } 1038 1039 mmubooke206_dump_one_tlb(env, i, offset, size); 1040 offset += size; 1041 } 1042 } 1043 1044 static void mmu6xx_dump_BATs(CPUPPCState *env, int type) 1045 { 1046 target_ulong *BATlt, *BATut, *BATu, *BATl; 1047 target_ulong BEPIl, BEPIu, bl; 1048 int i; 1049 1050 switch (type) { 1051 case ACCESS_CODE: 1052 BATlt = env->IBAT[1]; 1053 BATut = env->IBAT[0]; 1054 break; 1055 default: 1056 BATlt = env->DBAT[1]; 1057 BATut = env->DBAT[0]; 1058 break; 1059 } 1060 1061 for (i = 0; i < env->nb_BATs; i++) { 1062 BATu = &BATut[i]; 1063 BATl = &BATlt[i]; 1064 BEPIu = *BATu & 0xF0000000; 1065 BEPIl = *BATu & 0x0FFE0000; 1066 bl = (*BATu & 0x00001FFC) << 15; 1067 qemu_printf("%s BAT%d BATu " TARGET_FMT_lx 1068 " BATl " TARGET_FMT_lx "\n\t" TARGET_FMT_lx " " 1069 TARGET_FMT_lx " " TARGET_FMT_lx "\n", 1070 type == ACCESS_CODE ? "code" : "data", i, 1071 *BATu, *BATl, BEPIu, BEPIl, bl); 1072 } 1073 } 1074 1075 static void mmu6xx_dump_mmu(CPUPPCState *env) 1076 { 1077 PowerPCCPU *cpu = env_archcpu(env); 1078 ppc6xx_tlb_t *tlb; 1079 target_ulong sr; 1080 int type, way, entry, i; 1081 1082 qemu_printf("HTAB base = 0x%"HWADDR_PRIx"\n", ppc_hash32_hpt_base(cpu)); 1083 qemu_printf("HTAB mask = 0x%"HWADDR_PRIx"\n", ppc_hash32_hpt_mask(cpu)); 1084 1085 qemu_printf("\nSegment registers:\n"); 1086 for (i = 0; i < 32; i++) { 1087 sr = env->sr[i]; 1088 if (sr & 0x80000000) { 1089 qemu_printf("%02d T=%d Ks=%d Kp=%d BUID=0x%03x " 1090 "CNTLR_SPEC=0x%05x\n", i, 1091 sr & 0x80000000 ? 1 : 0, sr & 0x40000000 ? 1 : 0, 1092 sr & 0x20000000 ? 1 : 0, (uint32_t)((sr >> 20) & 0x1FF), 1093 (uint32_t)(sr & 0xFFFFF)); 1094 } else { 1095 qemu_printf("%02d T=%d Ks=%d Kp=%d N=%d VSID=0x%06x\n", i, 1096 sr & 0x80000000 ? 1 : 0, sr & 0x40000000 ? 1 : 0, 1097 sr & 0x20000000 ? 1 : 0, sr & 0x10000000 ? 1 : 0, 1098 (uint32_t)(sr & 0x00FFFFFF)); 1099 } 1100 } 1101 1102 qemu_printf("\nBATs:\n"); 1103 mmu6xx_dump_BATs(env, ACCESS_INT); 1104 mmu6xx_dump_BATs(env, ACCESS_CODE); 1105 1106 if (env->id_tlbs != 1) { 1107 qemu_printf("ERROR: 6xx MMU should have separated TLB" 1108 " for code and data\n"); 1109 } 1110 1111 qemu_printf("\nTLBs [EPN EPN + SIZE]\n"); 1112 1113 for (type = 0; type < 2; type++) { 1114 for (way = 0; way < env->nb_ways; way++) { 1115 for (entry = env->nb_tlb * type + env->tlb_per_way * way; 1116 entry < (env->nb_tlb * type + env->tlb_per_way * (way + 1)); 1117 entry++) { 1118 1119 tlb = &env->tlb.tlb6[entry]; 1120 qemu_printf("%s TLB %02d/%02d way:%d %s [" 1121 TARGET_FMT_lx " " TARGET_FMT_lx "]\n", 1122 type ? "code" : "data", entry % env->nb_tlb, 1123 env->nb_tlb, way, 1124 pte_is_valid(tlb->pte0) ? "valid" : "inval", 1125 tlb->EPN, tlb->EPN + TARGET_PAGE_SIZE); 1126 } 1127 } 1128 } 1129 } 1130 1131 void dump_mmu(CPUPPCState *env) 1132 { 1133 switch (env->mmu_model) { 1134 case POWERPC_MMU_BOOKE: 1135 mmubooke_dump_mmu(env); 1136 break; 1137 case POWERPC_MMU_BOOKE206: 1138 mmubooke206_dump_mmu(env); 1139 break; 1140 case POWERPC_MMU_SOFT_6xx: 1141 mmu6xx_dump_mmu(env); 1142 break; 1143 #if defined(TARGET_PPC64) 1144 case POWERPC_MMU_64B: 1145 case POWERPC_MMU_2_03: 1146 case POWERPC_MMU_2_06: 1147 case POWERPC_MMU_2_07: 1148 dump_slb(env_archcpu(env)); 1149 break; 1150 case POWERPC_MMU_3_00: 1151 if (ppc64_v3_radix(env_archcpu(env))) { 1152 qemu_log_mask(LOG_UNIMP, "%s: the PPC64 MMU is unsupported\n", 1153 __func__); 1154 } else { 1155 dump_slb(env_archcpu(env)); 1156 } 1157 break; 1158 #endif 1159 default: 1160 qemu_log_mask(LOG_UNIMP, "%s: unimplemented\n", __func__); 1161 } 1162 } 1163 1164 static int check_physical(CPUPPCState *env, mmu_ctx_t *ctx, target_ulong eaddr, 1165 MMUAccessType access_type) 1166 { 1167 ctx->raddr = eaddr; 1168 ctx->prot = PAGE_READ | PAGE_EXEC; 1169 1170 switch (env->mmu_model) { 1171 case POWERPC_MMU_SOFT_6xx: 1172 case POWERPC_MMU_SOFT_4xx: 1173 case POWERPC_MMU_REAL: 1174 case POWERPC_MMU_BOOKE: 1175 ctx->prot |= PAGE_WRITE; 1176 break; 1177 1178 default: 1179 /* Caller's checks mean we should never get here for other models */ 1180 g_assert_not_reached(); 1181 } 1182 1183 return 0; 1184 } 1185 1186 int get_physical_address_wtlb(CPUPPCState *env, mmu_ctx_t *ctx, 1187 target_ulong eaddr, 1188 MMUAccessType access_type, int type, 1189 int mmu_idx) 1190 { 1191 int ret = -1; 1192 bool real_mode = (type == ACCESS_CODE && !FIELD_EX64(env->msr, MSR, IR)) || 1193 (type != ACCESS_CODE && !FIELD_EX64(env->msr, MSR, DR)); 1194 1195 switch (env->mmu_model) { 1196 case POWERPC_MMU_SOFT_6xx: 1197 if (real_mode) { 1198 ret = check_physical(env, ctx, eaddr, access_type); 1199 } else { 1200 /* Try to find a BAT */ 1201 if (env->nb_BATs != 0) { 1202 ret = get_bat_6xx_tlb(env, ctx, eaddr, access_type); 1203 } 1204 if (ret < 0) { 1205 /* We didn't match any BAT entry or don't have BATs */ 1206 ret = get_segment_6xx_tlb(env, ctx, eaddr, access_type, type); 1207 } 1208 } 1209 break; 1210 1211 case POWERPC_MMU_SOFT_4xx: 1212 if (real_mode) { 1213 ret = check_physical(env, ctx, eaddr, access_type); 1214 } else { 1215 ret = mmu40x_get_physical_address(env, ctx, eaddr, access_type); 1216 } 1217 break; 1218 case POWERPC_MMU_BOOKE: 1219 ret = mmubooke_get_physical_address(env, ctx, eaddr, access_type); 1220 break; 1221 case POWERPC_MMU_BOOKE206: 1222 ret = mmubooke206_get_physical_address(env, ctx, eaddr, access_type, 1223 mmu_idx); 1224 break; 1225 case POWERPC_MMU_MPC8xx: 1226 /* XXX: TODO */ 1227 cpu_abort(env_cpu(env), "MPC8xx MMU model is not implemented\n"); 1228 break; 1229 case POWERPC_MMU_REAL: 1230 if (real_mode) { 1231 ret = check_physical(env, ctx, eaddr, access_type); 1232 } else { 1233 cpu_abort(env_cpu(env), 1234 "PowerPC in real mode do not do any translation\n"); 1235 } 1236 return -1; 1237 default: 1238 cpu_abort(env_cpu(env), "Unknown or invalid MMU model\n"); 1239 return -1; 1240 } 1241 1242 return ret; 1243 } 1244 1245 static void booke206_update_mas_tlb_miss(CPUPPCState *env, target_ulong address, 1246 MMUAccessType access_type, int mmu_idx) 1247 { 1248 uint32_t epid; 1249 bool as, pr; 1250 uint32_t missed_tid = 0; 1251 bool use_epid = mmubooke206_get_as(env, mmu_idx, &epid, &as, &pr); 1252 1253 if (access_type == MMU_INST_FETCH) { 1254 as = FIELD_EX64(env->msr, MSR, IR); 1255 } 1256 env->spr[SPR_BOOKE_MAS0] = env->spr[SPR_BOOKE_MAS4] & MAS4_TLBSELD_MASK; 1257 env->spr[SPR_BOOKE_MAS1] = env->spr[SPR_BOOKE_MAS4] & MAS4_TSIZED_MASK; 1258 env->spr[SPR_BOOKE_MAS2] = env->spr[SPR_BOOKE_MAS4] & MAS4_WIMGED_MASK; 1259 env->spr[SPR_BOOKE_MAS3] = 0; 1260 env->spr[SPR_BOOKE_MAS6] = 0; 1261 env->spr[SPR_BOOKE_MAS7] = 0; 1262 1263 /* AS */ 1264 if (as) { 1265 env->spr[SPR_BOOKE_MAS1] |= MAS1_TS; 1266 env->spr[SPR_BOOKE_MAS6] |= MAS6_SAS; 1267 } 1268 1269 env->spr[SPR_BOOKE_MAS1] |= MAS1_VALID; 1270 env->spr[SPR_BOOKE_MAS2] |= address & MAS2_EPN_MASK; 1271 1272 if (!use_epid) { 1273 switch (env->spr[SPR_BOOKE_MAS4] & MAS4_TIDSELD_PIDZ) { 1274 case MAS4_TIDSELD_PID0: 1275 missed_tid = env->spr[SPR_BOOKE_PID]; 1276 break; 1277 case MAS4_TIDSELD_PID1: 1278 missed_tid = env->spr[SPR_BOOKE_PID1]; 1279 break; 1280 case MAS4_TIDSELD_PID2: 1281 missed_tid = env->spr[SPR_BOOKE_PID2]; 1282 break; 1283 } 1284 env->spr[SPR_BOOKE_MAS6] |= env->spr[SPR_BOOKE_PID] << 16; 1285 } else { 1286 missed_tid = epid; 1287 env->spr[SPR_BOOKE_MAS6] |= missed_tid << 16; 1288 } 1289 env->spr[SPR_BOOKE_MAS1] |= (missed_tid << MAS1_TID_SHIFT); 1290 1291 1292 /* next victim logic */ 1293 env->spr[SPR_BOOKE_MAS0] |= env->last_way << MAS0_ESEL_SHIFT; 1294 env->last_way++; 1295 env->last_way &= booke206_tlb_ways(env, 0) - 1; 1296 env->spr[SPR_BOOKE_MAS0] |= env->last_way << MAS0_NV_SHIFT; 1297 } 1298 1299 /* Perform address translation */ 1300 /* TODO: Split this by mmu_model. */ 1301 static bool ppc_jumbo_xlate(PowerPCCPU *cpu, vaddr eaddr, 1302 MMUAccessType access_type, 1303 hwaddr *raddrp, int *psizep, int *protp, 1304 int mmu_idx, bool guest_visible) 1305 { 1306 CPUState *cs = CPU(cpu); 1307 CPUPPCState *env = &cpu->env; 1308 mmu_ctx_t ctx; 1309 int type; 1310 int ret; 1311 1312 if (access_type == MMU_INST_FETCH) { 1313 /* code access */ 1314 type = ACCESS_CODE; 1315 } else if (guest_visible) { 1316 /* data access */ 1317 type = env->access_type; 1318 } else { 1319 type = ACCESS_INT; 1320 } 1321 1322 ret = get_physical_address_wtlb(env, &ctx, eaddr, access_type, 1323 type, mmu_idx); 1324 if (ret == 0) { 1325 *raddrp = ctx.raddr; 1326 *protp = ctx.prot; 1327 *psizep = TARGET_PAGE_BITS; 1328 return true; 1329 } 1330 1331 if (guest_visible) { 1332 log_cpu_state_mask(CPU_LOG_MMU, cs, 0); 1333 if (type == ACCESS_CODE) { 1334 switch (ret) { 1335 case -1: 1336 /* No matches in page tables or TLB */ 1337 switch (env->mmu_model) { 1338 case POWERPC_MMU_SOFT_6xx: 1339 cs->exception_index = POWERPC_EXCP_IFTLB; 1340 env->error_code = 1 << 18; 1341 env->spr[SPR_IMISS] = eaddr; 1342 env->spr[SPR_ICMP] = 0x80000000 | ctx.ptem; 1343 goto tlb_miss; 1344 case POWERPC_MMU_SOFT_4xx: 1345 cs->exception_index = POWERPC_EXCP_ITLB; 1346 env->error_code = 0; 1347 env->spr[SPR_40x_DEAR] = eaddr; 1348 env->spr[SPR_40x_ESR] = 0x00000000; 1349 break; 1350 case POWERPC_MMU_BOOKE206: 1351 booke206_update_mas_tlb_miss(env, eaddr, 2, mmu_idx); 1352 /* fall through */ 1353 case POWERPC_MMU_BOOKE: 1354 cs->exception_index = POWERPC_EXCP_ITLB; 1355 env->error_code = 0; 1356 env->spr[SPR_BOOKE_DEAR] = eaddr; 1357 env->spr[SPR_BOOKE_ESR] = mmubooke206_esr(mmu_idx, MMU_DATA_LOAD); 1358 break; 1359 case POWERPC_MMU_MPC8xx: 1360 cpu_abort(cs, "MPC8xx MMU model is not implemented\n"); 1361 case POWERPC_MMU_REAL: 1362 cpu_abort(cs, "PowerPC in real mode should never raise " 1363 "any MMU exceptions\n"); 1364 default: 1365 cpu_abort(cs, "Unknown or invalid MMU model\n"); 1366 } 1367 break; 1368 case -2: 1369 /* Access rights violation */ 1370 cs->exception_index = POWERPC_EXCP_ISI; 1371 if ((env->mmu_model == POWERPC_MMU_BOOKE) || 1372 (env->mmu_model == POWERPC_MMU_BOOKE206)) { 1373 env->error_code = 0; 1374 } else { 1375 env->error_code = 0x08000000; 1376 } 1377 break; 1378 case -3: 1379 /* No execute protection violation */ 1380 if ((env->mmu_model == POWERPC_MMU_BOOKE) || 1381 (env->mmu_model == POWERPC_MMU_BOOKE206)) { 1382 env->spr[SPR_BOOKE_ESR] = 0x00000000; 1383 env->error_code = 0; 1384 } else { 1385 env->error_code = 0x10000000; 1386 } 1387 cs->exception_index = POWERPC_EXCP_ISI; 1388 break; 1389 case -4: 1390 /* Direct store exception */ 1391 /* No code fetch is allowed in direct-store areas */ 1392 cs->exception_index = POWERPC_EXCP_ISI; 1393 if ((env->mmu_model == POWERPC_MMU_BOOKE) || 1394 (env->mmu_model == POWERPC_MMU_BOOKE206)) { 1395 env->error_code = 0; 1396 } else { 1397 env->error_code = 0x10000000; 1398 } 1399 break; 1400 } 1401 } else { 1402 switch (ret) { 1403 case -1: 1404 /* No matches in page tables or TLB */ 1405 switch (env->mmu_model) { 1406 case POWERPC_MMU_SOFT_6xx: 1407 if (access_type == MMU_DATA_STORE) { 1408 cs->exception_index = POWERPC_EXCP_DSTLB; 1409 env->error_code = 1 << 16; 1410 } else { 1411 cs->exception_index = POWERPC_EXCP_DLTLB; 1412 env->error_code = 0; 1413 } 1414 env->spr[SPR_DMISS] = eaddr; 1415 env->spr[SPR_DCMP] = 0x80000000 | ctx.ptem; 1416 tlb_miss: 1417 env->error_code |= ctx.key << 19; 1418 env->spr[SPR_HASH1] = ppc_hash32_hpt_base(cpu) + 1419 get_pteg_offset32(cpu, ctx.hash[0]); 1420 env->spr[SPR_HASH2] = ppc_hash32_hpt_base(cpu) + 1421 get_pteg_offset32(cpu, ctx.hash[1]); 1422 break; 1423 case POWERPC_MMU_SOFT_4xx: 1424 cs->exception_index = POWERPC_EXCP_DTLB; 1425 env->error_code = 0; 1426 env->spr[SPR_40x_DEAR] = eaddr; 1427 if (access_type == MMU_DATA_STORE) { 1428 env->spr[SPR_40x_ESR] = 0x00800000; 1429 } else { 1430 env->spr[SPR_40x_ESR] = 0x00000000; 1431 } 1432 break; 1433 case POWERPC_MMU_MPC8xx: 1434 /* XXX: TODO */ 1435 cpu_abort(cs, "MPC8xx MMU model is not implemented\n"); 1436 case POWERPC_MMU_BOOKE206: 1437 booke206_update_mas_tlb_miss(env, eaddr, access_type, mmu_idx); 1438 /* fall through */ 1439 case POWERPC_MMU_BOOKE: 1440 cs->exception_index = POWERPC_EXCP_DTLB; 1441 env->error_code = 0; 1442 env->spr[SPR_BOOKE_DEAR] = eaddr; 1443 env->spr[SPR_BOOKE_ESR] = mmubooke206_esr(mmu_idx, access_type); 1444 break; 1445 case POWERPC_MMU_REAL: 1446 cpu_abort(cs, "PowerPC in real mode should never raise " 1447 "any MMU exceptions\n"); 1448 default: 1449 cpu_abort(cs, "Unknown or invalid MMU model\n"); 1450 } 1451 break; 1452 case -2: 1453 /* Access rights violation */ 1454 cs->exception_index = POWERPC_EXCP_DSI; 1455 env->error_code = 0; 1456 if (env->mmu_model == POWERPC_MMU_SOFT_4xx) { 1457 env->spr[SPR_40x_DEAR] = eaddr; 1458 if (access_type == MMU_DATA_STORE) { 1459 env->spr[SPR_40x_ESR] |= 0x00800000; 1460 } 1461 } else if ((env->mmu_model == POWERPC_MMU_BOOKE) || 1462 (env->mmu_model == POWERPC_MMU_BOOKE206)) { 1463 env->spr[SPR_BOOKE_DEAR] = eaddr; 1464 env->spr[SPR_BOOKE_ESR] = mmubooke206_esr(mmu_idx, access_type); 1465 } else { 1466 env->spr[SPR_DAR] = eaddr; 1467 if (access_type == MMU_DATA_STORE) { 1468 env->spr[SPR_DSISR] = 0x0A000000; 1469 } else { 1470 env->spr[SPR_DSISR] = 0x08000000; 1471 } 1472 } 1473 break; 1474 case -4: 1475 /* Direct store exception */ 1476 switch (type) { 1477 case ACCESS_FLOAT: 1478 /* Floating point load/store */ 1479 cs->exception_index = POWERPC_EXCP_ALIGN; 1480 env->error_code = POWERPC_EXCP_ALIGN_FP; 1481 env->spr[SPR_DAR] = eaddr; 1482 break; 1483 case ACCESS_RES: 1484 /* lwarx, ldarx or stwcx. */ 1485 cs->exception_index = POWERPC_EXCP_DSI; 1486 env->error_code = 0; 1487 env->spr[SPR_DAR] = eaddr; 1488 if (access_type == MMU_DATA_STORE) { 1489 env->spr[SPR_DSISR] = 0x06000000; 1490 } else { 1491 env->spr[SPR_DSISR] = 0x04000000; 1492 } 1493 break; 1494 case ACCESS_EXT: 1495 /* eciwx or ecowx */ 1496 cs->exception_index = POWERPC_EXCP_DSI; 1497 env->error_code = 0; 1498 env->spr[SPR_DAR] = eaddr; 1499 if (access_type == MMU_DATA_STORE) { 1500 env->spr[SPR_DSISR] = 0x06100000; 1501 } else { 1502 env->spr[SPR_DSISR] = 0x04100000; 1503 } 1504 break; 1505 default: 1506 printf("DSI: invalid exception (%d)\n", ret); 1507 cs->exception_index = POWERPC_EXCP_PROGRAM; 1508 env->error_code = 1509 POWERPC_EXCP_INVAL | POWERPC_EXCP_INVAL_INVAL; 1510 env->spr[SPR_DAR] = eaddr; 1511 break; 1512 } 1513 break; 1514 } 1515 } 1516 } 1517 return false; 1518 } 1519 1520 /*****************************************************************************/ 1521 1522 bool ppc_xlate(PowerPCCPU *cpu, vaddr eaddr, MMUAccessType access_type, 1523 hwaddr *raddrp, int *psizep, int *protp, 1524 int mmu_idx, bool guest_visible) 1525 { 1526 switch (cpu->env.mmu_model) { 1527 #if defined(TARGET_PPC64) 1528 case POWERPC_MMU_3_00: 1529 if (ppc64_v3_radix(cpu)) { 1530 return ppc_radix64_xlate(cpu, eaddr, access_type, raddrp, 1531 psizep, protp, mmu_idx, guest_visible); 1532 } 1533 /* fall through */ 1534 case POWERPC_MMU_64B: 1535 case POWERPC_MMU_2_03: 1536 case POWERPC_MMU_2_06: 1537 case POWERPC_MMU_2_07: 1538 return ppc_hash64_xlate(cpu, eaddr, access_type, 1539 raddrp, psizep, protp, mmu_idx, guest_visible); 1540 #endif 1541 1542 case POWERPC_MMU_32B: 1543 return ppc_hash32_xlate(cpu, eaddr, access_type, raddrp, 1544 psizep, protp, mmu_idx, guest_visible); 1545 1546 default: 1547 return ppc_jumbo_xlate(cpu, eaddr, access_type, raddrp, 1548 psizep, protp, mmu_idx, guest_visible); 1549 } 1550 } 1551 1552 hwaddr ppc_cpu_get_phys_page_debug(CPUState *cs, vaddr addr) 1553 { 1554 PowerPCCPU *cpu = POWERPC_CPU(cs); 1555 hwaddr raddr; 1556 int s, p; 1557 1558 /* 1559 * Some MMUs have separate TLBs for code and data. If we only 1560 * try an MMU_DATA_LOAD, we may not be able to read instructions 1561 * mapped by code TLBs, so we also try a MMU_INST_FETCH. 1562 */ 1563 if (ppc_xlate(cpu, addr, MMU_DATA_LOAD, &raddr, &s, &p, 1564 cpu_mmu_index(&cpu->env, false), false) || 1565 ppc_xlate(cpu, addr, MMU_INST_FETCH, &raddr, &s, &p, 1566 cpu_mmu_index(&cpu->env, true), false)) { 1567 return raddr & TARGET_PAGE_MASK; 1568 } 1569 return -1; 1570 } 1571