/*
 * PowerPC MMU, TLB, SLB and BAT emulation helpers for QEMU.
 *
 * Copyright (c) 2003-2007 Jocelyn Mayer
 *
 * This library is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License as published by the Free Software Foundation; either
 * version 2.1 of the License, or (at your option) any later version.
 *
 * This library is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with this library; if not, see <http://www.gnu.org/licenses/>.
 */

#include "qemu/osdep.h"
#include "qemu/units.h"
#include "cpu.h"
#include "sysemu/kvm.h"
#include "kvm_ppc.h"
#include "mmu-hash64.h"
#include "mmu-hash32.h"
#include "exec/exec-all.h"
#include "exec/log.h"
#include "helper_regs.h"
#include "qemu/error-report.h"
#include "qemu/main-loop.h"
#include "qemu/qemu-print.h"
#include "internal.h"
#include "mmu-book3s-v3.h"
#include "mmu-radix64.h"

/* #define DUMP_PAGE_TABLES */

void ppc_store_sdr1(CPUPPCState *env, target_ulong value)
{
    PowerPCCPU *cpu = env_archcpu(env);
    qemu_log_mask(CPU_LOG_MMU, "%s: " TARGET_FMT_lx "\n", __func__, value);
    assert(!cpu->env.has_hv_mode || !cpu->vhyp);
#if defined(TARGET_PPC64)
    if (mmu_is_64bit(env->mmu_model)) {
        target_ulong sdr_mask = SDR_64_HTABORG | SDR_64_HTABSIZE;
        target_ulong htabsize = value & SDR_64_HTABSIZE;

        if (value & ~sdr_mask) {
            qemu_log_mask(LOG_GUEST_ERROR, "Invalid bits 0x"TARGET_FMT_lx
                          " set in SDR1", value & ~sdr_mask);
            value &= sdr_mask;
        }
        if (htabsize > 28) {
            qemu_log_mask(LOG_GUEST_ERROR, "Invalid HTABSIZE 0x" TARGET_FMT_lx
                          " stored in SDR1", htabsize);
            return;
        }
    }
#endif /* defined(TARGET_PPC64) */
    /* FIXME: Should check for valid HTABMASK values in 32-bit case */
    env->spr[SPR_SDR1] = value;
}

/*****************************************************************************/
/* PowerPC MMU emulation */

static int pp_check(int key, int pp, int nx)
{
    int access;

    /* Compute access rights */
    access = 0;
    if (key == 0) {
        switch (pp) {
        case 0x0:
        case 0x1:
        case 0x2:
            access |= PAGE_WRITE;
            /* fall through */
        case 0x3:
            access |= PAGE_READ;
            break;
        }
    } else {
        switch (pp) {
        case 0x0:
            access = 0;
            break;
        case 0x1:
        case 0x3:
            access = PAGE_READ;
            break;
        case 0x2:
            access = PAGE_READ | PAGE_WRITE;
            break;
        }
    }
    if (nx == 0) {
        access |= PAGE_EXEC;
    }

    return access;
}

static int check_prot(int prot, MMUAccessType access_type)
{
    return prot & prot_for_access_type(access_type) ?
        0 : -2;
}

int ppc6xx_tlb_getnum(CPUPPCState *env, target_ulong eaddr,
                      int way, int is_code)
{
    int nr;

    /* Select TLB num in a way from address */
    nr = (eaddr >> TARGET_PAGE_BITS) & (env->tlb_per_way - 1);
    /* Select TLB way */
    nr += env->tlb_per_way * way;
    /* 6xx have separate TLBs for instructions and data */
    if (is_code && env->id_tlbs == 1) {
        nr += env->nb_tlb;
    }

    return nr;
}

static int ppc6xx_tlb_pte_check(mmu_ctx_t *ctx, target_ulong pte0,
                                target_ulong pte1, int h,
                                MMUAccessType access_type)
{
    target_ulong ptem, mmask;
    int access, ret, pteh, ptev, pp;

    ret = -1;
    /* Check validity and table match */
    ptev = pte_is_valid(pte0);
    pteh = (pte0 >> 6) & 1;
    if (ptev && h == pteh) {
        /* Check vsid & api */
        ptem = pte0 & PTE_PTEM_MASK;
        mmask = PTE_CHECK_MASK;
        pp = pte1 & 0x00000003;
        if (ptem == ctx->ptem) {
            if (ctx->raddr != (hwaddr)-1ULL) {
                /* all matches should have equal RPN, WIMG & PP */
                if ((ctx->raddr & mmask) != (pte1 & mmask)) {
                    qemu_log_mask(CPU_LOG_MMU, "Bad RPN/WIMG/PP\n");
                    return -3;
                }
            }
            /* Compute access rights */
            access = pp_check(ctx->key, pp, ctx->nx);
            /* Keep the matching PTE information */
            ctx->raddr = pte1;
            ctx->prot = access;
            ret = check_prot(ctx->prot, access_type);
            if (ret == 0) {
                /* Access granted */
                qemu_log_mask(CPU_LOG_MMU, "PTE access granted !\n");
            } else {
                /* Access right violation */
                qemu_log_mask(CPU_LOG_MMU, "PTE access rejected\n");
            }
        }
    }

    return ret;
}

static int pte_update_flags(mmu_ctx_t *ctx, target_ulong *pte1p,
                            int ret, MMUAccessType access_type)
{
    int store = 0;

    /* Update page flags */
    if (!(*pte1p & 0x00000100)) {
        /* Update accessed flag */
        *pte1p |= 0x00000100;
        store = 1;
    }
    if (!(*pte1p & 0x00000080)) {
        if (access_type == MMU_DATA_STORE && ret == 0) {
            /* Update changed flag */
            *pte1p |= 0x00000080;
            store = 1;
        } else {
            /* Force page fault for first write access */
            ctx->prot &= ~PAGE_WRITE;
        }
    }

    return store;
}

/* Software driven TLB helpers */

static int ppc6xx_tlb_check(CPUPPCState *env, mmu_ctx_t *ctx,
                            target_ulong eaddr, MMUAccessType access_type)
{
    ppc6xx_tlb_t *tlb;
    int nr, best, way;
    int ret;

    best = -1;
    ret = -1; /* No TLB found */
    for (way = 0; way < env->nb_ways; way++) {
        nr = ppc6xx_tlb_getnum(env, eaddr, way, access_type == MMU_INST_FETCH);
        tlb = &env->tlb.tlb6[nr];
        /* This test "emulates" the PTE index match for hardware TLBs */
        if ((eaddr & TARGET_PAGE_MASK) != tlb->EPN) {
            qemu_log_mask(CPU_LOG_MMU, "TLB %d/%d %s [" TARGET_FMT_lx
                          " " TARGET_FMT_lx "] <> " TARGET_FMT_lx "\n",
                          nr, env->nb_tlb,
                          pte_is_valid(tlb->pte0) ? "valid" : "inval",
                          tlb->EPN, tlb->EPN + TARGET_PAGE_SIZE, eaddr);
            continue;
        }
        qemu_log_mask(CPU_LOG_MMU, "TLB %d/%d %s " TARGET_FMT_lx " <> "
                      TARGET_FMT_lx " " TARGET_FMT_lx " %c %c\n",
                      nr, env->nb_tlb,
                      pte_is_valid(tlb->pte0) ? "valid" : "inval",
                      tlb->EPN, eaddr, tlb->pte1,
                      access_type == MMU_DATA_STORE ? 'S' : 'L',
                      access_type == MMU_INST_FETCH ?
                      'I' : 'D');
        switch (ppc6xx_tlb_pte_check(ctx, tlb->pte0, tlb->pte1,
                                     0, access_type)) {
        case -3:
            /* TLB inconsistency */
            return -1;
        case -2:
            /* Access violation */
            ret = -2;
            best = nr;
            break;
        case -1:
        default:
            /* No match */
            break;
        case 0:
            /* access granted */
            /*
             * XXX: we should go on looping to check all TLBs for
             *      consistency, but we can speed up the whole thing since
             *      the result would be undefined if the TLBs are not
             *      consistent.
             */
            ret = 0;
            best = nr;
            goto done;
        }
    }
    if (best != -1) {
    done:
        qemu_log_mask(CPU_LOG_MMU, "found TLB at addr " TARGET_FMT_plx
                      " prot=%01x ret=%d\n",
                      ctx->raddr & TARGET_PAGE_MASK, ctx->prot, ret);
        /* Update page flags */
        pte_update_flags(ctx, &env->tlb.tlb6[best].pte1, ret, access_type);
    }

    return ret;
}

/* Perform BAT hit & translation */
static inline void bat_size_prot(CPUPPCState *env, target_ulong *blp,
                                 int *validp, int *protp, target_ulong *BATu,
                                 target_ulong *BATl)
{
    target_ulong bl;
    int pp, valid, prot;

    bl = (*BATu & 0x00001FFC) << 15;
    valid = 0;
    prot = 0;
    if (((msr_pr == 0) && (*BATu & 0x00000002)) ||
        ((msr_pr != 0) && (*BATu & 0x00000001))) {
        valid = 1;
        pp = *BATl & 0x00000003;
        if (pp != 0) {
            prot = PAGE_READ | PAGE_EXEC;
            if (pp == 0x2) {
                prot |= PAGE_WRITE;
            }
        }
    }
    *blp = bl;
    *validp = valid;
    *protp = prot;
}

static int get_bat_6xx_tlb(CPUPPCState *env, mmu_ctx_t *ctx,
                           target_ulong virtual, MMUAccessType access_type)
{
    target_ulong *BATlt, *BATut, *BATu, *BATl;
    target_ulong BEPIl, BEPIu, bl;
    int i, valid, prot;
    int ret = -1;
    bool ifetch = access_type == MMU_INST_FETCH;

    qemu_log_mask(CPU_LOG_MMU, "%s: %cBAT v " TARGET_FMT_lx "\n", __func__,
                  ifetch ? 'I' : 'D', virtual);
    if (ifetch) {
        BATlt = env->IBAT[1];
        BATut = env->IBAT[0];
    } else {
        BATlt = env->DBAT[1];
        BATut = env->DBAT[0];
    }
    for (i = 0; i < env->nb_BATs; i++) {
        BATu = &BATut[i];
        BATl = &BATlt[i];
        BEPIu = *BATu & 0xF0000000;
        BEPIl = *BATu & 0x0FFE0000;
        bat_size_prot(env, &bl, &valid, &prot, BATu, BATl);
        qemu_log_mask(CPU_LOG_MMU, "%s: %cBAT%d v " TARGET_FMT_lx " BATu "
                      TARGET_FMT_lx " BATl " TARGET_FMT_lx "\n", __func__,
                      ifetch ? 'I' : 'D', i, virtual, *BATu, *BATl);
        if ((virtual & 0xF0000000) == BEPIu &&
            ((virtual & 0x0FFE0000) & ~bl) == BEPIl) {
            /* BAT matches */
            if (valid != 0) {
                /* Get physical address */
                ctx->raddr = (*BATl & 0xF0000000) |
                    ((virtual & 0x0FFE0000 & bl) | (*BATl & 0x0FFE0000)) |
                    (virtual & 0x0001F000);
                /* Compute access rights */
                ctx->prot = prot;
                ret = check_prot(ctx->prot, access_type);
                if (ret == 0) {
                    qemu_log_mask(CPU_LOG_MMU, "BAT %d match: r " TARGET_FMT_plx
                                  " prot=%c%c\n", i, ctx->raddr,
                                  ctx->prot & PAGE_READ ? 'R' : '-',
                                  ctx->prot & PAGE_WRITE ?
                                  'W' : '-');
                }
                break;
            }
        }
    }
    if (ret < 0) {
        if (qemu_log_enabled()) {
            qemu_log_mask(CPU_LOG_MMU, "no BAT match for "
                          TARGET_FMT_lx ":\n", virtual);
            for (i = 0; i < 4; i++) {
                BATu = &BATut[i];
                BATl = &BATlt[i];
                BEPIu = *BATu & 0xF0000000;
                BEPIl = *BATu & 0x0FFE0000;
                bl = (*BATu & 0x00001FFC) << 15;
                qemu_log_mask(CPU_LOG_MMU, "%s: %cBAT%d v "
                              TARGET_FMT_lx " BATu " TARGET_FMT_lx
                              " BATl " TARGET_FMT_lx "\n\t" TARGET_FMT_lx " "
                              TARGET_FMT_lx " " TARGET_FMT_lx "\n",
                              __func__, ifetch ? 'I' : 'D', i, virtual,
                              *BATu, *BATl, BEPIu, BEPIl, bl);
            }
        }
    }
    /* No hit */
    return ret;
}

/* Perform segment based translation */
static int get_segment_6xx_tlb(CPUPPCState *env, mmu_ctx_t *ctx,
                               target_ulong eaddr, MMUAccessType access_type,
                               int type)
{
    PowerPCCPU *cpu = env_archcpu(env);
    hwaddr hash;
    target_ulong vsid;
    int ds, pr, target_page_bits;
    int ret;
    target_ulong sr, pgidx;

    pr = msr_pr;
    ctx->eaddr = eaddr;

    sr = env->sr[eaddr >> 28];
    ctx->key = (((sr & 0x20000000) && (pr != 0)) ||
                ((sr & 0x40000000) && (pr == 0))) ? 1 : 0;
    ds = sr & 0x80000000 ? 1 : 0;
    ctx->nx = sr & 0x10000000 ? 1 : 0;
    vsid = sr & 0x00FFFFFF;
    target_page_bits = TARGET_PAGE_BITS;
    qemu_log_mask(CPU_LOG_MMU,
                  "Check segment v=" TARGET_FMT_lx " %d " TARGET_FMT_lx
                  " nip=" TARGET_FMT_lx " lr=" TARGET_FMT_lx
                  " ir=%d dr=%d pr=%d %d t=%d\n",
                  eaddr, (int)(eaddr >> 28), sr, env->nip, env->lr, (int)msr_ir,
                  (int)msr_dr, pr != 0 ? 1 : 0,
                  access_type == MMU_DATA_STORE, type);
    pgidx = (eaddr & ~SEGMENT_MASK_256M) >> target_page_bits;
    hash = vsid ^ pgidx;
    ctx->ptem = (vsid << 7) | (pgidx >> 10);

    qemu_log_mask(CPU_LOG_MMU,
                  "pte segment: key=%d ds %d nx %d vsid " TARGET_FMT_lx "\n",
                  ctx->key, ds, ctx->nx, vsid);
    ret = -1;
    if (!ds) {
        /* Check if instruction fetch is allowed, if needed */
        if (type != ACCESS_CODE || ctx->nx == 0) {
            /* Page address translation */
            qemu_log_mask(CPU_LOG_MMU, "htab_base " TARGET_FMT_plx
                          " htab_mask " TARGET_FMT_plx
                          " hash " TARGET_FMT_plx "\n",
                          ppc_hash32_hpt_base(cpu), ppc_hash32_hpt_mask(cpu),
                          hash);
            ctx->hash[0] = hash;
            ctx->hash[1] = ~hash;

            /* Initialize real address with an invalid value */
            ctx->raddr = (hwaddr)-1ULL;
            /* Software TLB search */
            ret = ppc6xx_tlb_check(env, ctx, eaddr, access_type);
#if defined(DUMP_PAGE_TABLES)
            if (qemu_loglevel_mask(CPU_LOG_MMU)) {
                CPUState *cs = env_cpu(env);
                hwaddr curaddr;
                uint32_t a0, a1, a2, a3;

                qemu_log("Page table: " TARGET_FMT_plx " len " TARGET_FMT_plx
                         "\n", ppc_hash32_hpt_base(cpu),
                         ppc_hash32_hpt_mask(cpu) + 0x80);
                for (curaddr = ppc_hash32_hpt_base(cpu);
                     curaddr < (ppc_hash32_hpt_base(cpu)
                                + ppc_hash32_hpt_mask(cpu) + 0x80);
                     curaddr += 16) {
                    a0 = ldl_phys(cs->as, curaddr);
                    a1 = ldl_phys(cs->as, curaddr + 4);
                    a2 = ldl_phys(cs->as, curaddr + 8);
                    a3 = ldl_phys(cs->as, curaddr + 12);
                    if (a0 != 0 || a1 != 0 || a2 != 0 || a3 != 0) {
                        qemu_log(TARGET_FMT_plx ": %08x %08x %08x %08x\n",
                                 curaddr, a0, a1, a2, a3);
                    }
                }
            }
#endif
        } else {
            qemu_log_mask(CPU_LOG_MMU, "No access allowed\n");
            ret = -3;
        }
    } else {
        target_ulong sr;

        qemu_log_mask(CPU_LOG_MMU, "direct store...\n");
        /* Direct-store segment : absolutely *BUGGY* for now */

        /*
         * Direct-store
         * implies a 32-bit MMU.
         * Check the Segment Register's bus unit ID (BUID).
         */
        sr = env->sr[eaddr >> 28];
        if ((sr & 0x1FF00000) >> 20 == 0x07f) {
            /*
             * Memory-forced I/O controller interface access
             *
             * If T=1 and BUID=x'07F', the 601 performs a memory
             * access to SR[28-31] LA[4-31], bypassing all protection
             * mechanisms.
             */
            ctx->raddr = ((sr & 0xF) << 28) | (eaddr & 0x0FFFFFFF);
            ctx->prot = PAGE_READ | PAGE_WRITE | PAGE_EXEC;
            return 0;
        }

        switch (type) {
        case ACCESS_INT:
            /* Integer load/store : only access allowed */
            break;
        case ACCESS_CODE:
            /* No code fetch is allowed in direct-store areas */
            return -4;
        case ACCESS_FLOAT:
            /* Floating point load/store */
            return -4;
        case ACCESS_RES:
            /* lwarx, ldarx or stwcx. */
            return -4;
        case ACCESS_CACHE:
            /*
             * dcba, dcbt, dcbtst, dcbf, dcbi, dcbst, dcbz, or icbi
             *
             * Should make the instruction a no-op. As it already does
             * nothing, it's quite easy :-)
             */
            ctx->raddr = eaddr;
            return 0;
        case ACCESS_EXT:
            /* eciwx or ecowx */
            return -4;
        default:
            qemu_log_mask(CPU_LOG_MMU, "ERROR: instruction should not need "
                          "address translation\n");
            return -4;
        }
        if ((access_type == MMU_DATA_STORE || ctx->key != 1) &&
            (access_type == MMU_DATA_LOAD || ctx->key != 0)) {
            ctx->raddr = eaddr;
            ret = 2;
        } else {
            ret = -2;
        }
    }

    return ret;
}

/* Generic TLB check function for embedded PowerPC implementations */
int ppcemb_tlb_check(CPUPPCState *env, ppcemb_tlb_t *tlb,
                     hwaddr *raddrp,
                     target_ulong address, uint32_t pid, int ext,
                     int i)
{
    target_ulong mask;

    /* Check valid flag */
    if (!(tlb->prot & PAGE_VALID)) {
        return -1;
    }
    mask = ~(tlb->size - 1);
    qemu_log_mask(CPU_LOG_MMU, "%s: TLB %d address " TARGET_FMT_lx
                  " PID %u <=> " TARGET_FMT_lx " " TARGET_FMT_lx " %u %x\n",
                  __func__, i, address, pid, tlb->EPN,
                  mask, (uint32_t)tlb->PID, tlb->prot);
    /* Check PID */
    if (tlb->PID != 0 && tlb->PID != pid) {
        return -1;
    }
    /* Check effective address */
    if ((address & mask) != tlb->EPN) {
        return -1;
    }
    *raddrp = (tlb->RPN & mask) | (address & ~mask);
    if (ext) {
        /* Extend the physical address to 36 bits */
        *raddrp |= (uint64_t)(tlb->RPN & 0xF) << 32;
    }

    return 0;
}

static int mmu40x_get_physical_address(CPUPPCState *env, mmu_ctx_t *ctx,
                                       target_ulong address,
                                       MMUAccessType access_type)
{
    ppcemb_tlb_t *tlb;
    hwaddr raddr;
    int i, ret, zsel, zpr, pr;

    ret = -1;
    raddr = (hwaddr)-1ULL;
    pr = msr_pr;
    for (i = 0; i < env->nb_tlb; i++) {
        tlb = &env->tlb.tlbe[i];
        if (ppcemb_tlb_check(env, tlb, &raddr, address,
                             env->spr[SPR_40x_PID], 0, i) < 0) {
            continue;
        }
        zsel = (tlb->attr >> 4) & 0xF;
        zpr = (env->spr[SPR_40x_ZPR] >> (30 - (2 * zsel))) & 0x3;
        qemu_log_mask(CPU_LOG_MMU,
                      "%s: TLB %d zsel %d zpr %d ty %d attr %08x\n",
                      __func__, i, zsel, zpr, access_type, tlb->attr);
        /* Check execute enable bit */
        switch (zpr) {
        case 0x2:
            if (pr != 0) {
                goto check_perms;
            }
            /* fall through */
        case 0x3:
            /* All accesses granted */
            ctx->prot = PAGE_READ | PAGE_WRITE | PAGE_EXEC;
            ret = 0;
            break;
        case 0x0:
            if (pr != 0) {
                /* Raise Zone protection fault.
                 */
                env->spr[SPR_40x_ESR] = 1 << 22;
                ctx->prot = 0;
                ret = -2;
                break;
            }
            /* fall through */
        case 0x1:
        check_perms:
            /* Check from TLB entry */
            ctx->prot = tlb->prot;
            ret = check_prot(ctx->prot, access_type);
            if (ret == -2) {
                env->spr[SPR_40x_ESR] = 0;
            }
            break;
        }
        if (ret >= 0) {
            ctx->raddr = raddr;
            qemu_log_mask(CPU_LOG_MMU, "%s: access granted " TARGET_FMT_lx
                          " => " TARGET_FMT_plx
                          " %d %d\n", __func__, address, ctx->raddr, ctx->prot,
                          ret);
            return 0;
        }
    }
    qemu_log_mask(CPU_LOG_MMU, "%s: access refused " TARGET_FMT_lx
                  " => " TARGET_FMT_plx
                  " %d %d\n", __func__, address, raddr, ctx->prot, ret);

    return ret;
}

static int mmubooke_check_tlb(CPUPPCState *env, ppcemb_tlb_t *tlb,
                              hwaddr *raddr, int *prot, target_ulong address,
                              MMUAccessType access_type, int i)
{
    int prot2;

    if (ppcemb_tlb_check(env, tlb, raddr, address,
                         env->spr[SPR_BOOKE_PID],
                         !env->nb_pids, i) >= 0) {
        goto found_tlb;
    }

    if (env->spr[SPR_BOOKE_PID1] &&
        ppcemb_tlb_check(env, tlb, raddr, address,
                         env->spr[SPR_BOOKE_PID1], 0, i) >= 0) {
        goto found_tlb;
    }

    if (env->spr[SPR_BOOKE_PID2] &&
        ppcemb_tlb_check(env, tlb, raddr, address,
                         env->spr[SPR_BOOKE_PID2], 0, i) >= 0) {
        goto found_tlb;
    }

    qemu_log_mask(CPU_LOG_MMU, "%s: TLB entry not found\n", __func__);
    return -1;

found_tlb:

    if (msr_pr != 0) {
        prot2 = tlb->prot & 0xF;
    } else {
        prot2 = (tlb->prot >> 4) & 0xF;
    }

    /* Check the address space */
    if ((access_type == MMU_INST_FETCH ? msr_ir : msr_dr) != (tlb->attr & 1)) {
        qemu_log_mask(CPU_LOG_MMU, "%s: AS doesn't match\n", __func__);
        return -1;
    }

    *prot = prot2;
    if (prot2 & prot_for_access_type(access_type)) {
        qemu_log_mask(CPU_LOG_MMU, "%s: good TLB!\n", __func__);
        return 0;
    }

    qemu_log_mask(CPU_LOG_MMU, "%s: no prot match: %x\n", __func__, prot2);
    return access_type == MMU_INST_FETCH ?
        -3 : -2;
}

static int mmubooke_get_physical_address(CPUPPCState *env, mmu_ctx_t *ctx,
                                         target_ulong address,
                                         MMUAccessType access_type)
{
    ppcemb_tlb_t *tlb;
    hwaddr raddr;
    int i, ret;

    ret = -1;
    raddr = (hwaddr)-1ULL;
    for (i = 0; i < env->nb_tlb; i++) {
        tlb = &env->tlb.tlbe[i];
        ret = mmubooke_check_tlb(env, tlb, &raddr, &ctx->prot, address,
                                 access_type, i);
        if (ret != -1) {
            break;
        }
    }

    if (ret >= 0) {
        ctx->raddr = raddr;
        qemu_log_mask(CPU_LOG_MMU, "%s: access granted " TARGET_FMT_lx
                      " => " TARGET_FMT_plx " %d %d\n", __func__,
                      address, ctx->raddr, ctx->prot, ret);
    } else {
        qemu_log_mask(CPU_LOG_MMU, "%s: access refused " TARGET_FMT_lx
                      " => " TARGET_FMT_plx " %d %d\n", __func__,
                      address, raddr, ctx->prot, ret);
    }

    return ret;
}

hwaddr booke206_tlb_to_page_size(CPUPPCState *env,
                                 ppcmas_tlb_t *tlb)
{
    int tlbm_size;

    tlbm_size = (tlb->mas1 & MAS1_TSIZE_MASK) >> MAS1_TSIZE_SHIFT;

    return 1024ULL << tlbm_size;
}

/* TLB check function for MAS based SoftTLBs */
int ppcmas_tlb_check(CPUPPCState *env, ppcmas_tlb_t *tlb,
                     hwaddr *raddrp, target_ulong address,
                     uint32_t pid)
{
    hwaddr mask;
    uint32_t tlb_pid;

    if (!msr_cm) {
        /* In 32bit mode we can only address 32bit EAs */
        address = (uint32_t)address;
    }

    /* Check valid flag */
    if (!(tlb->mas1 & MAS1_VALID)) {
        return -1;
    }

    mask = ~(booke206_tlb_to_page_size(env, tlb) - 1);
    qemu_log_mask(CPU_LOG_MMU, "%s: TLB ADDR=0x" TARGET_FMT_lx
                  " PID=0x%x MAS1=0x%x MAS2=0x%" PRIx64 " mask=0x%"
                  HWADDR_PRIx " MAS7_3=0x%" PRIx64 " MAS8=0x%" PRIx32 "\n",
                  __func__, address, pid, tlb->mas1, tlb->mas2, mask,
                  tlb->mas7_3, tlb->mas8);

    /* Check PID */
    tlb_pid = (tlb->mas1 & MAS1_TID_MASK) >> MAS1_TID_SHIFT;
    if (tlb_pid != 0 && tlb_pid != pid) {
        return -1;
    }

    /* Check effective address */
    if ((address & mask) != (tlb->mas2 & MAS2_EPN_MASK)) {
        return -1;
    }

    if (raddrp) {
        *raddrp = (tlb->mas7_3 & mask) | (address & ~mask);
    }

    return 0;
}

static bool is_epid_mmu(int mmu_idx)
{
    return mmu_idx == PPC_TLB_EPID_STORE || mmu_idx == PPC_TLB_EPID_LOAD;
}

static uint32_t mmubooke206_esr(int mmu_idx, MMUAccessType access_type)
{
    uint32_t esr = 0;
    if (access_type == MMU_DATA_STORE) {
        esr |= ESR_ST;
    }
    if (is_epid_mmu(mmu_idx)) {
        esr |= ESR_EPID;
    }
    return esr;
}

/*
 * Get the EPID register for the given mmu_idx. If this is a regular load,
 * construct the EPID access bits from the current processor state.
 *
 * Get the effective AS and PR bits and the PID. The PID is returned
 * only if an EPID load is requested, otherwise the caller must detect
 * the correct EPID. Return true if a valid EPID is returned.
 */
static bool mmubooke206_get_as(CPUPPCState *env,
                               int mmu_idx, uint32_t *epid_out,
                               bool *as_out, bool *pr_out)
{
    if (is_epid_mmu(mmu_idx)) {
        uint32_t epidr;
        if (mmu_idx == PPC_TLB_EPID_STORE) {
            epidr = env->spr[SPR_BOOKE_EPSC];
        } else {
            epidr = env->spr[SPR_BOOKE_EPLC];
        }
        *epid_out = (epidr & EPID_EPID) >> EPID_EPID_SHIFT;
        *as_out = !!(epidr & EPID_EAS);
        *pr_out = !!(epidr & EPID_EPR);
        return true;
    } else {
        *as_out = msr_ds;
        *pr_out = msr_pr;
        return false;
    }
}

/* Check if the tlb found by hashing really matches */
static int mmubooke206_check_tlb(CPUPPCState *env, ppcmas_tlb_t *tlb,
                                 hwaddr *raddr, int *prot,
                                 target_ulong address,
                                 MMUAccessType access_type, int mmu_idx)
{
    int prot2 = 0;
    uint32_t epid;
    bool as, pr;
    bool use_epid = mmubooke206_get_as(env, mmu_idx, &epid, &as, &pr);

    if (!use_epid) {
        if (ppcmas_tlb_check(env, tlb, raddr, address,
                             env->spr[SPR_BOOKE_PID]) >= 0) {
            goto found_tlb;
        }

        if (env->spr[SPR_BOOKE_PID1] &&
            ppcmas_tlb_check(env, tlb, raddr, address,
                             env->spr[SPR_BOOKE_PID1]) >= 0) {
            goto found_tlb;
        }

        if (env->spr[SPR_BOOKE_PID2] &&
            ppcmas_tlb_check(env, tlb, raddr, address,
                             env->spr[SPR_BOOKE_PID2]) >= 0) {
            goto found_tlb;
        }
    } else {
        if (ppcmas_tlb_check(env, tlb, raddr, address, epid) >= 0) {
            goto found_tlb;
        }
    }

    qemu_log_mask(CPU_LOG_MMU, "%s: TLB entry not found\n", __func__);
    return -1;

found_tlb:

    if (pr) {
        if (tlb->mas7_3 & MAS3_UR) {
            prot2 |= PAGE_READ;
        }
        if (tlb->mas7_3 & MAS3_UW) {
            prot2 |= PAGE_WRITE;
        }
        if (tlb->mas7_3 & MAS3_UX) {
            prot2 |= PAGE_EXEC;
        }
    } else {
        if (tlb->mas7_3 & MAS3_SR) {
            prot2 |= PAGE_READ;
        }
        if (tlb->mas7_3 & MAS3_SW) {
            prot2 |= PAGE_WRITE;
        }
        if (tlb->mas7_3 & MAS3_SX) {
            prot2 |= PAGE_EXEC;
        }
    }

    /* Check the address space and permissions */
    if (access_type == MMU_INST_FETCH) {
        /* There is no way to fetch code using epid load */
        assert(!use_epid);
        as = msr_ir;
    }

    if (as != ((tlb->mas1 & MAS1_TS) >> MAS1_TS_SHIFT)) {
        qemu_log_mask(CPU_LOG_MMU, "%s: AS doesn't match\n", __func__);
        return -1;
    }

    *prot = prot2;
    if (prot2 & prot_for_access_type(access_type)) {
        qemu_log_mask(CPU_LOG_MMU, "%s: good TLB!\n", __func__);
        return 0;
    }

    qemu_log_mask(CPU_LOG_MMU, "%s: no prot match: %x\n", __func__, prot2);
    return access_type == MMU_INST_FETCH ?
        -3 : -2;
}

static int mmubooke206_get_physical_address(CPUPPCState *env, mmu_ctx_t *ctx,
                                            target_ulong address,
                                            MMUAccessType access_type,
                                            int mmu_idx)
{
    ppcmas_tlb_t *tlb;
    hwaddr raddr;
    int i, j, ret;

    ret = -1;
    raddr = (hwaddr)-1ULL;

    for (i = 0; i < BOOKE206_MAX_TLBN; i++) {
        int ways = booke206_tlb_ways(env, i);

        for (j = 0; j < ways; j++) {
            tlb = booke206_get_tlbm(env, i, address, j);
            if (!tlb) {
                continue;
            }
            ret = mmubooke206_check_tlb(env, tlb, &raddr, &ctx->prot, address,
                                        access_type, mmu_idx);
            if (ret != -1) {
                goto found_tlb;
            }
        }
    }

found_tlb:

    if (ret >= 0) {
        ctx->raddr = raddr;
        qemu_log_mask(CPU_LOG_MMU, "%s: access granted " TARGET_FMT_lx
                      " => " TARGET_FMT_plx " %d %d\n", __func__, address,
                      ctx->raddr, ctx->prot, ret);
    } else {
        qemu_log_mask(CPU_LOG_MMU, "%s: access refused " TARGET_FMT_lx
                      " => " TARGET_FMT_plx " %d %d\n", __func__, address,
                      raddr, ctx->prot, ret);
    }

    return ret;
}

static const char *book3e_tsize_to_str[32] = {
    "1K", "2K", "4K", "8K", "16K", "32K", "64K", "128K", "256K", "512K",
    "1M", "2M", "4M", "8M", "16M", "32M", "64M", "128M", "256M", "512M",
    "1G", "2G", "4G", "8G", "16G", "32G", "64G", "128G", "256G", "512G",
    "1T", "2T"
};

static void mmubooke_dump_mmu(CPUPPCState *env)
{
    ppcemb_tlb_t *entry;
    int i;

    if (kvm_enabled() && !env->kvm_sw_tlb) {
        qemu_printf("Cannot access KVM TLB\n");
        return;
    }

    qemu_printf("\nTLB:\n");
    qemu_printf("Effective Physical Size PID Prot "
                "Attr\n");

    entry = &env->tlb.tlbe[0];
    for (i = 0; i < env->nb_tlb; i++, entry++) {
        hwaddr ea, pa;
        target_ulong mask;
        uint64_t size = (uint64_t)entry->size;
        char size_buf[20];

        /* Check valid flag */
        if (!(entry->prot & PAGE_VALID)) {
            continue;
        }

        mask = ~(entry->size - 1);
        ea = entry->EPN & mask;
        pa = entry->RPN & mask;
        /* Extend the physical address to 36 bits */
        pa |= (hwaddr)(entry->RPN & 0xF) << 32;
        if (size >= 1 * MiB) {
            snprintf(size_buf, sizeof(size_buf), "%3" PRId64 "M", size / MiB);
        } else {
            snprintf(size_buf, sizeof(size_buf), "%3" PRId64 "k", size / KiB);
        }
        qemu_printf("0x%016" PRIx64 " 0x%016" PRIx64 " %s %-5u %08x %08x\n",
                    (uint64_t)ea, (uint64_t)pa, size_buf, (uint32_t)entry->PID,
                    entry->prot, entry->attr);
    }

}

static void mmubooke206_dump_one_tlb(CPUPPCState *env, int tlbn, int offset,
                                     int tlbsize)
{
    ppcmas_tlb_t *entry;
    int i;

    qemu_printf("\nTLB%d:\n", tlbn);
    qemu_printf("Effective Physical Size TID TS SRWX"
                " URWX WIMGE U0123\n");

    entry = &env->tlb.tlbm[offset];
    for (i = 0; i < tlbsize; i++, entry++) {
        hwaddr ea, pa, size;
        int tsize;

        if (!(entry->mas1 & MAS1_VALID)) {
            continue;
        }

        tsize = (entry->mas1 & MAS1_TSIZE_MASK) >> MAS1_TSIZE_SHIFT;
        size = 1024ULL << tsize;
        ea = entry->mas2 & ~(size - 1);
        pa = entry->mas7_3 & ~(size - 1);

        qemu_printf("0x%016" PRIx64 " 0x%016" PRIx64 " %4s %-5u %1u S%c%c%c"
                    "U%c%c%c %c%c%c%c%c U%c%c%c%c\n",
                    (uint64_t)ea, (uint64_t)pa,
                    book3e_tsize_to_str[tsize],
                    (entry->mas1 & MAS1_TID_MASK) >> MAS1_TID_SHIFT,
                    (entry->mas1 & MAS1_TS) >> MAS1_TS_SHIFT,
                    entry->mas7_3 & MAS3_SR ? 'R' : '-',
                    entry->mas7_3 & MAS3_SW ? 'W' : '-',
                    entry->mas7_3 & MAS3_SX ?
                    'X' : '-',
                    entry->mas7_3 & MAS3_UR ? 'R' : '-',
                    entry->mas7_3 & MAS3_UW ? 'W' : '-',
                    entry->mas7_3 & MAS3_UX ? 'X' : '-',
                    entry->mas2 & MAS2_W ? 'W' : '-',
                    entry->mas2 & MAS2_I ? 'I' : '-',
                    entry->mas2 & MAS2_M ? 'M' : '-',
                    entry->mas2 & MAS2_G ? 'G' : '-',
                    entry->mas2 & MAS2_E ? 'E' : '-',
                    entry->mas7_3 & MAS3_U0 ? '0' : '-',
                    entry->mas7_3 & MAS3_U1 ? '1' : '-',
                    entry->mas7_3 & MAS3_U2 ? '2' : '-',
                    entry->mas7_3 & MAS3_U3 ? '3' : '-');
    }
}

static void mmubooke206_dump_mmu(CPUPPCState *env)
{
    int offset = 0;
    int i;

    if (kvm_enabled() && !env->kvm_sw_tlb) {
        qemu_printf("Cannot access KVM TLB\n");
        return;
    }

    for (i = 0; i < BOOKE206_MAX_TLBN; i++) {
        int size = booke206_tlb_size(env, i);

        if (size == 0) {
            continue;
        }

        mmubooke206_dump_one_tlb(env, i, offset, size);
        offset += size;
    }
}

static void mmu6xx_dump_BATs(CPUPPCState *env, int type)
{
    target_ulong *BATlt, *BATut, *BATu, *BATl;
    target_ulong BEPIl, BEPIu, bl;
    int i;

    switch (type) {
    case ACCESS_CODE:
        BATlt = env->IBAT[1];
        BATut = env->IBAT[0];
        break;
    default:
        BATlt = env->DBAT[1];
        BATut = env->DBAT[0];
        break;
    }

    for (i = 0; i < env->nb_BATs; i++) {
        BATu = &BATut[i];
        BATl = &BATlt[i];
        BEPIu = *BATu & 0xF0000000;
        BEPIl = *BATu & 0x0FFE0000;
        bl = (*BATu & 0x00001FFC) << 15;
        qemu_printf("%s BAT%d BATu " TARGET_FMT_lx
                    " BATl " TARGET_FMT_lx "\n\t" TARGET_FMT_lx " "
                    TARGET_FMT_lx " " TARGET_FMT_lx "\n",
                    type == ACCESS_CODE ? "code" : "data", i,
                    *BATu, *BATl, BEPIu, BEPIl, bl);
    }
}

static void mmu6xx_dump_mmu(CPUPPCState *env)
{
    PowerPCCPU *cpu = env_archcpu(env);
    ppc6xx_tlb_t *tlb;
    target_ulong sr;
    int type, way, entry, i;

    qemu_printf("HTAB base = 0x%"HWADDR_PRIx"\n", ppc_hash32_hpt_base(cpu));
    qemu_printf("HTAB mask = 0x%"HWADDR_PRIx"\n", ppc_hash32_hpt_mask(cpu));

    qemu_printf("\nSegment registers:\n");
    for (i = 0; i < 32; i++) {
        sr = env->sr[i];
        if (sr & 0x80000000) {
            qemu_printf("%02d T=%d Ks=%d Kp=%d BUID=0x%03x "
                        "CNTLR_SPEC=0x%05x\n", i,
                        sr & 0x80000000 ? 1 : 0, sr & 0x40000000 ? 1 : 0,
                        sr & 0x20000000 ? 1 : 0, (uint32_t)((sr >> 20) & 0x1FF),
                        (uint32_t)(sr & 0xFFFFF));
        } else {
            qemu_printf("%02d T=%d Ks=%d Kp=%d N=%d VSID=0x%06x\n", i,
                        sr & 0x80000000 ? 1 : 0, sr & 0x40000000 ? 1 : 0,
                        sr & 0x20000000 ? 1 : 0, sr & 0x10000000 ? 1 : 0,
                        (uint32_t)(sr & 0x00FFFFFF));
        }
    }

    qemu_printf("\nBATs:\n");
    mmu6xx_dump_BATs(env, ACCESS_INT);
    mmu6xx_dump_BATs(env, ACCESS_CODE);

    if (env->id_tlbs != 1) {
        qemu_printf("ERROR: 6xx MMU should have separated TLB"
                    " for code and data\n");
    }

    qemu_printf("\nTLBs [EPN EPN + SIZE]\n");

    for (type = 0; type < 2; type++) {
        for (way = 0; way < env->nb_ways; way++) {
            for (entry = env->nb_tlb * type + env->tlb_per_way * way;
                 entry < (env->nb_tlb * type + env->tlb_per_way * (way + 1));
                 entry++) {

                tlb = &env->tlb.tlb6[entry];
                qemu_printf("%s TLB %02d/%02d way:%d %s ["
                            TARGET_FMT_lx " " TARGET_FMT_lx "]\n",
                            type ? "code" : "data", entry % env->nb_tlb,
                            env->nb_tlb, way,
                            pte_is_valid(tlb->pte0) ? "valid" : "inval",
                            tlb->EPN, tlb->EPN + TARGET_PAGE_SIZE);
            }
        }
    }
}

void dump_mmu(CPUPPCState *env)
{
    switch (env->mmu_model) {
    case POWERPC_MMU_BOOKE:
        mmubooke_dump_mmu(env);
        break;
    case POWERPC_MMU_BOOKE206:
        mmubooke206_dump_mmu(env);
        break;
    case POWERPC_MMU_SOFT_6xx:
        mmu6xx_dump_mmu(env);
        break;
#if defined(TARGET_PPC64)
    case POWERPC_MMU_64B:
    case POWERPC_MMU_2_03:
    case POWERPC_MMU_2_06:
    case POWERPC_MMU_2_07:
        dump_slb(env_archcpu(env));
        break;
    case POWERPC_MMU_3_00:
        if (ppc64_v3_radix(env_archcpu(env))) {
            qemu_log_mask(LOG_UNIMP, "%s: the PPC64 MMU is unsupported\n",
                          __func__);
        } else {
            dump_slb(env_archcpu(env));
        }
        break;
#endif
    default:
        qemu_log_mask(LOG_UNIMP, "%s: unimplemented\n", __func__);
    }
}

static int check_physical(CPUPPCState *env, mmu_ctx_t *ctx, target_ulong eaddr,
                          MMUAccessType access_type)
{
    ctx->raddr = eaddr;
    ctx->prot = PAGE_READ | PAGE_EXEC;

    switch (env->mmu_model) {
    case POWERPC_MMU_SOFT_6xx:
    case POWERPC_MMU_SOFT_4xx:
    case POWERPC_MMU_REAL:
    case POWERPC_MMU_BOOKE:
        ctx->prot |= PAGE_WRITE;
        break;

    default:
        /* Caller's checks mean we should never get here for other models */
        g_assert_not_reached();
    }

    return 0;
}

int get_physical_address_wtlb(CPUPPCState *env, mmu_ctx_t *ctx,
                              target_ulong eaddr,
                              MMUAccessType access_type, int type,
                              int mmu_idx)
{
    int ret = -1;
    bool real_mode = (type == ACCESS_CODE && msr_ir == 0)
        || (type != ACCESS_CODE && msr_dr == 0);

    switch (env->mmu_model) {
    case POWERPC_MMU_SOFT_6xx:
        if (real_mode) {
            ret = check_physical(env, ctx, eaddr, access_type);
        } else {
            /* Try to find a BAT */
            if (env->nb_BATs != 0) {
                ret = get_bat_6xx_tlb(env, ctx, eaddr, access_type);
            }
            if (ret < 0) {
                /* We didn't match any BAT entry or don't have BATs */
                ret = get_segment_6xx_tlb(env, ctx, eaddr, access_type, type);
            }
        }
        break;

    case POWERPC_MMU_SOFT_4xx:
        if (real_mode) {
            ret = check_physical(env, ctx, eaddr, access_type);
        } else {
            ret = mmu40x_get_physical_address(env, ctx, eaddr, access_type);
        }
        break;
    case POWERPC_MMU_BOOKE:
        ret = mmubooke_get_physical_address(env, ctx, eaddr, access_type);
        break;
    case POWERPC_MMU_BOOKE206:
        ret = mmubooke206_get_physical_address(env, ctx, eaddr, access_type,
                                               mmu_idx);
        break;
    case POWERPC_MMU_MPC8xx:
        /* XXX: TODO */
        cpu_abort(env_cpu(env), "MPC8xx MMU model is not implemented\n");
        break;
    case POWERPC_MMU_REAL:
        if (real_mode) {
            ret = check_physical(env, ctx, eaddr, access_type);
        } else {
            cpu_abort(env_cpu(env),
                      "PowerPC in real mode do not do any translation\n");
        }
        return -1;
    default:
        cpu_abort(env_cpu(env), "Unknown or invalid MMU model\n");
        return -1;
    }

    return ret;
}

static void booke206_update_mas_tlb_miss(CPUPPCState *env, target_ulong address,
                                         MMUAccessType access_type, int mmu_idx)
{
    uint32_t epid;
    bool as, pr;
    uint32_t missed_tid = 0;
    bool use_epid = mmubooke206_get_as(env, mmu_idx, &epid, &as, &pr);

    if (access_type == MMU_INST_FETCH) {
        as = msr_ir;
    }
    env->spr[SPR_BOOKE_MAS0] =
        env->spr[SPR_BOOKE_MAS4] & MAS4_TLBSELD_MASK;
    env->spr[SPR_BOOKE_MAS1] = env->spr[SPR_BOOKE_MAS4] & MAS4_TSIZED_MASK;
    env->spr[SPR_BOOKE_MAS2] = env->spr[SPR_BOOKE_MAS4] & MAS4_WIMGED_MASK;
    env->spr[SPR_BOOKE_MAS3] = 0;
    env->spr[SPR_BOOKE_MAS6] = 0;
    env->spr[SPR_BOOKE_MAS7] = 0;

    /* AS */
    if (as) {
        env->spr[SPR_BOOKE_MAS1] |= MAS1_TS;
        env->spr[SPR_BOOKE_MAS6] |= MAS6_SAS;
    }

    env->spr[SPR_BOOKE_MAS1] |= MAS1_VALID;
    env->spr[SPR_BOOKE_MAS2] |= address & MAS2_EPN_MASK;

    if (!use_epid) {
        switch (env->spr[SPR_BOOKE_MAS4] & MAS4_TIDSELD_PIDZ) {
        case MAS4_TIDSELD_PID0:
            missed_tid = env->spr[SPR_BOOKE_PID];
            break;
        case MAS4_TIDSELD_PID1:
            missed_tid = env->spr[SPR_BOOKE_PID1];
            break;
        case MAS4_TIDSELD_PID2:
            missed_tid = env->spr[SPR_BOOKE_PID2];
            break;
        }
        env->spr[SPR_BOOKE_MAS6] |= env->spr[SPR_BOOKE_PID] << 16;
    } else {
        missed_tid = epid;
        env->spr[SPR_BOOKE_MAS6] |= missed_tid << 16;
    }
    env->spr[SPR_BOOKE_MAS1] |= (missed_tid << MAS1_TID_SHIFT);


    /* next victim logic */
    env->spr[SPR_BOOKE_MAS0] |= env->last_way << MAS0_ESEL_SHIFT;
    env->last_way++;
    env->last_way &= booke206_tlb_ways(env, 0) - 1;
    env->spr[SPR_BOOKE_MAS0] |= env->last_way << MAS0_NV_SHIFT;
}

/* Perform address translation */
/* TODO: Split this by mmu_model. */
static bool ppc_jumbo_xlate(PowerPCCPU *cpu, vaddr eaddr,
                            MMUAccessType access_type,
                            hwaddr *raddrp, int *psizep, int *protp,
                            int mmu_idx, bool guest_visible)
{
    CPUState *cs = CPU(cpu);
    CPUPPCState *env = &cpu->env;
    mmu_ctx_t ctx;
    int type;
    int ret;

    if (access_type == MMU_INST_FETCH) {
        /* code access */
        type = ACCESS_CODE;
    } else if (guest_visible) {
        /* data access */
        type = env->access_type;
    } else {
        type = ACCESS_INT;
    }

    ret = get_physical_address_wtlb(env, &ctx, eaddr, access_type,
                                    type, mmu_idx);
    if (ret == 0) {
        *raddrp = ctx.raddr;
        *protp = ctx.prot;
        *psizep = TARGET_PAGE_BITS;
        return true;
    }

    if (guest_visible) {
        log_cpu_state_mask(CPU_LOG_MMU, cs, 0);
        if (type == ACCESS_CODE) {
            switch (ret) {
            case -1:
                /* No matches in page tables or TLB */
                switch (env->mmu_model) {
                case POWERPC_MMU_SOFT_6xx:
                    cs->exception_index = POWERPC_EXCP_IFTLB;
                    env->error_code = 1 << 18;
                    env->spr[SPR_IMISS] = eaddr;
                    env->spr[SPR_ICMP] = 0x80000000 | ctx.ptem;
                    goto tlb_miss;
                case POWERPC_MMU_SOFT_4xx:
                    cs->exception_index = POWERPC_EXCP_ITLB;
                    env->error_code = 0;
                    env->spr[SPR_40x_DEAR] = eaddr;
                    env->spr[SPR_40x_ESR] = 0x00000000;
                    break;
                case POWERPC_MMU_BOOKE206:
                    booke206_update_mas_tlb_miss(env, eaddr, 2, mmu_idx);
                    /* fall through */
                case POWERPC_MMU_BOOKE:
                    cs->exception_index = POWERPC_EXCP_ITLB;
                    env->error_code = 0;
                    env->spr[SPR_BOOKE_DEAR] = eaddr;
                    env->spr[SPR_BOOKE_ESR] =
                        mmubooke206_esr(mmu_idx, MMU_DATA_LOAD);
                    break;
                case POWERPC_MMU_MPC8xx:
                    cpu_abort(cs, "MPC8xx MMU model is not implemented\n");
                case POWERPC_MMU_REAL:
                    cpu_abort(cs, "PowerPC in real mode should never raise "
                              "any MMU exceptions\n");
                default:
                    cpu_abort(cs, "Unknown or invalid MMU model\n");
                }
                break;
            case -2:
                /* Access rights violation */
                cs->exception_index =
                    POWERPC_EXCP_ISI;
                env->error_code = 0x08000000;
                break;
            case -3:
                /* No execute protection violation */
                if ((env->mmu_model == POWERPC_MMU_BOOKE) ||
                    (env->mmu_model == POWERPC_MMU_BOOKE206)) {
                    env->spr[SPR_BOOKE_ESR] = 0x00000000;
                }
                cs->exception_index = POWERPC_EXCP_ISI;
                env->error_code = 0x10000000;
                break;
            case -4:
                /* Direct store exception */
                /* No code fetch is allowed in direct-store areas */
                cs->exception_index = POWERPC_EXCP_ISI;
                env->error_code = 0x10000000;
                break;
            }
        } else {
            switch (ret) {
            case -1:
                /* No matches in page tables or TLB */
                switch (env->mmu_model) {
                case POWERPC_MMU_SOFT_6xx:
                    if (access_type == MMU_DATA_STORE) {
                        cs->exception_index = POWERPC_EXCP_DSTLB;
                        env->error_code = 1 << 16;
                    } else {
                        cs->exception_index = POWERPC_EXCP_DLTLB;
                        env->error_code = 0;
                    }
                    env->spr[SPR_DMISS] = eaddr;
                    env->spr[SPR_DCMP] = 0x80000000 | ctx.ptem;
                tlb_miss:
                    env->error_code |= ctx.key << 19;
                    env->spr[SPR_HASH1] = ppc_hash32_hpt_base(cpu) +
                        get_pteg_offset32(cpu, ctx.hash[0]);
                    env->spr[SPR_HASH2] = ppc_hash32_hpt_base(cpu) +
                        get_pteg_offset32(cpu, ctx.hash[1]);
                    break;
                case POWERPC_MMU_SOFT_4xx:
                    cs->exception_index = POWERPC_EXCP_DTLB;
                    env->error_code = 0;
                    env->spr[SPR_40x_DEAR] = eaddr;
                    if (access_type == MMU_DATA_STORE) {
                        env->spr[SPR_40x_ESR] = 0x00800000;
                    } else {
                        env->spr[SPR_40x_ESR] = 0x00000000;
                    }
                    break;
                case POWERPC_MMU_MPC8xx:
                    /* XXX: TODO */
                    cpu_abort(cs, "MPC8xx MMU model is not implemented\n");
                case POWERPC_MMU_BOOKE206:
                    booke206_update_mas_tlb_miss(env, eaddr, access_type,
                                                 mmu_idx);
                    /* fall through */
                case POWERPC_MMU_BOOKE:
                    cs->exception_index = POWERPC_EXCP_DTLB;
                    env->error_code = 0;
                    env->spr[SPR_BOOKE_DEAR] = eaddr;
                    env->spr[SPR_BOOKE_ESR] =
                        mmubooke206_esr(mmu_idx, access_type);
                    break;
                case POWERPC_MMU_REAL:
                    cpu_abort(cs, "PowerPC in real mode should never raise "
                              "any MMU exceptions\n");
                default:
                    cpu_abort(cs, "Unknown or invalid MMU model\n");
                }
                break;
            case -2:
                /* Access rights violation */
                cs->exception_index = POWERPC_EXCP_DSI;
                env->error_code = 0;
                if (env->mmu_model == POWERPC_MMU_SOFT_4xx) {
                    env->spr[SPR_40x_DEAR] = eaddr;
                    if (access_type == MMU_DATA_STORE) {
                        env->spr[SPR_40x_ESR] |= 0x00800000;
                    }
                } else if ((env->mmu_model == POWERPC_MMU_BOOKE) ||
                           (env->mmu_model == POWERPC_MMU_BOOKE206)) {
                    env->spr[SPR_BOOKE_DEAR] = eaddr;
                    env->spr[SPR_BOOKE_ESR] =
                        mmubooke206_esr(mmu_idx, access_type);
                } else {
                    env->spr[SPR_DAR] = eaddr;
                    if (access_type == MMU_DATA_STORE) {
                        env->spr[SPR_DSISR] = 0x0A000000;
                    } else {
                        env->spr[SPR_DSISR] = 0x08000000;
                    }
                }
                break;
            case -4:
                /* Direct store exception */
                switch (type) {
                case ACCESS_FLOAT:
                    /* Floating point load/store */
                    cs->exception_index = POWERPC_EXCP_ALIGN;
                    env->error_code = POWERPC_EXCP_ALIGN_FP;
                    env->spr[SPR_DAR] = eaddr;
                    break;
                case ACCESS_RES:
                    /* lwarx, ldarx or stwcx.
                     */
                    cs->exception_index = POWERPC_EXCP_DSI;
                    env->error_code = 0;
                    env->spr[SPR_DAR] = eaddr;
                    if (access_type == MMU_DATA_STORE) {
                        env->spr[SPR_DSISR] = 0x06000000;
                    } else {
                        env->spr[SPR_DSISR] = 0x04000000;
                    }
                    break;
                case ACCESS_EXT:
                    /* eciwx or ecowx */
                    cs->exception_index = POWERPC_EXCP_DSI;
                    env->error_code = 0;
                    env->spr[SPR_DAR] = eaddr;
                    if (access_type == MMU_DATA_STORE) {
                        env->spr[SPR_DSISR] = 0x06100000;
                    } else {
                        env->spr[SPR_DSISR] = 0x04100000;
                    }
                    break;
                default:
                    printf("DSI: invalid exception (%d)\n", ret);
                    cs->exception_index = POWERPC_EXCP_PROGRAM;
                    env->error_code =
                        POWERPC_EXCP_INVAL | POWERPC_EXCP_INVAL_INVAL;
                    env->spr[SPR_DAR] = eaddr;
                    break;
                }
                break;
            }
        }
    }
    return false;
}

/*****************************************************************************/

bool ppc_xlate(PowerPCCPU *cpu, vaddr eaddr, MMUAccessType access_type,
               hwaddr *raddrp, int *psizep, int *protp,
               int mmu_idx, bool guest_visible)
{
    switch (cpu->env.mmu_model) {
#if defined(TARGET_PPC64)
    case POWERPC_MMU_3_00:
        if (ppc64_v3_radix(cpu)) {
            return ppc_radix64_xlate(cpu, eaddr, access_type, raddrp,
                                     psizep, protp, mmu_idx, guest_visible);
        }
        /* fall through */
    case POWERPC_MMU_64B:
    case POWERPC_MMU_2_03:
    case POWERPC_MMU_2_06:
    case POWERPC_MMU_2_07:
        return ppc_hash64_xlate(cpu, eaddr, access_type,
                                raddrp, psizep, protp, mmu_idx, guest_visible);
#endif

    case POWERPC_MMU_32B:
    case POWERPC_MMU_601:
        return ppc_hash32_xlate(cpu, eaddr, access_type, raddrp,
                                psizep, protp, mmu_idx, guest_visible);

    default:
        return ppc_jumbo_xlate(cpu, eaddr, access_type, raddrp,
                               psizep, protp, mmu_idx, guest_visible);
    }
}

hwaddr ppc_cpu_get_phys_page_debug(CPUState *cs, vaddr addr)
{
    PowerPCCPU *cpu = POWERPC_CPU(cs);
    hwaddr raddr;
    int s, p;

    /*
     * Some MMUs have separate TLBs for code and data. If we only
     * try an MMU_DATA_LOAD, we may not be able to read instructions
     * mapped by code TLBs, so we also try a MMU_INST_FETCH.
     */
    if (ppc_xlate(cpu, addr, MMU_DATA_LOAD, &raddr, &s, &p,
                  cpu_mmu_index(&cpu->env, false), false) ||
        ppc_xlate(cpu, addr, MMU_INST_FETCH, &raddr, &s, &p,
                  cpu_mmu_index(&cpu->env, true), false)) {
        return raddr & TARGET_PAGE_MASK;
    }
    return -1;
}