1 /* 2 * PowerPC MMU, TLB, SLB and BAT emulation helpers for QEMU. 3 * 4 * Copyright (c) 2003-2007 Jocelyn Mayer 5 * 6 * This library is free software; you can redistribute it and/or 7 * modify it under the terms of the GNU Lesser General Public 8 * License as published by the Free Software Foundation; either 9 * version 2.1 of the License, or (at your option) any later version. 10 * 11 * This library is distributed in the hope that it will be useful, 12 * but WITHOUT ANY WARRANTY; without even the implied warranty of 13 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU 14 * Lesser General Public License for more details. 15 * 16 * You should have received a copy of the GNU Lesser General Public 17 * License along with this library; if not, see <http://www.gnu.org/licenses/>. 18 */ 19 20 #include "qemu/osdep.h" 21 #include "qemu/units.h" 22 #include "cpu.h" 23 #include "exec/helper-proto.h" 24 #include "sysemu/kvm.h" 25 #include "kvm_ppc.h" 26 #include "mmu-hash64.h" 27 #include "mmu-hash32.h" 28 #include "exec/exec-all.h" 29 #include "exec/cpu_ldst.h" 30 #include "exec/log.h" 31 #include "helper_regs.h" 32 #include "qemu/error-report.h" 33 #include "qemu/main-loop.h" 34 #include "qemu/qemu-print.h" 35 #include "internal.h" 36 #include "mmu-book3s-v3.h" 37 #include "mmu-radix64.h" 38 39 /* #define DEBUG_MMU */ 40 /* #define DEBUG_BATS */ 41 /* #define DEBUG_SOFTWARE_TLB */ 42 /* #define DUMP_PAGE_TABLES */ 43 /* #define FLUSH_ALL_TLBS */ 44 45 #ifdef DEBUG_MMU 46 # define LOG_MMU_STATE(cpu) log_cpu_state_mask(CPU_LOG_MMU, (cpu), 0) 47 #else 48 # define LOG_MMU_STATE(cpu) do { } while (0) 49 #endif 50 51 #ifdef DEBUG_SOFTWARE_TLB 52 # define LOG_SWTLB(...) qemu_log_mask(CPU_LOG_MMU, __VA_ARGS__) 53 #else 54 # define LOG_SWTLB(...) do { } while (0) 55 #endif 56 57 #ifdef DEBUG_BATS 58 # define LOG_BATS(...) qemu_log_mask(CPU_LOG_MMU, __VA_ARGS__) 59 #else 60 # define LOG_BATS(...) do { } while (0) 61 #endif 62 63 /*****************************************************************************/ 64 /* PowerPC MMU emulation */ 65 66 /* Context used internally during MMU translations */ 67 typedef struct mmu_ctx_t mmu_ctx_t; 68 struct mmu_ctx_t { 69 hwaddr raddr; /* Real address */ 70 hwaddr eaddr; /* Effective address */ 71 int prot; /* Protection bits */ 72 hwaddr hash[2]; /* Pagetable hash values */ 73 target_ulong ptem; /* Virtual segment ID | API */ 74 int key; /* Access key */ 75 int nx; /* Non-execute area */ 76 }; 77 78 /* Common routines used by software and hardware TLBs emulation */ 79 static inline int pte_is_valid(target_ulong pte0) 80 { 81 return pte0 & 0x80000000 ? 1 : 0; 82 } 83 84 static inline void pte_invalidate(target_ulong *pte0) 85 { 86 *pte0 &= ~0x80000000; 87 } 88 89 #define PTE_PTEM_MASK 0x7FFFFFBF 90 #define PTE_CHECK_MASK (TARGET_PAGE_MASK | 0x7B) 91 92 static int pp_check(int key, int pp, int nx) 93 { 94 int access; 95 96 /* Compute access rights */ 97 access = 0; 98 if (key == 0) { 99 switch (pp) { 100 case 0x0: 101 case 0x1: 102 case 0x2: 103 access |= PAGE_WRITE; 104 /* fall through */ 105 case 0x3: 106 access |= PAGE_READ; 107 break; 108 } 109 } else { 110 switch (pp) { 111 case 0x0: 112 access = 0; 113 break; 114 case 0x1: 115 case 0x3: 116 access = PAGE_READ; 117 break; 118 case 0x2: 119 access = PAGE_READ | PAGE_WRITE; 120 break; 121 } 122 } 123 if (nx == 0) { 124 access |= PAGE_EXEC; 125 } 126 127 return access; 128 } 129 130 static int check_prot(int prot, MMUAccessType access_type) 131 { 132 return prot & prot_for_access_type(access_type) ? 
0 : -2; 133 } 134 135 static int ppc6xx_tlb_pte_check(mmu_ctx_t *ctx, target_ulong pte0, 136 target_ulong pte1, int h, 137 MMUAccessType access_type) 138 { 139 target_ulong ptem, mmask; 140 int access, ret, pteh, ptev, pp; 141 142 ret = -1; 143 /* Check validity and table match */ 144 ptev = pte_is_valid(pte0); 145 pteh = (pte0 >> 6) & 1; 146 if (ptev && h == pteh) { 147 /* Check vsid & api */ 148 ptem = pte0 & PTE_PTEM_MASK; 149 mmask = PTE_CHECK_MASK; 150 pp = pte1 & 0x00000003; 151 if (ptem == ctx->ptem) { 152 if (ctx->raddr != (hwaddr)-1ULL) { 153 /* all matches should have equal RPN, WIMG & PP */ 154 if ((ctx->raddr & mmask) != (pte1 & mmask)) { 155 qemu_log_mask(CPU_LOG_MMU, "Bad RPN/WIMG/PP\n"); 156 return -3; 157 } 158 } 159 /* Compute access rights */ 160 access = pp_check(ctx->key, pp, ctx->nx); 161 /* Keep the matching PTE information */ 162 ctx->raddr = pte1; 163 ctx->prot = access; 164 ret = check_prot(ctx->prot, access_type); 165 if (ret == 0) { 166 /* Access granted */ 167 qemu_log_mask(CPU_LOG_MMU, "PTE access granted !\n"); 168 } else { 169 /* Access right violation */ 170 qemu_log_mask(CPU_LOG_MMU, "PTE access rejected\n"); 171 } 172 } 173 } 174 175 return ret; 176 } 177 178 static int pte_update_flags(mmu_ctx_t *ctx, target_ulong *pte1p, 179 int ret, MMUAccessType access_type) 180 { 181 int store = 0; 182 183 /* Update page flags */ 184 if (!(*pte1p & 0x00000100)) { 185 /* Update accessed flag */ 186 *pte1p |= 0x00000100; 187 store = 1; 188 } 189 if (!(*pte1p & 0x00000080)) { 190 if (access_type == MMU_DATA_STORE && ret == 0) { 191 /* Update changed flag */ 192 *pte1p |= 0x00000080; 193 store = 1; 194 } else { 195 /* Force page fault for first write access */ 196 ctx->prot &= ~PAGE_WRITE; 197 } 198 } 199 200 return store; 201 } 202 203 /* Software driven TLB helpers */ 204 static inline int ppc6xx_tlb_getnum(CPUPPCState *env, target_ulong eaddr, 205 int way, int is_code) 206 { 207 int nr; 208 209 /* Select TLB num in a way from address */ 210 nr = (eaddr >> TARGET_PAGE_BITS) & (env->tlb_per_way - 1); 211 /* Select TLB way */ 212 nr += env->tlb_per_way * way; 213 /* 6xx have separate TLBs for instructions and data */ 214 if (is_code && env->id_tlbs == 1) { 215 nr += env->nb_tlb; 216 } 217 218 return nr; 219 } 220 221 static inline void ppc6xx_tlb_invalidate_all(CPUPPCState *env) 222 { 223 ppc6xx_tlb_t *tlb; 224 int nr, max; 225 226 /* LOG_SWTLB("Invalidate all TLBs\n"); */ 227 /* Invalidate all defined software TLB */ 228 max = env->nb_tlb; 229 if (env->id_tlbs == 1) { 230 max *= 2; 231 } 232 for (nr = 0; nr < max; nr++) { 233 tlb = &env->tlb.tlb6[nr]; 234 pte_invalidate(&tlb->pte0); 235 } 236 tlb_flush(env_cpu(env)); 237 } 238 239 static inline void ppc6xx_tlb_invalidate_virt2(CPUPPCState *env, 240 target_ulong eaddr, 241 int is_code, int match_epn) 242 { 243 #if !defined(FLUSH_ALL_TLBS) 244 CPUState *cs = env_cpu(env); 245 ppc6xx_tlb_t *tlb; 246 int way, nr; 247 248 /* Invalidate ITLB + DTLB, all ways */ 249 for (way = 0; way < env->nb_ways; way++) { 250 nr = ppc6xx_tlb_getnum(env, eaddr, way, is_code); 251 tlb = &env->tlb.tlb6[nr]; 252 if (pte_is_valid(tlb->pte0) && (match_epn == 0 || eaddr == tlb->EPN)) { 253 LOG_SWTLB("TLB invalidate %d/%d " TARGET_FMT_lx "\n", nr, 254 env->nb_tlb, eaddr); 255 pte_invalidate(&tlb->pte0); 256 tlb_flush_page(cs, tlb->EPN); 257 } 258 } 259 #else 260 /* XXX: PowerPC specification say this is valid as well */ 261 ppc6xx_tlb_invalidate_all(env); 262 #endif 263 } 264 265 static inline void ppc6xx_tlb_invalidate_virt(CPUPPCState *env, 266 
target_ulong eaddr, int is_code) 267 { 268 ppc6xx_tlb_invalidate_virt2(env, eaddr, is_code, 0); 269 } 270 271 static void ppc6xx_tlb_store(CPUPPCState *env, target_ulong EPN, int way, 272 int is_code, target_ulong pte0, target_ulong pte1) 273 { 274 ppc6xx_tlb_t *tlb; 275 int nr; 276 277 nr = ppc6xx_tlb_getnum(env, EPN, way, is_code); 278 tlb = &env->tlb.tlb6[nr]; 279 LOG_SWTLB("Set TLB %d/%d EPN " TARGET_FMT_lx " PTE0 " TARGET_FMT_lx 280 " PTE1 " TARGET_FMT_lx "\n", nr, env->nb_tlb, EPN, pte0, pte1); 281 /* Invalidate any pending reference in QEMU for this virtual address */ 282 ppc6xx_tlb_invalidate_virt2(env, EPN, is_code, 1); 283 tlb->pte0 = pte0; 284 tlb->pte1 = pte1; 285 tlb->EPN = EPN; 286 /* Store last way for LRU mechanism */ 287 env->last_way = way; 288 } 289 290 static int ppc6xx_tlb_check(CPUPPCState *env, mmu_ctx_t *ctx, 291 target_ulong eaddr, MMUAccessType access_type) 292 { 293 ppc6xx_tlb_t *tlb; 294 int nr, best, way; 295 int ret; 296 297 best = -1; 298 ret = -1; /* No TLB found */ 299 for (way = 0; way < env->nb_ways; way++) { 300 nr = ppc6xx_tlb_getnum(env, eaddr, way, access_type == MMU_INST_FETCH); 301 tlb = &env->tlb.tlb6[nr]; 302 /* This test "emulates" the PTE index match for hardware TLBs */ 303 if ((eaddr & TARGET_PAGE_MASK) != tlb->EPN) { 304 LOG_SWTLB("TLB %d/%d %s [" TARGET_FMT_lx " " TARGET_FMT_lx 305 "] <> " TARGET_FMT_lx "\n", nr, env->nb_tlb, 306 pte_is_valid(tlb->pte0) ? "valid" : "inval", 307 tlb->EPN, tlb->EPN + TARGET_PAGE_SIZE, eaddr); 308 continue; 309 } 310 LOG_SWTLB("TLB %d/%d %s " TARGET_FMT_lx " <> " TARGET_FMT_lx " " 311 TARGET_FMT_lx " %c %c\n", nr, env->nb_tlb, 312 pte_is_valid(tlb->pte0) ? "valid" : "inval", 313 tlb->EPN, eaddr, tlb->pte1, 314 access_type == MMU_DATA_STORE ? 'S' : 'L', 315 access_type == MMU_INST_FETCH ? 'I' : 'D'); 316 switch (ppc6xx_tlb_pte_check(ctx, tlb->pte0, tlb->pte1, 317 0, access_type)) { 318 case -3: 319 /* TLB inconsistency */ 320 return -1; 321 case -2: 322 /* Access violation */ 323 ret = -2; 324 best = nr; 325 break; 326 case -1: 327 default: 328 /* No match */ 329 break; 330 case 0: 331 /* access granted */ 332 /* 333 * XXX: we should go on looping to check all TLBs 334 * consistency but we can speed-up the whole thing as 335 * the result would be undefined if TLBs are not 336 * consistent. 
337 */ 338 ret = 0; 339 best = nr; 340 goto done; 341 } 342 } 343 if (best != -1) { 344 done: 345 LOG_SWTLB("found TLB at addr " TARGET_FMT_plx " prot=%01x ret=%d\n", 346 ctx->raddr & TARGET_PAGE_MASK, ctx->prot, ret); 347 /* Update page flags */ 348 pte_update_flags(ctx, &env->tlb.tlb6[best].pte1, ret, access_type); 349 } 350 351 return ret; 352 } 353 354 /* Perform BAT hit & translation */ 355 static inline void bat_size_prot(CPUPPCState *env, target_ulong *blp, 356 int *validp, int *protp, target_ulong *BATu, 357 target_ulong *BATl) 358 { 359 target_ulong bl; 360 int pp, valid, prot; 361 362 bl = (*BATu & 0x00001FFC) << 15; 363 valid = 0; 364 prot = 0; 365 if (((msr_pr == 0) && (*BATu & 0x00000002)) || 366 ((msr_pr != 0) && (*BATu & 0x00000001))) { 367 valid = 1; 368 pp = *BATl & 0x00000003; 369 if (pp != 0) { 370 prot = PAGE_READ | PAGE_EXEC; 371 if (pp == 0x2) { 372 prot |= PAGE_WRITE; 373 } 374 } 375 } 376 *blp = bl; 377 *validp = valid; 378 *protp = prot; 379 } 380 381 static int get_bat_6xx_tlb(CPUPPCState *env, mmu_ctx_t *ctx, 382 target_ulong virtual, MMUAccessType access_type) 383 { 384 target_ulong *BATlt, *BATut, *BATu, *BATl; 385 target_ulong BEPIl, BEPIu, bl; 386 int i, valid, prot; 387 int ret = -1; 388 bool ifetch = access_type == MMU_INST_FETCH; 389 390 LOG_BATS("%s: %cBAT v " TARGET_FMT_lx "\n", __func__, 391 ifetch ? 'I' : 'D', virtual); 392 if (ifetch) { 393 BATlt = env->IBAT[1]; 394 BATut = env->IBAT[0]; 395 } else { 396 BATlt = env->DBAT[1]; 397 BATut = env->DBAT[0]; 398 } 399 for (i = 0; i < env->nb_BATs; i++) { 400 BATu = &BATut[i]; 401 BATl = &BATlt[i]; 402 BEPIu = *BATu & 0xF0000000; 403 BEPIl = *BATu & 0x0FFE0000; 404 bat_size_prot(env, &bl, &valid, &prot, BATu, BATl); 405 LOG_BATS("%s: %cBAT%d v " TARGET_FMT_lx " BATu " TARGET_FMT_lx 406 " BATl " TARGET_FMT_lx "\n", __func__, 407 ifetch ? 'I' : 'D', i, virtual, *BATu, *BATl); 408 if ((virtual & 0xF0000000) == BEPIu && 409 ((virtual & 0x0FFE0000) & ~bl) == BEPIl) { 410 /* BAT matches */ 411 if (valid != 0) { 412 /* Get physical address */ 413 ctx->raddr = (*BATl & 0xF0000000) | 414 ((virtual & 0x0FFE0000 & bl) | (*BATl & 0x0FFE0000)) | 415 (virtual & 0x0001F000); 416 /* Compute access rights */ 417 ctx->prot = prot; 418 ret = check_prot(ctx->prot, access_type); 419 if (ret == 0) { 420 LOG_BATS("BAT %d match: r " TARGET_FMT_plx " prot=%c%c\n", 421 i, ctx->raddr, ctx->prot & PAGE_READ ? 'R' : '-', 422 ctx->prot & PAGE_WRITE ? 'W' : '-'); 423 } 424 break; 425 } 426 } 427 } 428 if (ret < 0) { 429 #if defined(DEBUG_BATS) 430 if (qemu_log_enabled()) { 431 LOG_BATS("no BAT match for " TARGET_FMT_lx ":\n", virtual); 432 for (i = 0; i < 4; i++) { 433 BATu = &BATut[i]; 434 BATl = &BATlt[i]; 435 BEPIu = *BATu & 0xF0000000; 436 BEPIl = *BATu & 0x0FFE0000; 437 bl = (*BATu & 0x00001FFC) << 15; 438 LOG_BATS("%s: %cBAT%d v " TARGET_FMT_lx " BATu " TARGET_FMT_lx 439 " BATl " TARGET_FMT_lx "\n\t" TARGET_FMT_lx " " 440 TARGET_FMT_lx " " TARGET_FMT_lx "\n", 441 __func__, ifetch ? 
'I' : 'D', i, virtual, 442 *BATu, *BATl, BEPIu, BEPIl, bl); 443 } 444 } 445 #endif 446 } 447 /* No hit */ 448 return ret; 449 } 450 451 /* Perform segment based translation */ 452 static int get_segment_6xx_tlb(CPUPPCState *env, mmu_ctx_t *ctx, 453 target_ulong eaddr, MMUAccessType access_type, 454 int type) 455 { 456 PowerPCCPU *cpu = env_archcpu(env); 457 hwaddr hash; 458 target_ulong vsid; 459 int ds, pr, target_page_bits; 460 int ret; 461 target_ulong sr, pgidx; 462 463 pr = msr_pr; 464 ctx->eaddr = eaddr; 465 466 sr = env->sr[eaddr >> 28]; 467 ctx->key = (((sr & 0x20000000) && (pr != 0)) || 468 ((sr & 0x40000000) && (pr == 0))) ? 1 : 0; 469 ds = sr & 0x80000000 ? 1 : 0; 470 ctx->nx = sr & 0x10000000 ? 1 : 0; 471 vsid = sr & 0x00FFFFFF; 472 target_page_bits = TARGET_PAGE_BITS; 473 qemu_log_mask(CPU_LOG_MMU, 474 "Check segment v=" TARGET_FMT_lx " %d " TARGET_FMT_lx 475 " nip=" TARGET_FMT_lx " lr=" TARGET_FMT_lx 476 " ir=%d dr=%d pr=%d %d t=%d\n", 477 eaddr, (int)(eaddr >> 28), sr, env->nip, env->lr, (int)msr_ir, 478 (int)msr_dr, pr != 0 ? 1 : 0, access_type == MMU_DATA_STORE, type); 479 pgidx = (eaddr & ~SEGMENT_MASK_256M) >> target_page_bits; 480 hash = vsid ^ pgidx; 481 ctx->ptem = (vsid << 7) | (pgidx >> 10); 482 483 qemu_log_mask(CPU_LOG_MMU, 484 "pte segment: key=%d ds %d nx %d vsid " TARGET_FMT_lx "\n", 485 ctx->key, ds, ctx->nx, vsid); 486 ret = -1; 487 if (!ds) { 488 /* Check if instruction fetch is allowed, if needed */ 489 if (type != ACCESS_CODE || ctx->nx == 0) { 490 /* Page address translation */ 491 qemu_log_mask(CPU_LOG_MMU, "htab_base " TARGET_FMT_plx 492 " htab_mask " TARGET_FMT_plx 493 " hash " TARGET_FMT_plx "\n", 494 ppc_hash32_hpt_base(cpu), ppc_hash32_hpt_mask(cpu), hash); 495 ctx->hash[0] = hash; 496 ctx->hash[1] = ~hash; 497 498 /* Initialize real address with an invalid value */ 499 ctx->raddr = (hwaddr)-1ULL; 500 /* Software TLB search */ 501 ret = ppc6xx_tlb_check(env, ctx, eaddr, access_type); 502 #if defined(DUMP_PAGE_TABLES) 503 if (qemu_loglevel_mask(CPU_LOG_MMU)) { 504 CPUState *cs = env_cpu(env); 505 hwaddr curaddr; 506 uint32_t a0, a1, a2, a3; 507 508 qemu_log("Page table: " TARGET_FMT_plx " len " TARGET_FMT_plx 509 "\n", ppc_hash32_hpt_base(cpu), 510 ppc_hash32_hpt_mask(env) + 0x80); 511 for (curaddr = ppc_hash32_hpt_base(cpu); 512 curaddr < (ppc_hash32_hpt_base(cpu) 513 + ppc_hash32_hpt_mask(cpu) + 0x80); 514 curaddr += 16) { 515 a0 = ldl_phys(cs->as, curaddr); 516 a1 = ldl_phys(cs->as, curaddr + 4); 517 a2 = ldl_phys(cs->as, curaddr + 8); 518 a3 = ldl_phys(cs->as, curaddr + 12); 519 if (a0 != 0 || a1 != 0 || a2 != 0 || a3 != 0) { 520 qemu_log(TARGET_FMT_plx ": %08x %08x %08x %08x\n", 521 curaddr, a0, a1, a2, a3); 522 } 523 } 524 } 525 #endif 526 } else { 527 qemu_log_mask(CPU_LOG_MMU, "No access allowed\n"); 528 ret = -3; 529 } 530 } else { 531 target_ulong sr; 532 533 qemu_log_mask(CPU_LOG_MMU, "direct store...\n"); 534 /* Direct-store segment : absolutely *BUGGY* for now */ 535 536 /* 537 * Direct-store implies a 32-bit MMU. 538 * Check the Segment Register's bus unit ID (BUID). 539 */ 540 sr = env->sr[eaddr >> 28]; 541 if ((sr & 0x1FF00000) >> 20 == 0x07f) { 542 /* 543 * Memory-forced I/O controller interface access 544 * 545 * If T=1 and BUID=x'07F', the 601 performs a memory 546 * access to SR[28-31] LA[4-31], bypassing all protection 547 * mechanisms. 
548 */ 549 ctx->raddr = ((sr & 0xF) << 28) | (eaddr & 0x0FFFFFFF); 550 ctx->prot = PAGE_READ | PAGE_WRITE | PAGE_EXEC; 551 return 0; 552 } 553 554 switch (type) { 555 case ACCESS_INT: 556 /* Integer load/store : only access allowed */ 557 break; 558 case ACCESS_CODE: 559 /* No code fetch is allowed in direct-store areas */ 560 return -4; 561 case ACCESS_FLOAT: 562 /* Floating point load/store */ 563 return -4; 564 case ACCESS_RES: 565 /* lwarx, ldarx or srwcx. */ 566 return -4; 567 case ACCESS_CACHE: 568 /* 569 * dcba, dcbt, dcbtst, dcbf, dcbi, dcbst, dcbz, or icbi 570 * 571 * Should make the instruction do no-op. As it already do 572 * no-op, it's quite easy :-) 573 */ 574 ctx->raddr = eaddr; 575 return 0; 576 case ACCESS_EXT: 577 /* eciwx or ecowx */ 578 return -4; 579 default: 580 qemu_log_mask(CPU_LOG_MMU, "ERROR: instruction should not need " 581 "address translation\n"); 582 return -4; 583 } 584 if ((access_type == MMU_DATA_STORE || ctx->key != 1) && 585 (access_type == MMU_DATA_LOAD || ctx->key != 0)) { 586 ctx->raddr = eaddr; 587 ret = 2; 588 } else { 589 ret = -2; 590 } 591 } 592 593 return ret; 594 } 595 596 /* Generic TLB check function for embedded PowerPC implementations */ 597 static int ppcemb_tlb_check(CPUPPCState *env, ppcemb_tlb_t *tlb, 598 hwaddr *raddrp, 599 target_ulong address, uint32_t pid, int ext, 600 int i) 601 { 602 target_ulong mask; 603 604 /* Check valid flag */ 605 if (!(tlb->prot & PAGE_VALID)) { 606 return -1; 607 } 608 mask = ~(tlb->size - 1); 609 LOG_SWTLB("%s: TLB %d address " TARGET_FMT_lx " PID %u <=> " TARGET_FMT_lx 610 " " TARGET_FMT_lx " %u %x\n", __func__, i, address, pid, tlb->EPN, 611 mask, (uint32_t)tlb->PID, tlb->prot); 612 /* Check PID */ 613 if (tlb->PID != 0 && tlb->PID != pid) { 614 return -1; 615 } 616 /* Check effective address */ 617 if ((address & mask) != tlb->EPN) { 618 return -1; 619 } 620 *raddrp = (tlb->RPN & mask) | (address & ~mask); 621 if (ext) { 622 /* Extend the physical address to 36 bits */ 623 *raddrp |= (uint64_t)(tlb->RPN & 0xF) << 32; 624 } 625 626 return 0; 627 } 628 629 /* Generic TLB search function for PowerPC embedded implementations */ 630 static int ppcemb_tlb_search(CPUPPCState *env, target_ulong address, 631 uint32_t pid) 632 { 633 ppcemb_tlb_t *tlb; 634 hwaddr raddr; 635 int i, ret; 636 637 /* Default return value is no match */ 638 ret = -1; 639 for (i = 0; i < env->nb_tlb; i++) { 640 tlb = &env->tlb.tlbe[i]; 641 if (ppcemb_tlb_check(env, tlb, &raddr, address, pid, 0, i) == 0) { 642 ret = i; 643 break; 644 } 645 } 646 647 return ret; 648 } 649 650 /* Helpers specific to PowerPC 40x implementations */ 651 static inline void ppc4xx_tlb_invalidate_all(CPUPPCState *env) 652 { 653 ppcemb_tlb_t *tlb; 654 int i; 655 656 for (i = 0; i < env->nb_tlb; i++) { 657 tlb = &env->tlb.tlbe[i]; 658 tlb->prot &= ~PAGE_VALID; 659 } 660 tlb_flush(env_cpu(env)); 661 } 662 663 static int mmu40x_get_physical_address(CPUPPCState *env, mmu_ctx_t *ctx, 664 target_ulong address, 665 MMUAccessType access_type) 666 { 667 ppcemb_tlb_t *tlb; 668 hwaddr raddr; 669 int i, ret, zsel, zpr, pr; 670 671 ret = -1; 672 raddr = (hwaddr)-1ULL; 673 pr = msr_pr; 674 for (i = 0; i < env->nb_tlb; i++) { 675 tlb = &env->tlb.tlbe[i]; 676 if (ppcemb_tlb_check(env, tlb, &raddr, address, 677 env->spr[SPR_40x_PID], 0, i) < 0) { 678 continue; 679 } 680 zsel = (tlb->attr >> 4) & 0xF; 681 zpr = (env->spr[SPR_40x_ZPR] >> (30 - (2 * zsel))) & 0x3; 682 LOG_SWTLB("%s: TLB %d zsel %d zpr %d ty %d attr %08x\n", 683 __func__, i, zsel, zpr, access_type, tlb->attr); 684 
/* Check execute enable bit */ 685 switch (zpr) { 686 case 0x2: 687 if (pr != 0) { 688 goto check_perms; 689 } 690 /* fall through */ 691 case 0x3: 692 /* All accesses granted */ 693 ctx->prot = PAGE_READ | PAGE_WRITE | PAGE_EXEC; 694 ret = 0; 695 break; 696 case 0x0: 697 if (pr != 0) { 698 /* Raise Zone protection fault. */ 699 env->spr[SPR_40x_ESR] = 1 << 22; 700 ctx->prot = 0; 701 ret = -2; 702 break; 703 } 704 /* fall through */ 705 case 0x1: 706 check_perms: 707 /* Check from TLB entry */ 708 ctx->prot = tlb->prot; 709 ret = check_prot(ctx->prot, access_type); 710 if (ret == -2) { 711 env->spr[SPR_40x_ESR] = 0; 712 } 713 break; 714 } 715 if (ret >= 0) { 716 ctx->raddr = raddr; 717 LOG_SWTLB("%s: access granted " TARGET_FMT_lx " => " TARGET_FMT_plx 718 " %d %d\n", __func__, address, ctx->raddr, ctx->prot, 719 ret); 720 return 0; 721 } 722 } 723 LOG_SWTLB("%s: access refused " TARGET_FMT_lx " => " TARGET_FMT_plx 724 " %d %d\n", __func__, address, raddr, ctx->prot, ret); 725 726 return ret; 727 } 728 729 void store_40x_sler(CPUPPCState *env, uint32_t val) 730 { 731 /* XXX: TO BE FIXED */ 732 if (val != 0x00000000) { 733 cpu_abort(env_cpu(env), 734 "Little-endian regions are not supported by now\n"); 735 } 736 env->spr[SPR_405_SLER] = val; 737 } 738 739 static int mmubooke_check_tlb(CPUPPCState *env, ppcemb_tlb_t *tlb, 740 hwaddr *raddr, int *prot, target_ulong address, 741 MMUAccessType access_type, int i) 742 { 743 int prot2; 744 745 if (ppcemb_tlb_check(env, tlb, raddr, address, 746 env->spr[SPR_BOOKE_PID], 747 !env->nb_pids, i) >= 0) { 748 goto found_tlb; 749 } 750 751 if (env->spr[SPR_BOOKE_PID1] && 752 ppcemb_tlb_check(env, tlb, raddr, address, 753 env->spr[SPR_BOOKE_PID1], 0, i) >= 0) { 754 goto found_tlb; 755 } 756 757 if (env->spr[SPR_BOOKE_PID2] && 758 ppcemb_tlb_check(env, tlb, raddr, address, 759 env->spr[SPR_BOOKE_PID2], 0, i) >= 0) { 760 goto found_tlb; 761 } 762 763 LOG_SWTLB("%s: TLB entry not found\n", __func__); 764 return -1; 765 766 found_tlb: 767 768 if (msr_pr != 0) { 769 prot2 = tlb->prot & 0xF; 770 } else { 771 prot2 = (tlb->prot >> 4) & 0xF; 772 } 773 774 /* Check the address space */ 775 if ((access_type == MMU_INST_FETCH ? msr_ir : msr_dr) != (tlb->attr & 1)) { 776 LOG_SWTLB("%s: AS doesn't match\n", __func__); 777 return -1; 778 } 779 780 *prot = prot2; 781 if (prot2 & prot_for_access_type(access_type)) { 782 LOG_SWTLB("%s: good TLB!\n", __func__); 783 return 0; 784 } 785 786 LOG_SWTLB("%s: no prot match: %x\n", __func__, prot2); 787 return access_type == MMU_INST_FETCH ? 
-3 : -2; 788 } 789 790 static int mmubooke_get_physical_address(CPUPPCState *env, mmu_ctx_t *ctx, 791 target_ulong address, 792 MMUAccessType access_type) 793 { 794 ppcemb_tlb_t *tlb; 795 hwaddr raddr; 796 int i, ret; 797 798 ret = -1; 799 raddr = (hwaddr)-1ULL; 800 for (i = 0; i < env->nb_tlb; i++) { 801 tlb = &env->tlb.tlbe[i]; 802 ret = mmubooke_check_tlb(env, tlb, &raddr, &ctx->prot, address, 803 access_type, i); 804 if (ret != -1) { 805 break; 806 } 807 } 808 809 if (ret >= 0) { 810 ctx->raddr = raddr; 811 LOG_SWTLB("%s: access granted " TARGET_FMT_lx " => " TARGET_FMT_plx 812 " %d %d\n", __func__, address, ctx->raddr, ctx->prot, 813 ret); 814 } else { 815 LOG_SWTLB("%s: access refused " TARGET_FMT_lx " => " TARGET_FMT_plx 816 " %d %d\n", __func__, address, raddr, ctx->prot, ret); 817 } 818 819 return ret; 820 } 821 822 static void booke206_flush_tlb(CPUPPCState *env, int flags, 823 const int check_iprot) 824 { 825 int tlb_size; 826 int i, j; 827 ppcmas_tlb_t *tlb = env->tlb.tlbm; 828 829 for (i = 0; i < BOOKE206_MAX_TLBN; i++) { 830 if (flags & (1 << i)) { 831 tlb_size = booke206_tlb_size(env, i); 832 for (j = 0; j < tlb_size; j++) { 833 if (!check_iprot || !(tlb[j].mas1 & MAS1_IPROT)) { 834 tlb[j].mas1 &= ~MAS1_VALID; 835 } 836 } 837 } 838 tlb += booke206_tlb_size(env, i); 839 } 840 841 tlb_flush(env_cpu(env)); 842 } 843 844 static hwaddr booke206_tlb_to_page_size(CPUPPCState *env, 845 ppcmas_tlb_t *tlb) 846 { 847 int tlbm_size; 848 849 tlbm_size = (tlb->mas1 & MAS1_TSIZE_MASK) >> MAS1_TSIZE_SHIFT; 850 851 return 1024ULL << tlbm_size; 852 } 853 854 /* TLB check function for MAS based SoftTLBs */ 855 static int ppcmas_tlb_check(CPUPPCState *env, ppcmas_tlb_t *tlb, 856 hwaddr *raddrp, target_ulong address, 857 uint32_t pid) 858 { 859 hwaddr mask; 860 uint32_t tlb_pid; 861 862 if (!msr_cm) { 863 /* In 32bit mode we can only address 32bit EAs */ 864 address = (uint32_t)address; 865 } 866 867 /* Check valid flag */ 868 if (!(tlb->mas1 & MAS1_VALID)) { 869 return -1; 870 } 871 872 mask = ~(booke206_tlb_to_page_size(env, tlb) - 1); 873 LOG_SWTLB("%s: TLB ADDR=0x" TARGET_FMT_lx " PID=0x%x MAS1=0x%x MAS2=0x%" 874 PRIx64 " mask=0x%" HWADDR_PRIx " MAS7_3=0x%" PRIx64 " MAS8=0x%" 875 PRIx32 "\n", __func__, address, pid, tlb->mas1, tlb->mas2, mask, 876 tlb->mas7_3, tlb->mas8); 877 878 /* Check PID */ 879 tlb_pid = (tlb->mas1 & MAS1_TID_MASK) >> MAS1_TID_SHIFT; 880 if (tlb_pid != 0 && tlb_pid != pid) { 881 return -1; 882 } 883 884 /* Check effective address */ 885 if ((address & mask) != (tlb->mas2 & MAS2_EPN_MASK)) { 886 return -1; 887 } 888 889 if (raddrp) { 890 *raddrp = (tlb->mas7_3 & mask) | (address & ~mask); 891 } 892 893 return 0; 894 } 895 896 static bool is_epid_mmu(int mmu_idx) 897 { 898 return mmu_idx == PPC_TLB_EPID_STORE || mmu_idx == PPC_TLB_EPID_LOAD; 899 } 900 901 static uint32_t mmubooke206_esr(int mmu_idx, MMUAccessType access_type) 902 { 903 uint32_t esr = 0; 904 if (access_type == MMU_DATA_STORE) { 905 esr |= ESR_ST; 906 } 907 if (is_epid_mmu(mmu_idx)) { 908 esr |= ESR_EPID; 909 } 910 return esr; 911 } 912 913 /* 914 * Get EPID register given the mmu_idx. If this is regular load, 915 * construct the EPID access bits from current processor state 916 * 917 * Get the effective AS and PR bits and the PID. The PID is returned 918 * only if EPID load is requested, otherwise the caller must detect 919 * the correct EPID. Return true if valid EPID is returned. 
920 */ 921 static bool mmubooke206_get_as(CPUPPCState *env, 922 int mmu_idx, uint32_t *epid_out, 923 bool *as_out, bool *pr_out) 924 { 925 if (is_epid_mmu(mmu_idx)) { 926 uint32_t epidr; 927 if (mmu_idx == PPC_TLB_EPID_STORE) { 928 epidr = env->spr[SPR_BOOKE_EPSC]; 929 } else { 930 epidr = env->spr[SPR_BOOKE_EPLC]; 931 } 932 *epid_out = (epidr & EPID_EPID) >> EPID_EPID_SHIFT; 933 *as_out = !!(epidr & EPID_EAS); 934 *pr_out = !!(epidr & EPID_EPR); 935 return true; 936 } else { 937 *as_out = msr_ds; 938 *pr_out = msr_pr; 939 return false; 940 } 941 } 942 943 /* Check if the tlb found by hashing really matches */ 944 static int mmubooke206_check_tlb(CPUPPCState *env, ppcmas_tlb_t *tlb, 945 hwaddr *raddr, int *prot, 946 target_ulong address, 947 MMUAccessType access_type, int mmu_idx) 948 { 949 int prot2 = 0; 950 uint32_t epid; 951 bool as, pr; 952 bool use_epid = mmubooke206_get_as(env, mmu_idx, &epid, &as, &pr); 953 954 if (!use_epid) { 955 if (ppcmas_tlb_check(env, tlb, raddr, address, 956 env->spr[SPR_BOOKE_PID]) >= 0) { 957 goto found_tlb; 958 } 959 960 if (env->spr[SPR_BOOKE_PID1] && 961 ppcmas_tlb_check(env, tlb, raddr, address, 962 env->spr[SPR_BOOKE_PID1]) >= 0) { 963 goto found_tlb; 964 } 965 966 if (env->spr[SPR_BOOKE_PID2] && 967 ppcmas_tlb_check(env, tlb, raddr, address, 968 env->spr[SPR_BOOKE_PID2]) >= 0) { 969 goto found_tlb; 970 } 971 } else { 972 if (ppcmas_tlb_check(env, tlb, raddr, address, epid) >= 0) { 973 goto found_tlb; 974 } 975 } 976 977 LOG_SWTLB("%s: TLB entry not found\n", __func__); 978 return -1; 979 980 found_tlb: 981 982 if (pr) { 983 if (tlb->mas7_3 & MAS3_UR) { 984 prot2 |= PAGE_READ; 985 } 986 if (tlb->mas7_3 & MAS3_UW) { 987 prot2 |= PAGE_WRITE; 988 } 989 if (tlb->mas7_3 & MAS3_UX) { 990 prot2 |= PAGE_EXEC; 991 } 992 } else { 993 if (tlb->mas7_3 & MAS3_SR) { 994 prot2 |= PAGE_READ; 995 } 996 if (tlb->mas7_3 & MAS3_SW) { 997 prot2 |= PAGE_WRITE; 998 } 999 if (tlb->mas7_3 & MAS3_SX) { 1000 prot2 |= PAGE_EXEC; 1001 } 1002 } 1003 1004 /* Check the address space and permissions */ 1005 if (access_type == MMU_INST_FETCH) { 1006 /* There is no way to fetch code using epid load */ 1007 assert(!use_epid); 1008 as = msr_ir; 1009 } 1010 1011 if (as != ((tlb->mas1 & MAS1_TS) >> MAS1_TS_SHIFT)) { 1012 LOG_SWTLB("%s: AS doesn't match\n", __func__); 1013 return -1; 1014 } 1015 1016 *prot = prot2; 1017 if (prot2 & prot_for_access_type(access_type)) { 1018 LOG_SWTLB("%s: good TLB!\n", __func__); 1019 return 0; 1020 } 1021 1022 LOG_SWTLB("%s: no prot match: %x\n", __func__, prot2); 1023 return access_type == MMU_INST_FETCH ? 
-3 : -2; 1024 } 1025 1026 static int mmubooke206_get_physical_address(CPUPPCState *env, mmu_ctx_t *ctx, 1027 target_ulong address, 1028 MMUAccessType access_type, 1029 int mmu_idx) 1030 { 1031 ppcmas_tlb_t *tlb; 1032 hwaddr raddr; 1033 int i, j, ret; 1034 1035 ret = -1; 1036 raddr = (hwaddr)-1ULL; 1037 1038 for (i = 0; i < BOOKE206_MAX_TLBN; i++) { 1039 int ways = booke206_tlb_ways(env, i); 1040 1041 for (j = 0; j < ways; j++) { 1042 tlb = booke206_get_tlbm(env, i, address, j); 1043 if (!tlb) { 1044 continue; 1045 } 1046 ret = mmubooke206_check_tlb(env, tlb, &raddr, &ctx->prot, address, 1047 access_type, mmu_idx); 1048 if (ret != -1) { 1049 goto found_tlb; 1050 } 1051 } 1052 } 1053 1054 found_tlb: 1055 1056 if (ret >= 0) { 1057 ctx->raddr = raddr; 1058 LOG_SWTLB("%s: access granted " TARGET_FMT_lx " => " TARGET_FMT_plx 1059 " %d %d\n", __func__, address, ctx->raddr, ctx->prot, 1060 ret); 1061 } else { 1062 LOG_SWTLB("%s: access refused " TARGET_FMT_lx " => " TARGET_FMT_plx 1063 " %d %d\n", __func__, address, raddr, ctx->prot, ret); 1064 } 1065 1066 return ret; 1067 } 1068 1069 static const char *book3e_tsize_to_str[32] = { 1070 "1K", "2K", "4K", "8K", "16K", "32K", "64K", "128K", "256K", "512K", 1071 "1M", "2M", "4M", "8M", "16M", "32M", "64M", "128M", "256M", "512M", 1072 "1G", "2G", "4G", "8G", "16G", "32G", "64G", "128G", "256G", "512G", 1073 "1T", "2T" 1074 }; 1075 1076 static void mmubooke_dump_mmu(CPUPPCState *env) 1077 { 1078 ppcemb_tlb_t *entry; 1079 int i; 1080 1081 if (kvm_enabled() && !env->kvm_sw_tlb) { 1082 qemu_printf("Cannot access KVM TLB\n"); 1083 return; 1084 } 1085 1086 qemu_printf("\nTLB:\n"); 1087 qemu_printf("Effective Physical Size PID Prot " 1088 "Attr\n"); 1089 1090 entry = &env->tlb.tlbe[0]; 1091 for (i = 0; i < env->nb_tlb; i++, entry++) { 1092 hwaddr ea, pa; 1093 target_ulong mask; 1094 uint64_t size = (uint64_t)entry->size; 1095 char size_buf[20]; 1096 1097 /* Check valid flag */ 1098 if (!(entry->prot & PAGE_VALID)) { 1099 continue; 1100 } 1101 1102 mask = ~(entry->size - 1); 1103 ea = entry->EPN & mask; 1104 pa = entry->RPN & mask; 1105 /* Extend the physical address to 36 bits */ 1106 pa |= (hwaddr)(entry->RPN & 0xF) << 32; 1107 if (size >= 1 * MiB) { 1108 snprintf(size_buf, sizeof(size_buf), "%3" PRId64 "M", size / MiB); 1109 } else { 1110 snprintf(size_buf, sizeof(size_buf), "%3" PRId64 "k", size / KiB); 1111 } 1112 qemu_printf("0x%016" PRIx64 " 0x%016" PRIx64 " %s %-5u %08x %08x\n", 1113 (uint64_t)ea, (uint64_t)pa, size_buf, (uint32_t)entry->PID, 1114 entry->prot, entry->attr); 1115 } 1116 1117 } 1118 1119 static void mmubooke206_dump_one_tlb(CPUPPCState *env, int tlbn, int offset, 1120 int tlbsize) 1121 { 1122 ppcmas_tlb_t *entry; 1123 int i; 1124 1125 qemu_printf("\nTLB%d:\n", tlbn); 1126 qemu_printf("Effective Physical Size TID TS SRWX" 1127 " URWX WIMGE U0123\n"); 1128 1129 entry = &env->tlb.tlbm[offset]; 1130 for (i = 0; i < tlbsize; i++, entry++) { 1131 hwaddr ea, pa, size; 1132 int tsize; 1133 1134 if (!(entry->mas1 & MAS1_VALID)) { 1135 continue; 1136 } 1137 1138 tsize = (entry->mas1 & MAS1_TSIZE_MASK) >> MAS1_TSIZE_SHIFT; 1139 size = 1024ULL << tsize; 1140 ea = entry->mas2 & ~(size - 1); 1141 pa = entry->mas7_3 & ~(size - 1); 1142 1143 qemu_printf("0x%016" PRIx64 " 0x%016" PRIx64 " %4s %-5u %1u S%c%c%c" 1144 "U%c%c%c %c%c%c%c%c U%c%c%c%c\n", 1145 (uint64_t)ea, (uint64_t)pa, 1146 book3e_tsize_to_str[tsize], 1147 (entry->mas1 & MAS1_TID_MASK) >> MAS1_TID_SHIFT, 1148 (entry->mas1 & MAS1_TS) >> MAS1_TS_SHIFT, 1149 entry->mas7_3 & MAS3_SR ? 
'R' : '-', 1150 entry->mas7_3 & MAS3_SW ? 'W' : '-', 1151 entry->mas7_3 & MAS3_SX ? 'X' : '-', 1152 entry->mas7_3 & MAS3_UR ? 'R' : '-', 1153 entry->mas7_3 & MAS3_UW ? 'W' : '-', 1154 entry->mas7_3 & MAS3_UX ? 'X' : '-', 1155 entry->mas2 & MAS2_W ? 'W' : '-', 1156 entry->mas2 & MAS2_I ? 'I' : '-', 1157 entry->mas2 & MAS2_M ? 'M' : '-', 1158 entry->mas2 & MAS2_G ? 'G' : '-', 1159 entry->mas2 & MAS2_E ? 'E' : '-', 1160 entry->mas7_3 & MAS3_U0 ? '0' : '-', 1161 entry->mas7_3 & MAS3_U1 ? '1' : '-', 1162 entry->mas7_3 & MAS3_U2 ? '2' : '-', 1163 entry->mas7_3 & MAS3_U3 ? '3' : '-'); 1164 } 1165 } 1166 1167 static void mmubooke206_dump_mmu(CPUPPCState *env) 1168 { 1169 int offset = 0; 1170 int i; 1171 1172 if (kvm_enabled() && !env->kvm_sw_tlb) { 1173 qemu_printf("Cannot access KVM TLB\n"); 1174 return; 1175 } 1176 1177 for (i = 0; i < BOOKE206_MAX_TLBN; i++) { 1178 int size = booke206_tlb_size(env, i); 1179 1180 if (size == 0) { 1181 continue; 1182 } 1183 1184 mmubooke206_dump_one_tlb(env, i, offset, size); 1185 offset += size; 1186 } 1187 } 1188 1189 static void mmu6xx_dump_BATs(CPUPPCState *env, int type) 1190 { 1191 target_ulong *BATlt, *BATut, *BATu, *BATl; 1192 target_ulong BEPIl, BEPIu, bl; 1193 int i; 1194 1195 switch (type) { 1196 case ACCESS_CODE: 1197 BATlt = env->IBAT[1]; 1198 BATut = env->IBAT[0]; 1199 break; 1200 default: 1201 BATlt = env->DBAT[1]; 1202 BATut = env->DBAT[0]; 1203 break; 1204 } 1205 1206 for (i = 0; i < env->nb_BATs; i++) { 1207 BATu = &BATut[i]; 1208 BATl = &BATlt[i]; 1209 BEPIu = *BATu & 0xF0000000; 1210 BEPIl = *BATu & 0x0FFE0000; 1211 bl = (*BATu & 0x00001FFC) << 15; 1212 qemu_printf("%s BAT%d BATu " TARGET_FMT_lx 1213 " BATl " TARGET_FMT_lx "\n\t" TARGET_FMT_lx " " 1214 TARGET_FMT_lx " " TARGET_FMT_lx "\n", 1215 type == ACCESS_CODE ? "code" : "data", i, 1216 *BATu, *BATl, BEPIu, BEPIl, bl); 1217 } 1218 } 1219 1220 static void mmu6xx_dump_mmu(CPUPPCState *env) 1221 { 1222 PowerPCCPU *cpu = env_archcpu(env); 1223 ppc6xx_tlb_t *tlb; 1224 target_ulong sr; 1225 int type, way, entry, i; 1226 1227 qemu_printf("HTAB base = 0x%"HWADDR_PRIx"\n", ppc_hash32_hpt_base(cpu)); 1228 qemu_printf("HTAB mask = 0x%"HWADDR_PRIx"\n", ppc_hash32_hpt_mask(cpu)); 1229 1230 qemu_printf("\nSegment registers:\n"); 1231 for (i = 0; i < 32; i++) { 1232 sr = env->sr[i]; 1233 if (sr & 0x80000000) { 1234 qemu_printf("%02d T=%d Ks=%d Kp=%d BUID=0x%03x " 1235 "CNTLR_SPEC=0x%05x\n", i, 1236 sr & 0x80000000 ? 1 : 0, sr & 0x40000000 ? 1 : 0, 1237 sr & 0x20000000 ? 1 : 0, (uint32_t)((sr >> 20) & 0x1FF), 1238 (uint32_t)(sr & 0xFFFFF)); 1239 } else { 1240 qemu_printf("%02d T=%d Ks=%d Kp=%d N=%d VSID=0x%06x\n", i, 1241 sr & 0x80000000 ? 1 : 0, sr & 0x40000000 ? 1 : 0, 1242 sr & 0x20000000 ? 1 : 0, sr & 0x10000000 ? 1 : 0, 1243 (uint32_t)(sr & 0x00FFFFFF)); 1244 } 1245 } 1246 1247 qemu_printf("\nBATs:\n"); 1248 mmu6xx_dump_BATs(env, ACCESS_INT); 1249 mmu6xx_dump_BATs(env, ACCESS_CODE); 1250 1251 if (env->id_tlbs != 1) { 1252 qemu_printf("ERROR: 6xx MMU should have separated TLB" 1253 " for code and data\n"); 1254 } 1255 1256 qemu_printf("\nTLBs [EPN EPN + SIZE]\n"); 1257 1258 for (type = 0; type < 2; type++) { 1259 for (way = 0; way < env->nb_ways; way++) { 1260 for (entry = env->nb_tlb * type + env->tlb_per_way * way; 1261 entry < (env->nb_tlb * type + env->tlb_per_way * (way + 1)); 1262 entry++) { 1263 1264 tlb = &env->tlb.tlb6[entry]; 1265 qemu_printf("%s TLB %02d/%02d way:%d %s [" 1266 TARGET_FMT_lx " " TARGET_FMT_lx "]\n", 1267 type ? 
"code" : "data", entry % env->nb_tlb, 1268 env->nb_tlb, way, 1269 pte_is_valid(tlb->pte0) ? "valid" : "inval", 1270 tlb->EPN, tlb->EPN + TARGET_PAGE_SIZE); 1271 } 1272 } 1273 } 1274 } 1275 1276 void dump_mmu(CPUPPCState *env) 1277 { 1278 switch (env->mmu_model) { 1279 case POWERPC_MMU_BOOKE: 1280 mmubooke_dump_mmu(env); 1281 break; 1282 case POWERPC_MMU_BOOKE206: 1283 mmubooke206_dump_mmu(env); 1284 break; 1285 case POWERPC_MMU_SOFT_6xx: 1286 case POWERPC_MMU_SOFT_74xx: 1287 mmu6xx_dump_mmu(env); 1288 break; 1289 #if defined(TARGET_PPC64) 1290 case POWERPC_MMU_64B: 1291 case POWERPC_MMU_2_03: 1292 case POWERPC_MMU_2_06: 1293 case POWERPC_MMU_2_07: 1294 dump_slb(env_archcpu(env)); 1295 break; 1296 case POWERPC_MMU_3_00: 1297 if (ppc64_v3_radix(env_archcpu(env))) { 1298 qemu_log_mask(LOG_UNIMP, "%s: the PPC64 MMU is unsupported\n", 1299 __func__); 1300 } else { 1301 dump_slb(env_archcpu(env)); 1302 } 1303 break; 1304 #endif 1305 default: 1306 qemu_log_mask(LOG_UNIMP, "%s: unimplemented\n", __func__); 1307 } 1308 } 1309 1310 static int check_physical(CPUPPCState *env, mmu_ctx_t *ctx, target_ulong eaddr, 1311 MMUAccessType access_type) 1312 { 1313 int in_plb, ret; 1314 1315 ctx->raddr = eaddr; 1316 ctx->prot = PAGE_READ | PAGE_EXEC; 1317 ret = 0; 1318 switch (env->mmu_model) { 1319 case POWERPC_MMU_SOFT_6xx: 1320 case POWERPC_MMU_SOFT_74xx: 1321 case POWERPC_MMU_SOFT_4xx: 1322 case POWERPC_MMU_REAL: 1323 case POWERPC_MMU_BOOKE: 1324 ctx->prot |= PAGE_WRITE; 1325 break; 1326 1327 case POWERPC_MMU_SOFT_4xx_Z: 1328 if (unlikely(msr_pe != 0)) { 1329 /* 1330 * 403 family add some particular protections, using 1331 * PBL/PBU registers for accesses with no translation. 1332 */ 1333 in_plb = 1334 /* Check PLB validity */ 1335 (env->pb[0] < env->pb[1] && 1336 /* and address in plb area */ 1337 eaddr >= env->pb[0] && eaddr < env->pb[1]) || 1338 (env->pb[2] < env->pb[3] && 1339 eaddr >= env->pb[2] && eaddr < env->pb[3]) ? 
1 : 0; 1340 if (in_plb ^ msr_px) { 1341 /* Access in protected area */ 1342 if (access_type == MMU_DATA_STORE) { 1343 /* Access is not allowed */ 1344 ret = -2; 1345 } 1346 } else { 1347 /* Read-write access is allowed */ 1348 ctx->prot |= PAGE_WRITE; 1349 } 1350 } 1351 break; 1352 1353 default: 1354 /* Caller's checks mean we should never get here for other models */ 1355 abort(); 1356 return -1; 1357 } 1358 1359 return ret; 1360 } 1361 1362 static int get_physical_address_wtlb(CPUPPCState *env, mmu_ctx_t *ctx, 1363 target_ulong eaddr, 1364 MMUAccessType access_type, int type, 1365 int mmu_idx) 1366 { 1367 int ret = -1; 1368 bool real_mode = (type == ACCESS_CODE && msr_ir == 0) 1369 || (type != ACCESS_CODE && msr_dr == 0); 1370 1371 switch (env->mmu_model) { 1372 case POWERPC_MMU_SOFT_6xx: 1373 case POWERPC_MMU_SOFT_74xx: 1374 if (real_mode) { 1375 ret = check_physical(env, ctx, eaddr, access_type); 1376 } else { 1377 /* Try to find a BAT */ 1378 if (env->nb_BATs != 0) { 1379 ret = get_bat_6xx_tlb(env, ctx, eaddr, access_type); 1380 } 1381 if (ret < 0) { 1382 /* We didn't match any BAT entry or don't have BATs */ 1383 ret = get_segment_6xx_tlb(env, ctx, eaddr, access_type, type); 1384 } 1385 } 1386 break; 1387 1388 case POWERPC_MMU_SOFT_4xx: 1389 case POWERPC_MMU_SOFT_4xx_Z: 1390 if (real_mode) { 1391 ret = check_physical(env, ctx, eaddr, access_type); 1392 } else { 1393 ret = mmu40x_get_physical_address(env, ctx, eaddr, access_type); 1394 } 1395 break; 1396 case POWERPC_MMU_BOOKE: 1397 ret = mmubooke_get_physical_address(env, ctx, eaddr, access_type); 1398 break; 1399 case POWERPC_MMU_BOOKE206: 1400 ret = mmubooke206_get_physical_address(env, ctx, eaddr, access_type, 1401 mmu_idx); 1402 break; 1403 case POWERPC_MMU_MPC8xx: 1404 /* XXX: TODO */ 1405 cpu_abort(env_cpu(env), "MPC8xx MMU model is not implemented\n"); 1406 break; 1407 case POWERPC_MMU_REAL: 1408 if (real_mode) { 1409 ret = check_physical(env, ctx, eaddr, access_type); 1410 } else { 1411 cpu_abort(env_cpu(env), 1412 "PowerPC in real mode do not do any translation\n"); 1413 } 1414 return -1; 1415 default: 1416 cpu_abort(env_cpu(env), "Unknown or invalid MMU model\n"); 1417 return -1; 1418 } 1419 1420 return ret; 1421 } 1422 1423 static int get_physical_address(CPUPPCState *env, mmu_ctx_t *ctx, 1424 target_ulong eaddr, MMUAccessType access_type, 1425 int type) 1426 { 1427 return get_physical_address_wtlb(env, ctx, eaddr, access_type, type, 0); 1428 } 1429 1430 hwaddr ppc_cpu_get_phys_page_debug(CPUState *cs, vaddr addr) 1431 { 1432 PowerPCCPU *cpu = POWERPC_CPU(cs); 1433 CPUPPCState *env = &cpu->env; 1434 mmu_ctx_t ctx; 1435 1436 switch (env->mmu_model) { 1437 #if defined(TARGET_PPC64) 1438 case POWERPC_MMU_64B: 1439 case POWERPC_MMU_2_03: 1440 case POWERPC_MMU_2_06: 1441 case POWERPC_MMU_2_07: 1442 return ppc_hash64_get_phys_page_debug(cpu, addr); 1443 case POWERPC_MMU_3_00: 1444 return ppc64_v3_get_phys_page_debug(cpu, addr); 1445 #endif 1446 1447 case POWERPC_MMU_32B: 1448 case POWERPC_MMU_601: 1449 return ppc_hash32_get_phys_page_debug(cpu, addr); 1450 1451 default: 1452 ; 1453 } 1454 1455 if (unlikely(get_physical_address(env, &ctx, addr, MMU_DATA_LOAD, 1456 ACCESS_INT) != 0)) { 1457 1458 /* 1459 * Some MMUs have separate TLBs for code and data. If we only 1460 * try an ACCESS_INT, we may not be able to read instructions 1461 * mapped by code TLBs, so we also try a ACCESS_CODE. 
1462 */ 1463 if (unlikely(get_physical_address(env, &ctx, addr, MMU_INST_FETCH, 1464 ACCESS_CODE) != 0)) { 1465 return -1; 1466 } 1467 } 1468 1469 return ctx.raddr & TARGET_PAGE_MASK; 1470 } 1471 1472 static void booke206_update_mas_tlb_miss(CPUPPCState *env, target_ulong address, 1473 MMUAccessType access_type, int mmu_idx) 1474 { 1475 uint32_t epid; 1476 bool as, pr; 1477 uint32_t missed_tid = 0; 1478 bool use_epid = mmubooke206_get_as(env, mmu_idx, &epid, &as, &pr); 1479 1480 if (access_type == MMU_INST_FETCH) { 1481 as = msr_ir; 1482 } 1483 env->spr[SPR_BOOKE_MAS0] = env->spr[SPR_BOOKE_MAS4] & MAS4_TLBSELD_MASK; 1484 env->spr[SPR_BOOKE_MAS1] = env->spr[SPR_BOOKE_MAS4] & MAS4_TSIZED_MASK; 1485 env->spr[SPR_BOOKE_MAS2] = env->spr[SPR_BOOKE_MAS4] & MAS4_WIMGED_MASK; 1486 env->spr[SPR_BOOKE_MAS3] = 0; 1487 env->spr[SPR_BOOKE_MAS6] = 0; 1488 env->spr[SPR_BOOKE_MAS7] = 0; 1489 1490 /* AS */ 1491 if (as) { 1492 env->spr[SPR_BOOKE_MAS1] |= MAS1_TS; 1493 env->spr[SPR_BOOKE_MAS6] |= MAS6_SAS; 1494 } 1495 1496 env->spr[SPR_BOOKE_MAS1] |= MAS1_VALID; 1497 env->spr[SPR_BOOKE_MAS2] |= address & MAS2_EPN_MASK; 1498 1499 if (!use_epid) { 1500 switch (env->spr[SPR_BOOKE_MAS4] & MAS4_TIDSELD_PIDZ) { 1501 case MAS4_TIDSELD_PID0: 1502 missed_tid = env->spr[SPR_BOOKE_PID]; 1503 break; 1504 case MAS4_TIDSELD_PID1: 1505 missed_tid = env->spr[SPR_BOOKE_PID1]; 1506 break; 1507 case MAS4_TIDSELD_PID2: 1508 missed_tid = env->spr[SPR_BOOKE_PID2]; 1509 break; 1510 } 1511 env->spr[SPR_BOOKE_MAS6] |= env->spr[SPR_BOOKE_PID] << 16; 1512 } else { 1513 missed_tid = epid; 1514 env->spr[SPR_BOOKE_MAS6] |= missed_tid << 16; 1515 } 1516 env->spr[SPR_BOOKE_MAS1] |= (missed_tid << MAS1_TID_SHIFT); 1517 1518 1519 /* next victim logic */ 1520 env->spr[SPR_BOOKE_MAS0] |= env->last_way << MAS0_ESEL_SHIFT; 1521 env->last_way++; 1522 env->last_way &= booke206_tlb_ways(env, 0) - 1; 1523 env->spr[SPR_BOOKE_MAS0] |= env->last_way << MAS0_NV_SHIFT; 1524 } 1525 1526 /* Perform address translation */ 1527 static int cpu_ppc_handle_mmu_fault(CPUPPCState *env, target_ulong address, 1528 MMUAccessType access_type, int mmu_idx) 1529 { 1530 CPUState *cs = env_cpu(env); 1531 PowerPCCPU *cpu = POWERPC_CPU(cs); 1532 mmu_ctx_t ctx; 1533 int type; 1534 int ret = 0; 1535 1536 if (access_type == MMU_INST_FETCH) { 1537 /* code access */ 1538 type = ACCESS_CODE; 1539 } else { 1540 /* data access */ 1541 type = env->access_type; 1542 } 1543 ret = get_physical_address_wtlb(env, &ctx, address, access_type, 1544 type, mmu_idx); 1545 if (ret == 0) { 1546 tlb_set_page(cs, address & TARGET_PAGE_MASK, 1547 ctx.raddr & TARGET_PAGE_MASK, ctx.prot, 1548 mmu_idx, TARGET_PAGE_SIZE); 1549 ret = 0; 1550 } else if (ret < 0) { 1551 LOG_MMU_STATE(cs); 1552 if (type == ACCESS_CODE) { 1553 switch (ret) { 1554 case -1: 1555 /* No matches in page tables or TLB */ 1556 switch (env->mmu_model) { 1557 case POWERPC_MMU_SOFT_6xx: 1558 cs->exception_index = POWERPC_EXCP_IFTLB; 1559 env->error_code = 1 << 18; 1560 env->spr[SPR_IMISS] = address; 1561 env->spr[SPR_ICMP] = 0x80000000 | ctx.ptem; 1562 goto tlb_miss; 1563 case POWERPC_MMU_SOFT_74xx: 1564 cs->exception_index = POWERPC_EXCP_IFTLB; 1565 goto tlb_miss_74xx; 1566 case POWERPC_MMU_SOFT_4xx: 1567 case POWERPC_MMU_SOFT_4xx_Z: 1568 cs->exception_index = POWERPC_EXCP_ITLB; 1569 env->error_code = 0; 1570 env->spr[SPR_40x_DEAR] = address; 1571 env->spr[SPR_40x_ESR] = 0x00000000; 1572 break; 1573 case POWERPC_MMU_BOOKE206: 1574 booke206_update_mas_tlb_miss(env, address, 2, mmu_idx); 1575 /* fall through */ 1576 case 
POWERPC_MMU_BOOKE: 1577 cs->exception_index = POWERPC_EXCP_ITLB; 1578 env->error_code = 0; 1579 env->spr[SPR_BOOKE_DEAR] = address; 1580 env->spr[SPR_BOOKE_ESR] = mmubooke206_esr(mmu_idx, MMU_DATA_LOAD); 1581 return -1; 1582 case POWERPC_MMU_MPC8xx: 1583 /* XXX: TODO */ 1584 cpu_abort(cs, "MPC8xx MMU model is not implemented\n"); 1585 break; 1586 case POWERPC_MMU_REAL: 1587 cpu_abort(cs, "PowerPC in real mode should never raise " 1588 "any MMU exceptions\n"); 1589 return -1; 1590 default: 1591 cpu_abort(cs, "Unknown or invalid MMU model\n"); 1592 return -1; 1593 } 1594 break; 1595 case -2: 1596 /* Access rights violation */ 1597 cs->exception_index = POWERPC_EXCP_ISI; 1598 env->error_code = 0x08000000; 1599 break; 1600 case -3: 1601 /* No execute protection violation */ 1602 if ((env->mmu_model == POWERPC_MMU_BOOKE) || 1603 (env->mmu_model == POWERPC_MMU_BOOKE206)) { 1604 env->spr[SPR_BOOKE_ESR] = 0x00000000; 1605 } 1606 cs->exception_index = POWERPC_EXCP_ISI; 1607 env->error_code = 0x10000000; 1608 break; 1609 case -4: 1610 /* Direct store exception */ 1611 /* No code fetch is allowed in direct-store areas */ 1612 cs->exception_index = POWERPC_EXCP_ISI; 1613 env->error_code = 0x10000000; 1614 break; 1615 } 1616 } else { 1617 switch (ret) { 1618 case -1: 1619 /* No matches in page tables or TLB */ 1620 switch (env->mmu_model) { 1621 case POWERPC_MMU_SOFT_6xx: 1622 if (access_type == MMU_DATA_STORE) { 1623 cs->exception_index = POWERPC_EXCP_DSTLB; 1624 env->error_code = 1 << 16; 1625 } else { 1626 cs->exception_index = POWERPC_EXCP_DLTLB; 1627 env->error_code = 0; 1628 } 1629 env->spr[SPR_DMISS] = address; 1630 env->spr[SPR_DCMP] = 0x80000000 | ctx.ptem; 1631 tlb_miss: 1632 env->error_code |= ctx.key << 19; 1633 env->spr[SPR_HASH1] = ppc_hash32_hpt_base(cpu) + 1634 get_pteg_offset32(cpu, ctx.hash[0]); 1635 env->spr[SPR_HASH2] = ppc_hash32_hpt_base(cpu) + 1636 get_pteg_offset32(cpu, ctx.hash[1]); 1637 break; 1638 case POWERPC_MMU_SOFT_74xx: 1639 if (access_type == MMU_DATA_STORE) { 1640 cs->exception_index = POWERPC_EXCP_DSTLB; 1641 } else { 1642 cs->exception_index = POWERPC_EXCP_DLTLB; 1643 } 1644 tlb_miss_74xx: 1645 /* Implement LRU algorithm */ 1646 env->error_code = ctx.key << 19; 1647 env->spr[SPR_TLBMISS] = (address & ~((target_ulong)0x3)) | 1648 ((env->last_way + 1) & (env->nb_ways - 1)); 1649 env->spr[SPR_PTEHI] = 0x80000000 | ctx.ptem; 1650 break; 1651 case POWERPC_MMU_SOFT_4xx: 1652 case POWERPC_MMU_SOFT_4xx_Z: 1653 cs->exception_index = POWERPC_EXCP_DTLB; 1654 env->error_code = 0; 1655 env->spr[SPR_40x_DEAR] = address; 1656 if (access_type == MMU_DATA_STORE) { 1657 env->spr[SPR_40x_ESR] = 0x00800000; 1658 } else { 1659 env->spr[SPR_40x_ESR] = 0x00000000; 1660 } 1661 break; 1662 case POWERPC_MMU_MPC8xx: 1663 /* XXX: TODO */ 1664 cpu_abort(cs, "MPC8xx MMU model is not implemented\n"); 1665 break; 1666 case POWERPC_MMU_BOOKE206: 1667 booke206_update_mas_tlb_miss(env, address, access_type, mmu_idx); 1668 /* fall through */ 1669 case POWERPC_MMU_BOOKE: 1670 cs->exception_index = POWERPC_EXCP_DTLB; 1671 env->error_code = 0; 1672 env->spr[SPR_BOOKE_DEAR] = address; 1673 env->spr[SPR_BOOKE_ESR] = mmubooke206_esr(mmu_idx, access_type); 1674 return -1; 1675 case POWERPC_MMU_REAL: 1676 cpu_abort(cs, "PowerPC in real mode should never raise " 1677 "any MMU exceptions\n"); 1678 return -1; 1679 default: 1680 cpu_abort(cs, "Unknown or invalid MMU model\n"); 1681 return -1; 1682 } 1683 break; 1684 case -2: 1685 /* Access rights violation */ 1686 cs->exception_index = POWERPC_EXCP_DSI; 1687 
env->error_code = 0; 1688 if (env->mmu_model == POWERPC_MMU_SOFT_4xx 1689 || env->mmu_model == POWERPC_MMU_SOFT_4xx_Z) { 1690 env->spr[SPR_40x_DEAR] = address; 1691 if (access_type == MMU_DATA_STORE) { 1692 env->spr[SPR_40x_ESR] |= 0x00800000; 1693 } 1694 } else if ((env->mmu_model == POWERPC_MMU_BOOKE) || 1695 (env->mmu_model == POWERPC_MMU_BOOKE206)) { 1696 env->spr[SPR_BOOKE_DEAR] = address; 1697 env->spr[SPR_BOOKE_ESR] = mmubooke206_esr(mmu_idx, access_type); 1698 } else { 1699 env->spr[SPR_DAR] = address; 1700 if (access_type == MMU_DATA_STORE) { 1701 env->spr[SPR_DSISR] = 0x0A000000; 1702 } else { 1703 env->spr[SPR_DSISR] = 0x08000000; 1704 } 1705 } 1706 break; 1707 case -4: 1708 /* Direct store exception */ 1709 switch (type) { 1710 case ACCESS_FLOAT: 1711 /* Floating point load/store */ 1712 cs->exception_index = POWERPC_EXCP_ALIGN; 1713 env->error_code = POWERPC_EXCP_ALIGN_FP; 1714 env->spr[SPR_DAR] = address; 1715 break; 1716 case ACCESS_RES: 1717 /* lwarx, ldarx or stwcx. */ 1718 cs->exception_index = POWERPC_EXCP_DSI; 1719 env->error_code = 0; 1720 env->spr[SPR_DAR] = address; 1721 if (access_type == MMU_DATA_STORE) { 1722 env->spr[SPR_DSISR] = 0x06000000; 1723 } else { 1724 env->spr[SPR_DSISR] = 0x04000000; 1725 } 1726 break; 1727 case ACCESS_EXT: 1728 /* eciwx or ecowx */ 1729 cs->exception_index = POWERPC_EXCP_DSI; 1730 env->error_code = 0; 1731 env->spr[SPR_DAR] = address; 1732 if (access_type == MMU_DATA_STORE) { 1733 env->spr[SPR_DSISR] = 0x06100000; 1734 } else { 1735 env->spr[SPR_DSISR] = 0x04100000; 1736 } 1737 break; 1738 default: 1739 printf("DSI: invalid exception (%d)\n", ret); 1740 cs->exception_index = POWERPC_EXCP_PROGRAM; 1741 env->error_code = 1742 POWERPC_EXCP_INVAL | POWERPC_EXCP_INVAL_INVAL; 1743 env->spr[SPR_DAR] = address; 1744 break; 1745 } 1746 break; 1747 } 1748 } 1749 ret = 1; 1750 } 1751 1752 return ret; 1753 } 1754 1755 /*****************************************************************************/ 1756 /* BATs management */ 1757 #if !defined(FLUSH_ALL_TLBS) 1758 static inline void do_invalidate_BAT(CPUPPCState *env, target_ulong BATu, 1759 target_ulong mask) 1760 { 1761 CPUState *cs = env_cpu(env); 1762 target_ulong base, end, page; 1763 1764 base = BATu & ~0x0001FFFF; 1765 end = base + mask + 0x00020000; 1766 if (((end - base) >> TARGET_PAGE_BITS) > 1024) { 1767 /* Flushing 1024 4K pages is slower than a complete flush */ 1768 LOG_BATS("Flush all BATs\n"); 1769 tlb_flush(cs); 1770 LOG_BATS("Flush done\n"); 1771 return; 1772 } 1773 LOG_BATS("Flush BAT from " TARGET_FMT_lx " to " TARGET_FMT_lx " (" 1774 TARGET_FMT_lx ")\n", base, end, mask); 1775 for (page = base; page != end; page += TARGET_PAGE_SIZE) { 1776 tlb_flush_page(cs, page); 1777 } 1778 LOG_BATS("Flush done\n"); 1779 } 1780 #endif 1781 1782 static inline void dump_store_bat(CPUPPCState *env, char ID, int ul, int nr, 1783 target_ulong value) 1784 { 1785 LOG_BATS("Set %cBAT%d%c to " TARGET_FMT_lx " (" TARGET_FMT_lx ")\n", ID, 1786 nr, ul == 0 ? 
'u' : 'l', value, env->nip); 1787 } 1788 1789 void helper_store_ibatu(CPUPPCState *env, uint32_t nr, target_ulong value) 1790 { 1791 target_ulong mask; 1792 #if defined(FLUSH_ALL_TLBS) 1793 PowerPCCPU *cpu = env_archcpu(env); 1794 #endif 1795 1796 dump_store_bat(env, 'I', 0, nr, value); 1797 if (env->IBAT[0][nr] != value) { 1798 mask = (value << 15) & 0x0FFE0000UL; 1799 #if !defined(FLUSH_ALL_TLBS) 1800 do_invalidate_BAT(env, env->IBAT[0][nr], mask); 1801 #endif 1802 /* 1803 * When storing valid upper BAT, mask BEPI and BRPN and 1804 * invalidate all TLBs covered by this BAT 1805 */ 1806 mask = (value << 15) & 0x0FFE0000UL; 1807 env->IBAT[0][nr] = (value & 0x00001FFFUL) | 1808 (value & ~0x0001FFFFUL & ~mask); 1809 env->IBAT[1][nr] = (env->IBAT[1][nr] & 0x0000007B) | 1810 (env->IBAT[1][nr] & ~0x0001FFFF & ~mask); 1811 #if !defined(FLUSH_ALL_TLBS) 1812 do_invalidate_BAT(env, env->IBAT[0][nr], mask); 1813 #else 1814 tlb_flush(env_cpu(env)); 1815 #endif 1816 } 1817 } 1818 1819 void helper_store_ibatl(CPUPPCState *env, uint32_t nr, target_ulong value) 1820 { 1821 dump_store_bat(env, 'I', 1, nr, value); 1822 env->IBAT[1][nr] = value; 1823 } 1824 1825 void helper_store_dbatu(CPUPPCState *env, uint32_t nr, target_ulong value) 1826 { 1827 target_ulong mask; 1828 #if defined(FLUSH_ALL_TLBS) 1829 PowerPCCPU *cpu = env_archcpu(env); 1830 #endif 1831 1832 dump_store_bat(env, 'D', 0, nr, value); 1833 if (env->DBAT[0][nr] != value) { 1834 /* 1835 * When storing valid upper BAT, mask BEPI and BRPN and 1836 * invalidate all TLBs covered by this BAT 1837 */ 1838 mask = (value << 15) & 0x0FFE0000UL; 1839 #if !defined(FLUSH_ALL_TLBS) 1840 do_invalidate_BAT(env, env->DBAT[0][nr], mask); 1841 #endif 1842 mask = (value << 15) & 0x0FFE0000UL; 1843 env->DBAT[0][nr] = (value & 0x00001FFFUL) | 1844 (value & ~0x0001FFFFUL & ~mask); 1845 env->DBAT[1][nr] = (env->DBAT[1][nr] & 0x0000007B) | 1846 (env->DBAT[1][nr] & ~0x0001FFFF & ~mask); 1847 #if !defined(FLUSH_ALL_TLBS) 1848 do_invalidate_BAT(env, env->DBAT[0][nr], mask); 1849 #else 1850 tlb_flush(env_cpu(env)); 1851 #endif 1852 } 1853 } 1854 1855 void helper_store_dbatl(CPUPPCState *env, uint32_t nr, target_ulong value) 1856 { 1857 dump_store_bat(env, 'D', 1, nr, value); 1858 env->DBAT[1][nr] = value; 1859 } 1860 1861 void helper_store_601_batu(CPUPPCState *env, uint32_t nr, target_ulong value) 1862 { 1863 target_ulong mask; 1864 #if defined(FLUSH_ALL_TLBS) 1865 PowerPCCPU *cpu = env_archcpu(env); 1866 int do_inval; 1867 #endif 1868 1869 dump_store_bat(env, 'I', 0, nr, value); 1870 if (env->IBAT[0][nr] != value) { 1871 #if defined(FLUSH_ALL_TLBS) 1872 do_inval = 0; 1873 #endif 1874 mask = (env->IBAT[1][nr] << 17) & 0x0FFE0000UL; 1875 if (env->IBAT[1][nr] & 0x40) { 1876 /* Invalidate BAT only if it is valid */ 1877 #if !defined(FLUSH_ALL_TLBS) 1878 do_invalidate_BAT(env, env->IBAT[0][nr], mask); 1879 #else 1880 do_inval = 1; 1881 #endif 1882 } 1883 /* 1884 * When storing valid upper BAT, mask BEPI and BRPN and 1885 * invalidate all TLBs covered by this BAT 1886 */ 1887 env->IBAT[0][nr] = (value & 0x00001FFFUL) | 1888 (value & ~0x0001FFFFUL & ~mask); 1889 env->DBAT[0][nr] = env->IBAT[0][nr]; 1890 if (env->IBAT[1][nr] & 0x40) { 1891 #if !defined(FLUSH_ALL_TLBS) 1892 do_invalidate_BAT(env, env->IBAT[0][nr], mask); 1893 #else 1894 do_inval = 1; 1895 #endif 1896 } 1897 #if defined(FLUSH_ALL_TLBS) 1898 if (do_inval) { 1899 tlb_flush(env_cpu(env)); 1900 } 1901 #endif 1902 } 1903 } 1904 1905 void helper_store_601_batl(CPUPPCState *env, uint32_t nr, target_ulong value) 1906 { 
1907 #if !defined(FLUSH_ALL_TLBS) 1908 target_ulong mask; 1909 #else 1910 PowerPCCPU *cpu = env_archcpu(env); 1911 int do_inval; 1912 #endif 1913 1914 dump_store_bat(env, 'I', 1, nr, value); 1915 if (env->IBAT[1][nr] != value) { 1916 #if defined(FLUSH_ALL_TLBS) 1917 do_inval = 0; 1918 #endif 1919 if (env->IBAT[1][nr] & 0x40) { 1920 #if !defined(FLUSH_ALL_TLBS) 1921 mask = (env->IBAT[1][nr] << 17) & 0x0FFE0000UL; 1922 do_invalidate_BAT(env, env->IBAT[0][nr], mask); 1923 #else 1924 do_inval = 1; 1925 #endif 1926 } 1927 if (value & 0x40) { 1928 #if !defined(FLUSH_ALL_TLBS) 1929 mask = (value << 17) & 0x0FFE0000UL; 1930 do_invalidate_BAT(env, env->IBAT[0][nr], mask); 1931 #else 1932 do_inval = 1; 1933 #endif 1934 } 1935 env->IBAT[1][nr] = value; 1936 env->DBAT[1][nr] = value; 1937 #if defined(FLUSH_ALL_TLBS) 1938 if (do_inval) { 1939 tlb_flush(env_cpu(env)); 1940 } 1941 #endif 1942 } 1943 } 1944 1945 /*****************************************************************************/ 1946 /* TLB management */ 1947 void ppc_tlb_invalidate_all(CPUPPCState *env) 1948 { 1949 #if defined(TARGET_PPC64) 1950 if (mmu_is_64bit(env->mmu_model)) { 1951 env->tlb_need_flush = 0; 1952 tlb_flush(env_cpu(env)); 1953 } else 1954 #endif /* defined(TARGET_PPC64) */ 1955 switch (env->mmu_model) { 1956 case POWERPC_MMU_SOFT_6xx: 1957 case POWERPC_MMU_SOFT_74xx: 1958 ppc6xx_tlb_invalidate_all(env); 1959 break; 1960 case POWERPC_MMU_SOFT_4xx: 1961 case POWERPC_MMU_SOFT_4xx_Z: 1962 ppc4xx_tlb_invalidate_all(env); 1963 break; 1964 case POWERPC_MMU_REAL: 1965 cpu_abort(env_cpu(env), "No TLB for PowerPC 4xx in real mode\n"); 1966 break; 1967 case POWERPC_MMU_MPC8xx: 1968 /* XXX: TODO */ 1969 cpu_abort(env_cpu(env), "MPC8xx MMU model is not implemented\n"); 1970 break; 1971 case POWERPC_MMU_BOOKE: 1972 tlb_flush(env_cpu(env)); 1973 break; 1974 case POWERPC_MMU_BOOKE206: 1975 booke206_flush_tlb(env, -1, 0); 1976 break; 1977 case POWERPC_MMU_32B: 1978 case POWERPC_MMU_601: 1979 env->tlb_need_flush = 0; 1980 tlb_flush(env_cpu(env)); 1981 break; 1982 default: 1983 /* XXX: TODO */ 1984 cpu_abort(env_cpu(env), "Unknown MMU model %x\n", env->mmu_model); 1985 break; 1986 } 1987 } 1988 1989 void ppc_tlb_invalidate_one(CPUPPCState *env, target_ulong addr) 1990 { 1991 #if !defined(FLUSH_ALL_TLBS) 1992 addr &= TARGET_PAGE_MASK; 1993 #if defined(TARGET_PPC64) 1994 if (mmu_is_64bit(env->mmu_model)) { 1995 /* tlbie invalidate TLBs for all segments */ 1996 /* 1997 * XXX: given the fact that there are too many segments to invalidate, 1998 * and we still don't have a tlb_flush_mask(env, n, mask) in QEMU, 1999 * we just invalidate all TLBs 2000 */ 2001 env->tlb_need_flush |= TLB_NEED_LOCAL_FLUSH; 2002 } else 2003 #endif /* defined(TARGET_PPC64) */ 2004 switch (env->mmu_model) { 2005 case POWERPC_MMU_SOFT_6xx: 2006 case POWERPC_MMU_SOFT_74xx: 2007 ppc6xx_tlb_invalidate_virt(env, addr, 0); 2008 if (env->id_tlbs == 1) { 2009 ppc6xx_tlb_invalidate_virt(env, addr, 1); 2010 } 2011 break; 2012 case POWERPC_MMU_32B: 2013 case POWERPC_MMU_601: 2014 /* 2015 * Actual CPUs invalidate entire congruence classes based on 2016 * the geometry of their TLBs and some OSes take that into 2017 * account, we just mark the TLB to be flushed later (context 2018 * synchronizing event or sync instruction on 32-bit). 
         */
        env->tlb_need_flush |= TLB_NEED_LOCAL_FLUSH;
        break;
    default:
        /* Should never reach here with other MMU models */
        assert(0);
    }
#else
    ppc_tlb_invalidate_all(env);
#endif
}

/*****************************************************************************/
/* Special registers manipulation */
#if defined(TARGET_PPC64)
void ppc_store_ptcr(CPUPPCState *env, target_ulong value)
{
    PowerPCCPU *cpu = env_archcpu(env);
    target_ulong ptcr_mask = PTCR_PATB | PTCR_PATS;
    target_ulong patbsize = value & PTCR_PATS;

    qemu_log_mask(CPU_LOG_MMU, "%s: " TARGET_FMT_lx "\n", __func__, value);

    assert(!cpu->vhyp);
    assert(env->mmu_model & POWERPC_MMU_3_00);

    if (value & ~ptcr_mask) {
        error_report("Invalid bits 0x"TARGET_FMT_lx" set in PTCR",
                     value & ~ptcr_mask);
        value &= ptcr_mask;
    }

    if (patbsize > 24) {
        error_report("Invalid Partition Table size 0x" TARGET_FMT_lx
                     " stored in PTCR", patbsize);
        return;
    }

    env->spr[SPR_PTCR] = value;
}

#endif /* defined(TARGET_PPC64) */

/* Segment registers load and store */
target_ulong helper_load_sr(CPUPPCState *env, target_ulong sr_num)
{
#if defined(TARGET_PPC64)
    if (mmu_is_64bit(env->mmu_model)) {
        /* XXX */
        return 0;
    }
#endif
    return env->sr[sr_num];
}

void helper_store_sr(CPUPPCState *env, target_ulong srnum, target_ulong value)
{
    qemu_log_mask(CPU_LOG_MMU,
                  "%s: reg=%d " TARGET_FMT_lx " " TARGET_FMT_lx "\n", __func__,
                  (int)srnum, value, env->sr[srnum]);
#if defined(TARGET_PPC64)
    if (mmu_is_64bit(env->mmu_model)) {
        /* On 64-bit MMUs, emulate the SR store with an equivalent SLB entry */
        PowerPCCPU *cpu = env_archcpu(env);
        uint64_t esid, vsid;

        /* ESID = srnum */
        esid = ((uint64_t)(srnum & 0xf) << 28) | SLB_ESID_V;

        /* VSID is taken from the SR value */
        vsid = (value & 0xfffffff) << 12;
        /* as are the protection flags */
        vsid |= ((value >> 27) & 0xf) << 8;

        ppc_store_slb(cpu, srnum, esid, vsid);
    } else
#endif
    if (env->sr[srnum] != value) {
        env->sr[srnum] = value;
        /*
         * Invalidating 256MB of virtual memory in 4kB pages takes much
         * longer than just flushing the whole TLB.
2100 */ 2101 #if !defined(FLUSH_ALL_TLBS) && 0 2102 { 2103 target_ulong page, end; 2104 /* Invalidate 256 MB of virtual memory */ 2105 page = (16 << 20) * srnum; 2106 end = page + (16 << 20); 2107 for (; page != end; page += TARGET_PAGE_SIZE) { 2108 tlb_flush_page(env_cpu(env), page); 2109 } 2110 } 2111 #else 2112 env->tlb_need_flush |= TLB_NEED_LOCAL_FLUSH; 2113 #endif 2114 } 2115 } 2116 2117 /* TLB management */ 2118 void helper_tlbia(CPUPPCState *env) 2119 { 2120 ppc_tlb_invalidate_all(env); 2121 } 2122 2123 void helper_tlbie(CPUPPCState *env, target_ulong addr) 2124 { 2125 ppc_tlb_invalidate_one(env, addr); 2126 } 2127 2128 void helper_tlbiva(CPUPPCState *env, target_ulong addr) 2129 { 2130 /* tlbiva instruction only exists on BookE */ 2131 assert(env->mmu_model == POWERPC_MMU_BOOKE); 2132 /* XXX: TODO */ 2133 cpu_abort(env_cpu(env), "BookE MMU model is not implemented\n"); 2134 } 2135 2136 /* Software driven TLBs management */ 2137 /* PowerPC 602/603 software TLB load instructions helpers */ 2138 static void do_6xx_tlb(CPUPPCState *env, target_ulong new_EPN, int is_code) 2139 { 2140 target_ulong RPN, CMP, EPN; 2141 int way; 2142 2143 RPN = env->spr[SPR_RPA]; 2144 if (is_code) { 2145 CMP = env->spr[SPR_ICMP]; 2146 EPN = env->spr[SPR_IMISS]; 2147 } else { 2148 CMP = env->spr[SPR_DCMP]; 2149 EPN = env->spr[SPR_DMISS]; 2150 } 2151 way = (env->spr[SPR_SRR1] >> 17) & 1; 2152 (void)EPN; /* avoid a compiler warning */ 2153 LOG_SWTLB("%s: EPN " TARGET_FMT_lx " " TARGET_FMT_lx " PTE0 " TARGET_FMT_lx 2154 " PTE1 " TARGET_FMT_lx " way %d\n", __func__, new_EPN, EPN, CMP, 2155 RPN, way); 2156 /* Store this TLB */ 2157 ppc6xx_tlb_store(env, (uint32_t)(new_EPN & TARGET_PAGE_MASK), 2158 way, is_code, CMP, RPN); 2159 } 2160 2161 void helper_6xx_tlbd(CPUPPCState *env, target_ulong EPN) 2162 { 2163 do_6xx_tlb(env, EPN, 0); 2164 } 2165 2166 void helper_6xx_tlbi(CPUPPCState *env, target_ulong EPN) 2167 { 2168 do_6xx_tlb(env, EPN, 1); 2169 } 2170 2171 /* PowerPC 74xx software TLB load instructions helpers */ 2172 static void do_74xx_tlb(CPUPPCState *env, target_ulong new_EPN, int is_code) 2173 { 2174 target_ulong RPN, CMP, EPN; 2175 int way; 2176 2177 RPN = env->spr[SPR_PTELO]; 2178 CMP = env->spr[SPR_PTEHI]; 2179 EPN = env->spr[SPR_TLBMISS] & ~0x3; 2180 way = env->spr[SPR_TLBMISS] & 0x3; 2181 (void)EPN; /* avoid a compiler warning */ 2182 LOG_SWTLB("%s: EPN " TARGET_FMT_lx " " TARGET_FMT_lx " PTE0 " TARGET_FMT_lx 2183 " PTE1 " TARGET_FMT_lx " way %d\n", __func__, new_EPN, EPN, CMP, 2184 RPN, way); 2185 /* Store this TLB */ 2186 ppc6xx_tlb_store(env, (uint32_t)(new_EPN & TARGET_PAGE_MASK), 2187 way, is_code, CMP, RPN); 2188 } 2189 2190 void helper_74xx_tlbd(CPUPPCState *env, target_ulong EPN) 2191 { 2192 do_74xx_tlb(env, EPN, 0); 2193 } 2194 2195 void helper_74xx_tlbi(CPUPPCState *env, target_ulong EPN) 2196 { 2197 do_74xx_tlb(env, EPN, 1); 2198 } 2199 2200 /*****************************************************************************/ 2201 /* PowerPC 601 specific instructions (POWER bridge) */ 2202 2203 target_ulong helper_rac(CPUPPCState *env, target_ulong addr) 2204 { 2205 mmu_ctx_t ctx; 2206 int nb_BATs; 2207 target_ulong ret = 0; 2208 2209 /* 2210 * We don't have to generate many instances of this instruction, 2211 * as rac is supervisor only. 
2212 * 2213 * XXX: FIX THIS: Pretend we have no BAT 2214 */ 2215 nb_BATs = env->nb_BATs; 2216 env->nb_BATs = 0; 2217 if (get_physical_address(env, &ctx, addr, 0, ACCESS_INT) == 0) { 2218 ret = ctx.raddr; 2219 } 2220 env->nb_BATs = nb_BATs; 2221 return ret; 2222 } 2223 2224 static inline target_ulong booke_tlb_to_page_size(int size) 2225 { 2226 return 1024 << (2 * size); 2227 } 2228 2229 static inline int booke_page_size_to_tlb(target_ulong page_size) 2230 { 2231 int size; 2232 2233 switch (page_size) { 2234 case 0x00000400UL: 2235 size = 0x0; 2236 break; 2237 case 0x00001000UL: 2238 size = 0x1; 2239 break; 2240 case 0x00004000UL: 2241 size = 0x2; 2242 break; 2243 case 0x00010000UL: 2244 size = 0x3; 2245 break; 2246 case 0x00040000UL: 2247 size = 0x4; 2248 break; 2249 case 0x00100000UL: 2250 size = 0x5; 2251 break; 2252 case 0x00400000UL: 2253 size = 0x6; 2254 break; 2255 case 0x01000000UL: 2256 size = 0x7; 2257 break; 2258 case 0x04000000UL: 2259 size = 0x8; 2260 break; 2261 case 0x10000000UL: 2262 size = 0x9; 2263 break; 2264 case 0x40000000UL: 2265 size = 0xA; 2266 break; 2267 #if defined(TARGET_PPC64) 2268 case 0x000100000000ULL: 2269 size = 0xB; 2270 break; 2271 case 0x000400000000ULL: 2272 size = 0xC; 2273 break; 2274 case 0x001000000000ULL: 2275 size = 0xD; 2276 break; 2277 case 0x004000000000ULL: 2278 size = 0xE; 2279 break; 2280 case 0x010000000000ULL: 2281 size = 0xF; 2282 break; 2283 #endif 2284 default: 2285 size = -1; 2286 break; 2287 } 2288 2289 return size; 2290 } 2291 2292 /* Helpers for 4xx TLB management */ 2293 #define PPC4XX_TLB_ENTRY_MASK 0x0000003f /* Mask for 64 TLB entries */ 2294 2295 #define PPC4XX_TLBHI_V 0x00000040 2296 #define PPC4XX_TLBHI_E 0x00000020 2297 #define PPC4XX_TLBHI_SIZE_MIN 0 2298 #define PPC4XX_TLBHI_SIZE_MAX 7 2299 #define PPC4XX_TLBHI_SIZE_DEFAULT 1 2300 #define PPC4XX_TLBHI_SIZE_SHIFT 7 2301 #define PPC4XX_TLBHI_SIZE_MASK 0x00000007 2302 2303 #define PPC4XX_TLBLO_EX 0x00000200 2304 #define PPC4XX_TLBLO_WR 0x00000100 2305 #define PPC4XX_TLBLO_ATTR_MASK 0x000000FF 2306 #define PPC4XX_TLBLO_RPN_MASK 0xFFFFFC00 2307 2308 target_ulong helper_4xx_tlbre_hi(CPUPPCState *env, target_ulong entry) 2309 { 2310 ppcemb_tlb_t *tlb; 2311 target_ulong ret; 2312 int size; 2313 2314 entry &= PPC4XX_TLB_ENTRY_MASK; 2315 tlb = &env->tlb.tlbe[entry]; 2316 ret = tlb->EPN; 2317 if (tlb->prot & PAGE_VALID) { 2318 ret |= PPC4XX_TLBHI_V; 2319 } 2320 size = booke_page_size_to_tlb(tlb->size); 2321 if (size < PPC4XX_TLBHI_SIZE_MIN || size > PPC4XX_TLBHI_SIZE_MAX) { 2322 size = PPC4XX_TLBHI_SIZE_DEFAULT; 2323 } 2324 ret |= size << PPC4XX_TLBHI_SIZE_SHIFT; 2325 env->spr[SPR_40x_PID] = tlb->PID; 2326 return ret; 2327 } 2328 2329 target_ulong helper_4xx_tlbre_lo(CPUPPCState *env, target_ulong entry) 2330 { 2331 ppcemb_tlb_t *tlb; 2332 target_ulong ret; 2333 2334 entry &= PPC4XX_TLB_ENTRY_MASK; 2335 tlb = &env->tlb.tlbe[entry]; 2336 ret = tlb->RPN; 2337 if (tlb->prot & PAGE_EXEC) { 2338 ret |= PPC4XX_TLBLO_EX; 2339 } 2340 if (tlb->prot & PAGE_WRITE) { 2341 ret |= PPC4XX_TLBLO_WR; 2342 } 2343 return ret; 2344 } 2345 2346 void helper_4xx_tlbwe_hi(CPUPPCState *env, target_ulong entry, 2347 target_ulong val) 2348 { 2349 CPUState *cs = env_cpu(env); 2350 ppcemb_tlb_t *tlb; 2351 target_ulong page, end; 2352 2353 LOG_SWTLB("%s entry %d val " TARGET_FMT_lx "\n", __func__, (int)entry, 2354 val); 2355 entry &= PPC4XX_TLB_ENTRY_MASK; 2356 tlb = &env->tlb.tlbe[entry]; 2357 /* Invalidate previous TLB (if it's valid) */ 2358 if (tlb->prot & PAGE_VALID) { 2359 end = tlb->EPN + tlb->size; 2360 
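        /* the old mapping may span several target pages; flush each of them */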
LOG_SWTLB("%s: invalidate old TLB %d start " TARGET_FMT_lx " end " 2361 TARGET_FMT_lx "\n", __func__, (int)entry, tlb->EPN, end); 2362 for (page = tlb->EPN; page < end; page += TARGET_PAGE_SIZE) { 2363 tlb_flush_page(cs, page); 2364 } 2365 } 2366 tlb->size = booke_tlb_to_page_size((val >> PPC4XX_TLBHI_SIZE_SHIFT) 2367 & PPC4XX_TLBHI_SIZE_MASK); 2368 /* 2369 * We cannot handle TLB size < TARGET_PAGE_SIZE. 2370 * If this ever occurs, we should implement TARGET_PAGE_BITS_VARY 2371 */ 2372 if ((val & PPC4XX_TLBHI_V) && tlb->size < TARGET_PAGE_SIZE) { 2373 cpu_abort(cs, "TLB size " TARGET_FMT_lu " < %u " 2374 "are not supported (%d)\n" 2375 "Please implement TARGET_PAGE_BITS_VARY\n", 2376 tlb->size, TARGET_PAGE_SIZE, (int)((val >> 7) & 0x7)); 2377 } 2378 tlb->EPN = val & ~(tlb->size - 1); 2379 if (val & PPC4XX_TLBHI_V) { 2380 tlb->prot |= PAGE_VALID; 2381 if (val & PPC4XX_TLBHI_E) { 2382 /* XXX: TO BE FIXED */ 2383 cpu_abort(cs, 2384 "Little-endian TLB entries are not supported by now\n"); 2385 } 2386 } else { 2387 tlb->prot &= ~PAGE_VALID; 2388 } 2389 tlb->PID = env->spr[SPR_40x_PID]; /* PID */ 2390 LOG_SWTLB("%s: set up TLB %d RPN " TARGET_FMT_plx " EPN " TARGET_FMT_lx 2391 " size " TARGET_FMT_lx " prot %c%c%c%c PID %d\n", __func__, 2392 (int)entry, tlb->RPN, tlb->EPN, tlb->size, 2393 tlb->prot & PAGE_READ ? 'r' : '-', 2394 tlb->prot & PAGE_WRITE ? 'w' : '-', 2395 tlb->prot & PAGE_EXEC ? 'x' : '-', 2396 tlb->prot & PAGE_VALID ? 'v' : '-', (int)tlb->PID); 2397 /* Invalidate new TLB (if valid) */ 2398 if (tlb->prot & PAGE_VALID) { 2399 end = tlb->EPN + tlb->size; 2400 LOG_SWTLB("%s: invalidate TLB %d start " TARGET_FMT_lx " end " 2401 TARGET_FMT_lx "\n", __func__, (int)entry, tlb->EPN, end); 2402 for (page = tlb->EPN; page < end; page += TARGET_PAGE_SIZE) { 2403 tlb_flush_page(cs, page); 2404 } 2405 } 2406 } 2407 2408 void helper_4xx_tlbwe_lo(CPUPPCState *env, target_ulong entry, 2409 target_ulong val) 2410 { 2411 ppcemb_tlb_t *tlb; 2412 2413 LOG_SWTLB("%s entry %i val " TARGET_FMT_lx "\n", __func__, (int)entry, 2414 val); 2415 entry &= PPC4XX_TLB_ENTRY_MASK; 2416 tlb = &env->tlb.tlbe[entry]; 2417 tlb->attr = val & PPC4XX_TLBLO_ATTR_MASK; 2418 tlb->RPN = val & PPC4XX_TLBLO_RPN_MASK; 2419 tlb->prot = PAGE_READ; 2420 if (val & PPC4XX_TLBLO_EX) { 2421 tlb->prot |= PAGE_EXEC; 2422 } 2423 if (val & PPC4XX_TLBLO_WR) { 2424 tlb->prot |= PAGE_WRITE; 2425 } 2426 LOG_SWTLB("%s: set up TLB %d RPN " TARGET_FMT_plx " EPN " TARGET_FMT_lx 2427 " size " TARGET_FMT_lx " prot %c%c%c%c PID %d\n", __func__, 2428 (int)entry, tlb->RPN, tlb->EPN, tlb->size, 2429 tlb->prot & PAGE_READ ? 'r' : '-', 2430 tlb->prot & PAGE_WRITE ? 'w' : '-', 2431 tlb->prot & PAGE_EXEC ? 'x' : '-', 2432 tlb->prot & PAGE_VALID ? 
'v' : '-', (int)tlb->PID); 2433 } 2434 2435 target_ulong helper_4xx_tlbsx(CPUPPCState *env, target_ulong address) 2436 { 2437 return ppcemb_tlb_search(env, address, env->spr[SPR_40x_PID]); 2438 } 2439 2440 /* PowerPC 440 TLB management */ 2441 void helper_440_tlbwe(CPUPPCState *env, uint32_t word, target_ulong entry, 2442 target_ulong value) 2443 { 2444 ppcemb_tlb_t *tlb; 2445 target_ulong EPN, RPN, size; 2446 int do_flush_tlbs; 2447 2448 LOG_SWTLB("%s word %d entry %d value " TARGET_FMT_lx "\n", 2449 __func__, word, (int)entry, value); 2450 do_flush_tlbs = 0; 2451 entry &= 0x3F; 2452 tlb = &env->tlb.tlbe[entry]; 2453 switch (word) { 2454 default: 2455 /* Just here to please gcc */ 2456 case 0: 2457 EPN = value & 0xFFFFFC00; 2458 if ((tlb->prot & PAGE_VALID) && EPN != tlb->EPN) { 2459 do_flush_tlbs = 1; 2460 } 2461 tlb->EPN = EPN; 2462 size = booke_tlb_to_page_size((value >> 4) & 0xF); 2463 if ((tlb->prot & PAGE_VALID) && tlb->size < size) { 2464 do_flush_tlbs = 1; 2465 } 2466 tlb->size = size; 2467 tlb->attr &= ~0x1; 2468 tlb->attr |= (value >> 8) & 1; 2469 if (value & 0x200) { 2470 tlb->prot |= PAGE_VALID; 2471 } else { 2472 if (tlb->prot & PAGE_VALID) { 2473 tlb->prot &= ~PAGE_VALID; 2474 do_flush_tlbs = 1; 2475 } 2476 } 2477 tlb->PID = env->spr[SPR_440_MMUCR] & 0x000000FF; 2478 if (do_flush_tlbs) { 2479 tlb_flush(env_cpu(env)); 2480 } 2481 break; 2482 case 1: 2483 RPN = value & 0xFFFFFC0F; 2484 if ((tlb->prot & PAGE_VALID) && tlb->RPN != RPN) { 2485 tlb_flush(env_cpu(env)); 2486 } 2487 tlb->RPN = RPN; 2488 break; 2489 case 2: 2490 tlb->attr = (tlb->attr & 0x1) | (value & 0x0000FF00); 2491 tlb->prot = tlb->prot & PAGE_VALID; 2492 if (value & 0x1) { 2493 tlb->prot |= PAGE_READ << 4; 2494 } 2495 if (value & 0x2) { 2496 tlb->prot |= PAGE_WRITE << 4; 2497 } 2498 if (value & 0x4) { 2499 tlb->prot |= PAGE_EXEC << 4; 2500 } 2501 if (value & 0x8) { 2502 tlb->prot |= PAGE_READ; 2503 } 2504 if (value & 0x10) { 2505 tlb->prot |= PAGE_WRITE; 2506 } 2507 if (value & 0x20) { 2508 tlb->prot |= PAGE_EXEC; 2509 } 2510 break; 2511 } 2512 } 2513 2514 target_ulong helper_440_tlbre(CPUPPCState *env, uint32_t word, 2515 target_ulong entry) 2516 { 2517 ppcemb_tlb_t *tlb; 2518 target_ulong ret; 2519 int size; 2520 2521 entry &= 0x3F; 2522 tlb = &env->tlb.tlbe[entry]; 2523 switch (word) { 2524 default: 2525 /* Just here to please gcc */ 2526 case 0: 2527 ret = tlb->EPN; 2528 size = booke_page_size_to_tlb(tlb->size); 2529 if (size < 0 || size > 0xF) { 2530 size = 1; 2531 } 2532 ret |= size << 4; 2533 if (tlb->attr & 0x1) { 2534 ret |= 0x100; 2535 } 2536 if (tlb->prot & PAGE_VALID) { 2537 ret |= 0x200; 2538 } 2539 env->spr[SPR_440_MMUCR] &= ~0x000000FF; 2540 env->spr[SPR_440_MMUCR] |= tlb->PID; 2541 break; 2542 case 1: 2543 ret = tlb->RPN; 2544 break; 2545 case 2: 2546 ret = tlb->attr & ~0x1; 2547 if (tlb->prot & (PAGE_READ << 4)) { 2548 ret |= 0x1; 2549 } 2550 if (tlb->prot & (PAGE_WRITE << 4)) { 2551 ret |= 0x2; 2552 } 2553 if (tlb->prot & (PAGE_EXEC << 4)) { 2554 ret |= 0x4; 2555 } 2556 if (tlb->prot & PAGE_READ) { 2557 ret |= 0x8; 2558 } 2559 if (tlb->prot & PAGE_WRITE) { 2560 ret |= 0x10; 2561 } 2562 if (tlb->prot & PAGE_EXEC) { 2563 ret |= 0x20; 2564 } 2565 break; 2566 } 2567 return ret; 2568 } 2569 2570 target_ulong helper_440_tlbsx(CPUPPCState *env, target_ulong address) 2571 { 2572 return ppcemb_tlb_search(env, address, env->spr[SPR_440_MMUCR] & 0xFF); 2573 } 2574 2575 /* PowerPC BookE 2.06 TLB management */ 2576 2577 static ppcmas_tlb_t *booke206_cur_tlb(CPUPPCState *env) 2578 { 2579 uint32_t tlbncfg = 
0;
    int esel = (env->spr[SPR_BOOKE_MAS0] & MAS0_ESEL_MASK) >> MAS0_ESEL_SHIFT;
    int ea = (env->spr[SPR_BOOKE_MAS2] & MAS2_EPN_MASK);
    int tlb;

    tlb = (env->spr[SPR_BOOKE_MAS0] & MAS0_TLBSEL_MASK) >> MAS0_TLBSEL_SHIFT;
    tlbncfg = env->spr[SPR_BOOKE_TLB0CFG + tlb];

    if ((tlbncfg & TLBnCFG_HES) && (env->spr[SPR_BOOKE_MAS0] & MAS0_HES)) {
        cpu_abort(env_cpu(env), "we don't support HES yet\n");
    }

    return booke206_get_tlbm(env, tlb, ea, esel);
}

void helper_booke_setpid(CPUPPCState *env, uint32_t pidn, target_ulong pid)
{
    env->spr[pidn] = pid;
    /* changing PIDs means we're in a different address space now */
    tlb_flush(env_cpu(env));
}

void helper_booke_set_eplc(CPUPPCState *env, target_ulong val)
{
    env->spr[SPR_BOOKE_EPLC] = val & EPID_MASK;
    tlb_flush_by_mmuidx(env_cpu(env), 1 << PPC_TLB_EPID_LOAD);
}

void helper_booke_set_epsc(CPUPPCState *env, target_ulong val)
{
    env->spr[SPR_BOOKE_EPSC] = val & EPID_MASK;
    tlb_flush_by_mmuidx(env_cpu(env), 1 << PPC_TLB_EPID_STORE);
}

/* Flush one page if the entry maps exactly one target page, else flush all */
static inline void flush_page(CPUPPCState *env, ppcmas_tlb_t *tlb)
{
    if (booke206_tlb_to_page_size(env, tlb) == TARGET_PAGE_SIZE) {
        tlb_flush_page(env_cpu(env), tlb->mas2 & MAS2_EPN_MASK);
    } else {
        tlb_flush(env_cpu(env));
    }
}

void helper_booke206_tlbwe(CPUPPCState *env)
{
    uint32_t tlbncfg, tlbn;
    ppcmas_tlb_t *tlb;
    uint32_t size_tlb, size_ps;
    target_ulong mask;

    switch (env->spr[SPR_BOOKE_MAS0] & MAS0_WQ_MASK) {
    case MAS0_WQ_ALWAYS:
        /* good to go, write that entry */
        break;
    case MAS0_WQ_COND:
        /* XXX check if reserved */
        if (0) {
            return;
        }
        break;
    case MAS0_WQ_CLR_RSRV:
        /* XXX clear entry */
        return;
    default:
        /* no idea what to do */
        return;
    }

    if (((env->spr[SPR_BOOKE_MAS0] & MAS0_ATSEL) == MAS0_ATSEL_LRAT) &&
        !msr_gs) {
        /* XXX we don't support direct LRAT setting yet */
        fprintf(stderr, "cpu: don't support LRAT setting yet\n");
        return;
    }

    tlbn = (env->spr[SPR_BOOKE_MAS0] & MAS0_TLBSEL_MASK) >> MAS0_TLBSEL_SHIFT;
    tlbncfg = env->spr[SPR_BOOKE_TLB0CFG + tlbn];

    tlb = booke206_cur_tlb(env);

    if (!tlb) {
        raise_exception_err_ra(env, POWERPC_EXCP_PROGRAM,
                               POWERPC_EXCP_INVAL |
                               POWERPC_EXCP_INVAL_INVAL, GETPC());
    }

    /* check that we support the targeted size */
    size_tlb = (env->spr[SPR_BOOKE_MAS1] & MAS1_TSIZE_MASK) >> MAS1_TSIZE_SHIFT;
    size_ps = booke206_tlbnps(env, tlbn);
    if ((env->spr[SPR_BOOKE_MAS1] & MAS1_VALID) && (tlbncfg & TLBnCFG_AVAIL) &&
        !(size_ps & (1 << size_tlb))) {
        raise_exception_err_ra(env, POWERPC_EXCP_PROGRAM,
                               POWERPC_EXCP_INVAL |
                               POWERPC_EXCP_INVAL_INVAL, GETPC());
    }

    if (msr_gs) {
        cpu_abort(env_cpu(env), "missing HV implementation\n");
    }

    if (tlb->mas1 & MAS1_VALID) {
        /*
         * Invalidate the page in QEMU TLB if it was a valid entry.
         *
         * In "PowerPC e500 Core Family Reference Manual, Rev. 1",
         * Section "12.4.2 TLB Write Entry (tlbwe) Instruction":
         * (https://www.nxp.com/docs/en/reference-manual/E500CORERM.pdf)
         *
         * "Note that when an L2 TLB entry is written, it may be displacing an
         * already valid entry in the same L2 TLB location (a victim).
If a 2689 * valid L1 TLB entry corresponds to the L2 MMU victim entry, that L1 2690 * TLB entry is automatically invalidated." 2691 */ 2692 flush_page(env, tlb); 2693 } 2694 2695 tlb->mas7_3 = ((uint64_t)env->spr[SPR_BOOKE_MAS7] << 32) | 2696 env->spr[SPR_BOOKE_MAS3]; 2697 tlb->mas1 = env->spr[SPR_BOOKE_MAS1]; 2698 2699 if ((env->spr[SPR_MMUCFG] & MMUCFG_MAVN) == MMUCFG_MAVN_V2) { 2700 /* For TLB which has a fixed size TSIZE is ignored with MAV2 */ 2701 booke206_fixed_size_tlbn(env, tlbn, tlb); 2702 } else { 2703 if (!(tlbncfg & TLBnCFG_AVAIL)) { 2704 /* force !AVAIL TLB entries to correct page size */ 2705 tlb->mas1 &= ~MAS1_TSIZE_MASK; 2706 /* XXX can be configured in MMUCSR0 */ 2707 tlb->mas1 |= (tlbncfg & TLBnCFG_MINSIZE) >> 12; 2708 } 2709 } 2710 2711 /* Make a mask from TLB size to discard invalid bits in EPN field */ 2712 mask = ~(booke206_tlb_to_page_size(env, tlb) - 1); 2713 /* Add a mask for page attributes */ 2714 mask |= MAS2_ACM | MAS2_VLE | MAS2_W | MAS2_I | MAS2_M | MAS2_G | MAS2_E; 2715 2716 if (!msr_cm) { 2717 /* 2718 * Executing a tlbwe instruction in 32-bit mode will set bits 2719 * 0:31 of the TLB EPN field to zero. 2720 */ 2721 mask &= 0xffffffff; 2722 } 2723 2724 tlb->mas2 = env->spr[SPR_BOOKE_MAS2] & mask; 2725 2726 if (!(tlbncfg & TLBnCFG_IPROT)) { 2727 /* no IPROT supported by TLB */ 2728 tlb->mas1 &= ~MAS1_IPROT; 2729 } 2730 2731 flush_page(env, tlb); 2732 } 2733 2734 static inline void booke206_tlb_to_mas(CPUPPCState *env, ppcmas_tlb_t *tlb) 2735 { 2736 int tlbn = booke206_tlbm_to_tlbn(env, tlb); 2737 int way = booke206_tlbm_to_way(env, tlb); 2738 2739 env->spr[SPR_BOOKE_MAS0] = tlbn << MAS0_TLBSEL_SHIFT; 2740 env->spr[SPR_BOOKE_MAS0] |= way << MAS0_ESEL_SHIFT; 2741 env->spr[SPR_BOOKE_MAS0] |= env->last_way << MAS0_NV_SHIFT; 2742 2743 env->spr[SPR_BOOKE_MAS1] = tlb->mas1; 2744 env->spr[SPR_BOOKE_MAS2] = tlb->mas2; 2745 env->spr[SPR_BOOKE_MAS3] = tlb->mas7_3; 2746 env->spr[SPR_BOOKE_MAS7] = tlb->mas7_3 >> 32; 2747 } 2748 2749 void helper_booke206_tlbre(CPUPPCState *env) 2750 { 2751 ppcmas_tlb_t *tlb = NULL; 2752 2753 tlb = booke206_cur_tlb(env); 2754 if (!tlb) { 2755 env->spr[SPR_BOOKE_MAS1] = 0; 2756 } else { 2757 booke206_tlb_to_mas(env, tlb); 2758 } 2759 } 2760 2761 void helper_booke206_tlbsx(CPUPPCState *env, target_ulong address) 2762 { 2763 ppcmas_tlb_t *tlb = NULL; 2764 int i, j; 2765 hwaddr raddr; 2766 uint32_t spid, sas; 2767 2768 spid = (env->spr[SPR_BOOKE_MAS6] & MAS6_SPID_MASK) >> MAS6_SPID_SHIFT; 2769 sas = env->spr[SPR_BOOKE_MAS6] & MAS6_SAS; 2770 2771 for (i = 0; i < BOOKE206_MAX_TLBN; i++) { 2772 int ways = booke206_tlb_ways(env, i); 2773 2774 for (j = 0; j < ways; j++) { 2775 tlb = booke206_get_tlbm(env, i, address, j); 2776 2777 if (!tlb) { 2778 continue; 2779 } 2780 2781 if (ppcmas_tlb_check(env, tlb, &raddr, address, spid)) { 2782 continue; 2783 } 2784 2785 if (sas != ((tlb->mas1 & MAS1_TS) >> MAS1_TS_SHIFT)) { 2786 continue; 2787 } 2788 2789 booke206_tlb_to_mas(env, tlb); 2790 return; 2791 } 2792 } 2793 2794 /* no entry found, fill with defaults */ 2795 env->spr[SPR_BOOKE_MAS0] = env->spr[SPR_BOOKE_MAS4] & MAS4_TLBSELD_MASK; 2796 env->spr[SPR_BOOKE_MAS1] = env->spr[SPR_BOOKE_MAS4] & MAS4_TSIZED_MASK; 2797 env->spr[SPR_BOOKE_MAS2] = env->spr[SPR_BOOKE_MAS4] & MAS4_WIMGED_MASK; 2798 env->spr[SPR_BOOKE_MAS3] = 0; 2799 env->spr[SPR_BOOKE_MAS7] = 0; 2800 2801 if (env->spr[SPR_BOOKE_MAS6] & MAS6_SAS) { 2802 env->spr[SPR_BOOKE_MAS1] |= MAS1_TS; 2803 } 2804 2805 env->spr[SPR_BOOKE_MAS1] |= (env->spr[SPR_BOOKE_MAS6] >> 16) 2806 << MAS1_TID_SHIFT; 2807 
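    /*
     * MAS1/MAS2 now hold the defaults from MAS4 and the search PID/AS from
     * MAS6, ready for a following tlbwe to create the missing entry.
     */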
2808 /* next victim logic */ 2809 env->spr[SPR_BOOKE_MAS0] |= env->last_way << MAS0_ESEL_SHIFT; 2810 env->last_way++; 2811 env->last_way &= booke206_tlb_ways(env, 0) - 1; 2812 env->spr[SPR_BOOKE_MAS0] |= env->last_way << MAS0_NV_SHIFT; 2813 } 2814 2815 static inline void booke206_invalidate_ea_tlb(CPUPPCState *env, int tlbn, 2816 uint32_t ea) 2817 { 2818 int i; 2819 int ways = booke206_tlb_ways(env, tlbn); 2820 target_ulong mask; 2821 2822 for (i = 0; i < ways; i++) { 2823 ppcmas_tlb_t *tlb = booke206_get_tlbm(env, tlbn, ea, i); 2824 if (!tlb) { 2825 continue; 2826 } 2827 mask = ~(booke206_tlb_to_page_size(env, tlb) - 1); 2828 if (((tlb->mas2 & MAS2_EPN_MASK) == (ea & mask)) && 2829 !(tlb->mas1 & MAS1_IPROT)) { 2830 tlb->mas1 &= ~MAS1_VALID; 2831 } 2832 } 2833 } 2834 2835 void helper_booke206_tlbivax(CPUPPCState *env, target_ulong address) 2836 { 2837 CPUState *cs; 2838 2839 if (address & 0x4) { 2840 /* flush all entries */ 2841 if (address & 0x8) { 2842 /* flush all of TLB1 */ 2843 booke206_flush_tlb(env, BOOKE206_FLUSH_TLB1, 1); 2844 } else { 2845 /* flush all of TLB0 */ 2846 booke206_flush_tlb(env, BOOKE206_FLUSH_TLB0, 0); 2847 } 2848 return; 2849 } 2850 2851 if (address & 0x8) { 2852 /* flush TLB1 entries */ 2853 booke206_invalidate_ea_tlb(env, 1, address); 2854 CPU_FOREACH(cs) { 2855 tlb_flush(cs); 2856 } 2857 } else { 2858 /* flush TLB0 entries */ 2859 booke206_invalidate_ea_tlb(env, 0, address); 2860 CPU_FOREACH(cs) { 2861 tlb_flush_page(cs, address & MAS2_EPN_MASK); 2862 } 2863 } 2864 } 2865 2866 void helper_booke206_tlbilx0(CPUPPCState *env, target_ulong address) 2867 { 2868 /* XXX missing LPID handling */ 2869 booke206_flush_tlb(env, -1, 1); 2870 } 2871 2872 void helper_booke206_tlbilx1(CPUPPCState *env, target_ulong address) 2873 { 2874 int i, j; 2875 int tid = (env->spr[SPR_BOOKE_MAS6] & MAS6_SPID); 2876 ppcmas_tlb_t *tlb = env->tlb.tlbm; 2877 int tlb_size; 2878 2879 /* XXX missing LPID handling */ 2880 for (i = 0; i < BOOKE206_MAX_TLBN; i++) { 2881 tlb_size = booke206_tlb_size(env, i); 2882 for (j = 0; j < tlb_size; j++) { 2883 if (!(tlb[j].mas1 & MAS1_IPROT) && 2884 ((tlb[j].mas1 & MAS1_TID_MASK) == tid)) { 2885 tlb[j].mas1 &= ~MAS1_VALID; 2886 } 2887 } 2888 tlb += booke206_tlb_size(env, i); 2889 } 2890 tlb_flush(env_cpu(env)); 2891 } 2892 2893 void helper_booke206_tlbilx3(CPUPPCState *env, target_ulong address) 2894 { 2895 int i, j; 2896 ppcmas_tlb_t *tlb; 2897 int tid = (env->spr[SPR_BOOKE_MAS6] & MAS6_SPID); 2898 int pid = tid >> MAS6_SPID_SHIFT; 2899 int sgs = env->spr[SPR_BOOKE_MAS5] & MAS5_SGS; 2900 int ind = (env->spr[SPR_BOOKE_MAS6] & MAS6_SIND) ? 
MAS1_IND : 0; 2901 /* XXX check for unsupported isize and raise an invalid opcode then */ 2902 int size = env->spr[SPR_BOOKE_MAS6] & MAS6_ISIZE_MASK; 2903 /* XXX implement MAV2 handling */ 2904 bool mav2 = false; 2905 2906 /* XXX missing LPID handling */ 2907 /* flush by pid and ea */ 2908 for (i = 0; i < BOOKE206_MAX_TLBN; i++) { 2909 int ways = booke206_tlb_ways(env, i); 2910 2911 for (j = 0; j < ways; j++) { 2912 tlb = booke206_get_tlbm(env, i, address, j); 2913 if (!tlb) { 2914 continue; 2915 } 2916 if ((ppcmas_tlb_check(env, tlb, NULL, address, pid) != 0) || 2917 (tlb->mas1 & MAS1_IPROT) || 2918 ((tlb->mas1 & MAS1_IND) != ind) || 2919 ((tlb->mas8 & MAS8_TGS) != sgs)) { 2920 continue; 2921 } 2922 if (mav2 && ((tlb->mas1 & MAS1_TSIZE_MASK) != size)) { 2923 /* XXX only check when MMUCFG[TWC] || TLBnCFG[HES] */ 2924 continue; 2925 } 2926 /* XXX e500mc doesn't match SAS, but other cores might */ 2927 tlb->mas1 &= ~MAS1_VALID; 2928 } 2929 } 2930 tlb_flush(env_cpu(env)); 2931 } 2932 2933 void helper_booke206_tlbflush(CPUPPCState *env, target_ulong type) 2934 { 2935 int flags = 0; 2936 2937 if (type & 2) { 2938 flags |= BOOKE206_FLUSH_TLB1; 2939 } 2940 2941 if (type & 4) { 2942 flags |= BOOKE206_FLUSH_TLB0; 2943 } 2944 2945 booke206_flush_tlb(env, flags, 1); 2946 } 2947 2948 2949 void helper_check_tlb_flush_local(CPUPPCState *env) 2950 { 2951 check_tlb_flush(env, false); 2952 } 2953 2954 void helper_check_tlb_flush_global(CPUPPCState *env) 2955 { 2956 check_tlb_flush(env, true); 2957 } 2958 2959 /*****************************************************************************/ 2960 2961 bool ppc_cpu_tlb_fill(CPUState *cs, vaddr addr, int size, 2962 MMUAccessType access_type, int mmu_idx, 2963 bool probe, uintptr_t retaddr) 2964 { 2965 PowerPCCPU *cpu = POWERPC_CPU(cs); 2966 PowerPCCPUClass *pcc = POWERPC_CPU_GET_CLASS(cs); 2967 CPUPPCState *env = &cpu->env; 2968 int ret; 2969 2970 if (pcc->handle_mmu_fault) { 2971 ret = pcc->handle_mmu_fault(cpu, addr, access_type, mmu_idx); 2972 } else { 2973 ret = cpu_ppc_handle_mmu_fault(env, addr, access_type, mmu_idx); 2974 } 2975 if (unlikely(ret != 0)) { 2976 if (probe) { 2977 return false; 2978 } 2979 raise_exception_err_ra(env, cs->exception_index, env->error_code, 2980 retaddr); 2981 } 2982 return true; 2983 } 2984