1 /* 2 * PowerPC MMU, TLB, SLB and BAT emulation helpers for QEMU. 3 * 4 * Copyright (c) 2003-2007 Jocelyn Mayer 5 * 6 * This library is free software; you can redistribute it and/or 7 * modify it under the terms of the GNU Lesser General Public 8 * License as published by the Free Software Foundation; either 9 * version 2 of the License, or (at your option) any later version. 10 * 11 * This library is distributed in the hope that it will be useful, 12 * but WITHOUT ANY WARRANTY; without even the implied warranty of 13 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU 14 * Lesser General Public License for more details. 15 * 16 * You should have received a copy of the GNU Lesser General Public 17 * License along with this library; if not, see <http://www.gnu.org/licenses/>. 18 */ 19 20 #include "qemu/osdep.h" 21 #include "qemu/units.h" 22 #include "cpu.h" 23 #include "exec/helper-proto.h" 24 #include "sysemu/kvm.h" 25 #include "kvm_ppc.h" 26 #include "mmu-hash64.h" 27 #include "mmu-hash32.h" 28 #include "exec/exec-all.h" 29 #include "exec/cpu_ldst.h" 30 #include "exec/log.h" 31 #include "helper_regs.h" 32 #include "qemu/error-report.h" 33 #include "qemu/main-loop.h" 34 #include "qemu/qemu-print.h" 35 #include "mmu-book3s-v3.h" 36 #include "mmu-radix64.h" 37 38 /* #define DEBUG_MMU */ 39 /* #define DEBUG_BATS */ 40 /* #define DEBUG_SOFTWARE_TLB */ 41 /* #define DUMP_PAGE_TABLES */ 42 /* #define FLUSH_ALL_TLBS */ 43 44 #ifdef DEBUG_MMU 45 # define LOG_MMU_STATE(cpu) log_cpu_state_mask(CPU_LOG_MMU, (cpu), 0) 46 #else 47 # define LOG_MMU_STATE(cpu) do { } while (0) 48 #endif 49 50 #ifdef DEBUG_SOFTWARE_TLB 51 # define LOG_SWTLB(...) qemu_log_mask(CPU_LOG_MMU, __VA_ARGS__) 52 #else 53 # define LOG_SWTLB(...) do { } while (0) 54 #endif 55 56 #ifdef DEBUG_BATS 57 # define LOG_BATS(...) qemu_log_mask(CPU_LOG_MMU, __VA_ARGS__) 58 #else 59 # define LOG_BATS(...) do { } while (0) 60 #endif 61 62 /*****************************************************************************/ 63 /* PowerPC MMU emulation */ 64 65 /* Context used internally during MMU translations */ 66 typedef struct mmu_ctx_t mmu_ctx_t; 67 struct mmu_ctx_t { 68 hwaddr raddr; /* Real address */ 69 hwaddr eaddr; /* Effective address */ 70 int prot; /* Protection bits */ 71 hwaddr hash[2]; /* Pagetable hash values */ 72 target_ulong ptem; /* Virtual segment ID | API */ 73 int key; /* Access key */ 74 int nx; /* Non-execute area */ 75 }; 76 77 /* Common routines used by software and hardware TLBs emulation */ 78 static inline int pte_is_valid(target_ulong pte0) 79 { 80 return pte0 & 0x80000000 ? 
1 : 0; 81 } 82 83 static inline void pte_invalidate(target_ulong *pte0) 84 { 85 *pte0 &= ~0x80000000; 86 } 87 88 #define PTE_PTEM_MASK 0x7FFFFFBF 89 #define PTE_CHECK_MASK (TARGET_PAGE_MASK | 0x7B) 90 91 static int pp_check(int key, int pp, int nx) 92 { 93 int access; 94 95 /* Compute access rights */ 96 access = 0; 97 if (key == 0) { 98 switch (pp) { 99 case 0x0: 100 case 0x1: 101 case 0x2: 102 access |= PAGE_WRITE; 103 /* fall through */ 104 case 0x3: 105 access |= PAGE_READ; 106 break; 107 } 108 } else { 109 switch (pp) { 110 case 0x0: 111 access = 0; 112 break; 113 case 0x1: 114 case 0x3: 115 access = PAGE_READ; 116 break; 117 case 0x2: 118 access = PAGE_READ | PAGE_WRITE; 119 break; 120 } 121 } 122 if (nx == 0) { 123 access |= PAGE_EXEC; 124 } 125 126 return access; 127 } 128 129 static int check_prot(int prot, int rw, int access_type) 130 { 131 int ret; 132 133 if (access_type == ACCESS_CODE) { 134 if (prot & PAGE_EXEC) { 135 ret = 0; 136 } else { 137 ret = -2; 138 } 139 } else if (rw) { 140 if (prot & PAGE_WRITE) { 141 ret = 0; 142 } else { 143 ret = -2; 144 } 145 } else { 146 if (prot & PAGE_READ) { 147 ret = 0; 148 } else { 149 ret = -2; 150 } 151 } 152 153 return ret; 154 } 155 156 static inline int ppc6xx_tlb_pte_check(mmu_ctx_t *ctx, target_ulong pte0, 157 target_ulong pte1, int h, 158 int rw, int type) 159 { 160 target_ulong ptem, mmask; 161 int access, ret, pteh, ptev, pp; 162 163 ret = -1; 164 /* Check validity and table match */ 165 ptev = pte_is_valid(pte0); 166 pteh = (pte0 >> 6) & 1; 167 if (ptev && h == pteh) { 168 /* Check vsid & api */ 169 ptem = pte0 & PTE_PTEM_MASK; 170 mmask = PTE_CHECK_MASK; 171 pp = pte1 & 0x00000003; 172 if (ptem == ctx->ptem) { 173 if (ctx->raddr != (hwaddr)-1ULL) { 174 /* all matches should have equal RPN, WIMG & PP */ 175 if ((ctx->raddr & mmask) != (pte1 & mmask)) { 176 qemu_log_mask(CPU_LOG_MMU, "Bad RPN/WIMG/PP\n"); 177 return -3; 178 } 179 } 180 /* Compute access rights */ 181 access = pp_check(ctx->key, pp, ctx->nx); 182 /* Keep the matching PTE informations */ 183 ctx->raddr = pte1; 184 ctx->prot = access; 185 ret = check_prot(ctx->prot, rw, type); 186 if (ret == 0) { 187 /* Access granted */ 188 qemu_log_mask(CPU_LOG_MMU, "PTE access granted !\n"); 189 } else { 190 /* Access right violation */ 191 qemu_log_mask(CPU_LOG_MMU, "PTE access rejected\n"); 192 } 193 } 194 } 195 196 return ret; 197 } 198 199 static int pte_update_flags(mmu_ctx_t *ctx, target_ulong *pte1p, 200 int ret, int rw) 201 { 202 int store = 0; 203 204 /* Update page flags */ 205 if (!(*pte1p & 0x00000100)) { 206 /* Update accessed flag */ 207 *pte1p |= 0x00000100; 208 store = 1; 209 } 210 if (!(*pte1p & 0x00000080)) { 211 if (rw == 1 && ret == 0) { 212 /* Update changed flag */ 213 *pte1p |= 0x00000080; 214 store = 1; 215 } else { 216 /* Force page fault for first write access */ 217 ctx->prot &= ~PAGE_WRITE; 218 } 219 } 220 221 return store; 222 } 223 224 /* Software driven TLB helpers */ 225 static inline int ppc6xx_tlb_getnum(CPUPPCState *env, target_ulong eaddr, 226 int way, int is_code) 227 { 228 int nr; 229 230 /* Select TLB num in a way from address */ 231 nr = (eaddr >> TARGET_PAGE_BITS) & (env->tlb_per_way - 1); 232 /* Select TLB way */ 233 nr += env->tlb_per_way * way; 234 /* 6xx have separate TLBs for instructions and data */ 235 if (is_code && env->id_tlbs == 1) { 236 nr += env->nb_tlb; 237 } 238 239 return nr; 240 } 241 242 static inline void ppc6xx_tlb_invalidate_all(CPUPPCState *env) 243 { 244 ppc6xx_tlb_t *tlb; 245 int nr, max; 246 247 /* 
LOG_SWTLB("Invalidate all TLBs\n"); */ 248 /* Invalidate all defined software TLB */ 249 max = env->nb_tlb; 250 if (env->id_tlbs == 1) { 251 max *= 2; 252 } 253 for (nr = 0; nr < max; nr++) { 254 tlb = &env->tlb.tlb6[nr]; 255 pte_invalidate(&tlb->pte0); 256 } 257 tlb_flush(env_cpu(env)); 258 } 259 260 static inline void ppc6xx_tlb_invalidate_virt2(CPUPPCState *env, 261 target_ulong eaddr, 262 int is_code, int match_epn) 263 { 264 #if !defined(FLUSH_ALL_TLBS) 265 CPUState *cs = env_cpu(env); 266 ppc6xx_tlb_t *tlb; 267 int way, nr; 268 269 /* Invalidate ITLB + DTLB, all ways */ 270 for (way = 0; way < env->nb_ways; way++) { 271 nr = ppc6xx_tlb_getnum(env, eaddr, way, is_code); 272 tlb = &env->tlb.tlb6[nr]; 273 if (pte_is_valid(tlb->pte0) && (match_epn == 0 || eaddr == tlb->EPN)) { 274 LOG_SWTLB("TLB invalidate %d/%d " TARGET_FMT_lx "\n", nr, 275 env->nb_tlb, eaddr); 276 pte_invalidate(&tlb->pte0); 277 tlb_flush_page(cs, tlb->EPN); 278 } 279 } 280 #else 281 /* XXX: PowerPC specification say this is valid as well */ 282 ppc6xx_tlb_invalidate_all(env); 283 #endif 284 } 285 286 static inline void ppc6xx_tlb_invalidate_virt(CPUPPCState *env, 287 target_ulong eaddr, int is_code) 288 { 289 ppc6xx_tlb_invalidate_virt2(env, eaddr, is_code, 0); 290 } 291 292 static void ppc6xx_tlb_store(CPUPPCState *env, target_ulong EPN, int way, 293 int is_code, target_ulong pte0, target_ulong pte1) 294 { 295 ppc6xx_tlb_t *tlb; 296 int nr; 297 298 nr = ppc6xx_tlb_getnum(env, EPN, way, is_code); 299 tlb = &env->tlb.tlb6[nr]; 300 LOG_SWTLB("Set TLB %d/%d EPN " TARGET_FMT_lx " PTE0 " TARGET_FMT_lx 301 " PTE1 " TARGET_FMT_lx "\n", nr, env->nb_tlb, EPN, pte0, pte1); 302 /* Invalidate any pending reference in QEMU for this virtual address */ 303 ppc6xx_tlb_invalidate_virt2(env, EPN, is_code, 1); 304 tlb->pte0 = pte0; 305 tlb->pte1 = pte1; 306 tlb->EPN = EPN; 307 /* Store last way for LRU mechanism */ 308 env->last_way = way; 309 } 310 311 static inline int ppc6xx_tlb_check(CPUPPCState *env, mmu_ctx_t *ctx, 312 target_ulong eaddr, int rw, int access_type) 313 { 314 ppc6xx_tlb_t *tlb; 315 int nr, best, way; 316 int ret; 317 318 best = -1; 319 ret = -1; /* No TLB found */ 320 for (way = 0; way < env->nb_ways; way++) { 321 nr = ppc6xx_tlb_getnum(env, eaddr, way, 322 access_type == ACCESS_CODE ? 1 : 0); 323 tlb = &env->tlb.tlb6[nr]; 324 /* This test "emulates" the PTE index match for hardware TLBs */ 325 if ((eaddr & TARGET_PAGE_MASK) != tlb->EPN) { 326 LOG_SWTLB("TLB %d/%d %s [" TARGET_FMT_lx " " TARGET_FMT_lx 327 "] <> " TARGET_FMT_lx "\n", nr, env->nb_tlb, 328 pte_is_valid(tlb->pte0) ? "valid" : "inval", 329 tlb->EPN, tlb->EPN + TARGET_PAGE_SIZE, eaddr); 330 continue; 331 } 332 LOG_SWTLB("TLB %d/%d %s " TARGET_FMT_lx " <> " TARGET_FMT_lx " " 333 TARGET_FMT_lx " %c %c\n", nr, env->nb_tlb, 334 pte_is_valid(tlb->pte0) ? "valid" : "inval", 335 tlb->EPN, eaddr, tlb->pte1, 336 rw ? 'S' : 'L', access_type == ACCESS_CODE ? 'I' : 'D'); 337 switch (ppc6xx_tlb_pte_check(ctx, tlb->pte0, tlb->pte1, 338 0, rw, access_type)) { 339 case -3: 340 /* TLB inconsistency */ 341 return -1; 342 case -2: 343 /* Access violation */ 344 ret = -2; 345 best = nr; 346 break; 347 case -1: 348 default: 349 /* No match */ 350 break; 351 case 0: 352 /* access granted */ 353 /* 354 * XXX: we should go on looping to check all TLBs 355 * consistency but we can speed-up the whole thing as 356 * the result would be undefined if TLBs are not 357 * consistent. 
358 */ 359 ret = 0; 360 best = nr; 361 goto done; 362 } 363 } 364 if (best != -1) { 365 done: 366 LOG_SWTLB("found TLB at addr " TARGET_FMT_plx " prot=%01x ret=%d\n", 367 ctx->raddr & TARGET_PAGE_MASK, ctx->prot, ret); 368 /* Update page flags */ 369 pte_update_flags(ctx, &env->tlb.tlb6[best].pte1, ret, rw); 370 } 371 372 return ret; 373 } 374 375 /* Perform BAT hit & translation */ 376 static inline void bat_size_prot(CPUPPCState *env, target_ulong *blp, 377 int *validp, int *protp, target_ulong *BATu, 378 target_ulong *BATl) 379 { 380 target_ulong bl; 381 int pp, valid, prot; 382 383 bl = (*BATu & 0x00001FFC) << 15; 384 valid = 0; 385 prot = 0; 386 if (((msr_pr == 0) && (*BATu & 0x00000002)) || 387 ((msr_pr != 0) && (*BATu & 0x00000001))) { 388 valid = 1; 389 pp = *BATl & 0x00000003; 390 if (pp != 0) { 391 prot = PAGE_READ | PAGE_EXEC; 392 if (pp == 0x2) { 393 prot |= PAGE_WRITE; 394 } 395 } 396 } 397 *blp = bl; 398 *validp = valid; 399 *protp = prot; 400 } 401 402 static int get_bat_6xx_tlb(CPUPPCState *env, mmu_ctx_t *ctx, 403 target_ulong virtual, int rw, int type) 404 { 405 target_ulong *BATlt, *BATut, *BATu, *BATl; 406 target_ulong BEPIl, BEPIu, bl; 407 int i, valid, prot; 408 int ret = -1; 409 410 LOG_BATS("%s: %cBAT v " TARGET_FMT_lx "\n", __func__, 411 type == ACCESS_CODE ? 'I' : 'D', virtual); 412 switch (type) { 413 case ACCESS_CODE: 414 BATlt = env->IBAT[1]; 415 BATut = env->IBAT[0]; 416 break; 417 default: 418 BATlt = env->DBAT[1]; 419 BATut = env->DBAT[0]; 420 break; 421 } 422 for (i = 0; i < env->nb_BATs; i++) { 423 BATu = &BATut[i]; 424 BATl = &BATlt[i]; 425 BEPIu = *BATu & 0xF0000000; 426 BEPIl = *BATu & 0x0FFE0000; 427 bat_size_prot(env, &bl, &valid, &prot, BATu, BATl); 428 LOG_BATS("%s: %cBAT%d v " TARGET_FMT_lx " BATu " TARGET_FMT_lx 429 " BATl " TARGET_FMT_lx "\n", __func__, 430 type == ACCESS_CODE ? 'I' : 'D', i, virtual, *BATu, *BATl); 431 if ((virtual & 0xF0000000) == BEPIu && 432 ((virtual & 0x0FFE0000) & ~bl) == BEPIl) { 433 /* BAT matches */ 434 if (valid != 0) { 435 /* Get physical address */ 436 ctx->raddr = (*BATl & 0xF0000000) | 437 ((virtual & 0x0FFE0000 & bl) | (*BATl & 0x0FFE0000)) | 438 (virtual & 0x0001F000); 439 /* Compute access rights */ 440 ctx->prot = prot; 441 ret = check_prot(ctx->prot, rw, type); 442 if (ret == 0) { 443 LOG_BATS("BAT %d match: r " TARGET_FMT_plx " prot=%c%c\n", 444 i, ctx->raddr, ctx->prot & PAGE_READ ? 'R' : '-', 445 ctx->prot & PAGE_WRITE ? 'W' : '-'); 446 } 447 break; 448 } 449 } 450 } 451 if (ret < 0) { 452 #if defined(DEBUG_BATS) 453 if (qemu_log_enabled()) { 454 LOG_BATS("no BAT match for " TARGET_FMT_lx ":\n", virtual); 455 for (i = 0; i < 4; i++) { 456 BATu = &BATut[i]; 457 BATl = &BATlt[i]; 458 BEPIu = *BATu & 0xF0000000; 459 BEPIl = *BATu & 0x0FFE0000; 460 bl = (*BATu & 0x00001FFC) << 15; 461 LOG_BATS("%s: %cBAT%d v " TARGET_FMT_lx " BATu " TARGET_FMT_lx 462 " BATl " TARGET_FMT_lx "\n\t" TARGET_FMT_lx " " 463 TARGET_FMT_lx " " TARGET_FMT_lx "\n", 464 __func__, type == ACCESS_CODE ? 
'I' : 'D', i, virtual, 465 *BATu, *BATl, BEPIu, BEPIl, bl); 466 } 467 } 468 #endif 469 } 470 /* No hit */ 471 return ret; 472 } 473 474 /* Perform segment based translation */ 475 static inline int get_segment_6xx_tlb(CPUPPCState *env, mmu_ctx_t *ctx, 476 target_ulong eaddr, int rw, int type) 477 { 478 PowerPCCPU *cpu = env_archcpu(env); 479 hwaddr hash; 480 target_ulong vsid; 481 int ds, pr, target_page_bits; 482 int ret; 483 target_ulong sr, pgidx; 484 485 pr = msr_pr; 486 ctx->eaddr = eaddr; 487 488 sr = env->sr[eaddr >> 28]; 489 ctx->key = (((sr & 0x20000000) && (pr != 0)) || 490 ((sr & 0x40000000) && (pr == 0))) ? 1 : 0; 491 ds = sr & 0x80000000 ? 1 : 0; 492 ctx->nx = sr & 0x10000000 ? 1 : 0; 493 vsid = sr & 0x00FFFFFF; 494 target_page_bits = TARGET_PAGE_BITS; 495 qemu_log_mask(CPU_LOG_MMU, 496 "Check segment v=" TARGET_FMT_lx " %d " TARGET_FMT_lx 497 " nip=" TARGET_FMT_lx " lr=" TARGET_FMT_lx 498 " ir=%d dr=%d pr=%d %d t=%d\n", 499 eaddr, (int)(eaddr >> 28), sr, env->nip, env->lr, (int)msr_ir, 500 (int)msr_dr, pr != 0 ? 1 : 0, rw, type); 501 pgidx = (eaddr & ~SEGMENT_MASK_256M) >> target_page_bits; 502 hash = vsid ^ pgidx; 503 ctx->ptem = (vsid << 7) | (pgidx >> 10); 504 505 qemu_log_mask(CPU_LOG_MMU, 506 "pte segment: key=%d ds %d nx %d vsid " TARGET_FMT_lx "\n", 507 ctx->key, ds, ctx->nx, vsid); 508 ret = -1; 509 if (!ds) { 510 /* Check if instruction fetch is allowed, if needed */ 511 if (type != ACCESS_CODE || ctx->nx == 0) { 512 /* Page address translation */ 513 qemu_log_mask(CPU_LOG_MMU, "htab_base " TARGET_FMT_plx 514 " htab_mask " TARGET_FMT_plx 515 " hash " TARGET_FMT_plx "\n", 516 ppc_hash32_hpt_base(cpu), ppc_hash32_hpt_mask(cpu), hash); 517 ctx->hash[0] = hash; 518 ctx->hash[1] = ~hash; 519 520 /* Initialize real address with an invalid value */ 521 ctx->raddr = (hwaddr)-1ULL; 522 /* Software TLB search */ 523 ret = ppc6xx_tlb_check(env, ctx, eaddr, rw, type); 524 #if defined(DUMP_PAGE_TABLES) 525 if (qemu_loglevel_mask(CPU_LOG_MMU)) { 526 CPUState *cs = env_cpu(env); 527 hwaddr curaddr; 528 uint32_t a0, a1, a2, a3; 529 530 qemu_log("Page table: " TARGET_FMT_plx " len " TARGET_FMT_plx 531 "\n", ppc_hash32_hpt_base(cpu), 532 ppc_hash32_hpt_mask(env) + 0x80); 533 for (curaddr = ppc_hash32_hpt_base(cpu); 534 curaddr < (ppc_hash32_hpt_base(cpu) 535 + ppc_hash32_hpt_mask(cpu) + 0x80); 536 curaddr += 16) { 537 a0 = ldl_phys(cs->as, curaddr); 538 a1 = ldl_phys(cs->as, curaddr + 4); 539 a2 = ldl_phys(cs->as, curaddr + 8); 540 a3 = ldl_phys(cs->as, curaddr + 12); 541 if (a0 != 0 || a1 != 0 || a2 != 0 || a3 != 0) { 542 qemu_log(TARGET_FMT_plx ": %08x %08x %08x %08x\n", 543 curaddr, a0, a1, a2, a3); 544 } 545 } 546 } 547 #endif 548 } else { 549 qemu_log_mask(CPU_LOG_MMU, "No access allowed\n"); 550 ret = -3; 551 } 552 } else { 553 target_ulong sr; 554 555 qemu_log_mask(CPU_LOG_MMU, "direct store...\n"); 556 /* Direct-store segment : absolutely *BUGGY* for now */ 557 558 /* 559 * Direct-store implies a 32-bit MMU. 560 * Check the Segment Register's bus unit ID (BUID). 561 */ 562 sr = env->sr[eaddr >> 28]; 563 if ((sr & 0x1FF00000) >> 20 == 0x07f) { 564 /* 565 * Memory-forced I/O controller interface access 566 * 567 * If T=1 and BUID=x'07F', the 601 performs a memory 568 * access to SR[28-31] LA[4-31], bypassing all protection 569 * mechanisms. 
570 */ 571 ctx->raddr = ((sr & 0xF) << 28) | (eaddr & 0x0FFFFFFF); 572 ctx->prot = PAGE_READ | PAGE_WRITE | PAGE_EXEC; 573 return 0; 574 } 575 576 switch (type) { 577 case ACCESS_INT: 578 /* Integer load/store : only access allowed */ 579 break; 580 case ACCESS_CODE: 581 /* No code fetch is allowed in direct-store areas */ 582 return -4; 583 case ACCESS_FLOAT: 584 /* Floating point load/store */ 585 return -4; 586 case ACCESS_RES: 587 /* lwarx, ldarx or srwcx. */ 588 return -4; 589 case ACCESS_CACHE: 590 /* 591 * dcba, dcbt, dcbtst, dcbf, dcbi, dcbst, dcbz, or icbi 592 * 593 * Should make the instruction do no-op. As it already do 594 * no-op, it's quite easy :-) 595 */ 596 ctx->raddr = eaddr; 597 return 0; 598 case ACCESS_EXT: 599 /* eciwx or ecowx */ 600 return -4; 601 default: 602 qemu_log_mask(CPU_LOG_MMU, "ERROR: instruction should not need " 603 "address translation\n"); 604 return -4; 605 } 606 if ((rw == 1 || ctx->key != 1) && (rw == 0 || ctx->key != 0)) { 607 ctx->raddr = eaddr; 608 ret = 2; 609 } else { 610 ret = -2; 611 } 612 } 613 614 return ret; 615 } 616 617 /* Generic TLB check function for embedded PowerPC implementations */ 618 static int ppcemb_tlb_check(CPUPPCState *env, ppcemb_tlb_t *tlb, 619 hwaddr *raddrp, 620 target_ulong address, uint32_t pid, int ext, 621 int i) 622 { 623 target_ulong mask; 624 625 /* Check valid flag */ 626 if (!(tlb->prot & PAGE_VALID)) { 627 return -1; 628 } 629 mask = ~(tlb->size - 1); 630 LOG_SWTLB("%s: TLB %d address " TARGET_FMT_lx " PID %u <=> " TARGET_FMT_lx 631 " " TARGET_FMT_lx " %u %x\n", __func__, i, address, pid, tlb->EPN, 632 mask, (uint32_t)tlb->PID, tlb->prot); 633 /* Check PID */ 634 if (tlb->PID != 0 && tlb->PID != pid) { 635 return -1; 636 } 637 /* Check effective address */ 638 if ((address & mask) != tlb->EPN) { 639 return -1; 640 } 641 *raddrp = (tlb->RPN & mask) | (address & ~mask); 642 if (ext) { 643 /* Extend the physical address to 36 bits */ 644 *raddrp |= (uint64_t)(tlb->RPN & 0xF) << 32; 645 } 646 647 return 0; 648 } 649 650 /* Generic TLB search function for PowerPC embedded implementations */ 651 static int ppcemb_tlb_search(CPUPPCState *env, target_ulong address, 652 uint32_t pid) 653 { 654 ppcemb_tlb_t *tlb; 655 hwaddr raddr; 656 int i, ret; 657 658 /* Default return value is no match */ 659 ret = -1; 660 for (i = 0; i < env->nb_tlb; i++) { 661 tlb = &env->tlb.tlbe[i]; 662 if (ppcemb_tlb_check(env, tlb, &raddr, address, pid, 0, i) == 0) { 663 ret = i; 664 break; 665 } 666 } 667 668 return ret; 669 } 670 671 /* Helpers specific to PowerPC 40x implementations */ 672 static inline void ppc4xx_tlb_invalidate_all(CPUPPCState *env) 673 { 674 ppcemb_tlb_t *tlb; 675 int i; 676 677 for (i = 0; i < env->nb_tlb; i++) { 678 tlb = &env->tlb.tlbe[i]; 679 tlb->prot &= ~PAGE_VALID; 680 } 681 tlb_flush(env_cpu(env)); 682 } 683 684 static int mmu40x_get_physical_address(CPUPPCState *env, mmu_ctx_t *ctx, 685 target_ulong address, int rw, 686 int access_type) 687 { 688 ppcemb_tlb_t *tlb; 689 hwaddr raddr; 690 int i, ret, zsel, zpr, pr; 691 692 ret = -1; 693 raddr = (hwaddr)-1ULL; 694 pr = msr_pr; 695 for (i = 0; i < env->nb_tlb; i++) { 696 tlb = &env->tlb.tlbe[i]; 697 if (ppcemb_tlb_check(env, tlb, &raddr, address, 698 env->spr[SPR_40x_PID], 0, i) < 0) { 699 continue; 700 } 701 zsel = (tlb->attr >> 4) & 0xF; 702 zpr = (env->spr[SPR_40x_ZPR] >> (30 - (2 * zsel))) & 0x3; 703 LOG_SWTLB("%s: TLB %d zsel %d zpr %d rw %d attr %08x\n", 704 __func__, i, zsel, zpr, rw, tlb->attr); 705 /* Check execute enable bit */ 706 switch (zpr) { 707 case 
0x2: 708 if (pr != 0) { 709 goto check_perms; 710 } 711 /* fall through */ 712 case 0x3: 713 /* All accesses granted */ 714 ctx->prot = PAGE_READ | PAGE_WRITE | PAGE_EXEC; 715 ret = 0; 716 break; 717 case 0x0: 718 if (pr != 0) { 719 /* Raise Zone protection fault. */ 720 env->spr[SPR_40x_ESR] = 1 << 22; 721 ctx->prot = 0; 722 ret = -2; 723 break; 724 } 725 /* fall through */ 726 case 0x1: 727 check_perms: 728 /* Check from TLB entry */ 729 ctx->prot = tlb->prot; 730 ret = check_prot(ctx->prot, rw, access_type); 731 if (ret == -2) { 732 env->spr[SPR_40x_ESR] = 0; 733 } 734 break; 735 } 736 if (ret >= 0) { 737 ctx->raddr = raddr; 738 LOG_SWTLB("%s: access granted " TARGET_FMT_lx " => " TARGET_FMT_plx 739 " %d %d\n", __func__, address, ctx->raddr, ctx->prot, 740 ret); 741 return 0; 742 } 743 } 744 LOG_SWTLB("%s: access refused " TARGET_FMT_lx " => " TARGET_FMT_plx 745 " %d %d\n", __func__, address, raddr, ctx->prot, ret); 746 747 return ret; 748 } 749 750 void store_40x_sler(CPUPPCState *env, uint32_t val) 751 { 752 /* XXX: TO BE FIXED */ 753 if (val != 0x00000000) { 754 cpu_abort(env_cpu(env), 755 "Little-endian regions are not supported by now\n"); 756 } 757 env->spr[SPR_405_SLER] = val; 758 } 759 760 static inline int mmubooke_check_tlb(CPUPPCState *env, ppcemb_tlb_t *tlb, 761 hwaddr *raddr, int *prot, 762 target_ulong address, int rw, 763 int access_type, int i) 764 { 765 int ret, prot2; 766 767 if (ppcemb_tlb_check(env, tlb, raddr, address, 768 env->spr[SPR_BOOKE_PID], 769 !env->nb_pids, i) >= 0) { 770 goto found_tlb; 771 } 772 773 if (env->spr[SPR_BOOKE_PID1] && 774 ppcemb_tlb_check(env, tlb, raddr, address, 775 env->spr[SPR_BOOKE_PID1], 0, i) >= 0) { 776 goto found_tlb; 777 } 778 779 if (env->spr[SPR_BOOKE_PID2] && 780 ppcemb_tlb_check(env, tlb, raddr, address, 781 env->spr[SPR_BOOKE_PID2], 0, i) >= 0) { 782 goto found_tlb; 783 } 784 785 LOG_SWTLB("%s: TLB entry not found\n", __func__); 786 return -1; 787 788 found_tlb: 789 790 if (msr_pr != 0) { 791 prot2 = tlb->prot & 0xF; 792 } else { 793 prot2 = (tlb->prot >> 4) & 0xF; 794 } 795 796 /* Check the address space */ 797 if (access_type == ACCESS_CODE) { 798 if (msr_ir != (tlb->attr & 1)) { 799 LOG_SWTLB("%s: AS doesn't match\n", __func__); 800 return -1; 801 } 802 803 *prot = prot2; 804 if (prot2 & PAGE_EXEC) { 805 LOG_SWTLB("%s: good TLB!\n", __func__); 806 return 0; 807 } 808 809 LOG_SWTLB("%s: no PAGE_EXEC: %x\n", __func__, prot2); 810 ret = -3; 811 } else { 812 if (msr_dr != (tlb->attr & 1)) { 813 LOG_SWTLB("%s: AS doesn't match\n", __func__); 814 return -1; 815 } 816 817 *prot = prot2; 818 if ((!rw && prot2 & PAGE_READ) || (rw && (prot2 & PAGE_WRITE))) { 819 LOG_SWTLB("%s: found TLB!\n", __func__); 820 return 0; 821 } 822 823 LOG_SWTLB("%s: PAGE_READ/WRITE doesn't match: %x\n", __func__, prot2); 824 ret = -2; 825 } 826 827 return ret; 828 } 829 830 static int mmubooke_get_physical_address(CPUPPCState *env, mmu_ctx_t *ctx, 831 target_ulong address, int rw, 832 int access_type) 833 { 834 ppcemb_tlb_t *tlb; 835 hwaddr raddr; 836 int i, ret; 837 838 ret = -1; 839 raddr = (hwaddr)-1ULL; 840 for (i = 0; i < env->nb_tlb; i++) { 841 tlb = &env->tlb.tlbe[i]; 842 ret = mmubooke_check_tlb(env, tlb, &raddr, &ctx->prot, address, rw, 843 access_type, i); 844 if (ret != -1) { 845 break; 846 } 847 } 848 849 if (ret >= 0) { 850 ctx->raddr = raddr; 851 LOG_SWTLB("%s: access granted " TARGET_FMT_lx " => " TARGET_FMT_plx 852 " %d %d\n", __func__, address, ctx->raddr, ctx->prot, 853 ret); 854 } else { 855 LOG_SWTLB("%s: access refused " TARGET_FMT_lx 
" => " TARGET_FMT_plx 856 " %d %d\n", __func__, address, raddr, ctx->prot, ret); 857 } 858 859 return ret; 860 } 861 862 static void booke206_flush_tlb(CPUPPCState *env, int flags, 863 const int check_iprot) 864 { 865 int tlb_size; 866 int i, j; 867 ppcmas_tlb_t *tlb = env->tlb.tlbm; 868 869 for (i = 0; i < BOOKE206_MAX_TLBN; i++) { 870 if (flags & (1 << i)) { 871 tlb_size = booke206_tlb_size(env, i); 872 for (j = 0; j < tlb_size; j++) { 873 if (!check_iprot || !(tlb[j].mas1 & MAS1_IPROT)) { 874 tlb[j].mas1 &= ~MAS1_VALID; 875 } 876 } 877 } 878 tlb += booke206_tlb_size(env, i); 879 } 880 881 tlb_flush(env_cpu(env)); 882 } 883 884 static hwaddr booke206_tlb_to_page_size(CPUPPCState *env, 885 ppcmas_tlb_t *tlb) 886 { 887 int tlbm_size; 888 889 tlbm_size = (tlb->mas1 & MAS1_TSIZE_MASK) >> MAS1_TSIZE_SHIFT; 890 891 return 1024ULL << tlbm_size; 892 } 893 894 /* TLB check function for MAS based SoftTLBs */ 895 static int ppcmas_tlb_check(CPUPPCState *env, ppcmas_tlb_t *tlb, 896 hwaddr *raddrp, target_ulong address, 897 uint32_t pid) 898 { 899 hwaddr mask; 900 uint32_t tlb_pid; 901 902 if (!msr_cm) { 903 /* In 32bit mode we can only address 32bit EAs */ 904 address = (uint32_t)address; 905 } 906 907 /* Check valid flag */ 908 if (!(tlb->mas1 & MAS1_VALID)) { 909 return -1; 910 } 911 912 mask = ~(booke206_tlb_to_page_size(env, tlb) - 1); 913 LOG_SWTLB("%s: TLB ADDR=0x" TARGET_FMT_lx " PID=0x%x MAS1=0x%x MAS2=0x%" 914 PRIx64 " mask=0x%" HWADDR_PRIx " MAS7_3=0x%" PRIx64 " MAS8=0x%" 915 PRIx32 "\n", __func__, address, pid, tlb->mas1, tlb->mas2, mask, 916 tlb->mas7_3, tlb->mas8); 917 918 /* Check PID */ 919 tlb_pid = (tlb->mas1 & MAS1_TID_MASK) >> MAS1_TID_SHIFT; 920 if (tlb_pid != 0 && tlb_pid != pid) { 921 return -1; 922 } 923 924 /* Check effective address */ 925 if ((address & mask) != (tlb->mas2 & MAS2_EPN_MASK)) { 926 return -1; 927 } 928 929 if (raddrp) { 930 *raddrp = (tlb->mas7_3 & mask) | (address & ~mask); 931 } 932 933 return 0; 934 } 935 936 static bool is_epid_mmu(int mmu_idx) 937 { 938 return mmu_idx == PPC_TLB_EPID_STORE || mmu_idx == PPC_TLB_EPID_LOAD; 939 } 940 941 static uint32_t mmubooke206_esr(int mmu_idx, bool rw) 942 { 943 uint32_t esr = 0; 944 if (rw) { 945 esr |= ESR_ST; 946 } 947 if (is_epid_mmu(mmu_idx)) { 948 esr |= ESR_EPID; 949 } 950 return esr; 951 } 952 953 /* 954 * Get EPID register given the mmu_idx. If this is regular load, 955 * construct the EPID access bits from current processor state 956 * 957 * Get the effective AS and PR bits and the PID. The PID is returned 958 * only if EPID load is requested, otherwise the caller must detect 959 * the correct EPID. Return true if valid EPID is returned. 
960 */ 961 static bool mmubooke206_get_as(CPUPPCState *env, 962 int mmu_idx, uint32_t *epid_out, 963 bool *as_out, bool *pr_out) 964 { 965 if (is_epid_mmu(mmu_idx)) { 966 uint32_t epidr; 967 if (mmu_idx == PPC_TLB_EPID_STORE) { 968 epidr = env->spr[SPR_BOOKE_EPSC]; 969 } else { 970 epidr = env->spr[SPR_BOOKE_EPLC]; 971 } 972 *epid_out = (epidr & EPID_EPID) >> EPID_EPID_SHIFT; 973 *as_out = !!(epidr & EPID_EAS); 974 *pr_out = !!(epidr & EPID_EPR); 975 return true; 976 } else { 977 *as_out = msr_ds; 978 *pr_out = msr_pr; 979 return false; 980 } 981 } 982 983 /* Check if the tlb found by hashing really matches */ 984 static int mmubooke206_check_tlb(CPUPPCState *env, ppcmas_tlb_t *tlb, 985 hwaddr *raddr, int *prot, 986 target_ulong address, int rw, 987 int access_type, int mmu_idx) 988 { 989 int ret; 990 int prot2 = 0; 991 uint32_t epid; 992 bool as, pr; 993 bool use_epid = mmubooke206_get_as(env, mmu_idx, &epid, &as, &pr); 994 995 if (!use_epid) { 996 if (ppcmas_tlb_check(env, tlb, raddr, address, 997 env->spr[SPR_BOOKE_PID]) >= 0) { 998 goto found_tlb; 999 } 1000 1001 if (env->spr[SPR_BOOKE_PID1] && 1002 ppcmas_tlb_check(env, tlb, raddr, address, 1003 env->spr[SPR_BOOKE_PID1]) >= 0) { 1004 goto found_tlb; 1005 } 1006 1007 if (env->spr[SPR_BOOKE_PID2] && 1008 ppcmas_tlb_check(env, tlb, raddr, address, 1009 env->spr[SPR_BOOKE_PID2]) >= 0) { 1010 goto found_tlb; 1011 } 1012 } else { 1013 if (ppcmas_tlb_check(env, tlb, raddr, address, epid) >= 0) { 1014 goto found_tlb; 1015 } 1016 } 1017 1018 LOG_SWTLB("%s: TLB entry not found\n", __func__); 1019 return -1; 1020 1021 found_tlb: 1022 1023 if (pr) { 1024 if (tlb->mas7_3 & MAS3_UR) { 1025 prot2 |= PAGE_READ; 1026 } 1027 if (tlb->mas7_3 & MAS3_UW) { 1028 prot2 |= PAGE_WRITE; 1029 } 1030 if (tlb->mas7_3 & MAS3_UX) { 1031 prot2 |= PAGE_EXEC; 1032 } 1033 } else { 1034 if (tlb->mas7_3 & MAS3_SR) { 1035 prot2 |= PAGE_READ; 1036 } 1037 if (tlb->mas7_3 & MAS3_SW) { 1038 prot2 |= PAGE_WRITE; 1039 } 1040 if (tlb->mas7_3 & MAS3_SX) { 1041 prot2 |= PAGE_EXEC; 1042 } 1043 } 1044 1045 /* Check the address space and permissions */ 1046 if (access_type == ACCESS_CODE) { 1047 /* There is no way to fetch code using epid load */ 1048 assert(!use_epid); 1049 if (msr_ir != ((tlb->mas1 & MAS1_TS) >> MAS1_TS_SHIFT)) { 1050 LOG_SWTLB("%s: AS doesn't match\n", __func__); 1051 return -1; 1052 } 1053 1054 *prot = prot2; 1055 if (prot2 & PAGE_EXEC) { 1056 LOG_SWTLB("%s: good TLB!\n", __func__); 1057 return 0; 1058 } 1059 1060 LOG_SWTLB("%s: no PAGE_EXEC: %x\n", __func__, prot2); 1061 ret = -3; 1062 } else { 1063 if (as != ((tlb->mas1 & MAS1_TS) >> MAS1_TS_SHIFT)) { 1064 LOG_SWTLB("%s: AS doesn't match\n", __func__); 1065 return -1; 1066 } 1067 1068 *prot = prot2; 1069 if ((!rw && prot2 & PAGE_READ) || (rw && (prot2 & PAGE_WRITE))) { 1070 LOG_SWTLB("%s: found TLB!\n", __func__); 1071 return 0; 1072 } 1073 1074 LOG_SWTLB("%s: PAGE_READ/WRITE doesn't match: %x\n", __func__, prot2); 1075 ret = -2; 1076 } 1077 1078 return ret; 1079 } 1080 1081 static int mmubooke206_get_physical_address(CPUPPCState *env, mmu_ctx_t *ctx, 1082 target_ulong address, int rw, 1083 int access_type, int mmu_idx) 1084 { 1085 ppcmas_tlb_t *tlb; 1086 hwaddr raddr; 1087 int i, j, ret; 1088 1089 ret = -1; 1090 raddr = (hwaddr)-1ULL; 1091 1092 for (i = 0; i < BOOKE206_MAX_TLBN; i++) { 1093 int ways = booke206_tlb_ways(env, i); 1094 1095 for (j = 0; j < ways; j++) { 1096 tlb = booke206_get_tlbm(env, i, address, j); 1097 if (!tlb) { 1098 continue; 1099 } 1100 ret = mmubooke206_check_tlb(env, tlb, &raddr, 
&ctx->prot, address, 1101 rw, access_type, mmu_idx); 1102 if (ret != -1) { 1103 goto found_tlb; 1104 } 1105 } 1106 } 1107 1108 found_tlb: 1109 1110 if (ret >= 0) { 1111 ctx->raddr = raddr; 1112 LOG_SWTLB("%s: access granted " TARGET_FMT_lx " => " TARGET_FMT_plx 1113 " %d %d\n", __func__, address, ctx->raddr, ctx->prot, 1114 ret); 1115 } else { 1116 LOG_SWTLB("%s: access refused " TARGET_FMT_lx " => " TARGET_FMT_plx 1117 " %d %d\n", __func__, address, raddr, ctx->prot, ret); 1118 } 1119 1120 return ret; 1121 } 1122 1123 static const char *book3e_tsize_to_str[32] = { 1124 "1K", "2K", "4K", "8K", "16K", "32K", "64K", "128K", "256K", "512K", 1125 "1M", "2M", "4M", "8M", "16M", "32M", "64M", "128M", "256M", "512M", 1126 "1G", "2G", "4G", "8G", "16G", "32G", "64G", "128G", "256G", "512G", 1127 "1T", "2T" 1128 }; 1129 1130 static void mmubooke_dump_mmu(CPUPPCState *env) 1131 { 1132 ppcemb_tlb_t *entry; 1133 int i; 1134 1135 if (kvm_enabled() && !env->kvm_sw_tlb) { 1136 qemu_printf("Cannot access KVM TLB\n"); 1137 return; 1138 } 1139 1140 qemu_printf("\nTLB:\n"); 1141 qemu_printf("Effective Physical Size PID Prot " 1142 "Attr\n"); 1143 1144 entry = &env->tlb.tlbe[0]; 1145 for (i = 0; i < env->nb_tlb; i++, entry++) { 1146 hwaddr ea, pa; 1147 target_ulong mask; 1148 uint64_t size = (uint64_t)entry->size; 1149 char size_buf[20]; 1150 1151 /* Check valid flag */ 1152 if (!(entry->prot & PAGE_VALID)) { 1153 continue; 1154 } 1155 1156 mask = ~(entry->size - 1); 1157 ea = entry->EPN & mask; 1158 pa = entry->RPN & mask; 1159 /* Extend the physical address to 36 bits */ 1160 pa |= (hwaddr)(entry->RPN & 0xF) << 32; 1161 if (size >= 1 * MiB) { 1162 snprintf(size_buf, sizeof(size_buf), "%3" PRId64 "M", size / MiB); 1163 } else { 1164 snprintf(size_buf, sizeof(size_buf), "%3" PRId64 "k", size / KiB); 1165 } 1166 qemu_printf("0x%016" PRIx64 " 0x%016" PRIx64 " %s %-5u %08x %08x\n", 1167 (uint64_t)ea, (uint64_t)pa, size_buf, (uint32_t)entry->PID, 1168 entry->prot, entry->attr); 1169 } 1170 1171 } 1172 1173 static void mmubooke206_dump_one_tlb(CPUPPCState *env, int tlbn, int offset, 1174 int tlbsize) 1175 { 1176 ppcmas_tlb_t *entry; 1177 int i; 1178 1179 qemu_printf("\nTLB%d:\n", tlbn); 1180 qemu_printf("Effective Physical Size TID TS SRWX" 1181 " URWX WIMGE U0123\n"); 1182 1183 entry = &env->tlb.tlbm[offset]; 1184 for (i = 0; i < tlbsize; i++, entry++) { 1185 hwaddr ea, pa, size; 1186 int tsize; 1187 1188 if (!(entry->mas1 & MAS1_VALID)) { 1189 continue; 1190 } 1191 1192 tsize = (entry->mas1 & MAS1_TSIZE_MASK) >> MAS1_TSIZE_SHIFT; 1193 size = 1024ULL << tsize; 1194 ea = entry->mas2 & ~(size - 1); 1195 pa = entry->mas7_3 & ~(size - 1); 1196 1197 qemu_printf("0x%016" PRIx64 " 0x%016" PRIx64 " %4s %-5u %1u S%c%c%c" 1198 "U%c%c%c %c%c%c%c%c U%c%c%c%c\n", 1199 (uint64_t)ea, (uint64_t)pa, 1200 book3e_tsize_to_str[tsize], 1201 (entry->mas1 & MAS1_TID_MASK) >> MAS1_TID_SHIFT, 1202 (entry->mas1 & MAS1_TS) >> MAS1_TS_SHIFT, 1203 entry->mas7_3 & MAS3_SR ? 'R' : '-', 1204 entry->mas7_3 & MAS3_SW ? 'W' : '-', 1205 entry->mas7_3 & MAS3_SX ? 'X' : '-', 1206 entry->mas7_3 & MAS3_UR ? 'R' : '-', 1207 entry->mas7_3 & MAS3_UW ? 'W' : '-', 1208 entry->mas7_3 & MAS3_UX ? 'X' : '-', 1209 entry->mas2 & MAS2_W ? 'W' : '-', 1210 entry->mas2 & MAS2_I ? 'I' : '-', 1211 entry->mas2 & MAS2_M ? 'M' : '-', 1212 entry->mas2 & MAS2_G ? 'G' : '-', 1213 entry->mas2 & MAS2_E ? 'E' : '-', 1214 entry->mas7_3 & MAS3_U0 ? '0' : '-', 1215 entry->mas7_3 & MAS3_U1 ? '1' : '-', 1216 entry->mas7_3 & MAS3_U2 ? '2' : '-', 1217 entry->mas7_3 & MAS3_U3 ? 
'3' : '-');
    }
}

static void mmubooke206_dump_mmu(CPUPPCState *env)
{
    int offset = 0;
    int i;

    if (kvm_enabled() && !env->kvm_sw_tlb) {
        qemu_printf("Cannot access KVM TLB\n");
        return;
    }

    for (i = 0; i < BOOKE206_MAX_TLBN; i++) {
        int size = booke206_tlb_size(env, i);

        if (size == 0) {
            continue;
        }

        mmubooke206_dump_one_tlb(env, i, offset, size);
        offset += size;
    }
}

static void mmu6xx_dump_BATs(CPUPPCState *env, int type)
{
    target_ulong *BATlt, *BATut, *BATu, *BATl;
    target_ulong BEPIl, BEPIu, bl;
    int i;

    switch (type) {
    case ACCESS_CODE:
        BATlt = env->IBAT[1];
        BATut = env->IBAT[0];
        break;
    default:
        BATlt = env->DBAT[1];
        BATut = env->DBAT[0];
        break;
    }

    for (i = 0; i < env->nb_BATs; i++) {
        BATu = &BATut[i];
        BATl = &BATlt[i];
        BEPIu = *BATu & 0xF0000000;
        BEPIl = *BATu & 0x0FFE0000;
        bl = (*BATu & 0x00001FFC) << 15;
        qemu_printf("%s BAT%d BATu " TARGET_FMT_lx
                    " BATl " TARGET_FMT_lx "\n\t" TARGET_FMT_lx " "
                    TARGET_FMT_lx " " TARGET_FMT_lx "\n",
                    type == ACCESS_CODE ? "code" : "data", i,
                    *BATu, *BATl, BEPIu, BEPIl, bl);
    }
}

static void mmu6xx_dump_mmu(CPUPPCState *env)
{
    PowerPCCPU *cpu = env_archcpu(env);
    ppc6xx_tlb_t *tlb;
    target_ulong sr;
    int type, way, entry, i;

    qemu_printf("HTAB base = 0x%"HWADDR_PRIx"\n", ppc_hash32_hpt_base(cpu));
    qemu_printf("HTAB mask = 0x%"HWADDR_PRIx"\n", ppc_hash32_hpt_mask(cpu));

    qemu_printf("\nSegment registers:\n");
    for (i = 0; i < 32; i++) {
        sr = env->sr[i];
        if (sr & 0x80000000) {
            qemu_printf("%02d T=%d Ks=%d Kp=%d BUID=0x%03x "
                        "CNTLR_SPEC=0x%05x\n", i,
                        sr & 0x80000000 ? 1 : 0, sr & 0x40000000 ? 1 : 0,
                        sr & 0x20000000 ? 1 : 0, (uint32_t)((sr >> 20) & 0x1FF),
                        (uint32_t)(sr & 0xFFFFF));
        } else {
            qemu_printf("%02d T=%d Ks=%d Kp=%d N=%d VSID=0x%06x\n", i,
                        sr & 0x80000000 ? 1 : 0, sr & 0x40000000 ? 1 : 0,
                        sr & 0x20000000 ? 1 : 0, sr & 0x10000000 ? 1 : 0,
                        (uint32_t)(sr & 0x00FFFFFF));
        }
    }

    qemu_printf("\nBATs:\n");
    mmu6xx_dump_BATs(env, ACCESS_INT);
    mmu6xx_dump_BATs(env, ACCESS_CODE);

    if (env->id_tlbs != 1) {
        qemu_printf("ERROR: 6xx MMU should have separate TLBs"
                    " for code and data\n");
    }

    qemu_printf("\nTLBs [EPN EPN + SIZE]\n");

    for (type = 0; type < 2; type++) {
        for (way = 0; way < env->nb_ways; way++) {
            for (entry = env->nb_tlb * type + env->tlb_per_way * way;
                 entry < (env->nb_tlb * type + env->tlb_per_way * (way + 1));
                 entry++) {

                tlb = &env->tlb.tlb6[entry];
                qemu_printf("%s TLB %02d/%02d way:%d %s ["
                            TARGET_FMT_lx " " TARGET_FMT_lx "]\n",
                            type ? "code" : "data", entry % env->nb_tlb,
                            env->nb_tlb, way,
                            pte_is_valid(tlb->pte0) ? "valid" : "inval",
                            tlb->EPN, tlb->EPN + TARGET_PAGE_SIZE);
            }
        }
    }
}

void dump_mmu(CPUPPCState *env)
{
    switch (env->mmu_model) {
    case POWERPC_MMU_BOOKE:
        mmubooke_dump_mmu(env);
        break;
    case POWERPC_MMU_BOOKE206:
        mmubooke206_dump_mmu(env);
        break;
    case POWERPC_MMU_SOFT_6xx:
    case POWERPC_MMU_SOFT_74xx:
        mmu6xx_dump_mmu(env);
        break;
#if defined(TARGET_PPC64)
    case POWERPC_MMU_64B:
    case POWERPC_MMU_2_03:
    case POWERPC_MMU_2_06:
    case POWERPC_MMU_2_07:
        dump_slb(env_archcpu(env));
        break;
    case POWERPC_MMU_3_00:
        if (ppc64_v3_radix(env_archcpu(env))) {
            /* TODO - Unsupported */
        } else {
            dump_slb(env_archcpu(env));
            break;
        }
#endif
    default:
        qemu_log_mask(LOG_UNIMP, "%s: unimplemented\n", __func__);
    }
}

static inline int check_physical(CPUPPCState *env, mmu_ctx_t *ctx,
                                 target_ulong eaddr, int rw)
{
    int in_plb, ret;

    ctx->raddr = eaddr;
    ctx->prot = PAGE_READ | PAGE_EXEC;
    ret = 0;
    switch (env->mmu_model) {
    case POWERPC_MMU_SOFT_6xx:
    case POWERPC_MMU_SOFT_74xx:
    case POWERPC_MMU_SOFT_4xx:
    case POWERPC_MMU_REAL:
    case POWERPC_MMU_BOOKE:
        ctx->prot |= PAGE_WRITE;
        break;

    case POWERPC_MMU_SOFT_4xx_Z:
        if (unlikely(msr_pe != 0)) {
            /*
             * The 403 family adds some particular protections, using
             * PBL/PBU registers for accesses with no translation.
             */
            in_plb =
                /* Check PLB validity */
                (env->pb[0] < env->pb[1] &&
                 /* and address in plb area */
                 eaddr >= env->pb[0] && eaddr < env->pb[1]) ||
                (env->pb[2] < env->pb[3] &&
                 eaddr >= env->pb[2] && eaddr < env->pb[3]) ? 1 : 0;
            if (in_plb ^ msr_px) {
                /* Access in protected area */
                if (rw == 1) {
                    /* Access is not allowed */
                    ret = -2;
                }
            } else {
                /* Read-write access is allowed */
                ctx->prot |= PAGE_WRITE;
            }
        }
        break;

    default:
        /* Caller's checks mean we should never get here for other models */
        abort();
        return -1;
    }

    return ret;
}

static int get_physical_address_wtlb(
    CPUPPCState *env, mmu_ctx_t *ctx,
    target_ulong eaddr, int rw, int access_type,
    int mmu_idx)
{
    int ret = -1;
    bool real_mode = (access_type == ACCESS_CODE && msr_ir == 0)
        || (access_type != ACCESS_CODE && msr_dr == 0);

    switch (env->mmu_model) {
    case POWERPC_MMU_SOFT_6xx:
    case POWERPC_MMU_SOFT_74xx:
        if (real_mode) {
            ret = check_physical(env, ctx, eaddr, rw);
        } else {
            /* Try to find a BAT */
            if (env->nb_BATs != 0) {
                ret = get_bat_6xx_tlb(env, ctx, eaddr, rw, access_type);
            }
            if (ret < 0) {
                /* We didn't match any BAT entry or don't have BATs */
                ret = get_segment_6xx_tlb(env, ctx, eaddr, rw, access_type);
            }
        }
        break;

    case POWERPC_MMU_SOFT_4xx:
    case POWERPC_MMU_SOFT_4xx_Z:
        if (real_mode) {
            ret = check_physical(env, ctx, eaddr, rw);
        } else {
            ret = mmu40x_get_physical_address(env, ctx, eaddr,
                                              rw, access_type);
        }
        break;
    case POWERPC_MMU_BOOKE:
        ret = mmubooke_get_physical_address(env, ctx, eaddr,
                                            rw, access_type);
        break;
    case POWERPC_MMU_BOOKE206:
        ret = mmubooke206_get_physical_address(env, ctx, eaddr, rw,
                                               access_type, mmu_idx);
        break;
    case POWERPC_MMU_MPC8xx:
        /* XXX: TODO */
        cpu_abort(env_cpu(env), "MPC8xx MMU model is not implemented\n");
        break;
    case POWERPC_MMU_REAL:
        if (real_mode) {
            ret = check_physical(env, ctx, eaddr, rw);
        } else {
            cpu_abort(env_cpu(env),
                      "PowerPC in real mode does not do any translation\n");
        }
        return -1;
    default:
        cpu_abort(env_cpu(env), "Unknown or invalid MMU model\n");
        return -1;
    }

    return ret;
}

static int get_physical_address(
    CPUPPCState *env, mmu_ctx_t *ctx,
    target_ulong eaddr, int rw, int access_type)
{
    return get_physical_address_wtlb(env, ctx, eaddr, rw, access_type, 0);
}

hwaddr ppc_cpu_get_phys_page_debug(CPUState *cs, vaddr addr)
{
    PowerPCCPU *cpu = POWERPC_CPU(cs);
    CPUPPCState *env = &cpu->env;
    mmu_ctx_t ctx;

    switch (env->mmu_model) {
#if defined(TARGET_PPC64)
    case POWERPC_MMU_64B:
    case POWERPC_MMU_2_03:
    case POWERPC_MMU_2_06:
    case POWERPC_MMU_2_07:
        return ppc_hash64_get_phys_page_debug(cpu, addr);
    case POWERPC_MMU_3_00:
        return ppc64_v3_get_phys_page_debug(cpu, addr);
#endif

    case POWERPC_MMU_32B:
    case POWERPC_MMU_601:
        return ppc_hash32_get_phys_page_debug(cpu, addr);

    default:
        ;
    }

    if (unlikely(get_physical_address(env, &ctx, addr, 0, ACCESS_INT) != 0)) {

        /*
         * Some MMUs have separate TLBs for code and data. If we only
         * try an ACCESS_INT, we may not be able to read instructions
         * mapped by code TLBs, so we also try an ACCESS_CODE.
1516 */ 1517 if (unlikely(get_physical_address(env, &ctx, addr, 0, 1518 ACCESS_CODE) != 0)) { 1519 return -1; 1520 } 1521 } 1522 1523 return ctx.raddr & TARGET_PAGE_MASK; 1524 } 1525 1526 static void booke206_update_mas_tlb_miss(CPUPPCState *env, target_ulong address, 1527 int rw, int mmu_idx) 1528 { 1529 uint32_t epid; 1530 bool as, pr; 1531 uint32_t missed_tid = 0; 1532 bool use_epid = mmubooke206_get_as(env, mmu_idx, &epid, &as, &pr); 1533 if (rw == 2) { 1534 as = msr_ir; 1535 } 1536 env->spr[SPR_BOOKE_MAS0] = env->spr[SPR_BOOKE_MAS4] & MAS4_TLBSELD_MASK; 1537 env->spr[SPR_BOOKE_MAS1] = env->spr[SPR_BOOKE_MAS4] & MAS4_TSIZED_MASK; 1538 env->spr[SPR_BOOKE_MAS2] = env->spr[SPR_BOOKE_MAS4] & MAS4_WIMGED_MASK; 1539 env->spr[SPR_BOOKE_MAS3] = 0; 1540 env->spr[SPR_BOOKE_MAS6] = 0; 1541 env->spr[SPR_BOOKE_MAS7] = 0; 1542 1543 /* AS */ 1544 if (as) { 1545 env->spr[SPR_BOOKE_MAS1] |= MAS1_TS; 1546 env->spr[SPR_BOOKE_MAS6] |= MAS6_SAS; 1547 } 1548 1549 env->spr[SPR_BOOKE_MAS1] |= MAS1_VALID; 1550 env->spr[SPR_BOOKE_MAS2] |= address & MAS2_EPN_MASK; 1551 1552 if (!use_epid) { 1553 switch (env->spr[SPR_BOOKE_MAS4] & MAS4_TIDSELD_PIDZ) { 1554 case MAS4_TIDSELD_PID0: 1555 missed_tid = env->spr[SPR_BOOKE_PID]; 1556 break; 1557 case MAS4_TIDSELD_PID1: 1558 missed_tid = env->spr[SPR_BOOKE_PID1]; 1559 break; 1560 case MAS4_TIDSELD_PID2: 1561 missed_tid = env->spr[SPR_BOOKE_PID2]; 1562 break; 1563 } 1564 env->spr[SPR_BOOKE_MAS6] |= env->spr[SPR_BOOKE_PID] << 16; 1565 } else { 1566 missed_tid = epid; 1567 env->spr[SPR_BOOKE_MAS6] |= missed_tid << 16; 1568 } 1569 env->spr[SPR_BOOKE_MAS1] |= (missed_tid << MAS1_TID_SHIFT); 1570 1571 1572 /* next victim logic */ 1573 env->spr[SPR_BOOKE_MAS0] |= env->last_way << MAS0_ESEL_SHIFT; 1574 env->last_way++; 1575 env->last_way &= booke206_tlb_ways(env, 0) - 1; 1576 env->spr[SPR_BOOKE_MAS0] |= env->last_way << MAS0_NV_SHIFT; 1577 } 1578 1579 /* Perform address translation */ 1580 static int cpu_ppc_handle_mmu_fault(CPUPPCState *env, target_ulong address, 1581 int rw, int mmu_idx) 1582 { 1583 CPUState *cs = env_cpu(env); 1584 PowerPCCPU *cpu = POWERPC_CPU(cs); 1585 mmu_ctx_t ctx; 1586 int access_type; 1587 int ret = 0; 1588 1589 if (rw == 2) { 1590 /* code access */ 1591 rw = 0; 1592 access_type = ACCESS_CODE; 1593 } else { 1594 /* data access */ 1595 access_type = env->access_type; 1596 } 1597 ret = get_physical_address_wtlb(env, &ctx, address, rw, 1598 access_type, mmu_idx); 1599 if (ret == 0) { 1600 tlb_set_page(cs, address & TARGET_PAGE_MASK, 1601 ctx.raddr & TARGET_PAGE_MASK, ctx.prot, 1602 mmu_idx, TARGET_PAGE_SIZE); 1603 ret = 0; 1604 } else if (ret < 0) { 1605 LOG_MMU_STATE(cs); 1606 if (access_type == ACCESS_CODE) { 1607 switch (ret) { 1608 case -1: 1609 /* No matches in page tables or TLB */ 1610 switch (env->mmu_model) { 1611 case POWERPC_MMU_SOFT_6xx: 1612 cs->exception_index = POWERPC_EXCP_IFTLB; 1613 env->error_code = 1 << 18; 1614 env->spr[SPR_IMISS] = address; 1615 env->spr[SPR_ICMP] = 0x80000000 | ctx.ptem; 1616 goto tlb_miss; 1617 case POWERPC_MMU_SOFT_74xx: 1618 cs->exception_index = POWERPC_EXCP_IFTLB; 1619 goto tlb_miss_74xx; 1620 case POWERPC_MMU_SOFT_4xx: 1621 case POWERPC_MMU_SOFT_4xx_Z: 1622 cs->exception_index = POWERPC_EXCP_ITLB; 1623 env->error_code = 0; 1624 env->spr[SPR_40x_DEAR] = address; 1625 env->spr[SPR_40x_ESR] = 0x00000000; 1626 break; 1627 case POWERPC_MMU_BOOKE206: 1628 booke206_update_mas_tlb_miss(env, address, 2, mmu_idx); 1629 /* fall through */ 1630 case POWERPC_MMU_BOOKE: 1631 cs->exception_index = POWERPC_EXCP_ITLB; 1632 
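                    /*
                     * Let the guest's TLB miss handler deal with it: the
                     * faulting address and an ESR value are recorded below
                     * and -1 is returned without refilling anything here.
                     */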
env->error_code = 0; 1633 env->spr[SPR_BOOKE_DEAR] = address; 1634 env->spr[SPR_BOOKE_ESR] = mmubooke206_esr(mmu_idx, 0); 1635 return -1; 1636 case POWERPC_MMU_MPC8xx: 1637 /* XXX: TODO */ 1638 cpu_abort(cs, "MPC8xx MMU model is not implemented\n"); 1639 break; 1640 case POWERPC_MMU_REAL: 1641 cpu_abort(cs, "PowerPC in real mode should never raise " 1642 "any MMU exceptions\n"); 1643 return -1; 1644 default: 1645 cpu_abort(cs, "Unknown or invalid MMU model\n"); 1646 return -1; 1647 } 1648 break; 1649 case -2: 1650 /* Access rights violation */ 1651 cs->exception_index = POWERPC_EXCP_ISI; 1652 env->error_code = 0x08000000; 1653 break; 1654 case -3: 1655 /* No execute protection violation */ 1656 if ((env->mmu_model == POWERPC_MMU_BOOKE) || 1657 (env->mmu_model == POWERPC_MMU_BOOKE206)) { 1658 env->spr[SPR_BOOKE_ESR] = 0x00000000; 1659 } 1660 cs->exception_index = POWERPC_EXCP_ISI; 1661 env->error_code = 0x10000000; 1662 break; 1663 case -4: 1664 /* Direct store exception */ 1665 /* No code fetch is allowed in direct-store areas */ 1666 cs->exception_index = POWERPC_EXCP_ISI; 1667 env->error_code = 0x10000000; 1668 break; 1669 } 1670 } else { 1671 switch (ret) { 1672 case -1: 1673 /* No matches in page tables or TLB */ 1674 switch (env->mmu_model) { 1675 case POWERPC_MMU_SOFT_6xx: 1676 if (rw == 1) { 1677 cs->exception_index = POWERPC_EXCP_DSTLB; 1678 env->error_code = 1 << 16; 1679 } else { 1680 cs->exception_index = POWERPC_EXCP_DLTLB; 1681 env->error_code = 0; 1682 } 1683 env->spr[SPR_DMISS] = address; 1684 env->spr[SPR_DCMP] = 0x80000000 | ctx.ptem; 1685 tlb_miss: 1686 env->error_code |= ctx.key << 19; 1687 env->spr[SPR_HASH1] = ppc_hash32_hpt_base(cpu) + 1688 get_pteg_offset32(cpu, ctx.hash[0]); 1689 env->spr[SPR_HASH2] = ppc_hash32_hpt_base(cpu) + 1690 get_pteg_offset32(cpu, ctx.hash[1]); 1691 break; 1692 case POWERPC_MMU_SOFT_74xx: 1693 if (rw == 1) { 1694 cs->exception_index = POWERPC_EXCP_DSTLB; 1695 } else { 1696 cs->exception_index = POWERPC_EXCP_DLTLB; 1697 } 1698 tlb_miss_74xx: 1699 /* Implement LRU algorithm */ 1700 env->error_code = ctx.key << 19; 1701 env->spr[SPR_TLBMISS] = (address & ~((target_ulong)0x3)) | 1702 ((env->last_way + 1) & (env->nb_ways - 1)); 1703 env->spr[SPR_PTEHI] = 0x80000000 | ctx.ptem; 1704 break; 1705 case POWERPC_MMU_SOFT_4xx: 1706 case POWERPC_MMU_SOFT_4xx_Z: 1707 cs->exception_index = POWERPC_EXCP_DTLB; 1708 env->error_code = 0; 1709 env->spr[SPR_40x_DEAR] = address; 1710 if (rw) { 1711 env->spr[SPR_40x_ESR] = 0x00800000; 1712 } else { 1713 env->spr[SPR_40x_ESR] = 0x00000000; 1714 } 1715 break; 1716 case POWERPC_MMU_MPC8xx: 1717 /* XXX: TODO */ 1718 cpu_abort(cs, "MPC8xx MMU model is not implemented\n"); 1719 break; 1720 case POWERPC_MMU_BOOKE206: 1721 booke206_update_mas_tlb_miss(env, address, rw, mmu_idx); 1722 /* fall through */ 1723 case POWERPC_MMU_BOOKE: 1724 cs->exception_index = POWERPC_EXCP_DTLB; 1725 env->error_code = 0; 1726 env->spr[SPR_BOOKE_DEAR] = address; 1727 env->spr[SPR_BOOKE_ESR] = mmubooke206_esr(mmu_idx, rw); 1728 return -1; 1729 case POWERPC_MMU_REAL: 1730 cpu_abort(cs, "PowerPC in real mode should never raise " 1731 "any MMU exceptions\n"); 1732 return -1; 1733 default: 1734 cpu_abort(cs, "Unknown or invalid MMU model\n"); 1735 return -1; 1736 } 1737 break; 1738 case -2: 1739 /* Access rights violation */ 1740 cs->exception_index = POWERPC_EXCP_DSI; 1741 env->error_code = 0; 1742 if (env->mmu_model == POWERPC_MMU_SOFT_4xx 1743 || env->mmu_model == POWERPC_MMU_SOFT_4xx_Z) { 1744 env->spr[SPR_40x_DEAR] = address; 1745 if (rw) { 
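                        /* Flag store accesses in the 40x ESR */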
1746 env->spr[SPR_40x_ESR] |= 0x00800000; 1747 } 1748 } else if ((env->mmu_model == POWERPC_MMU_BOOKE) || 1749 (env->mmu_model == POWERPC_MMU_BOOKE206)) { 1750 env->spr[SPR_BOOKE_DEAR] = address; 1751 env->spr[SPR_BOOKE_ESR] = mmubooke206_esr(mmu_idx, rw); 1752 } else { 1753 env->spr[SPR_DAR] = address; 1754 if (rw == 1) { 1755 env->spr[SPR_DSISR] = 0x0A000000; 1756 } else { 1757 env->spr[SPR_DSISR] = 0x08000000; 1758 } 1759 } 1760 break; 1761 case -4: 1762 /* Direct store exception */ 1763 switch (access_type) { 1764 case ACCESS_FLOAT: 1765 /* Floating point load/store */ 1766 cs->exception_index = POWERPC_EXCP_ALIGN; 1767 env->error_code = POWERPC_EXCP_ALIGN_FP; 1768 env->spr[SPR_DAR] = address; 1769 break; 1770 case ACCESS_RES: 1771 /* lwarx, ldarx or stwcx. */ 1772 cs->exception_index = POWERPC_EXCP_DSI; 1773 env->error_code = 0; 1774 env->spr[SPR_DAR] = address; 1775 if (rw == 1) { 1776 env->spr[SPR_DSISR] = 0x06000000; 1777 } else { 1778 env->spr[SPR_DSISR] = 0x04000000; 1779 } 1780 break; 1781 case ACCESS_EXT: 1782 /* eciwx or ecowx */ 1783 cs->exception_index = POWERPC_EXCP_DSI; 1784 env->error_code = 0; 1785 env->spr[SPR_DAR] = address; 1786 if (rw == 1) { 1787 env->spr[SPR_DSISR] = 0x06100000; 1788 } else { 1789 env->spr[SPR_DSISR] = 0x04100000; 1790 } 1791 break; 1792 default: 1793 printf("DSI: invalid exception (%d)\n", ret); 1794 cs->exception_index = POWERPC_EXCP_PROGRAM; 1795 env->error_code = 1796 POWERPC_EXCP_INVAL | POWERPC_EXCP_INVAL_INVAL; 1797 env->spr[SPR_DAR] = address; 1798 break; 1799 } 1800 break; 1801 } 1802 } 1803 ret = 1; 1804 } 1805 1806 return ret; 1807 } 1808 1809 /*****************************************************************************/ 1810 /* BATs management */ 1811 #if !defined(FLUSH_ALL_TLBS) 1812 static inline void do_invalidate_BAT(CPUPPCState *env, target_ulong BATu, 1813 target_ulong mask) 1814 { 1815 CPUState *cs = env_cpu(env); 1816 target_ulong base, end, page; 1817 1818 base = BATu & ~0x0001FFFF; 1819 end = base + mask + 0x00020000; 1820 if (((end - base) >> TARGET_PAGE_BITS) > 1024) { 1821 /* Flushing 1024 4K pages is slower than a complete flush */ 1822 LOG_BATS("Flush all BATs\n"); 1823 tlb_flush(cs); 1824 LOG_BATS("Flush done\n"); 1825 return; 1826 } 1827 LOG_BATS("Flush BAT from " TARGET_FMT_lx " to " TARGET_FMT_lx " (" 1828 TARGET_FMT_lx ")\n", base, end, mask); 1829 for (page = base; page != end; page += TARGET_PAGE_SIZE) { 1830 tlb_flush_page(cs, page); 1831 } 1832 LOG_BATS("Flush done\n"); 1833 } 1834 #endif 1835 1836 static inline void dump_store_bat(CPUPPCState *env, char ID, int ul, int nr, 1837 target_ulong value) 1838 { 1839 LOG_BATS("Set %cBAT%d%c to " TARGET_FMT_lx " (" TARGET_FMT_lx ")\n", ID, 1840 nr, ul == 0 ? 
'u' : 'l', value, env->nip); 1841 } 1842 1843 void helper_store_ibatu(CPUPPCState *env, uint32_t nr, target_ulong value) 1844 { 1845 target_ulong mask; 1846 #if defined(FLUSH_ALL_TLBS) 1847 PowerPCCPU *cpu = env_archcpu(env); 1848 #endif 1849 1850 dump_store_bat(env, 'I', 0, nr, value); 1851 if (env->IBAT[0][nr] != value) { 1852 mask = (value << 15) & 0x0FFE0000UL; 1853 #if !defined(FLUSH_ALL_TLBS) 1854 do_invalidate_BAT(env, env->IBAT[0][nr], mask); 1855 #endif 1856 /* 1857 * When storing valid upper BAT, mask BEPI and BRPN and 1858 * invalidate all TLBs covered by this BAT 1859 */ 1860 mask = (value << 15) & 0x0FFE0000UL; 1861 env->IBAT[0][nr] = (value & 0x00001FFFUL) | 1862 (value & ~0x0001FFFFUL & ~mask); 1863 env->IBAT[1][nr] = (env->IBAT[1][nr] & 0x0000007B) | 1864 (env->IBAT[1][nr] & ~0x0001FFFF & ~mask); 1865 #if !defined(FLUSH_ALL_TLBS) 1866 do_invalidate_BAT(env, env->IBAT[0][nr], mask); 1867 #else 1868 tlb_flush(env_cpu(env)); 1869 #endif 1870 } 1871 } 1872 1873 void helper_store_ibatl(CPUPPCState *env, uint32_t nr, target_ulong value) 1874 { 1875 dump_store_bat(env, 'I', 1, nr, value); 1876 env->IBAT[1][nr] = value; 1877 } 1878 1879 void helper_store_dbatu(CPUPPCState *env, uint32_t nr, target_ulong value) 1880 { 1881 target_ulong mask; 1882 #if defined(FLUSH_ALL_TLBS) 1883 PowerPCCPU *cpu = env_archcpu(env); 1884 #endif 1885 1886 dump_store_bat(env, 'D', 0, nr, value); 1887 if (env->DBAT[0][nr] != value) { 1888 /* 1889 * When storing valid upper BAT, mask BEPI and BRPN and 1890 * invalidate all TLBs covered by this BAT 1891 */ 1892 mask = (value << 15) & 0x0FFE0000UL; 1893 #if !defined(FLUSH_ALL_TLBS) 1894 do_invalidate_BAT(env, env->DBAT[0][nr], mask); 1895 #endif 1896 mask = (value << 15) & 0x0FFE0000UL; 1897 env->DBAT[0][nr] = (value & 0x00001FFFUL) | 1898 (value & ~0x0001FFFFUL & ~mask); 1899 env->DBAT[1][nr] = (env->DBAT[1][nr] & 0x0000007B) | 1900 (env->DBAT[1][nr] & ~0x0001FFFF & ~mask); 1901 #if !defined(FLUSH_ALL_TLBS) 1902 do_invalidate_BAT(env, env->DBAT[0][nr], mask); 1903 #else 1904 tlb_flush(env_cpu(env)); 1905 #endif 1906 } 1907 } 1908 1909 void helper_store_dbatl(CPUPPCState *env, uint32_t nr, target_ulong value) 1910 { 1911 dump_store_bat(env, 'D', 1, nr, value); 1912 env->DBAT[1][nr] = value; 1913 } 1914 1915 void helper_store_601_batu(CPUPPCState *env, uint32_t nr, target_ulong value) 1916 { 1917 target_ulong mask; 1918 #if defined(FLUSH_ALL_TLBS) 1919 PowerPCCPU *cpu = env_archcpu(env); 1920 int do_inval; 1921 #endif 1922 1923 dump_store_bat(env, 'I', 0, nr, value); 1924 if (env->IBAT[0][nr] != value) { 1925 #if defined(FLUSH_ALL_TLBS) 1926 do_inval = 0; 1927 #endif 1928 mask = (env->IBAT[1][nr] << 17) & 0x0FFE0000UL; 1929 if (env->IBAT[1][nr] & 0x40) { 1930 /* Invalidate BAT only if it is valid */ 1931 #if !defined(FLUSH_ALL_TLBS) 1932 do_invalidate_BAT(env, env->IBAT[0][nr], mask); 1933 #else 1934 do_inval = 1; 1935 #endif 1936 } 1937 /* 1938 * When storing valid upper BAT, mask BEPI and BRPN and 1939 * invalidate all TLBs covered by this BAT 1940 */ 1941 env->IBAT[0][nr] = (value & 0x00001FFFUL) | 1942 (value & ~0x0001FFFFUL & ~mask); 1943 env->DBAT[0][nr] = env->IBAT[0][nr]; 1944 if (env->IBAT[1][nr] & 0x40) { 1945 #if !defined(FLUSH_ALL_TLBS) 1946 do_invalidate_BAT(env, env->IBAT[0][nr], mask); 1947 #else 1948 do_inval = 1; 1949 #endif 1950 } 1951 #if defined(FLUSH_ALL_TLBS) 1952 if (do_inval) { 1953 tlb_flush(env_cpu(env)); 1954 } 1955 #endif 1956 } 1957 } 1958 1959 void helper_store_601_batl(CPUPPCState *env, uint32_t nr, target_ulong value) 1960 { 
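    /*
     * Note: on the 601 the BATs are unified, so the low word written here
     * is mirrored into both IBAT[1] and DBAT[1], and mappings covered by
     * the old and/or new value are invalidated when their valid bit (0x40)
     * is set.
     */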
1961 #if !defined(FLUSH_ALL_TLBS) 1962 target_ulong mask; 1963 #else 1964 PowerPCCPU *cpu = env_archcpu(env); 1965 int do_inval; 1966 #endif 1967 1968 dump_store_bat(env, 'I', 1, nr, value); 1969 if (env->IBAT[1][nr] != value) { 1970 #if defined(FLUSH_ALL_TLBS) 1971 do_inval = 0; 1972 #endif 1973 if (env->IBAT[1][nr] & 0x40) { 1974 #if !defined(FLUSH_ALL_TLBS) 1975 mask = (env->IBAT[1][nr] << 17) & 0x0FFE0000UL; 1976 do_invalidate_BAT(env, env->IBAT[0][nr], mask); 1977 #else 1978 do_inval = 1; 1979 #endif 1980 } 1981 if (value & 0x40) { 1982 #if !defined(FLUSH_ALL_TLBS) 1983 mask = (value << 17) & 0x0FFE0000UL; 1984 do_invalidate_BAT(env, env->IBAT[0][nr], mask); 1985 #else 1986 do_inval = 1; 1987 #endif 1988 } 1989 env->IBAT[1][nr] = value; 1990 env->DBAT[1][nr] = value; 1991 #if defined(FLUSH_ALL_TLBS) 1992 if (do_inval) { 1993 tlb_flush(env_cpu(env)); 1994 } 1995 #endif 1996 } 1997 } 1998 1999 /*****************************************************************************/ 2000 /* TLB management */ 2001 void ppc_tlb_invalidate_all(CPUPPCState *env) 2002 { 2003 #if defined(TARGET_PPC64) 2004 if (env->mmu_model & POWERPC_MMU_64) { 2005 env->tlb_need_flush = 0; 2006 tlb_flush(env_cpu(env)); 2007 } else 2008 #endif /* defined(TARGET_PPC64) */ 2009 switch (env->mmu_model) { 2010 case POWERPC_MMU_SOFT_6xx: 2011 case POWERPC_MMU_SOFT_74xx: 2012 ppc6xx_tlb_invalidate_all(env); 2013 break; 2014 case POWERPC_MMU_SOFT_4xx: 2015 case POWERPC_MMU_SOFT_4xx_Z: 2016 ppc4xx_tlb_invalidate_all(env); 2017 break; 2018 case POWERPC_MMU_REAL: 2019 cpu_abort(env_cpu(env), "No TLB for PowerPC 4xx in real mode\n"); 2020 break; 2021 case POWERPC_MMU_MPC8xx: 2022 /* XXX: TODO */ 2023 cpu_abort(env_cpu(env), "MPC8xx MMU model is not implemented\n"); 2024 break; 2025 case POWERPC_MMU_BOOKE: 2026 tlb_flush(env_cpu(env)); 2027 break; 2028 case POWERPC_MMU_BOOKE206: 2029 booke206_flush_tlb(env, -1, 0); 2030 break; 2031 case POWERPC_MMU_32B: 2032 case POWERPC_MMU_601: 2033 env->tlb_need_flush = 0; 2034 tlb_flush(env_cpu(env)); 2035 break; 2036 default: 2037 /* XXX: TODO */ 2038 cpu_abort(env_cpu(env), "Unknown MMU model %x\n", env->mmu_model); 2039 break; 2040 } 2041 } 2042 2043 void ppc_tlb_invalidate_one(CPUPPCState *env, target_ulong addr) 2044 { 2045 #if !defined(FLUSH_ALL_TLBS) 2046 addr &= TARGET_PAGE_MASK; 2047 #if defined(TARGET_PPC64) 2048 if (env->mmu_model & POWERPC_MMU_64) { 2049 /* tlbie invalidate TLBs for all segments */ 2050 /* 2051 * XXX: given the fact that there are too many segments to invalidate, 2052 * and we still don't have a tlb_flush_mask(env, n, mask) in QEMU, 2053 * we just invalidate all TLBs 2054 */ 2055 env->tlb_need_flush |= TLB_NEED_LOCAL_FLUSH; 2056 } else 2057 #endif /* defined(TARGET_PPC64) */ 2058 switch (env->mmu_model) { 2059 case POWERPC_MMU_SOFT_6xx: 2060 case POWERPC_MMU_SOFT_74xx: 2061 ppc6xx_tlb_invalidate_virt(env, addr, 0); 2062 if (env->id_tlbs == 1) { 2063 ppc6xx_tlb_invalidate_virt(env, addr, 1); 2064 } 2065 break; 2066 case POWERPC_MMU_32B: 2067 case POWERPC_MMU_601: 2068 /* 2069 * Actual CPUs invalidate entire congruence classes based on 2070 * the geometry of their TLBs and some OSes take that into 2071 * account, we just mark the TLB to be flushed later (context 2072 * synchronizing event or sync instruction on 32-bit). 
2073 */ 2074 env->tlb_need_flush |= TLB_NEED_LOCAL_FLUSH; 2075 break; 2076 default: 2077 /* Should never reach here with other MMU models */ 2078 assert(0); 2079 } 2080 #else 2081 ppc_tlb_invalidate_all(env); 2082 #endif 2083 } 2084 2085 /*****************************************************************************/ 2086 /* Special registers manipulation */ 2087 void ppc_store_sdr1(CPUPPCState *env, target_ulong value) 2088 { 2089 PowerPCCPU *cpu = env_archcpu(env); 2090 qemu_log_mask(CPU_LOG_MMU, "%s: " TARGET_FMT_lx "\n", __func__, value); 2091 assert(!cpu->vhyp); 2092 #if defined(TARGET_PPC64) 2093 if (env->mmu_model & POWERPC_MMU_64) { 2094 target_ulong sdr_mask = SDR_64_HTABORG | SDR_64_HTABSIZE; 2095 target_ulong htabsize = value & SDR_64_HTABSIZE; 2096 2097 if (value & ~sdr_mask) { 2098 error_report("Invalid bits 0x"TARGET_FMT_lx" set in SDR1", 2099 value & ~sdr_mask); 2100 value &= sdr_mask; 2101 } 2102 if (htabsize > 28) { 2103 error_report("Invalid HTABSIZE 0x" TARGET_FMT_lx" stored in SDR1", 2104 htabsize); 2105 return; 2106 } 2107 } 2108 #endif /* defined(TARGET_PPC64) */ 2109 /* FIXME: Should check for valid HTABMASK values in 32-bit case */ 2110 env->spr[SPR_SDR1] = value; 2111 } 2112 2113 #if defined(TARGET_PPC64) 2114 void ppc_store_ptcr(CPUPPCState *env, target_ulong value) 2115 { 2116 PowerPCCPU *cpu = env_archcpu(env); 2117 target_ulong ptcr_mask = PTCR_PATB | PTCR_PATS; 2118 target_ulong patbsize = value & PTCR_PATS; 2119 2120 qemu_log_mask(CPU_LOG_MMU, "%s: " TARGET_FMT_lx "\n", __func__, value); 2121 2122 assert(!cpu->vhyp); 2123 assert(env->mmu_model & POWERPC_MMU_3_00); 2124 2125 if (value & ~ptcr_mask) { 2126 error_report("Invalid bits 0x"TARGET_FMT_lx" set in PTCR", 2127 value & ~ptcr_mask); 2128 value &= ptcr_mask; 2129 } 2130 2131 if (patbsize > 24) { 2132 error_report("Invalid Partition Table size 0x" TARGET_FMT_lx 2133 " stored in PTCR", patbsize); 2134 return; 2135 } 2136 2137 env->spr[SPR_PTCR] = value; 2138 } 2139 2140 #endif /* defined(TARGET_PPC64) */ 2141 2142 /* Segment registers load and store */ 2143 target_ulong helper_load_sr(CPUPPCState *env, target_ulong sr_num) 2144 { 2145 #if defined(TARGET_PPC64) 2146 if (env->mmu_model & POWERPC_MMU_64) { 2147 /* XXX */ 2148 return 0; 2149 } 2150 #endif 2151 return env->sr[sr_num]; 2152 } 2153 2154 void helper_store_sr(CPUPPCState *env, target_ulong srnum, target_ulong value) 2155 { 2156 qemu_log_mask(CPU_LOG_MMU, 2157 "%s: reg=%d " TARGET_FMT_lx " " TARGET_FMT_lx "\n", __func__, 2158 (int)srnum, value, env->sr[srnum]); 2159 #if defined(TARGET_PPC64) 2160 if (env->mmu_model & POWERPC_MMU_64) { 2161 PowerPCCPU *cpu = env_archcpu(env); 2162 uint64_t esid, vsid; 2163 2164 /* ESID = srnum */ 2165 esid = ((uint64_t)(srnum & 0xf) << 28) | SLB_ESID_V; 2166 2167 /* VSID = VSID */ 2168 vsid = (value & 0xfffffff) << 12; 2169 /* flags = flags */ 2170 vsid |= ((value >> 27) & 0xf) << 8; 2171 2172 ppc_store_slb(cpu, srnum, esid, vsid); 2173 } else 2174 #endif 2175 if (env->sr[srnum] != value) { 2176 env->sr[srnum] = value; 2177 /* 2178 * Invalidating 256MB of virtual memory in 4kB pages takes way 2179 * longer than flushing the whole TLB.
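 * (256 MB / 4 kB would be 65536 tlb_flush_page() calls), so just flag a
 * deferred full flush instead.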
2180 */ 2181 #if !defined(FLUSH_ALL_TLBS) && 0 2182 { 2183 target_ulong page, end; 2184 /* Invalidate 256 MB of virtual memory */ 2185 page = (16 << 20) * srnum; 2186 end = page + (16 << 20); 2187 for (; page != end; page += TARGET_PAGE_SIZE) { 2188 tlb_flush_page(env_cpu(env), page); 2189 } 2190 } 2191 #else 2192 env->tlb_need_flush |= TLB_NEED_LOCAL_FLUSH; 2193 #endif 2194 } 2195 } 2196 2197 /* TLB management */ 2198 void helper_tlbia(CPUPPCState *env) 2199 { 2200 ppc_tlb_invalidate_all(env); 2201 } 2202 2203 void helper_tlbie(CPUPPCState *env, target_ulong addr) 2204 { 2205 ppc_tlb_invalidate_one(env, addr); 2206 } 2207 2208 void helper_tlbiva(CPUPPCState *env, target_ulong addr) 2209 { 2210 /* tlbiva instruction only exists on BookE */ 2211 assert(env->mmu_model == POWERPC_MMU_BOOKE); 2212 /* XXX: TODO */ 2213 cpu_abort(env_cpu(env), "BookE MMU model is not implemented\n"); 2214 } 2215 2216 /* Software driven TLBs management */ 2217 /* PowerPC 602/603 software TLB load instructions helpers */ 2218 static void do_6xx_tlb(CPUPPCState *env, target_ulong new_EPN, int is_code) 2219 { 2220 target_ulong RPN, CMP, EPN; 2221 int way; 2222 2223 RPN = env->spr[SPR_RPA]; 2224 if (is_code) { 2225 CMP = env->spr[SPR_ICMP]; 2226 EPN = env->spr[SPR_IMISS]; 2227 } else { 2228 CMP = env->spr[SPR_DCMP]; 2229 EPN = env->spr[SPR_DMISS]; 2230 } 2231 way = (env->spr[SPR_SRR1] >> 17) & 1; 2232 (void)EPN; /* avoid a compiler warning */ 2233 LOG_SWTLB("%s: EPN " TARGET_FMT_lx " " TARGET_FMT_lx " PTE0 " TARGET_FMT_lx 2234 " PTE1 " TARGET_FMT_lx " way %d\n", __func__, new_EPN, EPN, CMP, 2235 RPN, way); 2236 /* Store this TLB */ 2237 ppc6xx_tlb_store(env, (uint32_t)(new_EPN & TARGET_PAGE_MASK), 2238 way, is_code, CMP, RPN); 2239 } 2240 2241 void helper_6xx_tlbd(CPUPPCState *env, target_ulong EPN) 2242 { 2243 do_6xx_tlb(env, EPN, 0); 2244 } 2245 2246 void helper_6xx_tlbi(CPUPPCState *env, target_ulong EPN) 2247 { 2248 do_6xx_tlb(env, EPN, 1); 2249 } 2250 2251 /* PowerPC 74xx software TLB load instructions helpers */ 2252 static void do_74xx_tlb(CPUPPCState *env, target_ulong new_EPN, int is_code) 2253 { 2254 target_ulong RPN, CMP, EPN; 2255 int way; 2256 2257 RPN = env->spr[SPR_PTELO]; 2258 CMP = env->spr[SPR_PTEHI]; 2259 EPN = env->spr[SPR_TLBMISS] & ~0x3; 2260 way = env->spr[SPR_TLBMISS] & 0x3; 2261 (void)EPN; /* avoid a compiler warning */ 2262 LOG_SWTLB("%s: EPN " TARGET_FMT_lx " " TARGET_FMT_lx " PTE0 " TARGET_FMT_lx 2263 " PTE1 " TARGET_FMT_lx " way %d\n", __func__, new_EPN, EPN, CMP, 2264 RPN, way); 2265 /* Store this TLB */ 2266 ppc6xx_tlb_store(env, (uint32_t)(new_EPN & TARGET_PAGE_MASK), 2267 way, is_code, CMP, RPN); 2268 } 2269 2270 void helper_74xx_tlbd(CPUPPCState *env, target_ulong EPN) 2271 { 2272 do_74xx_tlb(env, EPN, 0); 2273 } 2274 2275 void helper_74xx_tlbi(CPUPPCState *env, target_ulong EPN) 2276 { 2277 do_74xx_tlb(env, EPN, 1); 2278 } 2279 2280 /*****************************************************************************/ 2281 /* PowerPC 601 specific instructions (POWER bridge) */ 2282 2283 target_ulong helper_rac(CPUPPCState *env, target_ulong addr) 2284 { 2285 mmu_ctx_t ctx; 2286 int nb_BATs; 2287 target_ulong ret = 0; 2288 2289 /* 2290 * We don't have to generate many instances of this instruction, 2291 * as rac is supervisor only. 
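 * rac returns the real (physical) address that an effective address
 * translates to, computed with get_physical_address() below.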
2292 * 2293 * XXX: FIX THIS: Pretend we have no BAT 2294 */ 2295 nb_BATs = env->nb_BATs; 2296 env->nb_BATs = 0; 2297 if (get_physical_address(env, &ctx, addr, 0, ACCESS_INT) == 0) { 2298 ret = ctx.raddr; 2299 } 2300 env->nb_BATs = nb_BATs; 2301 return ret; 2302 } 2303 2304 static inline target_ulong booke_tlb_to_page_size(int size) 2305 { 2306 return 1024 << (2 * size); 2307 } 2308 2309 static inline int booke_page_size_to_tlb(target_ulong page_size) 2310 { 2311 int size; 2312 2313 switch (page_size) { 2314 case 0x00000400UL: 2315 size = 0x0; 2316 break; 2317 case 0x00001000UL: 2318 size = 0x1; 2319 break; 2320 case 0x00004000UL: 2321 size = 0x2; 2322 break; 2323 case 0x00010000UL: 2324 size = 0x3; 2325 break; 2326 case 0x00040000UL: 2327 size = 0x4; 2328 break; 2329 case 0x00100000UL: 2330 size = 0x5; 2331 break; 2332 case 0x00400000UL: 2333 size = 0x6; 2334 break; 2335 case 0x01000000UL: 2336 size = 0x7; 2337 break; 2338 case 0x04000000UL: 2339 size = 0x8; 2340 break; 2341 case 0x10000000UL: 2342 size = 0x9; 2343 break; 2344 case 0x40000000UL: 2345 size = 0xA; 2346 break; 2347 #if defined(TARGET_PPC64) 2348 case 0x000100000000ULL: 2349 size = 0xB; 2350 break; 2351 case 0x000400000000ULL: 2352 size = 0xC; 2353 break; 2354 case 0x001000000000ULL: 2355 size = 0xD; 2356 break; 2357 case 0x004000000000ULL: 2358 size = 0xE; 2359 break; 2360 case 0x010000000000ULL: 2361 size = 0xF; 2362 break; 2363 #endif 2364 default: 2365 size = -1; 2366 break; 2367 } 2368 2369 return size; 2370 } 2371 2372 /* Helpers for 4xx TLB management */ 2373 #define PPC4XX_TLB_ENTRY_MASK 0x0000003f /* Mask for 64 TLB entries */ 2374 2375 #define PPC4XX_TLBHI_V 0x00000040 2376 #define PPC4XX_TLBHI_E 0x00000020 2377 #define PPC4XX_TLBHI_SIZE_MIN 0 2378 #define PPC4XX_TLBHI_SIZE_MAX 7 2379 #define PPC4XX_TLBHI_SIZE_DEFAULT 1 2380 #define PPC4XX_TLBHI_SIZE_SHIFT 7 2381 #define PPC4XX_TLBHI_SIZE_MASK 0x00000007 2382 2383 #define PPC4XX_TLBLO_EX 0x00000200 2384 #define PPC4XX_TLBLO_WR 0x00000100 2385 #define PPC4XX_TLBLO_ATTR_MASK 0x000000FF 2386 #define PPC4XX_TLBLO_RPN_MASK 0xFFFFFC00 2387 2388 target_ulong helper_4xx_tlbre_hi(CPUPPCState *env, target_ulong entry) 2389 { 2390 ppcemb_tlb_t *tlb; 2391 target_ulong ret; 2392 int size; 2393 2394 entry &= PPC4XX_TLB_ENTRY_MASK; 2395 tlb = &env->tlb.tlbe[entry]; 2396 ret = tlb->EPN; 2397 if (tlb->prot & PAGE_VALID) { 2398 ret |= PPC4XX_TLBHI_V; 2399 } 2400 size = booke_page_size_to_tlb(tlb->size); 2401 if (size < PPC4XX_TLBHI_SIZE_MIN || size > PPC4XX_TLBHI_SIZE_MAX) { 2402 size = PPC4XX_TLBHI_SIZE_DEFAULT; 2403 } 2404 ret |= size << PPC4XX_TLBHI_SIZE_SHIFT; 2405 env->spr[SPR_40x_PID] = tlb->PID; 2406 return ret; 2407 } 2408 2409 target_ulong helper_4xx_tlbre_lo(CPUPPCState *env, target_ulong entry) 2410 { 2411 ppcemb_tlb_t *tlb; 2412 target_ulong ret; 2413 2414 entry &= PPC4XX_TLB_ENTRY_MASK; 2415 tlb = &env->tlb.tlbe[entry]; 2416 ret = tlb->RPN; 2417 if (tlb->prot & PAGE_EXEC) { 2418 ret |= PPC4XX_TLBLO_EX; 2419 } 2420 if (tlb->prot & PAGE_WRITE) { 2421 ret |= PPC4XX_TLBLO_WR; 2422 } 2423 return ret; 2424 } 2425 2426 void helper_4xx_tlbwe_hi(CPUPPCState *env, target_ulong entry, 2427 target_ulong val) 2428 { 2429 CPUState *cs = env_cpu(env); 2430 ppcemb_tlb_t *tlb; 2431 target_ulong page, end; 2432 2433 LOG_SWTLB("%s entry %d val " TARGET_FMT_lx "\n", __func__, (int)entry, 2434 val); 2435 entry &= PPC4XX_TLB_ENTRY_MASK; 2436 tlb = &env->tlb.tlbe[entry]; 2437 /* Invalidate previous TLB (if it's valid) */ 2438 if (tlb->prot & PAGE_VALID) { 2439 end = tlb->EPN + tlb->size; 2440 
LOG_SWTLB("%s: invalidate old TLB %d start " TARGET_FMT_lx " end " 2441 TARGET_FMT_lx "\n", __func__, (int)entry, tlb->EPN, end); 2442 for (page = tlb->EPN; page < end; page += TARGET_PAGE_SIZE) { 2443 tlb_flush_page(cs, page); 2444 } 2445 } 2446 tlb->size = booke_tlb_to_page_size((val >> PPC4XX_TLBHI_SIZE_SHIFT) 2447 & PPC4XX_TLBHI_SIZE_MASK); 2448 /* 2449 * We cannot handle TLB size < TARGET_PAGE_SIZE. 2450 * If this ever occurs, we should implement TARGET_PAGE_BITS_VARY 2451 */ 2452 if ((val & PPC4XX_TLBHI_V) && tlb->size < TARGET_PAGE_SIZE) { 2453 cpu_abort(cs, "TLB size " TARGET_FMT_lu " < %u " 2454 "are not supported (%d)\n" 2455 "Please implement TARGET_PAGE_BITS_VARY\n", 2456 tlb->size, TARGET_PAGE_SIZE, (int)((val >> 7) & 0x7)); 2457 } 2458 tlb->EPN = val & ~(tlb->size - 1); 2459 if (val & PPC4XX_TLBHI_V) { 2460 tlb->prot |= PAGE_VALID; 2461 if (val & PPC4XX_TLBHI_E) { 2462 /* XXX: TO BE FIXED */ 2463 cpu_abort(cs, 2464 "Little-endian TLB entries are not supported by now\n"); 2465 } 2466 } else { 2467 tlb->prot &= ~PAGE_VALID; 2468 } 2469 tlb->PID = env->spr[SPR_40x_PID]; /* PID */ 2470 LOG_SWTLB("%s: set up TLB %d RPN " TARGET_FMT_plx " EPN " TARGET_FMT_lx 2471 " size " TARGET_FMT_lx " prot %c%c%c%c PID %d\n", __func__, 2472 (int)entry, tlb->RPN, tlb->EPN, tlb->size, 2473 tlb->prot & PAGE_READ ? 'r' : '-', 2474 tlb->prot & PAGE_WRITE ? 'w' : '-', 2475 tlb->prot & PAGE_EXEC ? 'x' : '-', 2476 tlb->prot & PAGE_VALID ? 'v' : '-', (int)tlb->PID); 2477 /* Invalidate new TLB (if valid) */ 2478 if (tlb->prot & PAGE_VALID) { 2479 end = tlb->EPN + tlb->size; 2480 LOG_SWTLB("%s: invalidate TLB %d start " TARGET_FMT_lx " end " 2481 TARGET_FMT_lx "\n", __func__, (int)entry, tlb->EPN, end); 2482 for (page = tlb->EPN; page < end; page += TARGET_PAGE_SIZE) { 2483 tlb_flush_page(cs, page); 2484 } 2485 } 2486 } 2487 2488 void helper_4xx_tlbwe_lo(CPUPPCState *env, target_ulong entry, 2489 target_ulong val) 2490 { 2491 ppcemb_tlb_t *tlb; 2492 2493 LOG_SWTLB("%s entry %i val " TARGET_FMT_lx "\n", __func__, (int)entry, 2494 val); 2495 entry &= PPC4XX_TLB_ENTRY_MASK; 2496 tlb = &env->tlb.tlbe[entry]; 2497 tlb->attr = val & PPC4XX_TLBLO_ATTR_MASK; 2498 tlb->RPN = val & PPC4XX_TLBLO_RPN_MASK; 2499 tlb->prot = PAGE_READ; 2500 if (val & PPC4XX_TLBLO_EX) { 2501 tlb->prot |= PAGE_EXEC; 2502 } 2503 if (val & PPC4XX_TLBLO_WR) { 2504 tlb->prot |= PAGE_WRITE; 2505 } 2506 LOG_SWTLB("%s: set up TLB %d RPN " TARGET_FMT_plx " EPN " TARGET_FMT_lx 2507 " size " TARGET_FMT_lx " prot %c%c%c%c PID %d\n", __func__, 2508 (int)entry, tlb->RPN, tlb->EPN, tlb->size, 2509 tlb->prot & PAGE_READ ? 'r' : '-', 2510 tlb->prot & PAGE_WRITE ? 'w' : '-', 2511 tlb->prot & PAGE_EXEC ? 'x' : '-', 2512 tlb->prot & PAGE_VALID ? 
'v' : '-', (int)tlb->PID); 2513 } 2514 2515 target_ulong helper_4xx_tlbsx(CPUPPCState *env, target_ulong address) 2516 { 2517 return ppcemb_tlb_search(env, address, env->spr[SPR_40x_PID]); 2518 } 2519 2520 /* PowerPC 440 TLB management */ 2521 void helper_440_tlbwe(CPUPPCState *env, uint32_t word, target_ulong entry, 2522 target_ulong value) 2523 { 2524 ppcemb_tlb_t *tlb; 2525 target_ulong EPN, RPN, size; 2526 int do_flush_tlbs; 2527 2528 LOG_SWTLB("%s word %d entry %d value " TARGET_FMT_lx "\n", 2529 __func__, word, (int)entry, value); 2530 do_flush_tlbs = 0; 2531 entry &= 0x3F; 2532 tlb = &env->tlb.tlbe[entry]; 2533 switch (word) { 2534 default: 2535 /* Just here to please gcc */ 2536 case 0: 2537 EPN = value & 0xFFFFFC00; 2538 if ((tlb->prot & PAGE_VALID) && EPN != tlb->EPN) { 2539 do_flush_tlbs = 1; 2540 } 2541 tlb->EPN = EPN; 2542 size = booke_tlb_to_page_size((value >> 4) & 0xF); 2543 if ((tlb->prot & PAGE_VALID) && tlb->size < size) { 2544 do_flush_tlbs = 1; 2545 } 2546 tlb->size = size; 2547 tlb->attr &= ~0x1; 2548 tlb->attr |= (value >> 8) & 1; 2549 if (value & 0x200) { 2550 tlb->prot |= PAGE_VALID; 2551 } else { 2552 if (tlb->prot & PAGE_VALID) { 2553 tlb->prot &= ~PAGE_VALID; 2554 do_flush_tlbs = 1; 2555 } 2556 } 2557 tlb->PID = env->spr[SPR_440_MMUCR] & 0x000000FF; 2558 if (do_flush_tlbs) { 2559 tlb_flush(env_cpu(env)); 2560 } 2561 break; 2562 case 1: 2563 RPN = value & 0xFFFFFC0F; 2564 if ((tlb->prot & PAGE_VALID) && tlb->RPN != RPN) { 2565 tlb_flush(env_cpu(env)); 2566 } 2567 tlb->RPN = RPN; 2568 break; 2569 case 2: 2570 tlb->attr = (tlb->attr & 0x1) | (value & 0x0000FF00); 2571 tlb->prot = tlb->prot & PAGE_VALID; 2572 if (value & 0x1) { 2573 tlb->prot |= PAGE_READ << 4; 2574 } 2575 if (value & 0x2) { 2576 tlb->prot |= PAGE_WRITE << 4; 2577 } 2578 if (value & 0x4) { 2579 tlb->prot |= PAGE_EXEC << 4; 2580 } 2581 if (value & 0x8) { 2582 tlb->prot |= PAGE_READ; 2583 } 2584 if (value & 0x10) { 2585 tlb->prot |= PAGE_WRITE; 2586 } 2587 if (value & 0x20) { 2588 tlb->prot |= PAGE_EXEC; 2589 } 2590 break; 2591 } 2592 } 2593 2594 target_ulong helper_440_tlbre(CPUPPCState *env, uint32_t word, 2595 target_ulong entry) 2596 { 2597 ppcemb_tlb_t *tlb; 2598 target_ulong ret; 2599 int size; 2600 2601 entry &= 0x3F; 2602 tlb = &env->tlb.tlbe[entry]; 2603 switch (word) { 2604 default: 2605 /* Just here to please gcc */ 2606 case 0: 2607 ret = tlb->EPN; 2608 size = booke_page_size_to_tlb(tlb->size); 2609 if (size < 0 || size > 0xF) { 2610 size = 1; 2611 } 2612 ret |= size << 4; 2613 if (tlb->attr & 0x1) { 2614 ret |= 0x100; 2615 } 2616 if (tlb->prot & PAGE_VALID) { 2617 ret |= 0x200; 2618 } 2619 env->spr[SPR_440_MMUCR] &= ~0x000000FF; 2620 env->spr[SPR_440_MMUCR] |= tlb->PID; 2621 break; 2622 case 1: 2623 ret = tlb->RPN; 2624 break; 2625 case 2: 2626 ret = tlb->attr & ~0x1; 2627 if (tlb->prot & (PAGE_READ << 4)) { 2628 ret |= 0x1; 2629 } 2630 if (tlb->prot & (PAGE_WRITE << 4)) { 2631 ret |= 0x2; 2632 } 2633 if (tlb->prot & (PAGE_EXEC << 4)) { 2634 ret |= 0x4; 2635 } 2636 if (tlb->prot & PAGE_READ) { 2637 ret |= 0x8; 2638 } 2639 if (tlb->prot & PAGE_WRITE) { 2640 ret |= 0x10; 2641 } 2642 if (tlb->prot & PAGE_EXEC) { 2643 ret |= 0x20; 2644 } 2645 break; 2646 } 2647 return ret; 2648 } 2649 2650 target_ulong helper_440_tlbsx(CPUPPCState *env, target_ulong address) 2651 { 2652 return ppcemb_tlb_search(env, address, env->spr[SPR_440_MMUCR] & 0xFF); 2653 } 2654 2655 /* PowerPC BookE 2.06 TLB management */ 2656 2657 static ppcmas_tlb_t *booke206_cur_tlb(CPUPPCState *env) 2658 { 2659 uint32_t tlbncfg = 
0; 2660 int esel = (env->spr[SPR_BOOKE_MAS0] & MAS0_ESEL_MASK) >> MAS0_ESEL_SHIFT; 2661 int ea = (env->spr[SPR_BOOKE_MAS2] & MAS2_EPN_MASK); 2662 int tlb; 2663 2664 tlb = (env->spr[SPR_BOOKE_MAS0] & MAS0_TLBSEL_MASK) >> MAS0_TLBSEL_SHIFT; 2665 tlbncfg = env->spr[SPR_BOOKE_TLB0CFG + tlb]; 2666 2667 if ((tlbncfg & TLBnCFG_HES) && (env->spr[SPR_BOOKE_MAS0] & MAS0_HES)) { 2668 cpu_abort(env_cpu(env), "we don't support HES yet\n"); 2669 } 2670 2671 return booke206_get_tlbm(env, tlb, ea, esel); 2672 } 2673 2674 void helper_booke_setpid(CPUPPCState *env, uint32_t pidn, target_ulong pid) 2675 { 2676 env->spr[pidn] = pid; 2677 /* changing PIDs mean we're in a different address space now */ 2678 tlb_flush(env_cpu(env)); 2679 } 2680 2681 void helper_booke_set_eplc(CPUPPCState *env, target_ulong val) 2682 { 2683 env->spr[SPR_BOOKE_EPLC] = val & EPID_MASK; 2684 tlb_flush_by_mmuidx(env_cpu(env), 1 << PPC_TLB_EPID_LOAD); 2685 } 2686 void helper_booke_set_epsc(CPUPPCState *env, target_ulong val) 2687 { 2688 env->spr[SPR_BOOKE_EPSC] = val & EPID_MASK; 2689 tlb_flush_by_mmuidx(env_cpu(env), 1 << PPC_TLB_EPID_STORE); 2690 } 2691 2692 static inline void flush_page(CPUPPCState *env, ppcmas_tlb_t *tlb) 2693 { 2694 if (booke206_tlb_to_page_size(env, tlb) == TARGET_PAGE_SIZE) { 2695 tlb_flush_page(env_cpu(env), tlb->mas2 & MAS2_EPN_MASK); 2696 } else { 2697 tlb_flush(env_cpu(env)); 2698 } 2699 } 2700 2701 void helper_booke206_tlbwe(CPUPPCState *env) 2702 { 2703 uint32_t tlbncfg, tlbn; 2704 ppcmas_tlb_t *tlb; 2705 uint32_t size_tlb, size_ps; 2706 target_ulong mask; 2707 2708 2709 switch (env->spr[SPR_BOOKE_MAS0] & MAS0_WQ_MASK) { 2710 case MAS0_WQ_ALWAYS: 2711 /* good to go, write that entry */ 2712 break; 2713 case MAS0_WQ_COND: 2714 /* XXX check if reserved */ 2715 if (0) { 2716 return; 2717 } 2718 break; 2719 case MAS0_WQ_CLR_RSRV: 2720 /* XXX clear entry */ 2721 return; 2722 default: 2723 /* no idea what to do */ 2724 return; 2725 } 2726 2727 if (((env->spr[SPR_BOOKE_MAS0] & MAS0_ATSEL) == MAS0_ATSEL_LRAT) && 2728 !msr_gs) { 2729 /* XXX we don't support direct LRAT setting yet */ 2730 fprintf(stderr, "cpu: don't support LRAT setting yet\n"); 2731 return; 2732 } 2733 2734 tlbn = (env->spr[SPR_BOOKE_MAS0] & MAS0_TLBSEL_MASK) >> MAS0_TLBSEL_SHIFT; 2735 tlbncfg = env->spr[SPR_BOOKE_TLB0CFG + tlbn]; 2736 2737 tlb = booke206_cur_tlb(env); 2738 2739 if (!tlb) { 2740 raise_exception_err_ra(env, POWERPC_EXCP_PROGRAM, 2741 POWERPC_EXCP_INVAL | 2742 POWERPC_EXCP_INVAL_INVAL, GETPC()); 2743 } 2744 2745 /* check that we support the targeted size */ 2746 size_tlb = (env->spr[SPR_BOOKE_MAS1] & MAS1_TSIZE_MASK) >> MAS1_TSIZE_SHIFT; 2747 size_ps = booke206_tlbnps(env, tlbn); 2748 if ((env->spr[SPR_BOOKE_MAS1] & MAS1_VALID) && (tlbncfg & TLBnCFG_AVAIL) && 2749 !(size_ps & (1 << size_tlb))) { 2750 raise_exception_err_ra(env, POWERPC_EXCP_PROGRAM, 2751 POWERPC_EXCP_INVAL | 2752 POWERPC_EXCP_INVAL_INVAL, GETPC()); 2753 } 2754 2755 if (msr_gs) { 2756 cpu_abort(env_cpu(env), "missing HV implementation\n"); 2757 } 2758 2759 if (tlb->mas1 & MAS1_VALID) { 2760 /* 2761 * Invalidate the page in QEMU TLB if it was a valid entry. 2762 * 2763 * In "PowerPC e500 Core Family Reference Manual, Rev. 1", 2764 * Section "12.4.2 TLB Write Entry (tlbwe) Instruction": 2765 * (https://www.nxp.com/docs/en/reference-manual/E500CORERM.pdf) 2766 * 2767 * "Note that when an L2 TLB entry is written, it may be displacing an 2768 * already valid entry in the same L2 TLB location (a victim). 
If a 2769 * valid L1 TLB entry corresponds to the L2 MMU victim entry, that L1 2770 * TLB entry is automatically invalidated." 2771 */ 2772 flush_page(env, tlb); 2773 } 2774 2775 tlb->mas7_3 = ((uint64_t)env->spr[SPR_BOOKE_MAS7] << 32) | 2776 env->spr[SPR_BOOKE_MAS3]; 2777 tlb->mas1 = env->spr[SPR_BOOKE_MAS1]; 2778 2779 if ((env->spr[SPR_MMUCFG] & MMUCFG_MAVN) == MMUCFG_MAVN_V2) { 2780 /* For TLB which has a fixed size TSIZE is ignored with MAV2 */ 2781 booke206_fixed_size_tlbn(env, tlbn, tlb); 2782 } else { 2783 if (!(tlbncfg & TLBnCFG_AVAIL)) { 2784 /* force !AVAIL TLB entries to correct page size */ 2785 tlb->mas1 &= ~MAS1_TSIZE_MASK; 2786 /* XXX can be configured in MMUCSR0 */ 2787 tlb->mas1 |= (tlbncfg & TLBnCFG_MINSIZE) >> 12; 2788 } 2789 } 2790 2791 /* Make a mask from TLB size to discard invalid bits in EPN field */ 2792 mask = ~(booke206_tlb_to_page_size(env, tlb) - 1); 2793 /* Add a mask for page attributes */ 2794 mask |= MAS2_ACM | MAS2_VLE | MAS2_W | MAS2_I | MAS2_M | MAS2_G | MAS2_E; 2795 2796 if (!msr_cm) { 2797 /* 2798 * Executing a tlbwe instruction in 32-bit mode will set bits 2799 * 0:31 of the TLB EPN field to zero. 2800 */ 2801 mask &= 0xffffffff; 2802 } 2803 2804 tlb->mas2 = env->spr[SPR_BOOKE_MAS2] & mask; 2805 2806 if (!(tlbncfg & TLBnCFG_IPROT)) { 2807 /* no IPROT supported by TLB */ 2808 tlb->mas1 &= ~MAS1_IPROT; 2809 } 2810 2811 flush_page(env, tlb); 2812 } 2813 2814 static inline void booke206_tlb_to_mas(CPUPPCState *env, ppcmas_tlb_t *tlb) 2815 { 2816 int tlbn = booke206_tlbm_to_tlbn(env, tlb); 2817 int way = booke206_tlbm_to_way(env, tlb); 2818 2819 env->spr[SPR_BOOKE_MAS0] = tlbn << MAS0_TLBSEL_SHIFT; 2820 env->spr[SPR_BOOKE_MAS0] |= way << MAS0_ESEL_SHIFT; 2821 env->spr[SPR_BOOKE_MAS0] |= env->last_way << MAS0_NV_SHIFT; 2822 2823 env->spr[SPR_BOOKE_MAS1] = tlb->mas1; 2824 env->spr[SPR_BOOKE_MAS2] = tlb->mas2; 2825 env->spr[SPR_BOOKE_MAS3] = tlb->mas7_3; 2826 env->spr[SPR_BOOKE_MAS7] = tlb->mas7_3 >> 32; 2827 } 2828 2829 void helper_booke206_tlbre(CPUPPCState *env) 2830 { 2831 ppcmas_tlb_t *tlb = NULL; 2832 2833 tlb = booke206_cur_tlb(env); 2834 if (!tlb) { 2835 env->spr[SPR_BOOKE_MAS1] = 0; 2836 } else { 2837 booke206_tlb_to_mas(env, tlb); 2838 } 2839 } 2840 2841 void helper_booke206_tlbsx(CPUPPCState *env, target_ulong address) 2842 { 2843 ppcmas_tlb_t *tlb = NULL; 2844 int i, j; 2845 hwaddr raddr; 2846 uint32_t spid, sas; 2847 2848 spid = (env->spr[SPR_BOOKE_MAS6] & MAS6_SPID_MASK) >> MAS6_SPID_SHIFT; 2849 sas = env->spr[SPR_BOOKE_MAS6] & MAS6_SAS; 2850 2851 for (i = 0; i < BOOKE206_MAX_TLBN; i++) { 2852 int ways = booke206_tlb_ways(env, i); 2853 2854 for (j = 0; j < ways; j++) { 2855 tlb = booke206_get_tlbm(env, i, address, j); 2856 2857 if (!tlb) { 2858 continue; 2859 } 2860 2861 if (ppcmas_tlb_check(env, tlb, &raddr, address, spid)) { 2862 continue; 2863 } 2864 2865 if (sas != ((tlb->mas1 & MAS1_TS) >> MAS1_TS_SHIFT)) { 2866 continue; 2867 } 2868 2869 booke206_tlb_to_mas(env, tlb); 2870 return; 2871 } 2872 } 2873 2874 /* no entry found, fill with defaults */ 2875 env->spr[SPR_BOOKE_MAS0] = env->spr[SPR_BOOKE_MAS4] & MAS4_TLBSELD_MASK; 2876 env->spr[SPR_BOOKE_MAS1] = env->spr[SPR_BOOKE_MAS4] & MAS4_TSIZED_MASK; 2877 env->spr[SPR_BOOKE_MAS2] = env->spr[SPR_BOOKE_MAS4] & MAS4_WIMGED_MASK; 2878 env->spr[SPR_BOOKE_MAS3] = 0; 2879 env->spr[SPR_BOOKE_MAS7] = 0; 2880 2881 if (env->spr[SPR_BOOKE_MAS6] & MAS6_SAS) { 2882 env->spr[SPR_BOOKE_MAS1] |= MAS1_TS; 2883 } 2884 2885 env->spr[SPR_BOOKE_MAS1] |= (env->spr[SPR_BOOKE_MAS6] >> 16) 2886 << MAS1_TID_SHIFT; 2887 
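/*
 * The miss defaults filled in above come from MAS4 (TLBSELD / TSIZED /
 * WIMGED), with the TID taken from the SPID field that was searched in MAS6.
 */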
2888 /* next victim logic */ 2889 env->spr[SPR_BOOKE_MAS0] |= env->last_way << MAS0_ESEL_SHIFT; 2890 env->last_way++; 2891 env->last_way &= booke206_tlb_ways(env, 0) - 1; 2892 env->spr[SPR_BOOKE_MAS0] |= env->last_way << MAS0_NV_SHIFT; 2893 } 2894 2895 static inline void booke206_invalidate_ea_tlb(CPUPPCState *env, int tlbn, 2896 uint32_t ea) 2897 { 2898 int i; 2899 int ways = booke206_tlb_ways(env, tlbn); 2900 target_ulong mask; 2901 2902 for (i = 0; i < ways; i++) { 2903 ppcmas_tlb_t *tlb = booke206_get_tlbm(env, tlbn, ea, i); 2904 if (!tlb) { 2905 continue; 2906 } 2907 mask = ~(booke206_tlb_to_page_size(env, tlb) - 1); 2908 if (((tlb->mas2 & MAS2_EPN_MASK) == (ea & mask)) && 2909 !(tlb->mas1 & MAS1_IPROT)) { 2910 tlb->mas1 &= ~MAS1_VALID; 2911 } 2912 } 2913 } 2914 2915 void helper_booke206_tlbivax(CPUPPCState *env, target_ulong address) 2916 { 2917 CPUState *cs; 2918 2919 if (address & 0x4) { 2920 /* flush all entries */ 2921 if (address & 0x8) { 2922 /* flush all of TLB1 */ 2923 booke206_flush_tlb(env, BOOKE206_FLUSH_TLB1, 1); 2924 } else { 2925 /* flush all of TLB0 */ 2926 booke206_flush_tlb(env, BOOKE206_FLUSH_TLB0, 0); 2927 } 2928 return; 2929 } 2930 2931 if (address & 0x8) { 2932 /* flush TLB1 entries */ 2933 booke206_invalidate_ea_tlb(env, 1, address); 2934 CPU_FOREACH(cs) { 2935 tlb_flush(cs); 2936 } 2937 } else { 2938 /* flush TLB0 entries */ 2939 booke206_invalidate_ea_tlb(env, 0, address); 2940 CPU_FOREACH(cs) { 2941 tlb_flush_page(cs, address & MAS2_EPN_MASK); 2942 } 2943 } 2944 } 2945 2946 void helper_booke206_tlbilx0(CPUPPCState *env, target_ulong address) 2947 { 2948 /* XXX missing LPID handling */ 2949 booke206_flush_tlb(env, -1, 1); 2950 } 2951 2952 void helper_booke206_tlbilx1(CPUPPCState *env, target_ulong address) 2953 { 2954 int i, j; 2955 int tid = (env->spr[SPR_BOOKE_MAS6] & MAS6_SPID); 2956 ppcmas_tlb_t *tlb = env->tlb.tlbm; 2957 int tlb_size; 2958 2959 /* XXX missing LPID handling */ 2960 for (i = 0; i < BOOKE206_MAX_TLBN; i++) { 2961 tlb_size = booke206_tlb_size(env, i); 2962 for (j = 0; j < tlb_size; j++) { 2963 if (!(tlb[j].mas1 & MAS1_IPROT) && 2964 ((tlb[j].mas1 & MAS1_TID_MASK) == tid)) { 2965 tlb[j].mas1 &= ~MAS1_VALID; 2966 } 2967 } 2968 tlb += booke206_tlb_size(env, i); 2969 } 2970 tlb_flush(env_cpu(env)); 2971 } 2972 2973 void helper_booke206_tlbilx3(CPUPPCState *env, target_ulong address) 2974 { 2975 int i, j; 2976 ppcmas_tlb_t *tlb; 2977 int tid = (env->spr[SPR_BOOKE_MAS6] & MAS6_SPID); 2978 int pid = tid >> MAS6_SPID_SHIFT; 2979 int sgs = env->spr[SPR_BOOKE_MAS5] & MAS5_SGS; 2980 int ind = (env->spr[SPR_BOOKE_MAS6] & MAS6_SIND) ? 
MAS1_IND : 0; 2981 /* XXX check for unsupported isize and raise an invalid opcode then */ 2982 int size = env->spr[SPR_BOOKE_MAS6] & MAS6_ISIZE_MASK; 2983 /* XXX implement MAV2 handling */ 2984 bool mav2 = false; 2985 2986 /* XXX missing LPID handling */ 2987 /* flush by pid and ea */ 2988 for (i = 0; i < BOOKE206_MAX_TLBN; i++) { 2989 int ways = booke206_tlb_ways(env, i); 2990 2991 for (j = 0; j < ways; j++) { 2992 tlb = booke206_get_tlbm(env, i, address, j); 2993 if (!tlb) { 2994 continue; 2995 } 2996 if ((ppcmas_tlb_check(env, tlb, NULL, address, pid) != 0) || 2997 (tlb->mas1 & MAS1_IPROT) || 2998 ((tlb->mas1 & MAS1_IND) != ind) || 2999 ((tlb->mas8 & MAS8_TGS) != sgs)) { 3000 continue; 3001 } 3002 if (mav2 && ((tlb->mas1 & MAS1_TSIZE_MASK) != size)) { 3003 /* XXX only check when MMUCFG[TWC] || TLBnCFG[HES] */ 3004 continue; 3005 } 3006 /* XXX e500mc doesn't match SAS, but other cores might */ 3007 tlb->mas1 &= ~MAS1_VALID; 3008 } 3009 } 3010 tlb_flush(env_cpu(env)); 3011 } 3012 3013 void helper_booke206_tlbflush(CPUPPCState *env, target_ulong type) 3014 { 3015 int flags = 0; 3016 3017 if (type & 2) { 3018 flags |= BOOKE206_FLUSH_TLB1; 3019 } 3020 3021 if (type & 4) { 3022 flags |= BOOKE206_FLUSH_TLB0; 3023 } 3024 3025 booke206_flush_tlb(env, flags, 1); 3026 } 3027 3028 3029 void helper_check_tlb_flush_local(CPUPPCState *env) 3030 { 3031 check_tlb_flush(env, false); 3032 } 3033 3034 void helper_check_tlb_flush_global(CPUPPCState *env) 3035 { 3036 check_tlb_flush(env, true); 3037 } 3038 3039 /*****************************************************************************/ 3040 3041 bool ppc_cpu_tlb_fill(CPUState *cs, vaddr addr, int size, 3042 MMUAccessType access_type, int mmu_idx, 3043 bool probe, uintptr_t retaddr) 3044 { 3045 PowerPCCPU *cpu = POWERPC_CPU(cs); 3046 PowerPCCPUClass *pcc = POWERPC_CPU_GET_CLASS(cs); 3047 CPUPPCState *env = &cpu->env; 3048 int ret; 3049 3050 if (pcc->handle_mmu_fault) { 3051 ret = pcc->handle_mmu_fault(cpu, addr, access_type, mmu_idx); 3052 } else { 3053 ret = cpu_ppc_handle_mmu_fault(env, addr, access_type, mmu_idx); 3054 } 3055 if (unlikely(ret != 0)) { 3056 if (probe) { 3057 return false; 3058 } 3059 raise_exception_err_ra(env, cs->exception_index, env->error_code, 3060 retaddr); 3061 } 3062 return true; 3063 } 3064
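
/*
 * Illustrative sketch only (not part of the build): the softmmu core calls
 * ppc_cpu_tlb_fill() to resolve a guest access. With probe == false a failed
 * translation does not return here, the exception is raised directly from
 * the helper; with probe == true the caller simply gets false back:
 *
 *     if (!ppc_cpu_tlb_fill(cs, addr, size, MMU_DATA_LOAD, mmu_idx,
 *                           true, 0)) {
 *         // no valid translation; nothing was raised because probe == true
 *     }
 */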