1 /* 2 * PowerPC MMU, TLB, SLB and BAT emulation helpers for QEMU. 3 * 4 * Copyright (c) 2003-2007 Jocelyn Mayer 5 * 6 * This library is free software; you can redistribute it and/or 7 * modify it under the terms of the GNU Lesser General Public 8 * License as published by the Free Software Foundation; either 9 * version 2.1 of the License, or (at your option) any later version. 10 * 11 * This library is distributed in the hope that it will be useful, 12 * but WITHOUT ANY WARRANTY; without even the implied warranty of 13 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU 14 * Lesser General Public License for more details. 15 * 16 * You should have received a copy of the GNU Lesser General Public 17 * License along with this library; if not, see <http://www.gnu.org/licenses/>. 18 */ 19 20 #include "qemu/osdep.h" 21 #include "qemu/units.h" 22 #include "cpu.h" 23 #include "sysemu/kvm.h" 24 #include "kvm_ppc.h" 25 #include "mmu-hash64.h" 26 #include "mmu-hash32.h" 27 #include "exec/exec-all.h" 28 #include "exec/log.h" 29 #include "helper_regs.h" 30 #include "qemu/error-report.h" 31 #include "qemu/main-loop.h" 32 #include "qemu/qemu-print.h" 33 #include "internal.h" 34 #include "mmu-book3s-v3.h" 35 #include "mmu-radix64.h" 36 37 #ifdef CONFIG_TCG 38 #include "exec/helper-proto.h" 39 #include "exec/cpu_ldst.h" 40 #endif 41 /* #define DEBUG_MMU */ 42 /* #define DEBUG_BATS */ 43 /* #define DEBUG_SOFTWARE_TLB */ 44 /* #define DUMP_PAGE_TABLES */ 45 /* #define FLUSH_ALL_TLBS */ 46 47 #ifdef DEBUG_MMU 48 # define LOG_MMU_STATE(cpu) log_cpu_state_mask(CPU_LOG_MMU, (cpu), 0) 49 #else 50 # define LOG_MMU_STATE(cpu) do { } while (0) 51 #endif 52 53 #ifdef DEBUG_SOFTWARE_TLB 54 # define LOG_SWTLB(...) qemu_log_mask(CPU_LOG_MMU, __VA_ARGS__) 55 #else 56 # define LOG_SWTLB(...) do { } while (0) 57 #endif 58 59 #ifdef DEBUG_BATS 60 # define LOG_BATS(...) qemu_log_mask(CPU_LOG_MMU, __VA_ARGS__) 61 #else 62 # define LOG_BATS(...) do { } while (0) 63 #endif 64 65 /*****************************************************************************/ 66 /* PowerPC MMU emulation */ 67 68 /* Context used internally during MMU translations */ 69 typedef struct mmu_ctx_t mmu_ctx_t; 70 struct mmu_ctx_t { 71 hwaddr raddr; /* Real address */ 72 hwaddr eaddr; /* Effective address */ 73 int prot; /* Protection bits */ 74 hwaddr hash[2]; /* Pagetable hash values */ 75 target_ulong ptem; /* Virtual segment ID | API */ 76 int key; /* Access key */ 77 int nx; /* Non-execute area */ 78 }; 79 80 /* Common routines used by software and hardware TLBs emulation */ 81 static inline int pte_is_valid(target_ulong pte0) 82 { 83 return pte0 & 0x80000000 ? 
1 : 0; 84 } 85 86 static inline void pte_invalidate(target_ulong *pte0) 87 { 88 *pte0 &= ~0x80000000; 89 } 90 91 #define PTE_PTEM_MASK 0x7FFFFFBF 92 #define PTE_CHECK_MASK (TARGET_PAGE_MASK | 0x7B) 93 94 static int pp_check(int key, int pp, int nx) 95 { 96 int access; 97 98 /* Compute access rights */ 99 access = 0; 100 if (key == 0) { 101 switch (pp) { 102 case 0x0: 103 case 0x1: 104 case 0x2: 105 access |= PAGE_WRITE; 106 /* fall through */ 107 case 0x3: 108 access |= PAGE_READ; 109 break; 110 } 111 } else { 112 switch (pp) { 113 case 0x0: 114 access = 0; 115 break; 116 case 0x1: 117 case 0x3: 118 access = PAGE_READ; 119 break; 120 case 0x2: 121 access = PAGE_READ | PAGE_WRITE; 122 break; 123 } 124 } 125 if (nx == 0) { 126 access |= PAGE_EXEC; 127 } 128 129 return access; 130 } 131 132 static int check_prot(int prot, MMUAccessType access_type) 133 { 134 return prot & prot_for_access_type(access_type) ? 0 : -2; 135 } 136 137 static int ppc6xx_tlb_pte_check(mmu_ctx_t *ctx, target_ulong pte0, 138 target_ulong pte1, int h, 139 MMUAccessType access_type) 140 { 141 target_ulong ptem, mmask; 142 int access, ret, pteh, ptev, pp; 143 144 ret = -1; 145 /* Check validity and table match */ 146 ptev = pte_is_valid(pte0); 147 pteh = (pte0 >> 6) & 1; 148 if (ptev && h == pteh) { 149 /* Check vsid & api */ 150 ptem = pte0 & PTE_PTEM_MASK; 151 mmask = PTE_CHECK_MASK; 152 pp = pte1 & 0x00000003; 153 if (ptem == ctx->ptem) { 154 if (ctx->raddr != (hwaddr)-1ULL) { 155 /* all matches should have equal RPN, WIMG & PP */ 156 if ((ctx->raddr & mmask) != (pte1 & mmask)) { 157 qemu_log_mask(CPU_LOG_MMU, "Bad RPN/WIMG/PP\n"); 158 return -3; 159 } 160 } 161 /* Compute access rights */ 162 access = pp_check(ctx->key, pp, ctx->nx); 163 /* Keep the matching PTE information */ 164 ctx->raddr = pte1; 165 ctx->prot = access; 166 ret = check_prot(ctx->prot, access_type); 167 if (ret == 0) { 168 /* Access granted */ 169 qemu_log_mask(CPU_LOG_MMU, "PTE access granted !\n"); 170 } else { 171 /* Access right violation */ 172 qemu_log_mask(CPU_LOG_MMU, "PTE access rejected\n"); 173 } 174 } 175 } 176 177 return ret; 178 } 179 180 static int pte_update_flags(mmu_ctx_t *ctx, target_ulong *pte1p, 181 int ret, MMUAccessType access_type) 182 { 183 int store = 0; 184 185 /* Update page flags */ 186 if (!(*pte1p & 0x00000100)) { 187 /* Update accessed flag */ 188 *pte1p |= 0x00000100; 189 store = 1; 190 } 191 if (!(*pte1p & 0x00000080)) { 192 if (access_type == MMU_DATA_STORE && ret == 0) { 193 /* Update changed flag */ 194 *pte1p |= 0x00000080; 195 store = 1; 196 } else { 197 /* Force page fault for first write access */ 198 ctx->prot &= ~PAGE_WRITE; 199 } 200 } 201 202 return store; 203 } 204 205 /* Software driven TLB helpers */ 206 static inline int ppc6xx_tlb_getnum(CPUPPCState *env, target_ulong eaddr, 207 int way, int is_code) 208 { 209 int nr; 210 211 /* Select TLB num in a way from address */ 212 nr = (eaddr >> TARGET_PAGE_BITS) & (env->tlb_per_way - 1); 213 /* Select TLB way */ 214 nr += env->tlb_per_way * way; 215 /* 6xx have separate TLBs for instructions and data */ 216 if (is_code && env->id_tlbs == 1) { 217 nr += env->nb_tlb; 218 } 219 220 return nr; 221 } 222 223 static inline void ppc6xx_tlb_invalidate_all(CPUPPCState *env) 224 { 225 ppc6xx_tlb_t *tlb; 226 int nr, max; 227 228 /* LOG_SWTLB("Invalidate all TLBs\n"); */ 229 /* Invalidate all defined software TLB */ 230 max = env->nb_tlb; 231 if (env->id_tlbs == 1) { 232 max *= 2; 233 } 234 for (nr = 0; nr < max; nr++) { 235 tlb = &env->tlb.tlb6[nr]; 236 
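        /*
         * Clearing PTE0[V] (bit 0x80000000) is enough to mark the entry
         * invalid; the corresponding QEMU TLB entries are dropped by the
         * global tlb_flush() once the loop completes.
         */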
pte_invalidate(&tlb->pte0); 237 } 238 tlb_flush(env_cpu(env)); 239 } 240 241 static inline void ppc6xx_tlb_invalidate_virt2(CPUPPCState *env, 242 target_ulong eaddr, 243 int is_code, int match_epn) 244 { 245 #if !defined(FLUSH_ALL_TLBS) 246 CPUState *cs = env_cpu(env); 247 ppc6xx_tlb_t *tlb; 248 int way, nr; 249 250 /* Invalidate ITLB + DTLB, all ways */ 251 for (way = 0; way < env->nb_ways; way++) { 252 nr = ppc6xx_tlb_getnum(env, eaddr, way, is_code); 253 tlb = &env->tlb.tlb6[nr]; 254 if (pte_is_valid(tlb->pte0) && (match_epn == 0 || eaddr == tlb->EPN)) { 255 LOG_SWTLB("TLB invalidate %d/%d " TARGET_FMT_lx "\n", nr, 256 env->nb_tlb, eaddr); 257 pte_invalidate(&tlb->pte0); 258 tlb_flush_page(cs, tlb->EPN); 259 } 260 } 261 #else 262 /* XXX: PowerPC specification say this is valid as well */ 263 ppc6xx_tlb_invalidate_all(env); 264 #endif 265 } 266 267 static inline void ppc6xx_tlb_invalidate_virt(CPUPPCState *env, 268 target_ulong eaddr, int is_code) 269 { 270 ppc6xx_tlb_invalidate_virt2(env, eaddr, is_code, 0); 271 } 272 273 #ifdef CONFIG_TCG 274 static void ppc6xx_tlb_store(CPUPPCState *env, target_ulong EPN, int way, 275 int is_code, target_ulong pte0, target_ulong pte1) 276 { 277 ppc6xx_tlb_t *tlb; 278 int nr; 279 280 nr = ppc6xx_tlb_getnum(env, EPN, way, is_code); 281 tlb = &env->tlb.tlb6[nr]; 282 LOG_SWTLB("Set TLB %d/%d EPN " TARGET_FMT_lx " PTE0 " TARGET_FMT_lx 283 " PTE1 " TARGET_FMT_lx "\n", nr, env->nb_tlb, EPN, pte0, pte1); 284 /* Invalidate any pending reference in QEMU for this virtual address */ 285 ppc6xx_tlb_invalidate_virt2(env, EPN, is_code, 1); 286 tlb->pte0 = pte0; 287 tlb->pte1 = pte1; 288 tlb->EPN = EPN; 289 /* Store last way for LRU mechanism */ 290 env->last_way = way; 291 } 292 #endif 293 294 static int ppc6xx_tlb_check(CPUPPCState *env, mmu_ctx_t *ctx, 295 target_ulong eaddr, MMUAccessType access_type) 296 { 297 ppc6xx_tlb_t *tlb; 298 int nr, best, way; 299 int ret; 300 301 best = -1; 302 ret = -1; /* No TLB found */ 303 for (way = 0; way < env->nb_ways; way++) { 304 nr = ppc6xx_tlb_getnum(env, eaddr, way, access_type == MMU_INST_FETCH); 305 tlb = &env->tlb.tlb6[nr]; 306 /* This test "emulates" the PTE index match for hardware TLBs */ 307 if ((eaddr & TARGET_PAGE_MASK) != tlb->EPN) { 308 LOG_SWTLB("TLB %d/%d %s [" TARGET_FMT_lx " " TARGET_FMT_lx 309 "] <> " TARGET_FMT_lx "\n", nr, env->nb_tlb, 310 pte_is_valid(tlb->pte0) ? "valid" : "inval", 311 tlb->EPN, tlb->EPN + TARGET_PAGE_SIZE, eaddr); 312 continue; 313 } 314 LOG_SWTLB("TLB %d/%d %s " TARGET_FMT_lx " <> " TARGET_FMT_lx " " 315 TARGET_FMT_lx " %c %c\n", nr, env->nb_tlb, 316 pte_is_valid(tlb->pte0) ? "valid" : "inval", 317 tlb->EPN, eaddr, tlb->pte1, 318 access_type == MMU_DATA_STORE ? 'S' : 'L', 319 access_type == MMU_INST_FETCH ? 'I' : 'D'); 320 switch (ppc6xx_tlb_pte_check(ctx, tlb->pte0, tlb->pte1, 321 0, access_type)) { 322 case -3: 323 /* TLB inconsistency */ 324 return -1; 325 case -2: 326 /* Access violation */ 327 ret = -2; 328 best = nr; 329 break; 330 case -1: 331 default: 332 /* No match */ 333 break; 334 case 0: 335 /* access granted */ 336 /* 337 * XXX: we should go on looping to check all TLBs 338 * consistency but we can speed-up the whole thing as 339 * the result would be undefined if TLBs are not 340 * consistent. 
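         * Stopping at the first granted entry is therefore an
         * acceptable shortcut for well-behaved guests.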
341 */ 342 ret = 0; 343 best = nr; 344 goto done; 345 } 346 } 347 if (best != -1) { 348 done: 349 LOG_SWTLB("found TLB at addr " TARGET_FMT_plx " prot=%01x ret=%d\n", 350 ctx->raddr & TARGET_PAGE_MASK, ctx->prot, ret); 351 /* Update page flags */ 352 pte_update_flags(ctx, &env->tlb.tlb6[best].pte1, ret, access_type); 353 } 354 355 return ret; 356 } 357 358 /* Perform BAT hit & translation */ 359 static inline void bat_size_prot(CPUPPCState *env, target_ulong *blp, 360 int *validp, int *protp, target_ulong *BATu, 361 target_ulong *BATl) 362 { 363 target_ulong bl; 364 int pp, valid, prot; 365 366 bl = (*BATu & 0x00001FFC) << 15; 367 valid = 0; 368 prot = 0; 369 if (((msr_pr == 0) && (*BATu & 0x00000002)) || 370 ((msr_pr != 0) && (*BATu & 0x00000001))) { 371 valid = 1; 372 pp = *BATl & 0x00000003; 373 if (pp != 0) { 374 prot = PAGE_READ | PAGE_EXEC; 375 if (pp == 0x2) { 376 prot |= PAGE_WRITE; 377 } 378 } 379 } 380 *blp = bl; 381 *validp = valid; 382 *protp = prot; 383 } 384 385 static int get_bat_6xx_tlb(CPUPPCState *env, mmu_ctx_t *ctx, 386 target_ulong virtual, MMUAccessType access_type) 387 { 388 target_ulong *BATlt, *BATut, *BATu, *BATl; 389 target_ulong BEPIl, BEPIu, bl; 390 int i, valid, prot; 391 int ret = -1; 392 bool ifetch = access_type == MMU_INST_FETCH; 393 394 LOG_BATS("%s: %cBAT v " TARGET_FMT_lx "\n", __func__, 395 ifetch ? 'I' : 'D', virtual); 396 if (ifetch) { 397 BATlt = env->IBAT[1]; 398 BATut = env->IBAT[0]; 399 } else { 400 BATlt = env->DBAT[1]; 401 BATut = env->DBAT[0]; 402 } 403 for (i = 0; i < env->nb_BATs; i++) { 404 BATu = &BATut[i]; 405 BATl = &BATlt[i]; 406 BEPIu = *BATu & 0xF0000000; 407 BEPIl = *BATu & 0x0FFE0000; 408 bat_size_prot(env, &bl, &valid, &prot, BATu, BATl); 409 LOG_BATS("%s: %cBAT%d v " TARGET_FMT_lx " BATu " TARGET_FMT_lx 410 " BATl " TARGET_FMT_lx "\n", __func__, 411 ifetch ? 'I' : 'D', i, virtual, *BATu, *BATl); 412 if ((virtual & 0xF0000000) == BEPIu && 413 ((virtual & 0x0FFE0000) & ~bl) == BEPIl) { 414 /* BAT matches */ 415 if (valid != 0) { 416 /* Get physical address */ 417 ctx->raddr = (*BATl & 0xF0000000) | 418 ((virtual & 0x0FFE0000 & bl) | (*BATl & 0x0FFE0000)) | 419 (virtual & 0x0001F000); 420 /* Compute access rights */ 421 ctx->prot = prot; 422 ret = check_prot(ctx->prot, access_type); 423 if (ret == 0) { 424 LOG_BATS("BAT %d match: r " TARGET_FMT_plx " prot=%c%c\n", 425 i, ctx->raddr, ctx->prot & PAGE_READ ? 'R' : '-', 426 ctx->prot & PAGE_WRITE ? 'W' : '-'); 427 } 428 break; 429 } 430 } 431 } 432 if (ret < 0) { 433 #if defined(DEBUG_BATS) 434 if (qemu_log_enabled()) { 435 LOG_BATS("no BAT match for " TARGET_FMT_lx ":\n", virtual); 436 for (i = 0; i < 4; i++) { 437 BATu = &BATut[i]; 438 BATl = &BATlt[i]; 439 BEPIu = *BATu & 0xF0000000; 440 BEPIl = *BATu & 0x0FFE0000; 441 bl = (*BATu & 0x00001FFC) << 15; 442 LOG_BATS("%s: %cBAT%d v " TARGET_FMT_lx " BATu " TARGET_FMT_lx 443 " BATl " TARGET_FMT_lx "\n\t" TARGET_FMT_lx " " 444 TARGET_FMT_lx " " TARGET_FMT_lx "\n", 445 __func__, ifetch ? 
'I' : 'D', i, virtual, 446 *BATu, *BATl, BEPIu, BEPIl, bl); 447 } 448 } 449 #endif 450 } 451 /* No hit */ 452 return ret; 453 } 454 455 /* Perform segment based translation */ 456 static int get_segment_6xx_tlb(CPUPPCState *env, mmu_ctx_t *ctx, 457 target_ulong eaddr, MMUAccessType access_type, 458 int type) 459 { 460 PowerPCCPU *cpu = env_archcpu(env); 461 hwaddr hash; 462 target_ulong vsid; 463 int ds, pr, target_page_bits; 464 int ret; 465 target_ulong sr, pgidx; 466 467 pr = msr_pr; 468 ctx->eaddr = eaddr; 469 470 sr = env->sr[eaddr >> 28]; 471 ctx->key = (((sr & 0x20000000) && (pr != 0)) || 472 ((sr & 0x40000000) && (pr == 0))) ? 1 : 0; 473 ds = sr & 0x80000000 ? 1 : 0; 474 ctx->nx = sr & 0x10000000 ? 1 : 0; 475 vsid = sr & 0x00FFFFFF; 476 target_page_bits = TARGET_PAGE_BITS; 477 qemu_log_mask(CPU_LOG_MMU, 478 "Check segment v=" TARGET_FMT_lx " %d " TARGET_FMT_lx 479 " nip=" TARGET_FMT_lx " lr=" TARGET_FMT_lx 480 " ir=%d dr=%d pr=%d %d t=%d\n", 481 eaddr, (int)(eaddr >> 28), sr, env->nip, env->lr, (int)msr_ir, 482 (int)msr_dr, pr != 0 ? 1 : 0, access_type == MMU_DATA_STORE, type); 483 pgidx = (eaddr & ~SEGMENT_MASK_256M) >> target_page_bits; 484 hash = vsid ^ pgidx; 485 ctx->ptem = (vsid << 7) | (pgidx >> 10); 486 487 qemu_log_mask(CPU_LOG_MMU, 488 "pte segment: key=%d ds %d nx %d vsid " TARGET_FMT_lx "\n", 489 ctx->key, ds, ctx->nx, vsid); 490 ret = -1; 491 if (!ds) { 492 /* Check if instruction fetch is allowed, if needed */ 493 if (type != ACCESS_CODE || ctx->nx == 0) { 494 /* Page address translation */ 495 qemu_log_mask(CPU_LOG_MMU, "htab_base " TARGET_FMT_plx 496 " htab_mask " TARGET_FMT_plx 497 " hash " TARGET_FMT_plx "\n", 498 ppc_hash32_hpt_base(cpu), ppc_hash32_hpt_mask(cpu), hash); 499 ctx->hash[0] = hash; 500 ctx->hash[1] = ~hash; 501 502 /* Initialize real address with an invalid value */ 503 ctx->raddr = (hwaddr)-1ULL; 504 /* Software TLB search */ 505 ret = ppc6xx_tlb_check(env, ctx, eaddr, access_type); 506 #if defined(DUMP_PAGE_TABLES) 507 if (qemu_loglevel_mask(CPU_LOG_MMU)) { 508 CPUState *cs = env_cpu(env); 509 hwaddr curaddr; 510 uint32_t a0, a1, a2, a3; 511 512 qemu_log("Page table: " TARGET_FMT_plx " len " TARGET_FMT_plx 513 "\n", ppc_hash32_hpt_base(cpu), 514 ppc_hash32_hpt_mask(cpu) + 0x80); 515 for (curaddr = ppc_hash32_hpt_base(cpu); 516 curaddr < (ppc_hash32_hpt_base(cpu) 517 + ppc_hash32_hpt_mask(cpu) + 0x80); 518 curaddr += 16) { 519 a0 = ldl_phys(cs->as, curaddr); 520 a1 = ldl_phys(cs->as, curaddr + 4); 521 a2 = ldl_phys(cs->as, curaddr + 8); 522 a3 = ldl_phys(cs->as, curaddr + 12); 523 if (a0 != 0 || a1 != 0 || a2 != 0 || a3 != 0) { 524 qemu_log(TARGET_FMT_plx ": %08x %08x %08x %08x\n", 525 curaddr, a0, a1, a2, a3); 526 } 527 } 528 } 529 #endif 530 } else { 531 qemu_log_mask(CPU_LOG_MMU, "No access allowed\n"); 532 ret = -3; 533 } 534 } else { 535 target_ulong sr; 536 537 qemu_log_mask(CPU_LOG_MMU, "direct store...\n"); 538 /* Direct-store segment : absolutely *BUGGY* for now */ 539 540 /* 541 * Direct-store implies a 32-bit MMU. 542 * Check the Segment Register's bus unit ID (BUID). 543 */ 544 sr = env->sr[eaddr >> 28]; 545 if ((sr & 0x1FF00000) >> 20 == 0x07f) { 546 /* 547 * Memory-forced I/O controller interface access 548 * 549 * If T=1 and BUID=x'07F', the 601 performs a memory 550 * access to SR[28-31] LA[4-31], bypassing all protection 551 * mechanisms. 
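             *
             * For example (hypothetical values), with SR[28-31] = 0xA and
             * EA = 0x12345678 the computation below yields the real
             * address 0xA2345678.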
552 */ 553 ctx->raddr = ((sr & 0xF) << 28) | (eaddr & 0x0FFFFFFF); 554 ctx->prot = PAGE_READ | PAGE_WRITE | PAGE_EXEC; 555 return 0; 556 } 557 558 switch (type) { 559 case ACCESS_INT: 560 /* Integer load/store : only access allowed */ 561 break; 562 case ACCESS_CODE: 563 /* No code fetch is allowed in direct-store areas */ 564 return -4; 565 case ACCESS_FLOAT: 566 /* Floating point load/store */ 567 return -4; 568 case ACCESS_RES: 569 /* lwarx, ldarx or srwcx. */ 570 return -4; 571 case ACCESS_CACHE: 572 /* 573 * dcba, dcbt, dcbtst, dcbf, dcbi, dcbst, dcbz, or icbi 574 * 575 * Should make the instruction do no-op. As it already do 576 * no-op, it's quite easy :-) 577 */ 578 ctx->raddr = eaddr; 579 return 0; 580 case ACCESS_EXT: 581 /* eciwx or ecowx */ 582 return -4; 583 default: 584 qemu_log_mask(CPU_LOG_MMU, "ERROR: instruction should not need " 585 "address translation\n"); 586 return -4; 587 } 588 if ((access_type == MMU_DATA_STORE || ctx->key != 1) && 589 (access_type == MMU_DATA_LOAD || ctx->key != 0)) { 590 ctx->raddr = eaddr; 591 ret = 2; 592 } else { 593 ret = -2; 594 } 595 } 596 597 return ret; 598 } 599 600 /* Generic TLB check function for embedded PowerPC implementations */ 601 static int ppcemb_tlb_check(CPUPPCState *env, ppcemb_tlb_t *tlb, 602 hwaddr *raddrp, 603 target_ulong address, uint32_t pid, int ext, 604 int i) 605 { 606 target_ulong mask; 607 608 /* Check valid flag */ 609 if (!(tlb->prot & PAGE_VALID)) { 610 return -1; 611 } 612 mask = ~(tlb->size - 1); 613 LOG_SWTLB("%s: TLB %d address " TARGET_FMT_lx " PID %u <=> " TARGET_FMT_lx 614 " " TARGET_FMT_lx " %u %x\n", __func__, i, address, pid, tlb->EPN, 615 mask, (uint32_t)tlb->PID, tlb->prot); 616 /* Check PID */ 617 if (tlb->PID != 0 && tlb->PID != pid) { 618 return -1; 619 } 620 /* Check effective address */ 621 if ((address & mask) != tlb->EPN) { 622 return -1; 623 } 624 *raddrp = (tlb->RPN & mask) | (address & ~mask); 625 if (ext) { 626 /* Extend the physical address to 36 bits */ 627 *raddrp |= (uint64_t)(tlb->RPN & 0xF) << 32; 628 } 629 630 return 0; 631 } 632 633 #ifdef CONFIG_TCG 634 /* Generic TLB search function for PowerPC embedded implementations */ 635 static int ppcemb_tlb_search(CPUPPCState *env, target_ulong address, 636 uint32_t pid) 637 { 638 ppcemb_tlb_t *tlb; 639 hwaddr raddr; 640 int i, ret; 641 642 /* Default return value is no match */ 643 ret = -1; 644 for (i = 0; i < env->nb_tlb; i++) { 645 tlb = &env->tlb.tlbe[i]; 646 if (ppcemb_tlb_check(env, tlb, &raddr, address, pid, 0, i) == 0) { 647 ret = i; 648 break; 649 } 650 } 651 652 return ret; 653 } 654 #endif 655 656 /* Helpers specific to PowerPC 40x implementations */ 657 static inline void ppc4xx_tlb_invalidate_all(CPUPPCState *env) 658 { 659 ppcemb_tlb_t *tlb; 660 int i; 661 662 for (i = 0; i < env->nb_tlb; i++) { 663 tlb = &env->tlb.tlbe[i]; 664 tlb->prot &= ~PAGE_VALID; 665 } 666 tlb_flush(env_cpu(env)); 667 } 668 669 static int mmu40x_get_physical_address(CPUPPCState *env, mmu_ctx_t *ctx, 670 target_ulong address, 671 MMUAccessType access_type) 672 { 673 ppcemb_tlb_t *tlb; 674 hwaddr raddr; 675 int i, ret, zsel, zpr, pr; 676 677 ret = -1; 678 raddr = (hwaddr)-1ULL; 679 pr = msr_pr; 680 for (i = 0; i < env->nb_tlb; i++) { 681 tlb = &env->tlb.tlbe[i]; 682 if (ppcemb_tlb_check(env, tlb, &raddr, address, 683 env->spr[SPR_40x_PID], 0, i) < 0) { 684 continue; 685 } 686 zsel = (tlb->attr >> 4) & 0xF; 687 zpr = (env->spr[SPR_40x_ZPR] >> (30 - (2 * zsel))) & 0x3; 688 LOG_SWTLB("%s: TLB %d zsel %d zpr %d ty %d attr %08x\n", 689 __func__, i, zsel, 
zpr, access_type, tlb->attr); 690 /* Check execute enable bit */ 691 switch (zpr) { 692 case 0x2: 693 if (pr != 0) { 694 goto check_perms; 695 } 696 /* fall through */ 697 case 0x3: 698 /* All accesses granted */ 699 ctx->prot = PAGE_READ | PAGE_WRITE | PAGE_EXEC; 700 ret = 0; 701 break; 702 case 0x0: 703 if (pr != 0) { 704 /* Raise Zone protection fault. */ 705 env->spr[SPR_40x_ESR] = 1 << 22; 706 ctx->prot = 0; 707 ret = -2; 708 break; 709 } 710 /* fall through */ 711 case 0x1: 712 check_perms: 713 /* Check from TLB entry */ 714 ctx->prot = tlb->prot; 715 ret = check_prot(ctx->prot, access_type); 716 if (ret == -2) { 717 env->spr[SPR_40x_ESR] = 0; 718 } 719 break; 720 } 721 if (ret >= 0) { 722 ctx->raddr = raddr; 723 LOG_SWTLB("%s: access granted " TARGET_FMT_lx " => " TARGET_FMT_plx 724 " %d %d\n", __func__, address, ctx->raddr, ctx->prot, 725 ret); 726 return 0; 727 } 728 } 729 LOG_SWTLB("%s: access refused " TARGET_FMT_lx " => " TARGET_FMT_plx 730 " %d %d\n", __func__, address, raddr, ctx->prot, ret); 731 732 return ret; 733 } 734 735 void store_40x_sler(CPUPPCState *env, uint32_t val) 736 { 737 /* XXX: TO BE FIXED */ 738 if (val != 0x00000000) { 739 cpu_abort(env_cpu(env), 740 "Little-endian regions are not supported by now\n"); 741 } 742 env->spr[SPR_405_SLER] = val; 743 } 744 745 static int mmubooke_check_tlb(CPUPPCState *env, ppcemb_tlb_t *tlb, 746 hwaddr *raddr, int *prot, target_ulong address, 747 MMUAccessType access_type, int i) 748 { 749 int prot2; 750 751 if (ppcemb_tlb_check(env, tlb, raddr, address, 752 env->spr[SPR_BOOKE_PID], 753 !env->nb_pids, i) >= 0) { 754 goto found_tlb; 755 } 756 757 if (env->spr[SPR_BOOKE_PID1] && 758 ppcemb_tlb_check(env, tlb, raddr, address, 759 env->spr[SPR_BOOKE_PID1], 0, i) >= 0) { 760 goto found_tlb; 761 } 762 763 if (env->spr[SPR_BOOKE_PID2] && 764 ppcemb_tlb_check(env, tlb, raddr, address, 765 env->spr[SPR_BOOKE_PID2], 0, i) >= 0) { 766 goto found_tlb; 767 } 768 769 LOG_SWTLB("%s: TLB entry not found\n", __func__); 770 return -1; 771 772 found_tlb: 773 774 if (msr_pr != 0) { 775 prot2 = tlb->prot & 0xF; 776 } else { 777 prot2 = (tlb->prot >> 4) & 0xF; 778 } 779 780 /* Check the address space */ 781 if ((access_type == MMU_INST_FETCH ? msr_ir : msr_dr) != (tlb->attr & 1)) { 782 LOG_SWTLB("%s: AS doesn't match\n", __func__); 783 return -1; 784 } 785 786 *prot = prot2; 787 if (prot2 & prot_for_access_type(access_type)) { 788 LOG_SWTLB("%s: good TLB!\n", __func__); 789 return 0; 790 } 791 792 LOG_SWTLB("%s: no prot match: %x\n", __func__, prot2); 793 return access_type == MMU_INST_FETCH ? 
-3 : -2; 794 } 795 796 static int mmubooke_get_physical_address(CPUPPCState *env, mmu_ctx_t *ctx, 797 target_ulong address, 798 MMUAccessType access_type) 799 { 800 ppcemb_tlb_t *tlb; 801 hwaddr raddr; 802 int i, ret; 803 804 ret = -1; 805 raddr = (hwaddr)-1ULL; 806 for (i = 0; i < env->nb_tlb; i++) { 807 tlb = &env->tlb.tlbe[i]; 808 ret = mmubooke_check_tlb(env, tlb, &raddr, &ctx->prot, address, 809 access_type, i); 810 if (ret != -1) { 811 break; 812 } 813 } 814 815 if (ret >= 0) { 816 ctx->raddr = raddr; 817 LOG_SWTLB("%s: access granted " TARGET_FMT_lx " => " TARGET_FMT_plx 818 " %d %d\n", __func__, address, ctx->raddr, ctx->prot, 819 ret); 820 } else { 821 LOG_SWTLB("%s: access refused " TARGET_FMT_lx " => " TARGET_FMT_plx 822 " %d %d\n", __func__, address, raddr, ctx->prot, ret); 823 } 824 825 return ret; 826 } 827 828 static void booke206_flush_tlb(CPUPPCState *env, int flags, 829 const int check_iprot) 830 { 831 int tlb_size; 832 int i, j; 833 ppcmas_tlb_t *tlb = env->tlb.tlbm; 834 835 for (i = 0; i < BOOKE206_MAX_TLBN; i++) { 836 if (flags & (1 << i)) { 837 tlb_size = booke206_tlb_size(env, i); 838 for (j = 0; j < tlb_size; j++) { 839 if (!check_iprot || !(tlb[j].mas1 & MAS1_IPROT)) { 840 tlb[j].mas1 &= ~MAS1_VALID; 841 } 842 } 843 } 844 tlb += booke206_tlb_size(env, i); 845 } 846 847 tlb_flush(env_cpu(env)); 848 } 849 850 static hwaddr booke206_tlb_to_page_size(CPUPPCState *env, 851 ppcmas_tlb_t *tlb) 852 { 853 int tlbm_size; 854 855 tlbm_size = (tlb->mas1 & MAS1_TSIZE_MASK) >> MAS1_TSIZE_SHIFT; 856 857 return 1024ULL << tlbm_size; 858 } 859 860 /* TLB check function for MAS based SoftTLBs */ 861 static int ppcmas_tlb_check(CPUPPCState *env, ppcmas_tlb_t *tlb, 862 hwaddr *raddrp, target_ulong address, 863 uint32_t pid) 864 { 865 hwaddr mask; 866 uint32_t tlb_pid; 867 868 if (!msr_cm) { 869 /* In 32bit mode we can only address 32bit EAs */ 870 address = (uint32_t)address; 871 } 872 873 /* Check valid flag */ 874 if (!(tlb->mas1 & MAS1_VALID)) { 875 return -1; 876 } 877 878 mask = ~(booke206_tlb_to_page_size(env, tlb) - 1); 879 LOG_SWTLB("%s: TLB ADDR=0x" TARGET_FMT_lx " PID=0x%x MAS1=0x%x MAS2=0x%" 880 PRIx64 " mask=0x%" HWADDR_PRIx " MAS7_3=0x%" PRIx64 " MAS8=0x%" 881 PRIx32 "\n", __func__, address, pid, tlb->mas1, tlb->mas2, mask, 882 tlb->mas7_3, tlb->mas8); 883 884 /* Check PID */ 885 tlb_pid = (tlb->mas1 & MAS1_TID_MASK) >> MAS1_TID_SHIFT; 886 if (tlb_pid != 0 && tlb_pid != pid) { 887 return -1; 888 } 889 890 /* Check effective address */ 891 if ((address & mask) != (tlb->mas2 & MAS2_EPN_MASK)) { 892 return -1; 893 } 894 895 if (raddrp) { 896 *raddrp = (tlb->mas7_3 & mask) | (address & ~mask); 897 } 898 899 return 0; 900 } 901 902 static bool is_epid_mmu(int mmu_idx) 903 { 904 return mmu_idx == PPC_TLB_EPID_STORE || mmu_idx == PPC_TLB_EPID_LOAD; 905 } 906 907 static uint32_t mmubooke206_esr(int mmu_idx, MMUAccessType access_type) 908 { 909 uint32_t esr = 0; 910 if (access_type == MMU_DATA_STORE) { 911 esr |= ESR_ST; 912 } 913 if (is_epid_mmu(mmu_idx)) { 914 esr |= ESR_EPID; 915 } 916 return esr; 917 } 918 919 /* 920 * Get EPID register given the mmu_idx. If this is regular load, 921 * construct the EPID access bits from current processor state 922 * 923 * Get the effective AS and PR bits and the PID. The PID is returned 924 * only if EPID load is requested, otherwise the caller must detect 925 * the correct EPID. Return true if valid EPID is returned. 
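 * For external PID (EPID) accesses the AS/PR/PID triple comes from the
 * EPLC (loads) or EPSC (stores) register; otherwise AS and PR are taken
 * from the current MSR state.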
926 */ 927 static bool mmubooke206_get_as(CPUPPCState *env, 928 int mmu_idx, uint32_t *epid_out, 929 bool *as_out, bool *pr_out) 930 { 931 if (is_epid_mmu(mmu_idx)) { 932 uint32_t epidr; 933 if (mmu_idx == PPC_TLB_EPID_STORE) { 934 epidr = env->spr[SPR_BOOKE_EPSC]; 935 } else { 936 epidr = env->spr[SPR_BOOKE_EPLC]; 937 } 938 *epid_out = (epidr & EPID_EPID) >> EPID_EPID_SHIFT; 939 *as_out = !!(epidr & EPID_EAS); 940 *pr_out = !!(epidr & EPID_EPR); 941 return true; 942 } else { 943 *as_out = msr_ds; 944 *pr_out = msr_pr; 945 return false; 946 } 947 } 948 949 /* Check if the tlb found by hashing really matches */ 950 static int mmubooke206_check_tlb(CPUPPCState *env, ppcmas_tlb_t *tlb, 951 hwaddr *raddr, int *prot, 952 target_ulong address, 953 MMUAccessType access_type, int mmu_idx) 954 { 955 int prot2 = 0; 956 uint32_t epid; 957 bool as, pr; 958 bool use_epid = mmubooke206_get_as(env, mmu_idx, &epid, &as, &pr); 959 960 if (!use_epid) { 961 if (ppcmas_tlb_check(env, tlb, raddr, address, 962 env->spr[SPR_BOOKE_PID]) >= 0) { 963 goto found_tlb; 964 } 965 966 if (env->spr[SPR_BOOKE_PID1] && 967 ppcmas_tlb_check(env, tlb, raddr, address, 968 env->spr[SPR_BOOKE_PID1]) >= 0) { 969 goto found_tlb; 970 } 971 972 if (env->spr[SPR_BOOKE_PID2] && 973 ppcmas_tlb_check(env, tlb, raddr, address, 974 env->spr[SPR_BOOKE_PID2]) >= 0) { 975 goto found_tlb; 976 } 977 } else { 978 if (ppcmas_tlb_check(env, tlb, raddr, address, epid) >= 0) { 979 goto found_tlb; 980 } 981 } 982 983 LOG_SWTLB("%s: TLB entry not found\n", __func__); 984 return -1; 985 986 found_tlb: 987 988 if (pr) { 989 if (tlb->mas7_3 & MAS3_UR) { 990 prot2 |= PAGE_READ; 991 } 992 if (tlb->mas7_3 & MAS3_UW) { 993 prot2 |= PAGE_WRITE; 994 } 995 if (tlb->mas7_3 & MAS3_UX) { 996 prot2 |= PAGE_EXEC; 997 } 998 } else { 999 if (tlb->mas7_3 & MAS3_SR) { 1000 prot2 |= PAGE_READ; 1001 } 1002 if (tlb->mas7_3 & MAS3_SW) { 1003 prot2 |= PAGE_WRITE; 1004 } 1005 if (tlb->mas7_3 & MAS3_SX) { 1006 prot2 |= PAGE_EXEC; 1007 } 1008 } 1009 1010 /* Check the address space and permissions */ 1011 if (access_type == MMU_INST_FETCH) { 1012 /* There is no way to fetch code using epid load */ 1013 assert(!use_epid); 1014 as = msr_ir; 1015 } 1016 1017 if (as != ((tlb->mas1 & MAS1_TS) >> MAS1_TS_SHIFT)) { 1018 LOG_SWTLB("%s: AS doesn't match\n", __func__); 1019 return -1; 1020 } 1021 1022 *prot = prot2; 1023 if (prot2 & prot_for_access_type(access_type)) { 1024 LOG_SWTLB("%s: good TLB!\n", __func__); 1025 return 0; 1026 } 1027 1028 LOG_SWTLB("%s: no prot match: %x\n", __func__, prot2); 1029 return access_type == MMU_INST_FETCH ? 
-3 : -2; 1030 } 1031 1032 static int mmubooke206_get_physical_address(CPUPPCState *env, mmu_ctx_t *ctx, 1033 target_ulong address, 1034 MMUAccessType access_type, 1035 int mmu_idx) 1036 { 1037 ppcmas_tlb_t *tlb; 1038 hwaddr raddr; 1039 int i, j, ret; 1040 1041 ret = -1; 1042 raddr = (hwaddr)-1ULL; 1043 1044 for (i = 0; i < BOOKE206_MAX_TLBN; i++) { 1045 int ways = booke206_tlb_ways(env, i); 1046 1047 for (j = 0; j < ways; j++) { 1048 tlb = booke206_get_tlbm(env, i, address, j); 1049 if (!tlb) { 1050 continue; 1051 } 1052 ret = mmubooke206_check_tlb(env, tlb, &raddr, &ctx->prot, address, 1053 access_type, mmu_idx); 1054 if (ret != -1) { 1055 goto found_tlb; 1056 } 1057 } 1058 } 1059 1060 found_tlb: 1061 1062 if (ret >= 0) { 1063 ctx->raddr = raddr; 1064 LOG_SWTLB("%s: access granted " TARGET_FMT_lx " => " TARGET_FMT_plx 1065 " %d %d\n", __func__, address, ctx->raddr, ctx->prot, 1066 ret); 1067 } else { 1068 LOG_SWTLB("%s: access refused " TARGET_FMT_lx " => " TARGET_FMT_plx 1069 " %d %d\n", __func__, address, raddr, ctx->prot, ret); 1070 } 1071 1072 return ret; 1073 } 1074 1075 static const char *book3e_tsize_to_str[32] = { 1076 "1K", "2K", "4K", "8K", "16K", "32K", "64K", "128K", "256K", "512K", 1077 "1M", "2M", "4M", "8M", "16M", "32M", "64M", "128M", "256M", "512M", 1078 "1G", "2G", "4G", "8G", "16G", "32G", "64G", "128G", "256G", "512G", 1079 "1T", "2T" 1080 }; 1081 1082 static void mmubooke_dump_mmu(CPUPPCState *env) 1083 { 1084 ppcemb_tlb_t *entry; 1085 int i; 1086 1087 if (kvm_enabled() && !env->kvm_sw_tlb) { 1088 qemu_printf("Cannot access KVM TLB\n"); 1089 return; 1090 } 1091 1092 qemu_printf("\nTLB:\n"); 1093 qemu_printf("Effective Physical Size PID Prot " 1094 "Attr\n"); 1095 1096 entry = &env->tlb.tlbe[0]; 1097 for (i = 0; i < env->nb_tlb; i++, entry++) { 1098 hwaddr ea, pa; 1099 target_ulong mask; 1100 uint64_t size = (uint64_t)entry->size; 1101 char size_buf[20]; 1102 1103 /* Check valid flag */ 1104 if (!(entry->prot & PAGE_VALID)) { 1105 continue; 1106 } 1107 1108 mask = ~(entry->size - 1); 1109 ea = entry->EPN & mask; 1110 pa = entry->RPN & mask; 1111 /* Extend the physical address to 36 bits */ 1112 pa |= (hwaddr)(entry->RPN & 0xF) << 32; 1113 if (size >= 1 * MiB) { 1114 snprintf(size_buf, sizeof(size_buf), "%3" PRId64 "M", size / MiB); 1115 } else { 1116 snprintf(size_buf, sizeof(size_buf), "%3" PRId64 "k", size / KiB); 1117 } 1118 qemu_printf("0x%016" PRIx64 " 0x%016" PRIx64 " %s %-5u %08x %08x\n", 1119 (uint64_t)ea, (uint64_t)pa, size_buf, (uint32_t)entry->PID, 1120 entry->prot, entry->attr); 1121 } 1122 1123 } 1124 1125 static void mmubooke206_dump_one_tlb(CPUPPCState *env, int tlbn, int offset, 1126 int tlbsize) 1127 { 1128 ppcmas_tlb_t *entry; 1129 int i; 1130 1131 qemu_printf("\nTLB%d:\n", tlbn); 1132 qemu_printf("Effective Physical Size TID TS SRWX" 1133 " URWX WIMGE U0123\n"); 1134 1135 entry = &env->tlb.tlbm[offset]; 1136 for (i = 0; i < tlbsize; i++, entry++) { 1137 hwaddr ea, pa, size; 1138 int tsize; 1139 1140 if (!(entry->mas1 & MAS1_VALID)) { 1141 continue; 1142 } 1143 1144 tsize = (entry->mas1 & MAS1_TSIZE_MASK) >> MAS1_TSIZE_SHIFT; 1145 size = 1024ULL << tsize; 1146 ea = entry->mas2 & ~(size - 1); 1147 pa = entry->mas7_3 & ~(size - 1); 1148 1149 qemu_printf("0x%016" PRIx64 " 0x%016" PRIx64 " %4s %-5u %1u S%c%c%c" 1150 "U%c%c%c %c%c%c%c%c U%c%c%c%c\n", 1151 (uint64_t)ea, (uint64_t)pa, 1152 book3e_tsize_to_str[tsize], 1153 (entry->mas1 & MAS1_TID_MASK) >> MAS1_TID_SHIFT, 1154 (entry->mas1 & MAS1_TS) >> MAS1_TS_SHIFT, 1155 entry->mas7_3 & MAS3_SR ? 
'R' : '-', 1156 entry->mas7_3 & MAS3_SW ? 'W' : '-', 1157 entry->mas7_3 & MAS3_SX ? 'X' : '-', 1158 entry->mas7_3 & MAS3_UR ? 'R' : '-', 1159 entry->mas7_3 & MAS3_UW ? 'W' : '-', 1160 entry->mas7_3 & MAS3_UX ? 'X' : '-', 1161 entry->mas2 & MAS2_W ? 'W' : '-', 1162 entry->mas2 & MAS2_I ? 'I' : '-', 1163 entry->mas2 & MAS2_M ? 'M' : '-', 1164 entry->mas2 & MAS2_G ? 'G' : '-', 1165 entry->mas2 & MAS2_E ? 'E' : '-', 1166 entry->mas7_3 & MAS3_U0 ? '0' : '-', 1167 entry->mas7_3 & MAS3_U1 ? '1' : '-', 1168 entry->mas7_3 & MAS3_U2 ? '2' : '-', 1169 entry->mas7_3 & MAS3_U3 ? '3' : '-'); 1170 } 1171 } 1172 1173 static void mmubooke206_dump_mmu(CPUPPCState *env) 1174 { 1175 int offset = 0; 1176 int i; 1177 1178 if (kvm_enabled() && !env->kvm_sw_tlb) { 1179 qemu_printf("Cannot access KVM TLB\n"); 1180 return; 1181 } 1182 1183 for (i = 0; i < BOOKE206_MAX_TLBN; i++) { 1184 int size = booke206_tlb_size(env, i); 1185 1186 if (size == 0) { 1187 continue; 1188 } 1189 1190 mmubooke206_dump_one_tlb(env, i, offset, size); 1191 offset += size; 1192 } 1193 } 1194 1195 static void mmu6xx_dump_BATs(CPUPPCState *env, int type) 1196 { 1197 target_ulong *BATlt, *BATut, *BATu, *BATl; 1198 target_ulong BEPIl, BEPIu, bl; 1199 int i; 1200 1201 switch (type) { 1202 case ACCESS_CODE: 1203 BATlt = env->IBAT[1]; 1204 BATut = env->IBAT[0]; 1205 break; 1206 default: 1207 BATlt = env->DBAT[1]; 1208 BATut = env->DBAT[0]; 1209 break; 1210 } 1211 1212 for (i = 0; i < env->nb_BATs; i++) { 1213 BATu = &BATut[i]; 1214 BATl = &BATlt[i]; 1215 BEPIu = *BATu & 0xF0000000; 1216 BEPIl = *BATu & 0x0FFE0000; 1217 bl = (*BATu & 0x00001FFC) << 15; 1218 qemu_printf("%s BAT%d BATu " TARGET_FMT_lx 1219 " BATl " TARGET_FMT_lx "\n\t" TARGET_FMT_lx " " 1220 TARGET_FMT_lx " " TARGET_FMT_lx "\n", 1221 type == ACCESS_CODE ? "code" : "data", i, 1222 *BATu, *BATl, BEPIu, BEPIl, bl); 1223 } 1224 } 1225 1226 static void mmu6xx_dump_mmu(CPUPPCState *env) 1227 { 1228 PowerPCCPU *cpu = env_archcpu(env); 1229 ppc6xx_tlb_t *tlb; 1230 target_ulong sr; 1231 int type, way, entry, i; 1232 1233 qemu_printf("HTAB base = 0x%"HWADDR_PRIx"\n", ppc_hash32_hpt_base(cpu)); 1234 qemu_printf("HTAB mask = 0x%"HWADDR_PRIx"\n", ppc_hash32_hpt_mask(cpu)); 1235 1236 qemu_printf("\nSegment registers:\n"); 1237 for (i = 0; i < 32; i++) { 1238 sr = env->sr[i]; 1239 if (sr & 0x80000000) { 1240 qemu_printf("%02d T=%d Ks=%d Kp=%d BUID=0x%03x " 1241 "CNTLR_SPEC=0x%05x\n", i, 1242 sr & 0x80000000 ? 1 : 0, sr & 0x40000000 ? 1 : 0, 1243 sr & 0x20000000 ? 1 : 0, (uint32_t)((sr >> 20) & 0x1FF), 1244 (uint32_t)(sr & 0xFFFFF)); 1245 } else { 1246 qemu_printf("%02d T=%d Ks=%d Kp=%d N=%d VSID=0x%06x\n", i, 1247 sr & 0x80000000 ? 1 : 0, sr & 0x40000000 ? 1 : 0, 1248 sr & 0x20000000 ? 1 : 0, sr & 0x10000000 ? 1 : 0, 1249 (uint32_t)(sr & 0x00FFFFFF)); 1250 } 1251 } 1252 1253 qemu_printf("\nBATs:\n"); 1254 mmu6xx_dump_BATs(env, ACCESS_INT); 1255 mmu6xx_dump_BATs(env, ACCESS_CODE); 1256 1257 if (env->id_tlbs != 1) { 1258 qemu_printf("ERROR: 6xx MMU should have separated TLB" 1259 " for code and data\n"); 1260 } 1261 1262 qemu_printf("\nTLBs [EPN EPN + SIZE]\n"); 1263 1264 for (type = 0; type < 2; type++) { 1265 for (way = 0; way < env->nb_ways; way++) { 1266 for (entry = env->nb_tlb * type + env->tlb_per_way * way; 1267 entry < (env->nb_tlb * type + env->tlb_per_way * (way + 1)); 1268 entry++) { 1269 1270 tlb = &env->tlb.tlb6[entry]; 1271 qemu_printf("%s TLB %02d/%02d way:%d %s [" 1272 TARGET_FMT_lx " " TARGET_FMT_lx "]\n", 1273 type ? 
"code" : "data", entry % env->nb_tlb, 1274 env->nb_tlb, way, 1275 pte_is_valid(tlb->pte0) ? "valid" : "inval", 1276 tlb->EPN, tlb->EPN + TARGET_PAGE_SIZE); 1277 } 1278 } 1279 } 1280 } 1281 1282 void dump_mmu(CPUPPCState *env) 1283 { 1284 switch (env->mmu_model) { 1285 case POWERPC_MMU_BOOKE: 1286 mmubooke_dump_mmu(env); 1287 break; 1288 case POWERPC_MMU_BOOKE206: 1289 mmubooke206_dump_mmu(env); 1290 break; 1291 case POWERPC_MMU_SOFT_6xx: 1292 case POWERPC_MMU_SOFT_74xx: 1293 mmu6xx_dump_mmu(env); 1294 break; 1295 #if defined(TARGET_PPC64) 1296 case POWERPC_MMU_64B: 1297 case POWERPC_MMU_2_03: 1298 case POWERPC_MMU_2_06: 1299 case POWERPC_MMU_2_07: 1300 dump_slb(env_archcpu(env)); 1301 break; 1302 case POWERPC_MMU_3_00: 1303 if (ppc64_v3_radix(env_archcpu(env))) { 1304 qemu_log_mask(LOG_UNIMP, "%s: the PPC64 MMU is unsupported\n", 1305 __func__); 1306 } else { 1307 dump_slb(env_archcpu(env)); 1308 } 1309 break; 1310 #endif 1311 default: 1312 qemu_log_mask(LOG_UNIMP, "%s: unimplemented\n", __func__); 1313 } 1314 } 1315 1316 static int check_physical(CPUPPCState *env, mmu_ctx_t *ctx, target_ulong eaddr, 1317 MMUAccessType access_type) 1318 { 1319 int in_plb, ret; 1320 1321 ctx->raddr = eaddr; 1322 ctx->prot = PAGE_READ | PAGE_EXEC; 1323 ret = 0; 1324 switch (env->mmu_model) { 1325 case POWERPC_MMU_SOFT_6xx: 1326 case POWERPC_MMU_SOFT_74xx: 1327 case POWERPC_MMU_SOFT_4xx: 1328 case POWERPC_MMU_REAL: 1329 case POWERPC_MMU_BOOKE: 1330 ctx->prot |= PAGE_WRITE; 1331 break; 1332 1333 case POWERPC_MMU_SOFT_4xx_Z: 1334 if (unlikely(msr_pe != 0)) { 1335 /* 1336 * 403 family add some particular protections, using 1337 * PBL/PBU registers for accesses with no translation. 1338 */ 1339 in_plb = 1340 /* Check PLB validity */ 1341 (env->pb[0] < env->pb[1] && 1342 /* and address in plb area */ 1343 eaddr >= env->pb[0] && eaddr < env->pb[1]) || 1344 (env->pb[2] < env->pb[3] && 1345 eaddr >= env->pb[2] && eaddr < env->pb[3]) ? 
1 : 0; 1346 if (in_plb ^ msr_px) { 1347 /* Access in protected area */ 1348 if (access_type == MMU_DATA_STORE) { 1349 /* Access is not allowed */ 1350 ret = -2; 1351 } 1352 } else { 1353 /* Read-write access is allowed */ 1354 ctx->prot |= PAGE_WRITE; 1355 } 1356 } 1357 break; 1358 1359 default: 1360 /* Caller's checks mean we should never get here for other models */ 1361 abort(); 1362 return -1; 1363 } 1364 1365 return ret; 1366 } 1367 1368 static int get_physical_address_wtlb(CPUPPCState *env, mmu_ctx_t *ctx, 1369 target_ulong eaddr, 1370 MMUAccessType access_type, int type, 1371 int mmu_idx) 1372 { 1373 int ret = -1; 1374 bool real_mode = (type == ACCESS_CODE && msr_ir == 0) 1375 || (type != ACCESS_CODE && msr_dr == 0); 1376 1377 switch (env->mmu_model) { 1378 case POWERPC_MMU_SOFT_6xx: 1379 case POWERPC_MMU_SOFT_74xx: 1380 if (real_mode) { 1381 ret = check_physical(env, ctx, eaddr, access_type); 1382 } else { 1383 /* Try to find a BAT */ 1384 if (env->nb_BATs != 0) { 1385 ret = get_bat_6xx_tlb(env, ctx, eaddr, access_type); 1386 } 1387 if (ret < 0) { 1388 /* We didn't match any BAT entry or don't have BATs */ 1389 ret = get_segment_6xx_tlb(env, ctx, eaddr, access_type, type); 1390 } 1391 } 1392 break; 1393 1394 case POWERPC_MMU_SOFT_4xx: 1395 case POWERPC_MMU_SOFT_4xx_Z: 1396 if (real_mode) { 1397 ret = check_physical(env, ctx, eaddr, access_type); 1398 } else { 1399 ret = mmu40x_get_physical_address(env, ctx, eaddr, access_type); 1400 } 1401 break; 1402 case POWERPC_MMU_BOOKE: 1403 ret = mmubooke_get_physical_address(env, ctx, eaddr, access_type); 1404 break; 1405 case POWERPC_MMU_BOOKE206: 1406 ret = mmubooke206_get_physical_address(env, ctx, eaddr, access_type, 1407 mmu_idx); 1408 break; 1409 case POWERPC_MMU_MPC8xx: 1410 /* XXX: TODO */ 1411 cpu_abort(env_cpu(env), "MPC8xx MMU model is not implemented\n"); 1412 break; 1413 case POWERPC_MMU_REAL: 1414 if (real_mode) { 1415 ret = check_physical(env, ctx, eaddr, access_type); 1416 } else { 1417 cpu_abort(env_cpu(env), 1418 "PowerPC in real mode do not do any translation\n"); 1419 } 1420 return -1; 1421 default: 1422 cpu_abort(env_cpu(env), "Unknown or invalid MMU model\n"); 1423 return -1; 1424 } 1425 1426 return ret; 1427 } 1428 1429 #ifdef CONFIG_TCG 1430 static int get_physical_address(CPUPPCState *env, mmu_ctx_t *ctx, 1431 target_ulong eaddr, MMUAccessType access_type, 1432 int type) 1433 { 1434 return get_physical_address_wtlb(env, ctx, eaddr, access_type, type, 0); 1435 } 1436 #endif 1437 1438 static void booke206_update_mas_tlb_miss(CPUPPCState *env, target_ulong address, 1439 MMUAccessType access_type, int mmu_idx) 1440 { 1441 uint32_t epid; 1442 bool as, pr; 1443 uint32_t missed_tid = 0; 1444 bool use_epid = mmubooke206_get_as(env, mmu_idx, &epid, &as, &pr); 1445 1446 if (access_type == MMU_INST_FETCH) { 1447 as = msr_ir; 1448 } 1449 env->spr[SPR_BOOKE_MAS0] = env->spr[SPR_BOOKE_MAS4] & MAS4_TLBSELD_MASK; 1450 env->spr[SPR_BOOKE_MAS1] = env->spr[SPR_BOOKE_MAS4] & MAS4_TSIZED_MASK; 1451 env->spr[SPR_BOOKE_MAS2] = env->spr[SPR_BOOKE_MAS4] & MAS4_WIMGED_MASK; 1452 env->spr[SPR_BOOKE_MAS3] = 0; 1453 env->spr[SPR_BOOKE_MAS6] = 0; 1454 env->spr[SPR_BOOKE_MAS7] = 0; 1455 1456 /* AS */ 1457 if (as) { 1458 env->spr[SPR_BOOKE_MAS1] |= MAS1_TS; 1459 env->spr[SPR_BOOKE_MAS6] |= MAS6_SAS; 1460 } 1461 1462 env->spr[SPR_BOOKE_MAS1] |= MAS1_VALID; 1463 env->spr[SPR_BOOKE_MAS2] |= address & MAS2_EPN_MASK; 1464 1465 if (!use_epid) { 1466 switch (env->spr[SPR_BOOKE_MAS4] & MAS4_TIDSELD_PIDZ) { 1467 case MAS4_TIDSELD_PID0: 1468 missed_tid = 
env->spr[SPR_BOOKE_PID]; 1469 break; 1470 case MAS4_TIDSELD_PID1: 1471 missed_tid = env->spr[SPR_BOOKE_PID1]; 1472 break; 1473 case MAS4_TIDSELD_PID2: 1474 missed_tid = env->spr[SPR_BOOKE_PID2]; 1475 break; 1476 } 1477 env->spr[SPR_BOOKE_MAS6] |= env->spr[SPR_BOOKE_PID] << 16; 1478 } else { 1479 missed_tid = epid; 1480 env->spr[SPR_BOOKE_MAS6] |= missed_tid << 16; 1481 } 1482 env->spr[SPR_BOOKE_MAS1] |= (missed_tid << MAS1_TID_SHIFT); 1483 1484 1485 /* next victim logic */ 1486 env->spr[SPR_BOOKE_MAS0] |= env->last_way << MAS0_ESEL_SHIFT; 1487 env->last_way++; 1488 env->last_way &= booke206_tlb_ways(env, 0) - 1; 1489 env->spr[SPR_BOOKE_MAS0] |= env->last_way << MAS0_NV_SHIFT; 1490 } 1491 1492 /* Perform address translation */ 1493 /* TODO: Split this by mmu_model. */ 1494 static bool ppc_jumbo_xlate(PowerPCCPU *cpu, vaddr eaddr, 1495 MMUAccessType access_type, 1496 hwaddr *raddrp, int *psizep, int *protp, 1497 int mmu_idx, bool guest_visible) 1498 { 1499 CPUState *cs = CPU(cpu); 1500 CPUPPCState *env = &cpu->env; 1501 mmu_ctx_t ctx; 1502 int type; 1503 int ret; 1504 1505 if (access_type == MMU_INST_FETCH) { 1506 /* code access */ 1507 type = ACCESS_CODE; 1508 } else if (guest_visible) { 1509 /* data access */ 1510 type = env->access_type; 1511 } else { 1512 type = ACCESS_INT; 1513 } 1514 1515 ret = get_physical_address_wtlb(env, &ctx, eaddr, access_type, 1516 type, mmu_idx); 1517 if (ret == 0) { 1518 *raddrp = ctx.raddr; 1519 *protp = ctx.prot; 1520 *psizep = TARGET_PAGE_BITS; 1521 return true; 1522 } 1523 1524 if (guest_visible) { 1525 LOG_MMU_STATE(cs); 1526 if (type == ACCESS_CODE) { 1527 switch (ret) { 1528 case -1: 1529 /* No matches in page tables or TLB */ 1530 switch (env->mmu_model) { 1531 case POWERPC_MMU_SOFT_6xx: 1532 cs->exception_index = POWERPC_EXCP_IFTLB; 1533 env->error_code = 1 << 18; 1534 env->spr[SPR_IMISS] = eaddr; 1535 env->spr[SPR_ICMP] = 0x80000000 | ctx.ptem; 1536 goto tlb_miss; 1537 case POWERPC_MMU_SOFT_74xx: 1538 cs->exception_index = POWERPC_EXCP_IFTLB; 1539 goto tlb_miss_74xx; 1540 case POWERPC_MMU_SOFT_4xx: 1541 case POWERPC_MMU_SOFT_4xx_Z: 1542 cs->exception_index = POWERPC_EXCP_ITLB; 1543 env->error_code = 0; 1544 env->spr[SPR_40x_DEAR] = eaddr; 1545 env->spr[SPR_40x_ESR] = 0x00000000; 1546 break; 1547 case POWERPC_MMU_BOOKE206: 1548 booke206_update_mas_tlb_miss(env, eaddr, 2, mmu_idx); 1549 /* fall through */ 1550 case POWERPC_MMU_BOOKE: 1551 cs->exception_index = POWERPC_EXCP_ITLB; 1552 env->error_code = 0; 1553 env->spr[SPR_BOOKE_DEAR] = eaddr; 1554 env->spr[SPR_BOOKE_ESR] = mmubooke206_esr(mmu_idx, MMU_DATA_LOAD); 1555 break; 1556 case POWERPC_MMU_MPC8xx: 1557 cpu_abort(cs, "MPC8xx MMU model is not implemented\n"); 1558 case POWERPC_MMU_REAL: 1559 cpu_abort(cs, "PowerPC in real mode should never raise " 1560 "any MMU exceptions\n"); 1561 default: 1562 cpu_abort(cs, "Unknown or invalid MMU model\n"); 1563 } 1564 break; 1565 case -2: 1566 /* Access rights violation */ 1567 cs->exception_index = POWERPC_EXCP_ISI; 1568 env->error_code = 0x08000000; 1569 break; 1570 case -3: 1571 /* No execute protection violation */ 1572 if ((env->mmu_model == POWERPC_MMU_BOOKE) || 1573 (env->mmu_model == POWERPC_MMU_BOOKE206)) { 1574 env->spr[SPR_BOOKE_ESR] = 0x00000000; 1575 } 1576 cs->exception_index = POWERPC_EXCP_ISI; 1577 env->error_code = 0x10000000; 1578 break; 1579 case -4: 1580 /* Direct store exception */ 1581 /* No code fetch is allowed in direct-store areas */ 1582 cs->exception_index = POWERPC_EXCP_ISI; 1583 env->error_code = 0x10000000; 1584 break; 1585 } 
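            /*
             * Instruction-side faults have been set up above; data-side
             * faults are handled symmetrically in the else branch below.
             */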
1586 } else { 1587 switch (ret) { 1588 case -1: 1589 /* No matches in page tables or TLB */ 1590 switch (env->mmu_model) { 1591 case POWERPC_MMU_SOFT_6xx: 1592 if (access_type == MMU_DATA_STORE) { 1593 cs->exception_index = POWERPC_EXCP_DSTLB; 1594 env->error_code = 1 << 16; 1595 } else { 1596 cs->exception_index = POWERPC_EXCP_DLTLB; 1597 env->error_code = 0; 1598 } 1599 env->spr[SPR_DMISS] = eaddr; 1600 env->spr[SPR_DCMP] = 0x80000000 | ctx.ptem; 1601 tlb_miss: 1602 env->error_code |= ctx.key << 19; 1603 env->spr[SPR_HASH1] = ppc_hash32_hpt_base(cpu) + 1604 get_pteg_offset32(cpu, ctx.hash[0]); 1605 env->spr[SPR_HASH2] = ppc_hash32_hpt_base(cpu) + 1606 get_pteg_offset32(cpu, ctx.hash[1]); 1607 break; 1608 case POWERPC_MMU_SOFT_74xx: 1609 if (access_type == MMU_DATA_STORE) { 1610 cs->exception_index = POWERPC_EXCP_DSTLB; 1611 } else { 1612 cs->exception_index = POWERPC_EXCP_DLTLB; 1613 } 1614 tlb_miss_74xx: 1615 /* Implement LRU algorithm */ 1616 env->error_code = ctx.key << 19; 1617 env->spr[SPR_TLBMISS] = (eaddr & ~((target_ulong)0x3)) | 1618 ((env->last_way + 1) & (env->nb_ways - 1)); 1619 env->spr[SPR_PTEHI] = 0x80000000 | ctx.ptem; 1620 break; 1621 case POWERPC_MMU_SOFT_4xx: 1622 case POWERPC_MMU_SOFT_4xx_Z: 1623 cs->exception_index = POWERPC_EXCP_DTLB; 1624 env->error_code = 0; 1625 env->spr[SPR_40x_DEAR] = eaddr; 1626 if (access_type == MMU_DATA_STORE) { 1627 env->spr[SPR_40x_ESR] = 0x00800000; 1628 } else { 1629 env->spr[SPR_40x_ESR] = 0x00000000; 1630 } 1631 break; 1632 case POWERPC_MMU_MPC8xx: 1633 /* XXX: TODO */ 1634 cpu_abort(cs, "MPC8xx MMU model is not implemented\n"); 1635 case POWERPC_MMU_BOOKE206: 1636 booke206_update_mas_tlb_miss(env, eaddr, access_type, mmu_idx); 1637 /* fall through */ 1638 case POWERPC_MMU_BOOKE: 1639 cs->exception_index = POWERPC_EXCP_DTLB; 1640 env->error_code = 0; 1641 env->spr[SPR_BOOKE_DEAR] = eaddr; 1642 env->spr[SPR_BOOKE_ESR] = mmubooke206_esr(mmu_idx, access_type); 1643 break; 1644 case POWERPC_MMU_REAL: 1645 cpu_abort(cs, "PowerPC in real mode should never raise " 1646 "any MMU exceptions\n"); 1647 default: 1648 cpu_abort(cs, "Unknown or invalid MMU model\n"); 1649 } 1650 break; 1651 case -2: 1652 /* Access rights violation */ 1653 cs->exception_index = POWERPC_EXCP_DSI; 1654 env->error_code = 0; 1655 if (env->mmu_model == POWERPC_MMU_SOFT_4xx 1656 || env->mmu_model == POWERPC_MMU_SOFT_4xx_Z) { 1657 env->spr[SPR_40x_DEAR] = eaddr; 1658 if (access_type == MMU_DATA_STORE) { 1659 env->spr[SPR_40x_ESR] |= 0x00800000; 1660 } 1661 } else if ((env->mmu_model == POWERPC_MMU_BOOKE) || 1662 (env->mmu_model == POWERPC_MMU_BOOKE206)) { 1663 env->spr[SPR_BOOKE_DEAR] = eaddr; 1664 env->spr[SPR_BOOKE_ESR] = mmubooke206_esr(mmu_idx, access_type); 1665 } else { 1666 env->spr[SPR_DAR] = eaddr; 1667 if (access_type == MMU_DATA_STORE) { 1668 env->spr[SPR_DSISR] = 0x0A000000; 1669 } else { 1670 env->spr[SPR_DSISR] = 0x08000000; 1671 } 1672 } 1673 break; 1674 case -4: 1675 /* Direct store exception */ 1676 switch (type) { 1677 case ACCESS_FLOAT: 1678 /* Floating point load/store */ 1679 cs->exception_index = POWERPC_EXCP_ALIGN; 1680 env->error_code = POWERPC_EXCP_ALIGN_FP; 1681 env->spr[SPR_DAR] = eaddr; 1682 break; 1683 case ACCESS_RES: 1684 /* lwarx, ldarx or stwcx. 
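                 * A reservation access to a direct-store segment raises a
                 * DSI with the reservation DSISR code set just below.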
*/ 1685 cs->exception_index = POWERPC_EXCP_DSI; 1686 env->error_code = 0; 1687 env->spr[SPR_DAR] = eaddr; 1688 if (access_type == MMU_DATA_STORE) { 1689 env->spr[SPR_DSISR] = 0x06000000; 1690 } else { 1691 env->spr[SPR_DSISR] = 0x04000000; 1692 } 1693 break; 1694 case ACCESS_EXT: 1695 /* eciwx or ecowx */ 1696 cs->exception_index = POWERPC_EXCP_DSI; 1697 env->error_code = 0; 1698 env->spr[SPR_DAR] = eaddr; 1699 if (access_type == MMU_DATA_STORE) { 1700 env->spr[SPR_DSISR] = 0x06100000; 1701 } else { 1702 env->spr[SPR_DSISR] = 0x04100000; 1703 } 1704 break; 1705 default: 1706 printf("DSI: invalid exception (%d)\n", ret); 1707 cs->exception_index = POWERPC_EXCP_PROGRAM; 1708 env->error_code = 1709 POWERPC_EXCP_INVAL | POWERPC_EXCP_INVAL_INVAL; 1710 env->spr[SPR_DAR] = eaddr; 1711 break; 1712 } 1713 break; 1714 } 1715 } 1716 } 1717 return false; 1718 } 1719 1720 #ifdef CONFIG_TCG 1721 /*****************************************************************************/ 1722 /* BATs management */ 1723 #if !defined(FLUSH_ALL_TLBS) 1724 static inline void do_invalidate_BAT(CPUPPCState *env, target_ulong BATu, 1725 target_ulong mask) 1726 { 1727 CPUState *cs = env_cpu(env); 1728 target_ulong base, end, page; 1729 1730 base = BATu & ~0x0001FFFF; 1731 end = base + mask + 0x00020000; 1732 if (((end - base) >> TARGET_PAGE_BITS) > 1024) { 1733 /* Flushing 1024 4K pages is slower than a complete flush */ 1734 LOG_BATS("Flush all BATs\n"); 1735 tlb_flush(cs); 1736 LOG_BATS("Flush done\n"); 1737 return; 1738 } 1739 LOG_BATS("Flush BAT from " TARGET_FMT_lx " to " TARGET_FMT_lx " (" 1740 TARGET_FMT_lx ")\n", base, end, mask); 1741 for (page = base; page != end; page += TARGET_PAGE_SIZE) { 1742 tlb_flush_page(cs, page); 1743 } 1744 LOG_BATS("Flush done\n"); 1745 } 1746 #endif 1747 1748 static inline void dump_store_bat(CPUPPCState *env, char ID, int ul, int nr, 1749 target_ulong value) 1750 { 1751 LOG_BATS("Set %cBAT%d%c to " TARGET_FMT_lx " (" TARGET_FMT_lx ")\n", ID, 1752 nr, ul == 0 ? 
'u' : 'l', value, env->nip); 1753 } 1754 1755 void helper_store_ibatu(CPUPPCState *env, uint32_t nr, target_ulong value) 1756 { 1757 target_ulong mask; 1758 1759 dump_store_bat(env, 'I', 0, nr, value); 1760 if (env->IBAT[0][nr] != value) { 1761 mask = (value << 15) & 0x0FFE0000UL; 1762 #if !defined(FLUSH_ALL_TLBS) 1763 do_invalidate_BAT(env, env->IBAT[0][nr], mask); 1764 #endif 1765 /* 1766 * When storing valid upper BAT, mask BEPI and BRPN and 1767 * invalidate all TLBs covered by this BAT 1768 */ 1769 mask = (value << 15) & 0x0FFE0000UL; 1770 env->IBAT[0][nr] = (value & 0x00001FFFUL) | 1771 (value & ~0x0001FFFFUL & ~mask); 1772 env->IBAT[1][nr] = (env->IBAT[1][nr] & 0x0000007B) | 1773 (env->IBAT[1][nr] & ~0x0001FFFF & ~mask); 1774 #if !defined(FLUSH_ALL_TLBS) 1775 do_invalidate_BAT(env, env->IBAT[0][nr], mask); 1776 #else 1777 tlb_flush(env_cpu(env)); 1778 #endif 1779 } 1780 } 1781 1782 void helper_store_ibatl(CPUPPCState *env, uint32_t nr, target_ulong value) 1783 { 1784 dump_store_bat(env, 'I', 1, nr, value); 1785 env->IBAT[1][nr] = value; 1786 } 1787 1788 void helper_store_dbatu(CPUPPCState *env, uint32_t nr, target_ulong value) 1789 { 1790 target_ulong mask; 1791 1792 dump_store_bat(env, 'D', 0, nr, value); 1793 if (env->DBAT[0][nr] != value) { 1794 /* 1795 * When storing valid upper BAT, mask BEPI and BRPN and 1796 * invalidate all TLBs covered by this BAT 1797 */ 1798 mask = (value << 15) & 0x0FFE0000UL; 1799 #if !defined(FLUSH_ALL_TLBS) 1800 do_invalidate_BAT(env, env->DBAT[0][nr], mask); 1801 #endif 1802 mask = (value << 15) & 0x0FFE0000UL; 1803 env->DBAT[0][nr] = (value & 0x00001FFFUL) | 1804 (value & ~0x0001FFFFUL & ~mask); 1805 env->DBAT[1][nr] = (env->DBAT[1][nr] & 0x0000007B) | 1806 (env->DBAT[1][nr] & ~0x0001FFFF & ~mask); 1807 #if !defined(FLUSH_ALL_TLBS) 1808 do_invalidate_BAT(env, env->DBAT[0][nr], mask); 1809 #else 1810 tlb_flush(env_cpu(env)); 1811 #endif 1812 } 1813 } 1814 1815 void helper_store_dbatl(CPUPPCState *env, uint32_t nr, target_ulong value) 1816 { 1817 dump_store_bat(env, 'D', 1, nr, value); 1818 env->DBAT[1][nr] = value; 1819 } 1820 1821 void helper_store_601_batu(CPUPPCState *env, uint32_t nr, target_ulong value) 1822 { 1823 target_ulong mask; 1824 #if defined(FLUSH_ALL_TLBS) 1825 int do_inval; 1826 #endif 1827 1828 dump_store_bat(env, 'I', 0, nr, value); 1829 if (env->IBAT[0][nr] != value) { 1830 #if defined(FLUSH_ALL_TLBS) 1831 do_inval = 0; 1832 #endif 1833 mask = (env->IBAT[1][nr] << 17) & 0x0FFE0000UL; 1834 if (env->IBAT[1][nr] & 0x40) { 1835 /* Invalidate BAT only if it is valid */ 1836 #if !defined(FLUSH_ALL_TLBS) 1837 do_invalidate_BAT(env, env->IBAT[0][nr], mask); 1838 #else 1839 do_inval = 1; 1840 #endif 1841 } 1842 /* 1843 * When storing valid upper BAT, mask BEPI and BRPN and 1844 * invalidate all TLBs covered by this BAT 1845 */ 1846 env->IBAT[0][nr] = (value & 0x00001FFFUL) | 1847 (value & ~0x0001FFFFUL & ~mask); 1848 env->DBAT[0][nr] = env->IBAT[0][nr]; 1849 if (env->IBAT[1][nr] & 0x40) { 1850 #if !defined(FLUSH_ALL_TLBS) 1851 do_invalidate_BAT(env, env->IBAT[0][nr], mask); 1852 #else 1853 do_inval = 1; 1854 #endif 1855 } 1856 #if defined(FLUSH_ALL_TLBS) 1857 if (do_inval) { 1858 tlb_flush(env_cpu(env)); 1859 } 1860 #endif 1861 } 1862 } 1863 1864 void helper_store_601_batl(CPUPPCState *env, uint32_t nr, target_ulong value) 1865 { 1866 #if !defined(FLUSH_ALL_TLBS) 1867 target_ulong mask; 1868 #else 1869 int do_inval; 1870 #endif 1871 1872 dump_store_bat(env, 'I', 1, nr, value); 1873 if (env->IBAT[1][nr] != value) { 1874 #if 
defined(FLUSH_ALL_TLBS) 1875 do_inval = 0; 1876 #endif 1877 if (env->IBAT[1][nr] & 0x40) { 1878 #if !defined(FLUSH_ALL_TLBS) 1879 mask = (env->IBAT[1][nr] << 17) & 0x0FFE0000UL; 1880 do_invalidate_BAT(env, env->IBAT[0][nr], mask); 1881 #else 1882 do_inval = 1; 1883 #endif 1884 } 1885 if (value & 0x40) { 1886 #if !defined(FLUSH_ALL_TLBS) 1887 mask = (value << 17) & 0x0FFE0000UL; 1888 do_invalidate_BAT(env, env->IBAT[0][nr], mask); 1889 #else 1890 do_inval = 1; 1891 #endif 1892 } 1893 env->IBAT[1][nr] = value; 1894 env->DBAT[1][nr] = value; 1895 #if defined(FLUSH_ALL_TLBS) 1896 if (do_inval) { 1897 tlb_flush(env_cpu(env)); 1898 } 1899 #endif 1900 } 1901 } 1902 #endif 1903 1904 /*****************************************************************************/ 1905 /* TLB management */ 1906 void ppc_tlb_invalidate_all(CPUPPCState *env) 1907 { 1908 #if defined(TARGET_PPC64) 1909 if (mmu_is_64bit(env->mmu_model)) { 1910 env->tlb_need_flush = 0; 1911 tlb_flush(env_cpu(env)); 1912 } else 1913 #endif /* defined(TARGET_PPC64) */ 1914 switch (env->mmu_model) { 1915 case POWERPC_MMU_SOFT_6xx: 1916 case POWERPC_MMU_SOFT_74xx: 1917 ppc6xx_tlb_invalidate_all(env); 1918 break; 1919 case POWERPC_MMU_SOFT_4xx: 1920 case POWERPC_MMU_SOFT_4xx_Z: 1921 ppc4xx_tlb_invalidate_all(env); 1922 break; 1923 case POWERPC_MMU_REAL: 1924 cpu_abort(env_cpu(env), "No TLB for PowerPC 4xx in real mode\n"); 1925 break; 1926 case POWERPC_MMU_MPC8xx: 1927 /* XXX: TODO */ 1928 cpu_abort(env_cpu(env), "MPC8xx MMU model is not implemented\n"); 1929 break; 1930 case POWERPC_MMU_BOOKE: 1931 tlb_flush(env_cpu(env)); 1932 break; 1933 case POWERPC_MMU_BOOKE206: 1934 booke206_flush_tlb(env, -1, 0); 1935 break; 1936 case POWERPC_MMU_32B: 1937 case POWERPC_MMU_601: 1938 env->tlb_need_flush = 0; 1939 tlb_flush(env_cpu(env)); 1940 break; 1941 default: 1942 /* XXX: TODO */ 1943 cpu_abort(env_cpu(env), "Unknown MMU model %x\n", env->mmu_model); 1944 break; 1945 } 1946 } 1947 1948 #ifdef CONFIG_TCG 1949 void ppc_tlb_invalidate_one(CPUPPCState *env, target_ulong addr) 1950 { 1951 #if !defined(FLUSH_ALL_TLBS) 1952 addr &= TARGET_PAGE_MASK; 1953 #if defined(TARGET_PPC64) 1954 if (mmu_is_64bit(env->mmu_model)) { 1955 /* tlbie invalidate TLBs for all segments */ 1956 /* 1957 * XXX: given the fact that there are too many segments to invalidate, 1958 * and we still don't have a tlb_flush_mask(env, n, mask) in QEMU, 1959 * we just invalidate all TLBs 1960 */ 1961 env->tlb_need_flush |= TLB_NEED_LOCAL_FLUSH; 1962 } else 1963 #endif /* defined(TARGET_PPC64) */ 1964 switch (env->mmu_model) { 1965 case POWERPC_MMU_SOFT_6xx: 1966 case POWERPC_MMU_SOFT_74xx: 1967 ppc6xx_tlb_invalidate_virt(env, addr, 0); 1968 if (env->id_tlbs == 1) { 1969 ppc6xx_tlb_invalidate_virt(env, addr, 1); 1970 } 1971 break; 1972 case POWERPC_MMU_32B: 1973 case POWERPC_MMU_601: 1974 /* 1975 * Actual CPUs invalidate entire congruence classes based on 1976 * the geometry of their TLBs and some OSes take that into 1977 * account, we just mark the TLB to be flushed later (context 1978 * synchronizing event or sync instruction on 32-bit). 
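         * The deferred flush is requested by setting TLB_NEED_LOCAL_FLUSH
         * in env->tlb_need_flush, done just below.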
1979 */ 1980 env->tlb_need_flush |= TLB_NEED_LOCAL_FLUSH; 1981 break; 1982 default: 1983 /* Should never reach here with other MMU models */ 1984 assert(0); 1985 } 1986 #else 1987 ppc_tlb_invalidate_all(env); 1988 #endif 1989 } 1990 1991 /*****************************************************************************/ 1992 /* Special registers manipulation */ 1993 1994 /* Segment registers load and store */ 1995 target_ulong helper_load_sr(CPUPPCState *env, target_ulong sr_num) 1996 { 1997 #if defined(TARGET_PPC64) 1998 if (mmu_is_64bit(env->mmu_model)) { 1999 /* XXX */ 2000 return 0; 2001 } 2002 #endif 2003 return env->sr[sr_num]; 2004 } 2005 2006 void helper_store_sr(CPUPPCState *env, target_ulong srnum, target_ulong value) 2007 { 2008 qemu_log_mask(CPU_LOG_MMU, 2009 "%s: reg=%d " TARGET_FMT_lx " " TARGET_FMT_lx "\n", __func__, 2010 (int)srnum, value, env->sr[srnum]); 2011 #if defined(TARGET_PPC64) 2012 if (mmu_is_64bit(env->mmu_model)) { 2013 PowerPCCPU *cpu = env_archcpu(env); 2014 uint64_t esid, vsid; 2015 2016 /* ESID = srnum */ 2017 esid = ((uint64_t)(srnum & 0xf) << 28) | SLB_ESID_V; 2018 2019 /* VSID = VSID */ 2020 vsid = (value & 0xfffffff) << 12; 2021 /* flags = flags */ 2022 vsid |= ((value >> 27) & 0xf) << 8; 2023 2024 ppc_store_slb(cpu, srnum, esid, vsid); 2025 } else 2026 #endif 2027 if (env->sr[srnum] != value) { 2028 env->sr[srnum] = value; 2029 /* 2030 * Invalidating 256MB of virtual memory in 4kB pages is way 2031 * longer than flushing the whole TLB. 2032 */ 2033 #if !defined(FLUSH_ALL_TLBS) && 0 2034 { 2035 target_ulong page, end; 2036 /* Invalidate 256 MB of virtual memory */ 2037 page = (16 << 20) * srnum; 2038 end = page + (16 << 20); 2039 for (; page != end; page += TARGET_PAGE_SIZE) { 2040 tlb_flush_page(env_cpu(env), page); 2041 } 2042 } 2043 #else 2044 env->tlb_need_flush |= TLB_NEED_LOCAL_FLUSH; 2045 #endif 2046 } 2047 } 2048 2049 /* TLB management */ 2050 void helper_tlbia(CPUPPCState *env) 2051 { 2052 ppc_tlb_invalidate_all(env); 2053 } 2054 2055 void helper_tlbie(CPUPPCState *env, target_ulong addr) 2056 { 2057 ppc_tlb_invalidate_one(env, addr); 2058 } 2059 2060 void helper_tlbiva(CPUPPCState *env, target_ulong addr) 2061 { 2062 /* tlbiva instruction only exists on BookE */ 2063 assert(env->mmu_model == POWERPC_MMU_BOOKE); 2064 /* XXX: TODO */ 2065 cpu_abort(env_cpu(env), "BookE MMU model is not implemented\n"); 2066 } 2067 2068 /* Software driven TLBs management */ 2069 /* PowerPC 602/603 software TLB load instructions helpers */ 2070 static void do_6xx_tlb(CPUPPCState *env, target_ulong new_EPN, int is_code) 2071 { 2072 target_ulong RPN, CMP, EPN; 2073 int way; 2074 2075 RPN = env->spr[SPR_RPA]; 2076 if (is_code) { 2077 CMP = env->spr[SPR_ICMP]; 2078 EPN = env->spr[SPR_IMISS]; 2079 } else { 2080 CMP = env->spr[SPR_DCMP]; 2081 EPN = env->spr[SPR_DMISS]; 2082 } 2083 way = (env->spr[SPR_SRR1] >> 17) & 1; 2084 (void)EPN; /* avoid a compiler warning */ 2085 LOG_SWTLB("%s: EPN " TARGET_FMT_lx " " TARGET_FMT_lx " PTE0 " TARGET_FMT_lx 2086 " PTE1 " TARGET_FMT_lx " way %d\n", __func__, new_EPN, EPN, CMP, 2087 RPN, way); 2088 /* Store this TLB */ 2089 ppc6xx_tlb_store(env, (uint32_t)(new_EPN & TARGET_PAGE_MASK), 2090 way, is_code, CMP, RPN); 2091 } 2092 2093 void helper_6xx_tlbd(CPUPPCState *env, target_ulong EPN) 2094 { 2095 do_6xx_tlb(env, EPN, 0); 2096 } 2097 2098 void helper_6xx_tlbi(CPUPPCState *env, target_ulong EPN) 2099 { 2100 do_6xx_tlb(env, EPN, 1); 2101 } 2102 2103 /* PowerPC 74xx software TLB load instructions helpers */ 2104 static void 
static void do_6xx_tlb(CPUPPCState *env, target_ulong new_EPN, int is_code)
{
    target_ulong RPN, CMP, EPN;
    int way;

    RPN = env->spr[SPR_RPA];
    if (is_code) {
        CMP = env->spr[SPR_ICMP];
        EPN = env->spr[SPR_IMISS];
    } else {
        CMP = env->spr[SPR_DCMP];
        EPN = env->spr[SPR_DMISS];
    }
    way = (env->spr[SPR_SRR1] >> 17) & 1;
    (void)EPN; /* avoid a compiler warning */
    LOG_SWTLB("%s: EPN " TARGET_FMT_lx " " TARGET_FMT_lx " PTE0 " TARGET_FMT_lx
              " PTE1 " TARGET_FMT_lx " way %d\n", __func__, new_EPN, EPN, CMP,
              RPN, way);
    /* Store this TLB */
    ppc6xx_tlb_store(env, (uint32_t)(new_EPN & TARGET_PAGE_MASK),
                     way, is_code, CMP, RPN);
}

void helper_6xx_tlbd(CPUPPCState *env, target_ulong EPN)
{
    do_6xx_tlb(env, EPN, 0);
}

void helper_6xx_tlbi(CPUPPCState *env, target_ulong EPN)
{
    do_6xx_tlb(env, EPN, 1);
}

/* PowerPC 74xx software TLB load instructions helpers */
static void do_74xx_tlb(CPUPPCState *env, target_ulong new_EPN, int is_code)
{
    target_ulong RPN, CMP, EPN;
    int way;

    RPN = env->spr[SPR_PTELO];
    CMP = env->spr[SPR_PTEHI];
    EPN = env->spr[SPR_TLBMISS] & ~0x3;
    way = env->spr[SPR_TLBMISS] & 0x3;
    (void)EPN; /* avoid a compiler warning */
    LOG_SWTLB("%s: EPN " TARGET_FMT_lx " " TARGET_FMT_lx " PTE0 " TARGET_FMT_lx
              " PTE1 " TARGET_FMT_lx " way %d\n", __func__, new_EPN, EPN, CMP,
              RPN, way);
    /* Store this TLB */
    ppc6xx_tlb_store(env, (uint32_t)(new_EPN & TARGET_PAGE_MASK),
                     way, is_code, CMP, RPN);
}

void helper_74xx_tlbd(CPUPPCState *env, target_ulong EPN)
{
    do_74xx_tlb(env, EPN, 0);
}

void helper_74xx_tlbi(CPUPPCState *env, target_ulong EPN)
{
    do_74xx_tlb(env, EPN, 1);
}

/*****************************************************************************/
/* PowerPC 601 specific instructions (POWER bridge) */

target_ulong helper_rac(CPUPPCState *env, target_ulong addr)
{
    mmu_ctx_t ctx;
    int nb_BATs;
    target_ulong ret = 0;

    /*
     * We don't have to generate many instances of this instruction,
     * as rac is supervisor only.
     *
     * XXX: FIX THIS: Pretend we have no BAT
     */
    nb_BATs = env->nb_BATs;
    env->nb_BATs = 0;
    if (get_physical_address(env, &ctx, addr, 0, ACCESS_INT) == 0) {
        ret = ctx.raddr;
    }
    env->nb_BATs = nb_BATs;
    return ret;
}
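
/*
 * BookE TLB entries encode the page size as a power of four:
 * page_size = 1 KiB << (2 * TSIZE), e.g. TSIZE 1 -> 4 KiB, 5 -> 1 MiB,
 * 9 -> 256 MiB.  booke_page_size_to_tlb() is the inverse mapping and
 * returns -1 for sizes that do not fit this encoding.
 */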
static inline target_ulong booke_tlb_to_page_size(int size)
{
    return 1024 << (2 * size);
}

static inline int booke_page_size_to_tlb(target_ulong page_size)
{
    int size;

    switch (page_size) {
    case 0x00000400UL:
        size = 0x0;
        break;
    case 0x00001000UL:
        size = 0x1;
        break;
    case 0x00004000UL:
        size = 0x2;
        break;
    case 0x00010000UL:
        size = 0x3;
        break;
    case 0x00040000UL:
        size = 0x4;
        break;
    case 0x00100000UL:
        size = 0x5;
        break;
    case 0x00400000UL:
        size = 0x6;
        break;
    case 0x01000000UL:
        size = 0x7;
        break;
    case 0x04000000UL:
        size = 0x8;
        break;
    case 0x10000000UL:
        size = 0x9;
        break;
    case 0x40000000UL:
        size = 0xA;
        break;
#if defined(TARGET_PPC64)
    case 0x000100000000ULL:
        size = 0xB;
        break;
    case 0x000400000000ULL:
        size = 0xC;
        break;
    case 0x001000000000ULL:
        size = 0xD;
        break;
    case 0x004000000000ULL:
        size = 0xE;
        break;
    case 0x010000000000ULL:
        size = 0xF;
        break;
#endif
    default:
        size = -1;
        break;
    }

    return size;
}

/* Helpers for 4xx TLB management */
#define PPC4XX_TLB_ENTRY_MASK       0x0000003f  /* Mask for 64 TLB entries */

#define PPC4XX_TLBHI_V              0x00000040
#define PPC4XX_TLBHI_E              0x00000020
#define PPC4XX_TLBHI_SIZE_MIN       0
#define PPC4XX_TLBHI_SIZE_MAX       7
#define PPC4XX_TLBHI_SIZE_DEFAULT   1
#define PPC4XX_TLBHI_SIZE_SHIFT     7
#define PPC4XX_TLBHI_SIZE_MASK      0x00000007

#define PPC4XX_TLBLO_EX             0x00000200
#define PPC4XX_TLBLO_WR             0x00000100
#define PPC4XX_TLBLO_ATTR_MASK      0x000000FF
#define PPC4XX_TLBLO_RPN_MASK       0xFFFFFC00

target_ulong helper_4xx_tlbre_hi(CPUPPCState *env, target_ulong entry)
{
    ppcemb_tlb_t *tlb;
    target_ulong ret;
    int size;

    entry &= PPC4XX_TLB_ENTRY_MASK;
    tlb = &env->tlb.tlbe[entry];
    ret = tlb->EPN;
    if (tlb->prot & PAGE_VALID) {
        ret |= PPC4XX_TLBHI_V;
    }
    size = booke_page_size_to_tlb(tlb->size);
    if (size < PPC4XX_TLBHI_SIZE_MIN || size > PPC4XX_TLBHI_SIZE_MAX) {
        size = PPC4XX_TLBHI_SIZE_DEFAULT;
    }
    ret |= size << PPC4XX_TLBHI_SIZE_SHIFT;
    env->spr[SPR_40x_PID] = tlb->PID;
    return ret;
}

target_ulong helper_4xx_tlbre_lo(CPUPPCState *env, target_ulong entry)
{
    ppcemb_tlb_t *tlb;
    target_ulong ret;

    entry &= PPC4XX_TLB_ENTRY_MASK;
    tlb = &env->tlb.tlbe[entry];
    ret = tlb->RPN;
    if (tlb->prot & PAGE_EXEC) {
        ret |= PPC4XX_TLBLO_EX;
    }
    if (tlb->prot & PAGE_WRITE) {
        ret |= PPC4XX_TLBLO_WR;
    }
    return ret;
}
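
/*
 * TLBHI holds the EPN together with the size field (shifted by
 * PPC4XX_TLBHI_SIZE_SHIFT), the valid bit (PPC4XX_TLBHI_V) and the
 * endianness bit (PPC4XX_TLBHI_E); TLBLO holds the RPN, the storage
 * attributes and the EX/WR permission bits.  helper_4xx_tlbwe_hi() and
 * helper_4xx_tlbwe_lo() below decode these two words into the
 * ppcemb_tlb_t entry used by the translation code.
 */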
void helper_4xx_tlbwe_hi(CPUPPCState *env, target_ulong entry,
                         target_ulong val)
{
    CPUState *cs = env_cpu(env);
    ppcemb_tlb_t *tlb;
    target_ulong page, end;

    LOG_SWTLB("%s entry %d val " TARGET_FMT_lx "\n", __func__, (int)entry,
              val);
    entry &= PPC4XX_TLB_ENTRY_MASK;
    tlb = &env->tlb.tlbe[entry];
    /* Invalidate previous TLB (if it's valid) */
    if (tlb->prot & PAGE_VALID) {
        end = tlb->EPN + tlb->size;
        LOG_SWTLB("%s: invalidate old TLB %d start " TARGET_FMT_lx " end "
                  TARGET_FMT_lx "\n", __func__, (int)entry, tlb->EPN, end);
        for (page = tlb->EPN; page < end; page += TARGET_PAGE_SIZE) {
            tlb_flush_page(cs, page);
        }
    }
    tlb->size = booke_tlb_to_page_size((val >> PPC4XX_TLBHI_SIZE_SHIFT)
                                       & PPC4XX_TLBHI_SIZE_MASK);
    /*
     * We cannot handle TLB size < TARGET_PAGE_SIZE.
     * If this ever occurs, we should implement TARGET_PAGE_BITS_VARY
     */
    if ((val & PPC4XX_TLBHI_V) && tlb->size < TARGET_PAGE_SIZE) {
        cpu_abort(cs, "TLB size " TARGET_FMT_lu " < %u "
                  "is not supported (%d)\n"
                  "Please implement TARGET_PAGE_BITS_VARY\n",
                  tlb->size, TARGET_PAGE_SIZE, (int)((val >> 7) & 0x7));
    }
    tlb->EPN = val & ~(tlb->size - 1);
    if (val & PPC4XX_TLBHI_V) {
        tlb->prot |= PAGE_VALID;
        if (val & PPC4XX_TLBHI_E) {
            /* XXX: TO BE FIXED */
            cpu_abort(cs,
                      "Little-endian TLB entries are not supported for now\n");
        }
    } else {
        tlb->prot &= ~PAGE_VALID;
    }
    tlb->PID = env->spr[SPR_40x_PID]; /* PID */
    LOG_SWTLB("%s: set up TLB %d RPN " TARGET_FMT_plx " EPN " TARGET_FMT_lx
              " size " TARGET_FMT_lx " prot %c%c%c%c PID %d\n", __func__,
              (int)entry, tlb->RPN, tlb->EPN, tlb->size,
              tlb->prot & PAGE_READ ? 'r' : '-',
              tlb->prot & PAGE_WRITE ? 'w' : '-',
              tlb->prot & PAGE_EXEC ? 'x' : '-',
              tlb->prot & PAGE_VALID ? 'v' : '-', (int)tlb->PID);
    /* Invalidate new TLB (if valid) */
    if (tlb->prot & PAGE_VALID) {
        end = tlb->EPN + tlb->size;
        LOG_SWTLB("%s: invalidate TLB %d start " TARGET_FMT_lx " end "
                  TARGET_FMT_lx "\n", __func__, (int)entry, tlb->EPN, end);
        for (page = tlb->EPN; page < end; page += TARGET_PAGE_SIZE) {
            tlb_flush_page(cs, page);
        }
    }
}

void helper_4xx_tlbwe_lo(CPUPPCState *env, target_ulong entry,
                         target_ulong val)
{
    ppcemb_tlb_t *tlb;

    LOG_SWTLB("%s entry %i val " TARGET_FMT_lx "\n", __func__, (int)entry,
              val);
    entry &= PPC4XX_TLB_ENTRY_MASK;
    tlb = &env->tlb.tlbe[entry];
    tlb->attr = val & PPC4XX_TLBLO_ATTR_MASK;
    tlb->RPN = val & PPC4XX_TLBLO_RPN_MASK;
    tlb->prot = PAGE_READ;
    if (val & PPC4XX_TLBLO_EX) {
        tlb->prot |= PAGE_EXEC;
    }
    if (val & PPC4XX_TLBLO_WR) {
        tlb->prot |= PAGE_WRITE;
    }
    LOG_SWTLB("%s: set up TLB %d RPN " TARGET_FMT_plx " EPN " TARGET_FMT_lx
              " size " TARGET_FMT_lx " prot %c%c%c%c PID %d\n", __func__,
              (int)entry, tlb->RPN, tlb->EPN, tlb->size,
              tlb->prot & PAGE_READ ? 'r' : '-',
              tlb->prot & PAGE_WRITE ? 'w' : '-',
              tlb->prot & PAGE_EXEC ? 'x' : '-',
              tlb->prot & PAGE_VALID ? 'v' : '-', (int)tlb->PID);
}

target_ulong helper_4xx_tlbsx(CPUPPCState *env, target_ulong address)
{
    return ppcemb_tlb_search(env, address, env->spr[SPR_40x_PID]);
}

/* PowerPC 440 TLB management */
void helper_440_tlbwe(CPUPPCState *env, uint32_t word, target_ulong entry,
                      target_ulong value)
{
    ppcemb_tlb_t *tlb;
    target_ulong EPN, RPN, size;
    int do_flush_tlbs;

    LOG_SWTLB("%s word %d entry %d value " TARGET_FMT_lx "\n",
              __func__, word, (int)entry, value);
    do_flush_tlbs = 0;
    entry &= 0x3F;
    tlb = &env->tlb.tlbe[entry];
    switch (word) {
    default:
        /* Just here to please gcc */
    case 0:
        EPN = value & 0xFFFFFC00;
        if ((tlb->prot & PAGE_VALID) && EPN != tlb->EPN) {
            do_flush_tlbs = 1;
        }
        tlb->EPN = EPN;
        size = booke_tlb_to_page_size((value >> 4) & 0xF);
        if ((tlb->prot & PAGE_VALID) && tlb->size < size) {
            do_flush_tlbs = 1;
        }
        tlb->size = size;
        tlb->attr &= ~0x1;
        tlb->attr |= (value >> 8) & 1;
        if (value & 0x200) {
            tlb->prot |= PAGE_VALID;
        } else {
            if (tlb->prot & PAGE_VALID) {
                tlb->prot &= ~PAGE_VALID;
                do_flush_tlbs = 1;
            }
        }
        tlb->PID = env->spr[SPR_440_MMUCR] & 0x000000FF;
        if (do_flush_tlbs) {
            tlb_flush(env_cpu(env));
        }
        break;
    case 1:
        RPN = value & 0xFFFFFC0F;
        if ((tlb->prot & PAGE_VALID) && tlb->RPN != RPN) {
            tlb_flush(env_cpu(env));
        }
        tlb->RPN = RPN;
        break;
    case 2:
        tlb->attr = (tlb->attr & 0x1) | (value & 0x0000FF00);
        tlb->prot = tlb->prot & PAGE_VALID;
        if (value & 0x1) {
            tlb->prot |= PAGE_READ << 4;
        }
        if (value & 0x2) {
            tlb->prot |= PAGE_WRITE << 4;
        }
        if (value & 0x4) {
            tlb->prot |= PAGE_EXEC << 4;
        }
        if (value & 0x8) {
            tlb->prot |= PAGE_READ;
        }
        if (value & 0x10) {
            tlb->prot |= PAGE_WRITE;
        }
        if (value & 0x20) {
            tlb->prot |= PAGE_EXEC;
        }
        break;
    }
}
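
/*
 * A 440 TLB entry is accessed as three words: word 0 carries the EPN, the
 * size field (bits 4..7 of the value), one attribute bit and the valid bit
 * (0x200); word 1 carries the RPN; word 2 carries the storage attributes
 * (0xFF00) and the six permission bits that helper_440_tlbwe() above folds
 * into the two nibbles of tlb->prot (PAGE_READ/WRITE/EXEC, one nibble per
 * privilege level).  helper_440_tlbre() performs the reverse conversion.
 */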
target_ulong helper_440_tlbre(CPUPPCState *env, uint32_t word,
                              target_ulong entry)
{
    ppcemb_tlb_t *tlb;
    target_ulong ret;
    int size;

    entry &= 0x3F;
    tlb = &env->tlb.tlbe[entry];
    switch (word) {
    default:
        /* Just here to please gcc */
    case 0:
        ret = tlb->EPN;
        size = booke_page_size_to_tlb(tlb->size);
        if (size < 0 || size > 0xF) {
            size = 1;
        }
        ret |= size << 4;
        if (tlb->attr & 0x1) {
            ret |= 0x100;
        }
        if (tlb->prot & PAGE_VALID) {
            ret |= 0x200;
        }
        env->spr[SPR_440_MMUCR] &= ~0x000000FF;
        env->spr[SPR_440_MMUCR] |= tlb->PID;
        break;
    case 1:
        ret = tlb->RPN;
        break;
    case 2:
        ret = tlb->attr & ~0x1;
        if (tlb->prot & (PAGE_READ << 4)) {
            ret |= 0x1;
        }
        if (tlb->prot & (PAGE_WRITE << 4)) {
            ret |= 0x2;
        }
        if (tlb->prot & (PAGE_EXEC << 4)) {
            ret |= 0x4;
        }
        if (tlb->prot & PAGE_READ) {
            ret |= 0x8;
        }
        if (tlb->prot & PAGE_WRITE) {
            ret |= 0x10;
        }
        if (tlb->prot & PAGE_EXEC) {
            ret |= 0x20;
        }
        break;
    }
    return ret;
}

target_ulong helper_440_tlbsx(CPUPPCState *env, target_ulong address)
{
    return ppcemb_tlb_search(env, address, env->spr[SPR_440_MMUCR] & 0xFF);
}

/* PowerPC BookE 2.06 TLB management */

static ppcmas_tlb_t *booke206_cur_tlb(CPUPPCState *env)
{
    uint32_t tlbncfg = 0;
    int esel = (env->spr[SPR_BOOKE_MAS0] & MAS0_ESEL_MASK) >> MAS0_ESEL_SHIFT;
    int ea = (env->spr[SPR_BOOKE_MAS2] & MAS2_EPN_MASK);
    int tlb;

    tlb = (env->spr[SPR_BOOKE_MAS0] & MAS0_TLBSEL_MASK) >> MAS0_TLBSEL_SHIFT;
    tlbncfg = env->spr[SPR_BOOKE_TLB0CFG + tlb];

    if ((tlbncfg & TLBnCFG_HES) && (env->spr[SPR_BOOKE_MAS0] & MAS0_HES)) {
        cpu_abort(env_cpu(env), "we don't support HES yet\n");
    }

    return booke206_get_tlbm(env, tlb, ea, esel);
}

void helper_booke_setpid(CPUPPCState *env, uint32_t pidn, target_ulong pid)
{
    env->spr[pidn] = pid;
    /* changing PIDs means we're in a different address space now */
    tlb_flush(env_cpu(env));
}

void helper_booke_set_eplc(CPUPPCState *env, target_ulong val)
{
    env->spr[SPR_BOOKE_EPLC] = val & EPID_MASK;
    tlb_flush_by_mmuidx(env_cpu(env), 1 << PPC_TLB_EPID_LOAD);
}

void helper_booke_set_epsc(CPUPPCState *env, target_ulong val)
{
    env->spr[SPR_BOOKE_EPSC] = val & EPID_MASK;
    tlb_flush_by_mmuidx(env_cpu(env), 1 << PPC_TLB_EPID_STORE);
}

static inline void flush_page(CPUPPCState *env, ppcmas_tlb_t *tlb)
{
    if (booke206_tlb_to_page_size(env, tlb) == TARGET_PAGE_SIZE) {
        tlb_flush_page(env_cpu(env), tlb->mas2 & MAS2_EPN_MASK);
    } else {
        tlb_flush(env_cpu(env));
    }
}
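
/*
 * tlbwe is driven entirely by the MAS registers: MAS0 selects the TLB
 * array, the entry (ESEL) and the write qualifier, MAS1 carries the valid
 * bit, TID and TSIZE, MAS2 the EPN and page attributes, and MAS3/MAS7 the
 * RPN and access permissions.  helper_booke206_tlbwe() below copies those
 * values into the selected ppcmas_tlb_t entry and flushes the QEMU TLB as
 * needed.
 */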
void helper_booke206_tlbwe(CPUPPCState *env)
{
    uint32_t tlbncfg, tlbn;
    ppcmas_tlb_t *tlb;
    uint32_t size_tlb, size_ps;
    target_ulong mask;

    switch (env->spr[SPR_BOOKE_MAS0] & MAS0_WQ_MASK) {
    case MAS0_WQ_ALWAYS:
        /* good to go, write that entry */
        break;
    case MAS0_WQ_COND:
        /* XXX check if reserved */
        if (0) {
            return;
        }
        break;
    case MAS0_WQ_CLR_RSRV:
        /* XXX clear entry */
        return;
    default:
        /* no idea what to do */
        return;
    }

    if (((env->spr[SPR_BOOKE_MAS0] & MAS0_ATSEL) == MAS0_ATSEL_LRAT) &&
        !msr_gs) {
        /* XXX we don't support direct LRAT setting yet */
        fprintf(stderr, "cpu: don't support LRAT setting yet\n");
        return;
    }

    tlbn = (env->spr[SPR_BOOKE_MAS0] & MAS0_TLBSEL_MASK) >> MAS0_TLBSEL_SHIFT;
    tlbncfg = env->spr[SPR_BOOKE_TLB0CFG + tlbn];

    tlb = booke206_cur_tlb(env);

    if (!tlb) {
        raise_exception_err_ra(env, POWERPC_EXCP_PROGRAM,
                               POWERPC_EXCP_INVAL |
                               POWERPC_EXCP_INVAL_INVAL, GETPC());
    }

    /* check that we support the targeted size */
    size_tlb = (env->spr[SPR_BOOKE_MAS1] & MAS1_TSIZE_MASK) >> MAS1_TSIZE_SHIFT;
    size_ps = booke206_tlbnps(env, tlbn);
    if ((env->spr[SPR_BOOKE_MAS1] & MAS1_VALID) && (tlbncfg & TLBnCFG_AVAIL) &&
        !(size_ps & (1 << size_tlb))) {
        raise_exception_err_ra(env, POWERPC_EXCP_PROGRAM,
                               POWERPC_EXCP_INVAL |
                               POWERPC_EXCP_INVAL_INVAL, GETPC());
    }

    if (msr_gs) {
        cpu_abort(env_cpu(env), "missing HV implementation\n");
    }

    if (tlb->mas1 & MAS1_VALID) {
        /*
         * Invalidate the page in QEMU TLB if it was a valid entry.
         *
         * In "PowerPC e500 Core Family Reference Manual, Rev. 1",
         * Section "12.4.2 TLB Write Entry (tlbwe) Instruction":
         * (https://www.nxp.com/docs/en/reference-manual/E500CORERM.pdf)
         *
         * "Note that when an L2 TLB entry is written, it may be displacing an
         * already valid entry in the same L2 TLB location (a victim). If a
         * valid L1 TLB entry corresponds to the L2 MMU victim entry, that L1
         * TLB entry is automatically invalidated."
         */
        flush_page(env, tlb);
    }

    tlb->mas7_3 = ((uint64_t)env->spr[SPR_BOOKE_MAS7] << 32) |
                  env->spr[SPR_BOOKE_MAS3];
    tlb->mas1 = env->spr[SPR_BOOKE_MAS1];

    if ((env->spr[SPR_MMUCFG] & MMUCFG_MAVN) == MMUCFG_MAVN_V2) {
        /* For a TLB that has a fixed size, TSIZE is ignored with MAV2 */
        booke206_fixed_size_tlbn(env, tlbn, tlb);
    } else {
        if (!(tlbncfg & TLBnCFG_AVAIL)) {
            /* force !AVAIL TLB entries to correct page size */
            tlb->mas1 &= ~MAS1_TSIZE_MASK;
            /* XXX can be configured in MMUCSR0 */
            tlb->mas1 |= (tlbncfg & TLBnCFG_MINSIZE) >> 12;
        }
    }

    /* Make a mask from TLB size to discard invalid bits in EPN field */
    mask = ~(booke206_tlb_to_page_size(env, tlb) - 1);
    /* Add a mask for page attributes */
    mask |= MAS2_ACM | MAS2_VLE | MAS2_W | MAS2_I | MAS2_M | MAS2_G | MAS2_E;

    if (!msr_cm) {
        /*
         * Executing a tlbwe instruction in 32-bit mode will set bits
         * 0:31 of the TLB EPN field to zero.
         */
        mask &= 0xffffffff;
    }

    tlb->mas2 = env->spr[SPR_BOOKE_MAS2] & mask;

    if (!(tlbncfg & TLBnCFG_IPROT)) {
        /* no IPROT supported by TLB */
        tlb->mas1 &= ~MAS1_IPROT;
    }

    flush_page(env, tlb);
}

static inline void booke206_tlb_to_mas(CPUPPCState *env, ppcmas_tlb_t *tlb)
{
    int tlbn = booke206_tlbm_to_tlbn(env, tlb);
    int way = booke206_tlbm_to_way(env, tlb);

    env->spr[SPR_BOOKE_MAS0] = tlbn << MAS0_TLBSEL_SHIFT;
    env->spr[SPR_BOOKE_MAS0] |= way << MAS0_ESEL_SHIFT;
    env->spr[SPR_BOOKE_MAS0] |= env->last_way << MAS0_NV_SHIFT;

    env->spr[SPR_BOOKE_MAS1] = tlb->mas1;
    env->spr[SPR_BOOKE_MAS2] = tlb->mas2;
    env->spr[SPR_BOOKE_MAS3] = tlb->mas7_3;
    env->spr[SPR_BOOKE_MAS7] = tlb->mas7_3 >> 32;
}

void helper_booke206_tlbre(CPUPPCState *env)
{
    ppcmas_tlb_t *tlb = NULL;

    tlb = booke206_cur_tlb(env);
    if (!tlb) {
        env->spr[SPR_BOOKE_MAS1] = 0;
    } else {
        booke206_tlb_to_mas(env, tlb);
    }
}

void helper_booke206_tlbsx(CPUPPCState *env, target_ulong address)
{
    ppcmas_tlb_t *tlb = NULL;
    int i, j;
    hwaddr raddr;
    uint32_t spid, sas;

    spid = (env->spr[SPR_BOOKE_MAS6] & MAS6_SPID_MASK) >> MAS6_SPID_SHIFT;
    sas = env->spr[SPR_BOOKE_MAS6] & MAS6_SAS;

    for (i = 0; i < BOOKE206_MAX_TLBN; i++) {
        int ways = booke206_tlb_ways(env, i);

        for (j = 0; j < ways; j++) {
            tlb = booke206_get_tlbm(env, i, address, j);

            if (!tlb) {
                continue;
            }

            if (ppcmas_tlb_check(env, tlb, &raddr, address, spid)) {
                continue;
            }

            if (sas != ((tlb->mas1 & MAS1_TS) >> MAS1_TS_SHIFT)) {
                continue;
            }

            booke206_tlb_to_mas(env, tlb);
            return;
        }
    }

    /* no entry found, fill with defaults */
    env->spr[SPR_BOOKE_MAS0] = env->spr[SPR_BOOKE_MAS4] & MAS4_TLBSELD_MASK;
    env->spr[SPR_BOOKE_MAS1] = env->spr[SPR_BOOKE_MAS4] & MAS4_TSIZED_MASK;
    env->spr[SPR_BOOKE_MAS2] = env->spr[SPR_BOOKE_MAS4] & MAS4_WIMGED_MASK;
    env->spr[SPR_BOOKE_MAS3] = 0;
    env->spr[SPR_BOOKE_MAS7] = 0;

    if (env->spr[SPR_BOOKE_MAS6] & MAS6_SAS) {
        env->spr[SPR_BOOKE_MAS1] |= MAS1_TS;
    }

    env->spr[SPR_BOOKE_MAS1] |= (env->spr[SPR_BOOKE_MAS6] >> 16)
                                << MAS1_TID_SHIFT;

    /* next victim logic */
    env->spr[SPR_BOOKE_MAS0] |= env->last_way << MAS0_ESEL_SHIFT;
    env->last_way++;
    env->last_way &= booke206_tlb_ways(env, 0) - 1;
    env->spr[SPR_BOOKE_MAS0] |= env->last_way << MAS0_NV_SHIFT;
}

static inline void booke206_invalidate_ea_tlb(CPUPPCState *env, int tlbn,
                                              uint32_t ea)
{
    int i;
    int ways = booke206_tlb_ways(env, tlbn);
    target_ulong mask;

    for (i = 0; i < ways; i++) {
        ppcmas_tlb_t *tlb = booke206_get_tlbm(env, tlbn, ea, i);
        if (!tlb) {
            continue;
        }
        mask = ~(booke206_tlb_to_page_size(env, tlb) - 1);
        if (((tlb->mas2 & MAS2_EPN_MASK) == (ea & mask)) &&
            !(tlb->mas1 & MAS1_IPROT)) {
            tlb->mas1 &= ~MAS1_VALID;
        }
    }
}
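
/*
 * For tlbivax the effective address doubles as a command word: bit 0x8
 * selects TLB1 rather than TLB0 and bit 0x4 requests an invalidate-all of
 * the selected array; otherwise only the entries matching the EPN (and not
 * marked IPROT) are invalidated, on every CPU.
 */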
void helper_booke206_tlbivax(CPUPPCState *env, target_ulong address)
{
    CPUState *cs;

    if (address & 0x4) {
        /* flush all entries */
        if (address & 0x8) {
            /* flush all of TLB1 */
            booke206_flush_tlb(env, BOOKE206_FLUSH_TLB1, 1);
        } else {
            /* flush all of TLB0 */
            booke206_flush_tlb(env, BOOKE206_FLUSH_TLB0, 0);
        }
        return;
    }

    if (address & 0x8) {
        /* flush TLB1 entries */
        booke206_invalidate_ea_tlb(env, 1, address);
        CPU_FOREACH(cs) {
            tlb_flush(cs);
        }
    } else {
        /* flush TLB0 entries */
        booke206_invalidate_ea_tlb(env, 0, address);
        CPU_FOREACH(cs) {
            tlb_flush_page(cs, address & MAS2_EPN_MASK);
        }
    }
}

void helper_booke206_tlbilx0(CPUPPCState *env, target_ulong address)
{
    /* XXX missing LPID handling */
    booke206_flush_tlb(env, -1, 1);
}

void helper_booke206_tlbilx1(CPUPPCState *env, target_ulong address)
{
    int i, j;
    int tid = (env->spr[SPR_BOOKE_MAS6] & MAS6_SPID);
    ppcmas_tlb_t *tlb = env->tlb.tlbm;
    int tlb_size;

    /* XXX missing LPID handling */
    for (i = 0; i < BOOKE206_MAX_TLBN; i++) {
        tlb_size = booke206_tlb_size(env, i);
        for (j = 0; j < tlb_size; j++) {
            if (!(tlb[j].mas1 & MAS1_IPROT) &&
                ((tlb[j].mas1 & MAS1_TID_MASK) == tid)) {
                tlb[j].mas1 &= ~MAS1_VALID;
            }
        }
        tlb += booke206_tlb_size(env, i);
    }
    tlb_flush(env_cpu(env));
}

void helper_booke206_tlbilx3(CPUPPCState *env, target_ulong address)
{
    int i, j;
    ppcmas_tlb_t *tlb;
    int tid = (env->spr[SPR_BOOKE_MAS6] & MAS6_SPID);
    int pid = tid >> MAS6_SPID_SHIFT;
    int sgs = env->spr[SPR_BOOKE_MAS5] & MAS5_SGS;
    int ind = (env->spr[SPR_BOOKE_MAS6] & MAS6_SIND) ? MAS1_IND : 0;
    /* XXX check for unsupported isize and raise an invalid opcode exception if so */
    int size = env->spr[SPR_BOOKE_MAS6] & MAS6_ISIZE_MASK;
    /* XXX implement MAV2 handling */
    bool mav2 = false;

    /* XXX missing LPID handling */
    /* flush by pid and ea */
    for (i = 0; i < BOOKE206_MAX_TLBN; i++) {
        int ways = booke206_tlb_ways(env, i);

        for (j = 0; j < ways; j++) {
            tlb = booke206_get_tlbm(env, i, address, j);
            if (!tlb) {
                continue;
            }
            if ((ppcmas_tlb_check(env, tlb, NULL, address, pid) != 0) ||
                (tlb->mas1 & MAS1_IPROT) ||
                ((tlb->mas1 & MAS1_IND) != ind) ||
                ((tlb->mas8 & MAS8_TGS) != sgs)) {
                continue;
            }
            if (mav2 && ((tlb->mas1 & MAS1_TSIZE_MASK) != size)) {
                /* XXX only check when MMUCFG[TWC] || TLBnCFG[HES] */
                continue;
            }
            /* XXX e500mc doesn't match SAS, but other cores might */
            tlb->mas1 &= ~MAS1_VALID;
        }
    }
    tlb_flush(env_cpu(env));
}

void helper_booke206_tlbflush(CPUPPCState *env, target_ulong type)
{
    int flags = 0;

    if (type & 2) {
        flags |= BOOKE206_FLUSH_TLB1;
    }

    if (type & 4) {
        flags |= BOOKE206_FLUSH_TLB0;
    }

    booke206_flush_tlb(env, flags, 1);
}

void helper_check_tlb_flush_local(CPUPPCState *env)
{
    check_tlb_flush(env, false);
}

void helper_check_tlb_flush_global(CPUPPCState *env)
{
    check_tlb_flush(env, true);
}
#endif /* CONFIG_TCG */

/*****************************************************************************/
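
/*
 * Central translation dispatch: pick the translator matching the MMU model
 * (radix or hash for 64-bit Book3S, hash32 for 32-bit Book3S/601, the
 * "jumbo" legacy path for everything else) and return the real address,
 * page size and protection bits on success.
 */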
static bool ppc_xlate(PowerPCCPU *cpu, vaddr eaddr, MMUAccessType access_type,
                      hwaddr *raddrp, int *psizep, int *protp,
                      int mmu_idx, bool guest_visible)
{
    switch (cpu->env.mmu_model) {
#if defined(TARGET_PPC64)
    case POWERPC_MMU_3_00:
        if (ppc64_v3_radix(cpu)) {
            return ppc_radix64_xlate(cpu, eaddr, access_type,
                                     raddrp, psizep, protp, mmu_idx,
                                     guest_visible);
        }
        /* fall through */
    case POWERPC_MMU_64B:
    case POWERPC_MMU_2_03:
    case POWERPC_MMU_2_06:
    case POWERPC_MMU_2_07:
        return ppc_hash64_xlate(cpu, eaddr, access_type,
                                raddrp, psizep, protp, mmu_idx, guest_visible);
#endif

    case POWERPC_MMU_32B:
    case POWERPC_MMU_601:
        return ppc_hash32_xlate(cpu, eaddr, access_type,
                                raddrp, psizep, protp, mmu_idx, guest_visible);

    default:
        return ppc_jumbo_xlate(cpu, eaddr, access_type, raddrp,
                               psizep, protp, mmu_idx, guest_visible);
    }
}

hwaddr ppc_cpu_get_phys_page_debug(CPUState *cs, vaddr addr)
{
    PowerPCCPU *cpu = POWERPC_CPU(cs);
    hwaddr raddr;
    int s, p;

    /*
     * Some MMUs have separate TLBs for code and data. If we only
     * try an MMU_DATA_LOAD, we may not be able to read instructions
     * mapped by code TLBs, so we also try a MMU_INST_FETCH.
     */
    if (ppc_xlate(cpu, addr, MMU_DATA_LOAD, &raddr, &s, &p,
                  cpu_mmu_index(&cpu->env, false), false) ||
        ppc_xlate(cpu, addr, MMU_INST_FETCH, &raddr, &s, &p,
                  cpu_mmu_index(&cpu->env, true), false)) {
        return raddr & TARGET_PAGE_MASK;
    }
    return -1;
}

#ifdef CONFIG_TCG
bool ppc_cpu_tlb_fill(CPUState *cs, vaddr eaddr, int size,
                      MMUAccessType access_type, int mmu_idx,
                      bool probe, uintptr_t retaddr)
{
    PowerPCCPU *cpu = POWERPC_CPU(cs);
    hwaddr raddr;
    int page_size, prot;

    if (ppc_xlate(cpu, eaddr, access_type, &raddr,
                  &page_size, &prot, mmu_idx, !probe)) {
        tlb_set_page(cs, eaddr & TARGET_PAGE_MASK, raddr & TARGET_PAGE_MASK,
                     prot, mmu_idx, 1UL << page_size);
        return true;
    }
    if (probe) {
        return false;
    }
    raise_exception_err_ra(&cpu->env, cs->exception_index,
                           cpu->env.error_code, retaddr);
}
#endif