1 /* 2 * PowerPC MMU, TLB, SLB and BAT emulation helpers for QEMU. 3 * 4 * Copyright (c) 2003-2007 Jocelyn Mayer 5 * 6 * This library is free software; you can redistribute it and/or 7 * modify it under the terms of the GNU Lesser General Public 8 * License as published by the Free Software Foundation; either 9 * version 2.1 of the License, or (at your option) any later version. 10 * 11 * This library is distributed in the hope that it will be useful, 12 * but WITHOUT ANY WARRANTY; without even the implied warranty of 13 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU 14 * Lesser General Public License for more details. 15 * 16 * You should have received a copy of the GNU Lesser General Public 17 * License along with this library; if not, see <http://www.gnu.org/licenses/>. 18 */ 19 20 #include "qemu/osdep.h" 21 #include "qemu/units.h" 22 #include "cpu.h" 23 #include "sysemu/kvm.h" 24 #include "kvm_ppc.h" 25 #include "mmu-hash64.h" 26 #include "mmu-hash32.h" 27 #include "exec/exec-all.h" 28 #include "exec/log.h" 29 #include "helper_regs.h" 30 #include "qemu/error-report.h" 31 #include "qemu/main-loop.h" 32 #include "qemu/qemu-print.h" 33 #include "internal.h" 34 #include "mmu-book3s-v3.h" 35 #include "mmu-radix64.h" 36 37 #ifdef CONFIG_TCG 38 #include "exec/helper-proto.h" 39 #include "exec/cpu_ldst.h" 40 #endif 41 /* #define DEBUG_MMU */ 42 /* #define DEBUG_BATS */ 43 /* #define DEBUG_SOFTWARE_TLB */ 44 /* #define DUMP_PAGE_TABLES */ 45 /* #define FLUSH_ALL_TLBS */ 46 47 #ifdef DEBUG_MMU 48 # define LOG_MMU_STATE(cpu) log_cpu_state_mask(CPU_LOG_MMU, (cpu), 0) 49 #else 50 # define LOG_MMU_STATE(cpu) do { } while (0) 51 #endif 52 53 #ifdef DEBUG_SOFTWARE_TLB 54 # define LOG_SWTLB(...) qemu_log_mask(CPU_LOG_MMU, __VA_ARGS__) 55 #else 56 # define LOG_SWTLB(...) do { } while (0) 57 #endif 58 59 #ifdef DEBUG_BATS 60 # define LOG_BATS(...) qemu_log_mask(CPU_LOG_MMU, __VA_ARGS__) 61 #else 62 # define LOG_BATS(...) do { } while (0) 63 #endif 64 65 /*****************************************************************************/ 66 /* PowerPC MMU emulation */ 67 68 /* Context used internally during MMU translations */ 69 typedef struct mmu_ctx_t mmu_ctx_t; 70 struct mmu_ctx_t { 71 hwaddr raddr; /* Real address */ 72 hwaddr eaddr; /* Effective address */ 73 int prot; /* Protection bits */ 74 hwaddr hash[2]; /* Pagetable hash values */ 75 target_ulong ptem; /* Virtual segment ID | API */ 76 int key; /* Access key */ 77 int nx; /* Non-execute area */ 78 }; 79 80 /* Common routines used by software and hardware TLBs emulation */ 81 static inline int pte_is_valid(target_ulong pte0) 82 { 83 return pte0 & 0x80000000 ? 
1 : 0; 84 } 85 86 static inline void pte_invalidate(target_ulong *pte0) 87 { 88 *pte0 &= ~0x80000000; 89 } 90 91 #define PTE_PTEM_MASK 0x7FFFFFBF 92 #define PTE_CHECK_MASK (TARGET_PAGE_MASK | 0x7B) 93 94 static int pp_check(int key, int pp, int nx) 95 { 96 int access; 97 98 /* Compute access rights */ 99 access = 0; 100 if (key == 0) { 101 switch (pp) { 102 case 0x0: 103 case 0x1: 104 case 0x2: 105 access |= PAGE_WRITE; 106 /* fall through */ 107 case 0x3: 108 access |= PAGE_READ; 109 break; 110 } 111 } else { 112 switch (pp) { 113 case 0x0: 114 access = 0; 115 break; 116 case 0x1: 117 case 0x3: 118 access = PAGE_READ; 119 break; 120 case 0x2: 121 access = PAGE_READ | PAGE_WRITE; 122 break; 123 } 124 } 125 if (nx == 0) { 126 access |= PAGE_EXEC; 127 } 128 129 return access; 130 } 131 132 static int check_prot(int prot, MMUAccessType access_type) 133 { 134 return prot & prot_for_access_type(access_type) ? 0 : -2; 135 } 136 137 static int ppc6xx_tlb_pte_check(mmu_ctx_t *ctx, target_ulong pte0, 138 target_ulong pte1, int h, 139 MMUAccessType access_type) 140 { 141 target_ulong ptem, mmask; 142 int access, ret, pteh, ptev, pp; 143 144 ret = -1; 145 /* Check validity and table match */ 146 ptev = pte_is_valid(pte0); 147 pteh = (pte0 >> 6) & 1; 148 if (ptev && h == pteh) { 149 /* Check vsid & api */ 150 ptem = pte0 & PTE_PTEM_MASK; 151 mmask = PTE_CHECK_MASK; 152 pp = pte1 & 0x00000003; 153 if (ptem == ctx->ptem) { 154 if (ctx->raddr != (hwaddr)-1ULL) { 155 /* all matches should have equal RPN, WIMG & PP */ 156 if ((ctx->raddr & mmask) != (pte1 & mmask)) { 157 qemu_log_mask(CPU_LOG_MMU, "Bad RPN/WIMG/PP\n"); 158 return -3; 159 } 160 } 161 /* Compute access rights */ 162 access = pp_check(ctx->key, pp, ctx->nx); 163 /* Keep the matching PTE information */ 164 ctx->raddr = pte1; 165 ctx->prot = access; 166 ret = check_prot(ctx->prot, access_type); 167 if (ret == 0) { 168 /* Access granted */ 169 qemu_log_mask(CPU_LOG_MMU, "PTE access granted !\n"); 170 } else { 171 /* Access right violation */ 172 qemu_log_mask(CPU_LOG_MMU, "PTE access rejected\n"); 173 } 174 } 175 } 176 177 return ret; 178 } 179 180 static int pte_update_flags(mmu_ctx_t *ctx, target_ulong *pte1p, 181 int ret, MMUAccessType access_type) 182 { 183 int store = 0; 184 185 /* Update page flags */ 186 if (!(*pte1p & 0x00000100)) { 187 /* Update accessed flag */ 188 *pte1p |= 0x00000100; 189 store = 1; 190 } 191 if (!(*pte1p & 0x00000080)) { 192 if (access_type == MMU_DATA_STORE && ret == 0) { 193 /* Update changed flag */ 194 *pte1p |= 0x00000080; 195 store = 1; 196 } else { 197 /* Force page fault for first write access */ 198 ctx->prot &= ~PAGE_WRITE; 199 } 200 } 201 202 return store; 203 } 204 205 /* Software driven TLB helpers */ 206 static inline int ppc6xx_tlb_getnum(CPUPPCState *env, target_ulong eaddr, 207 int way, int is_code) 208 { 209 int nr; 210 211 /* Select TLB num in a way from address */ 212 nr = (eaddr >> TARGET_PAGE_BITS) & (env->tlb_per_way - 1); 213 /* Select TLB way */ 214 nr += env->tlb_per_way * way; 215 /* 6xx have separate TLBs for instructions and data */ 216 if (is_code && env->id_tlbs == 1) { 217 nr += env->nb_tlb; 218 } 219 220 return nr; 221 } 222 223 static inline void ppc6xx_tlb_invalidate_all(CPUPPCState *env) 224 { 225 ppc6xx_tlb_t *tlb; 226 int nr, max; 227 228 /* LOG_SWTLB("Invalidate all TLBs\n"); */ 229 /* Invalidate all defined software TLB */ 230 max = env->nb_tlb; 231 if (env->id_tlbs == 1) { 232 max *= 2; 233 } 234 for (nr = 0; nr < max; nr++) { 235 tlb = &env->tlb.tlb6[nr]; 236 
pte_invalidate(&tlb->pte0); 237 } 238 tlb_flush(env_cpu(env)); 239 } 240 241 static inline void ppc6xx_tlb_invalidate_virt2(CPUPPCState *env, 242 target_ulong eaddr, 243 int is_code, int match_epn) 244 { 245 #if !defined(FLUSH_ALL_TLBS) 246 CPUState *cs = env_cpu(env); 247 ppc6xx_tlb_t *tlb; 248 int way, nr; 249 250 /* Invalidate ITLB + DTLB, all ways */ 251 for (way = 0; way < env->nb_ways; way++) { 252 nr = ppc6xx_tlb_getnum(env, eaddr, way, is_code); 253 tlb = &env->tlb.tlb6[nr]; 254 if (pte_is_valid(tlb->pte0) && (match_epn == 0 || eaddr == tlb->EPN)) { 255 LOG_SWTLB("TLB invalidate %d/%d " TARGET_FMT_lx "\n", nr, 256 env->nb_tlb, eaddr); 257 pte_invalidate(&tlb->pte0); 258 tlb_flush_page(cs, tlb->EPN); 259 } 260 } 261 #else 262 /* XXX: PowerPC specification say this is valid as well */ 263 ppc6xx_tlb_invalidate_all(env); 264 #endif 265 } 266 267 static inline void ppc6xx_tlb_invalidate_virt(CPUPPCState *env, 268 target_ulong eaddr, int is_code) 269 { 270 ppc6xx_tlb_invalidate_virt2(env, eaddr, is_code, 0); 271 } 272 273 #ifdef CONFIG_TCG 274 static void ppc6xx_tlb_store(CPUPPCState *env, target_ulong EPN, int way, 275 int is_code, target_ulong pte0, target_ulong pte1) 276 { 277 ppc6xx_tlb_t *tlb; 278 int nr; 279 280 nr = ppc6xx_tlb_getnum(env, EPN, way, is_code); 281 tlb = &env->tlb.tlb6[nr]; 282 LOG_SWTLB("Set TLB %d/%d EPN " TARGET_FMT_lx " PTE0 " TARGET_FMT_lx 283 " PTE1 " TARGET_FMT_lx "\n", nr, env->nb_tlb, EPN, pte0, pte1); 284 /* Invalidate any pending reference in QEMU for this virtual address */ 285 ppc6xx_tlb_invalidate_virt2(env, EPN, is_code, 1); 286 tlb->pte0 = pte0; 287 tlb->pte1 = pte1; 288 tlb->EPN = EPN; 289 /* Store last way for LRU mechanism */ 290 env->last_way = way; 291 } 292 #endif 293 294 static int ppc6xx_tlb_check(CPUPPCState *env, mmu_ctx_t *ctx, 295 target_ulong eaddr, MMUAccessType access_type) 296 { 297 ppc6xx_tlb_t *tlb; 298 int nr, best, way; 299 int ret; 300 301 best = -1; 302 ret = -1; /* No TLB found */ 303 for (way = 0; way < env->nb_ways; way++) { 304 nr = ppc6xx_tlb_getnum(env, eaddr, way, access_type == MMU_INST_FETCH); 305 tlb = &env->tlb.tlb6[nr]; 306 /* This test "emulates" the PTE index match for hardware TLBs */ 307 if ((eaddr & TARGET_PAGE_MASK) != tlb->EPN) { 308 LOG_SWTLB("TLB %d/%d %s [" TARGET_FMT_lx " " TARGET_FMT_lx 309 "] <> " TARGET_FMT_lx "\n", nr, env->nb_tlb, 310 pte_is_valid(tlb->pte0) ? "valid" : "inval", 311 tlb->EPN, tlb->EPN + TARGET_PAGE_SIZE, eaddr); 312 continue; 313 } 314 LOG_SWTLB("TLB %d/%d %s " TARGET_FMT_lx " <> " TARGET_FMT_lx " " 315 TARGET_FMT_lx " %c %c\n", nr, env->nb_tlb, 316 pte_is_valid(tlb->pte0) ? "valid" : "inval", 317 tlb->EPN, eaddr, tlb->pte1, 318 access_type == MMU_DATA_STORE ? 'S' : 'L', 319 access_type == MMU_INST_FETCH ? 'I' : 'D'); 320 switch (ppc6xx_tlb_pte_check(ctx, tlb->pte0, tlb->pte1, 321 0, access_type)) { 322 case -3: 323 /* TLB inconsistency */ 324 return -1; 325 case -2: 326 /* Access violation */ 327 ret = -2; 328 best = nr; 329 break; 330 case -1: 331 default: 332 /* No match */ 333 break; 334 case 0: 335 /* access granted */ 336 /* 337 * XXX: we should go on looping to check all TLBs 338 * consistency but we can speed-up the whole thing as 339 * the result would be undefined if TLBs are not 340 * consistent. 
341 */ 342 ret = 0; 343 best = nr; 344 goto done; 345 } 346 } 347 if (best != -1) { 348 done: 349 LOG_SWTLB("found TLB at addr " TARGET_FMT_plx " prot=%01x ret=%d\n", 350 ctx->raddr & TARGET_PAGE_MASK, ctx->prot, ret); 351 /* Update page flags */ 352 pte_update_flags(ctx, &env->tlb.tlb6[best].pte1, ret, access_type); 353 } 354 355 return ret; 356 } 357 358 /* Perform BAT hit & translation */ 359 static inline void bat_size_prot(CPUPPCState *env, target_ulong *blp, 360 int *validp, int *protp, target_ulong *BATu, 361 target_ulong *BATl) 362 { 363 target_ulong bl; 364 int pp, valid, prot; 365 366 bl = (*BATu & 0x00001FFC) << 15; 367 valid = 0; 368 prot = 0; 369 if (((msr_pr == 0) && (*BATu & 0x00000002)) || 370 ((msr_pr != 0) && (*BATu & 0x00000001))) { 371 valid = 1; 372 pp = *BATl & 0x00000003; 373 if (pp != 0) { 374 prot = PAGE_READ | PAGE_EXEC; 375 if (pp == 0x2) { 376 prot |= PAGE_WRITE; 377 } 378 } 379 } 380 *blp = bl; 381 *validp = valid; 382 *protp = prot; 383 } 384 385 static int get_bat_6xx_tlb(CPUPPCState *env, mmu_ctx_t *ctx, 386 target_ulong virtual, MMUAccessType access_type) 387 { 388 target_ulong *BATlt, *BATut, *BATu, *BATl; 389 target_ulong BEPIl, BEPIu, bl; 390 int i, valid, prot; 391 int ret = -1; 392 bool ifetch = access_type == MMU_INST_FETCH; 393 394 LOG_BATS("%s: %cBAT v " TARGET_FMT_lx "\n", __func__, 395 ifetch ? 'I' : 'D', virtual); 396 if (ifetch) { 397 BATlt = env->IBAT[1]; 398 BATut = env->IBAT[0]; 399 } else { 400 BATlt = env->DBAT[1]; 401 BATut = env->DBAT[0]; 402 } 403 for (i = 0; i < env->nb_BATs; i++) { 404 BATu = &BATut[i]; 405 BATl = &BATlt[i]; 406 BEPIu = *BATu & 0xF0000000; 407 BEPIl = *BATu & 0x0FFE0000; 408 bat_size_prot(env, &bl, &valid, &prot, BATu, BATl); 409 LOG_BATS("%s: %cBAT%d v " TARGET_FMT_lx " BATu " TARGET_FMT_lx 410 " BATl " TARGET_FMT_lx "\n", __func__, 411 ifetch ? 'I' : 'D', i, virtual, *BATu, *BATl); 412 if ((virtual & 0xF0000000) == BEPIu && 413 ((virtual & 0x0FFE0000) & ~bl) == BEPIl) { 414 /* BAT matches */ 415 if (valid != 0) { 416 /* Get physical address */ 417 ctx->raddr = (*BATl & 0xF0000000) | 418 ((virtual & 0x0FFE0000 & bl) | (*BATl & 0x0FFE0000)) | 419 (virtual & 0x0001F000); 420 /* Compute access rights */ 421 ctx->prot = prot; 422 ret = check_prot(ctx->prot, access_type); 423 if (ret == 0) { 424 LOG_BATS("BAT %d match: r " TARGET_FMT_plx " prot=%c%c\n", 425 i, ctx->raddr, ctx->prot & PAGE_READ ? 'R' : '-', 426 ctx->prot & PAGE_WRITE ? 'W' : '-'); 427 } 428 break; 429 } 430 } 431 } 432 if (ret < 0) { 433 #if defined(DEBUG_BATS) 434 if (qemu_log_enabled()) { 435 LOG_BATS("no BAT match for " TARGET_FMT_lx ":\n", virtual); 436 for (i = 0; i < 4; i++) { 437 BATu = &BATut[i]; 438 BATl = &BATlt[i]; 439 BEPIu = *BATu & 0xF0000000; 440 BEPIl = *BATu & 0x0FFE0000; 441 bl = (*BATu & 0x00001FFC) << 15; 442 LOG_BATS("%s: %cBAT%d v " TARGET_FMT_lx " BATu " TARGET_FMT_lx 443 " BATl " TARGET_FMT_lx "\n\t" TARGET_FMT_lx " " 444 TARGET_FMT_lx " " TARGET_FMT_lx "\n", 445 __func__, ifetch ? 
'I' : 'D', i, virtual, 446 *BATu, *BATl, BEPIu, BEPIl, bl); 447 } 448 } 449 #endif 450 } 451 /* No hit */ 452 return ret; 453 } 454 455 /* Perform segment based translation */ 456 static int get_segment_6xx_tlb(CPUPPCState *env, mmu_ctx_t *ctx, 457 target_ulong eaddr, MMUAccessType access_type, 458 int type) 459 { 460 PowerPCCPU *cpu = env_archcpu(env); 461 hwaddr hash; 462 target_ulong vsid; 463 int ds, pr, target_page_bits; 464 int ret; 465 target_ulong sr, pgidx; 466 467 pr = msr_pr; 468 ctx->eaddr = eaddr; 469 470 sr = env->sr[eaddr >> 28]; 471 ctx->key = (((sr & 0x20000000) && (pr != 0)) || 472 ((sr & 0x40000000) && (pr == 0))) ? 1 : 0; 473 ds = sr & 0x80000000 ? 1 : 0; 474 ctx->nx = sr & 0x10000000 ? 1 : 0; 475 vsid = sr & 0x00FFFFFF; 476 target_page_bits = TARGET_PAGE_BITS; 477 qemu_log_mask(CPU_LOG_MMU, 478 "Check segment v=" TARGET_FMT_lx " %d " TARGET_FMT_lx 479 " nip=" TARGET_FMT_lx " lr=" TARGET_FMT_lx 480 " ir=%d dr=%d pr=%d %d t=%d\n", 481 eaddr, (int)(eaddr >> 28), sr, env->nip, env->lr, (int)msr_ir, 482 (int)msr_dr, pr != 0 ? 1 : 0, access_type == MMU_DATA_STORE, type); 483 pgidx = (eaddr & ~SEGMENT_MASK_256M) >> target_page_bits; 484 hash = vsid ^ pgidx; 485 ctx->ptem = (vsid << 7) | (pgidx >> 10); 486 487 qemu_log_mask(CPU_LOG_MMU, 488 "pte segment: key=%d ds %d nx %d vsid " TARGET_FMT_lx "\n", 489 ctx->key, ds, ctx->nx, vsid); 490 ret = -1; 491 if (!ds) { 492 /* Check if instruction fetch is allowed, if needed */ 493 if (type != ACCESS_CODE || ctx->nx == 0) { 494 /* Page address translation */ 495 qemu_log_mask(CPU_LOG_MMU, "htab_base " TARGET_FMT_plx 496 " htab_mask " TARGET_FMT_plx 497 " hash " TARGET_FMT_plx "\n", 498 ppc_hash32_hpt_base(cpu), ppc_hash32_hpt_mask(cpu), hash); 499 ctx->hash[0] = hash; 500 ctx->hash[1] = ~hash; 501 502 /* Initialize real address with an invalid value */ 503 ctx->raddr = (hwaddr)-1ULL; 504 /* Software TLB search */ 505 ret = ppc6xx_tlb_check(env, ctx, eaddr, access_type); 506 #if defined(DUMP_PAGE_TABLES) 507 if (qemu_loglevel_mask(CPU_LOG_MMU)) { 508 CPUState *cs = env_cpu(env); 509 hwaddr curaddr; 510 uint32_t a0, a1, a2, a3; 511 512 qemu_log("Page table: " TARGET_FMT_plx " len " TARGET_FMT_plx 513 "\n", ppc_hash32_hpt_base(cpu), 514 ppc_hash32_hpt_mask(env) + 0x80); 515 for (curaddr = ppc_hash32_hpt_base(cpu); 516 curaddr < (ppc_hash32_hpt_base(cpu) 517 + ppc_hash32_hpt_mask(cpu) + 0x80); 518 curaddr += 16) { 519 a0 = ldl_phys(cs->as, curaddr); 520 a1 = ldl_phys(cs->as, curaddr + 4); 521 a2 = ldl_phys(cs->as, curaddr + 8); 522 a3 = ldl_phys(cs->as, curaddr + 12); 523 if (a0 != 0 || a1 != 0 || a2 != 0 || a3 != 0) { 524 qemu_log(TARGET_FMT_plx ": %08x %08x %08x %08x\n", 525 curaddr, a0, a1, a2, a3); 526 } 527 } 528 } 529 #endif 530 } else { 531 qemu_log_mask(CPU_LOG_MMU, "No access allowed\n"); 532 ret = -3; 533 } 534 } else { 535 target_ulong sr; 536 537 qemu_log_mask(CPU_LOG_MMU, "direct store...\n"); 538 /* Direct-store segment : absolutely *BUGGY* for now */ 539 540 /* 541 * Direct-store implies a 32-bit MMU. 542 * Check the Segment Register's bus unit ID (BUID). 543 */ 544 sr = env->sr[eaddr >> 28]; 545 if ((sr & 0x1FF00000) >> 20 == 0x07f) { 546 /* 547 * Memory-forced I/O controller interface access 548 * 549 * If T=1 and BUID=x'07F', the 601 performs a memory 550 * access to SR[28-31] LA[4-31], bypassing all protection 551 * mechanisms. 
552 */ 553 ctx->raddr = ((sr & 0xF) << 28) | (eaddr & 0x0FFFFFFF); 554 ctx->prot = PAGE_READ | PAGE_WRITE | PAGE_EXEC; 555 return 0; 556 } 557 558 switch (type) { 559 case ACCESS_INT: 560 /* Integer load/store : only access allowed */ 561 break; 562 case ACCESS_CODE: 563 /* No code fetch is allowed in direct-store areas */ 564 return -4; 565 case ACCESS_FLOAT: 566 /* Floating point load/store */ 567 return -4; 568 case ACCESS_RES: 569 /* lwarx, ldarx or srwcx. */ 570 return -4; 571 case ACCESS_CACHE: 572 /* 573 * dcba, dcbt, dcbtst, dcbf, dcbi, dcbst, dcbz, or icbi 574 * 575 * Should make the instruction do no-op. As it already do 576 * no-op, it's quite easy :-) 577 */ 578 ctx->raddr = eaddr; 579 return 0; 580 case ACCESS_EXT: 581 /* eciwx or ecowx */ 582 return -4; 583 default: 584 qemu_log_mask(CPU_LOG_MMU, "ERROR: instruction should not need " 585 "address translation\n"); 586 return -4; 587 } 588 if ((access_type == MMU_DATA_STORE || ctx->key != 1) && 589 (access_type == MMU_DATA_LOAD || ctx->key != 0)) { 590 ctx->raddr = eaddr; 591 ret = 2; 592 } else { 593 ret = -2; 594 } 595 } 596 597 return ret; 598 } 599 600 /* Generic TLB check function for embedded PowerPC implementations */ 601 static int ppcemb_tlb_check(CPUPPCState *env, ppcemb_tlb_t *tlb, 602 hwaddr *raddrp, 603 target_ulong address, uint32_t pid, int ext, 604 int i) 605 { 606 target_ulong mask; 607 608 /* Check valid flag */ 609 if (!(tlb->prot & PAGE_VALID)) { 610 return -1; 611 } 612 mask = ~(tlb->size - 1); 613 LOG_SWTLB("%s: TLB %d address " TARGET_FMT_lx " PID %u <=> " TARGET_FMT_lx 614 " " TARGET_FMT_lx " %u %x\n", __func__, i, address, pid, tlb->EPN, 615 mask, (uint32_t)tlb->PID, tlb->prot); 616 /* Check PID */ 617 if (tlb->PID != 0 && tlb->PID != pid) { 618 return -1; 619 } 620 /* Check effective address */ 621 if ((address & mask) != tlb->EPN) { 622 return -1; 623 } 624 *raddrp = (tlb->RPN & mask) | (address & ~mask); 625 if (ext) { 626 /* Extend the physical address to 36 bits */ 627 *raddrp |= (uint64_t)(tlb->RPN & 0xF) << 32; 628 } 629 630 return 0; 631 } 632 633 #ifdef CONFIG_TCG 634 /* Generic TLB search function for PowerPC embedded implementations */ 635 static int ppcemb_tlb_search(CPUPPCState *env, target_ulong address, 636 uint32_t pid) 637 { 638 ppcemb_tlb_t *tlb; 639 hwaddr raddr; 640 int i, ret; 641 642 /* Default return value is no match */ 643 ret = -1; 644 for (i = 0; i < env->nb_tlb; i++) { 645 tlb = &env->tlb.tlbe[i]; 646 if (ppcemb_tlb_check(env, tlb, &raddr, address, pid, 0, i) == 0) { 647 ret = i; 648 break; 649 } 650 } 651 652 return ret; 653 } 654 #endif 655 656 /* Helpers specific to PowerPC 40x implementations */ 657 static inline void ppc4xx_tlb_invalidate_all(CPUPPCState *env) 658 { 659 ppcemb_tlb_t *tlb; 660 int i; 661 662 for (i = 0; i < env->nb_tlb; i++) { 663 tlb = &env->tlb.tlbe[i]; 664 tlb->prot &= ~PAGE_VALID; 665 } 666 tlb_flush(env_cpu(env)); 667 } 668 669 static int mmu40x_get_physical_address(CPUPPCState *env, mmu_ctx_t *ctx, 670 target_ulong address, 671 MMUAccessType access_type) 672 { 673 ppcemb_tlb_t *tlb; 674 hwaddr raddr; 675 int i, ret, zsel, zpr, pr; 676 677 ret = -1; 678 raddr = (hwaddr)-1ULL; 679 pr = msr_pr; 680 for (i = 0; i < env->nb_tlb; i++) { 681 tlb = &env->tlb.tlbe[i]; 682 if (ppcemb_tlb_check(env, tlb, &raddr, address, 683 env->spr[SPR_40x_PID], 0, i) < 0) { 684 continue; 685 } 686 zsel = (tlb->attr >> 4) & 0xF; 687 zpr = (env->spr[SPR_40x_ZPR] >> (30 - (2 * zsel))) & 0x3; 688 LOG_SWTLB("%s: TLB %d zsel %d zpr %d ty %d attr %08x\n", 689 __func__, i, zsel, 
zpr, access_type, tlb->attr); 690 /* Check execute enable bit */ 691 switch (zpr) { 692 case 0x2: 693 if (pr != 0) { 694 goto check_perms; 695 } 696 /* fall through */ 697 case 0x3: 698 /* All accesses granted */ 699 ctx->prot = PAGE_READ | PAGE_WRITE | PAGE_EXEC; 700 ret = 0; 701 break; 702 case 0x0: 703 if (pr != 0) { 704 /* Raise Zone protection fault. */ 705 env->spr[SPR_40x_ESR] = 1 << 22; 706 ctx->prot = 0; 707 ret = -2; 708 break; 709 } 710 /* fall through */ 711 case 0x1: 712 check_perms: 713 /* Check from TLB entry */ 714 ctx->prot = tlb->prot; 715 ret = check_prot(ctx->prot, access_type); 716 if (ret == -2) { 717 env->spr[SPR_40x_ESR] = 0; 718 } 719 break; 720 } 721 if (ret >= 0) { 722 ctx->raddr = raddr; 723 LOG_SWTLB("%s: access granted " TARGET_FMT_lx " => " TARGET_FMT_plx 724 " %d %d\n", __func__, address, ctx->raddr, ctx->prot, 725 ret); 726 return 0; 727 } 728 } 729 LOG_SWTLB("%s: access refused " TARGET_FMT_lx " => " TARGET_FMT_plx 730 " %d %d\n", __func__, address, raddr, ctx->prot, ret); 731 732 return ret; 733 } 734 735 void store_40x_sler(CPUPPCState *env, uint32_t val) 736 { 737 /* XXX: TO BE FIXED */ 738 if (val != 0x00000000) { 739 cpu_abort(env_cpu(env), 740 "Little-endian regions are not supported by now\n"); 741 } 742 env->spr[SPR_405_SLER] = val; 743 } 744 745 static int mmubooke_check_tlb(CPUPPCState *env, ppcemb_tlb_t *tlb, 746 hwaddr *raddr, int *prot, target_ulong address, 747 MMUAccessType access_type, int i) 748 { 749 int prot2; 750 751 if (ppcemb_tlb_check(env, tlb, raddr, address, 752 env->spr[SPR_BOOKE_PID], 753 !env->nb_pids, i) >= 0) { 754 goto found_tlb; 755 } 756 757 if (env->spr[SPR_BOOKE_PID1] && 758 ppcemb_tlb_check(env, tlb, raddr, address, 759 env->spr[SPR_BOOKE_PID1], 0, i) >= 0) { 760 goto found_tlb; 761 } 762 763 if (env->spr[SPR_BOOKE_PID2] && 764 ppcemb_tlb_check(env, tlb, raddr, address, 765 env->spr[SPR_BOOKE_PID2], 0, i) >= 0) { 766 goto found_tlb; 767 } 768 769 LOG_SWTLB("%s: TLB entry not found\n", __func__); 770 return -1; 771 772 found_tlb: 773 774 if (msr_pr != 0) { 775 prot2 = tlb->prot & 0xF; 776 } else { 777 prot2 = (tlb->prot >> 4) & 0xF; 778 } 779 780 /* Check the address space */ 781 if ((access_type == MMU_INST_FETCH ? msr_ir : msr_dr) != (tlb->attr & 1)) { 782 LOG_SWTLB("%s: AS doesn't match\n", __func__); 783 return -1; 784 } 785 786 *prot = prot2; 787 if (prot2 & prot_for_access_type(access_type)) { 788 LOG_SWTLB("%s: good TLB!\n", __func__); 789 return 0; 790 } 791 792 LOG_SWTLB("%s: no prot match: %x\n", __func__, prot2); 793 return access_type == MMU_INST_FETCH ? 
-3 : -2; 794 } 795 796 static int mmubooke_get_physical_address(CPUPPCState *env, mmu_ctx_t *ctx, 797 target_ulong address, 798 MMUAccessType access_type) 799 { 800 ppcemb_tlb_t *tlb; 801 hwaddr raddr; 802 int i, ret; 803 804 ret = -1; 805 raddr = (hwaddr)-1ULL; 806 for (i = 0; i < env->nb_tlb; i++) { 807 tlb = &env->tlb.tlbe[i]; 808 ret = mmubooke_check_tlb(env, tlb, &raddr, &ctx->prot, address, 809 access_type, i); 810 if (ret != -1) { 811 break; 812 } 813 } 814 815 if (ret >= 0) { 816 ctx->raddr = raddr; 817 LOG_SWTLB("%s: access granted " TARGET_FMT_lx " => " TARGET_FMT_plx 818 " %d %d\n", __func__, address, ctx->raddr, ctx->prot, 819 ret); 820 } else { 821 LOG_SWTLB("%s: access refused " TARGET_FMT_lx " => " TARGET_FMT_plx 822 " %d %d\n", __func__, address, raddr, ctx->prot, ret); 823 } 824 825 return ret; 826 } 827 828 static void booke206_flush_tlb(CPUPPCState *env, int flags, 829 const int check_iprot) 830 { 831 int tlb_size; 832 int i, j; 833 ppcmas_tlb_t *tlb = env->tlb.tlbm; 834 835 for (i = 0; i < BOOKE206_MAX_TLBN; i++) { 836 if (flags & (1 << i)) { 837 tlb_size = booke206_tlb_size(env, i); 838 for (j = 0; j < tlb_size; j++) { 839 if (!check_iprot || !(tlb[j].mas1 & MAS1_IPROT)) { 840 tlb[j].mas1 &= ~MAS1_VALID; 841 } 842 } 843 } 844 tlb += booke206_tlb_size(env, i); 845 } 846 847 tlb_flush(env_cpu(env)); 848 } 849 850 static hwaddr booke206_tlb_to_page_size(CPUPPCState *env, 851 ppcmas_tlb_t *tlb) 852 { 853 int tlbm_size; 854 855 tlbm_size = (tlb->mas1 & MAS1_TSIZE_MASK) >> MAS1_TSIZE_SHIFT; 856 857 return 1024ULL << tlbm_size; 858 } 859 860 /* TLB check function for MAS based SoftTLBs */ 861 static int ppcmas_tlb_check(CPUPPCState *env, ppcmas_tlb_t *tlb, 862 hwaddr *raddrp, target_ulong address, 863 uint32_t pid) 864 { 865 hwaddr mask; 866 uint32_t tlb_pid; 867 868 if (!msr_cm) { 869 /* In 32bit mode we can only address 32bit EAs */ 870 address = (uint32_t)address; 871 } 872 873 /* Check valid flag */ 874 if (!(tlb->mas1 & MAS1_VALID)) { 875 return -1; 876 } 877 878 mask = ~(booke206_tlb_to_page_size(env, tlb) - 1); 879 LOG_SWTLB("%s: TLB ADDR=0x" TARGET_FMT_lx " PID=0x%x MAS1=0x%x MAS2=0x%" 880 PRIx64 " mask=0x%" HWADDR_PRIx " MAS7_3=0x%" PRIx64 " MAS8=0x%" 881 PRIx32 "\n", __func__, address, pid, tlb->mas1, tlb->mas2, mask, 882 tlb->mas7_3, tlb->mas8); 883 884 /* Check PID */ 885 tlb_pid = (tlb->mas1 & MAS1_TID_MASK) >> MAS1_TID_SHIFT; 886 if (tlb_pid != 0 && tlb_pid != pid) { 887 return -1; 888 } 889 890 /* Check effective address */ 891 if ((address & mask) != (tlb->mas2 & MAS2_EPN_MASK)) { 892 return -1; 893 } 894 895 if (raddrp) { 896 *raddrp = (tlb->mas7_3 & mask) | (address & ~mask); 897 } 898 899 return 0; 900 } 901 902 static bool is_epid_mmu(int mmu_idx) 903 { 904 return mmu_idx == PPC_TLB_EPID_STORE || mmu_idx == PPC_TLB_EPID_LOAD; 905 } 906 907 static uint32_t mmubooke206_esr(int mmu_idx, MMUAccessType access_type) 908 { 909 uint32_t esr = 0; 910 if (access_type == MMU_DATA_STORE) { 911 esr |= ESR_ST; 912 } 913 if (is_epid_mmu(mmu_idx)) { 914 esr |= ESR_EPID; 915 } 916 return esr; 917 } 918 919 /* 920 * Get EPID register given the mmu_idx. If this is regular load, 921 * construct the EPID access bits from current processor state 922 * 923 * Get the effective AS and PR bits and the PID. The PID is returned 924 * only if EPID load is requested, otherwise the caller must detect 925 * the correct EPID. Return true if valid EPID is returned. 
926 */ 927 static bool mmubooke206_get_as(CPUPPCState *env, 928 int mmu_idx, uint32_t *epid_out, 929 bool *as_out, bool *pr_out) 930 { 931 if (is_epid_mmu(mmu_idx)) { 932 uint32_t epidr; 933 if (mmu_idx == PPC_TLB_EPID_STORE) { 934 epidr = env->spr[SPR_BOOKE_EPSC]; 935 } else { 936 epidr = env->spr[SPR_BOOKE_EPLC]; 937 } 938 *epid_out = (epidr & EPID_EPID) >> EPID_EPID_SHIFT; 939 *as_out = !!(epidr & EPID_EAS); 940 *pr_out = !!(epidr & EPID_EPR); 941 return true; 942 } else { 943 *as_out = msr_ds; 944 *pr_out = msr_pr; 945 return false; 946 } 947 } 948 949 /* Check if the tlb found by hashing really matches */ 950 static int mmubooke206_check_tlb(CPUPPCState *env, ppcmas_tlb_t *tlb, 951 hwaddr *raddr, int *prot, 952 target_ulong address, 953 MMUAccessType access_type, int mmu_idx) 954 { 955 int prot2 = 0; 956 uint32_t epid; 957 bool as, pr; 958 bool use_epid = mmubooke206_get_as(env, mmu_idx, &epid, &as, &pr); 959 960 if (!use_epid) { 961 if (ppcmas_tlb_check(env, tlb, raddr, address, 962 env->spr[SPR_BOOKE_PID]) >= 0) { 963 goto found_tlb; 964 } 965 966 if (env->spr[SPR_BOOKE_PID1] && 967 ppcmas_tlb_check(env, tlb, raddr, address, 968 env->spr[SPR_BOOKE_PID1]) >= 0) { 969 goto found_tlb; 970 } 971 972 if (env->spr[SPR_BOOKE_PID2] && 973 ppcmas_tlb_check(env, tlb, raddr, address, 974 env->spr[SPR_BOOKE_PID2]) >= 0) { 975 goto found_tlb; 976 } 977 } else { 978 if (ppcmas_tlb_check(env, tlb, raddr, address, epid) >= 0) { 979 goto found_tlb; 980 } 981 } 982 983 LOG_SWTLB("%s: TLB entry not found\n", __func__); 984 return -1; 985 986 found_tlb: 987 988 if (pr) { 989 if (tlb->mas7_3 & MAS3_UR) { 990 prot2 |= PAGE_READ; 991 } 992 if (tlb->mas7_3 & MAS3_UW) { 993 prot2 |= PAGE_WRITE; 994 } 995 if (tlb->mas7_3 & MAS3_UX) { 996 prot2 |= PAGE_EXEC; 997 } 998 } else { 999 if (tlb->mas7_3 & MAS3_SR) { 1000 prot2 |= PAGE_READ; 1001 } 1002 if (tlb->mas7_3 & MAS3_SW) { 1003 prot2 |= PAGE_WRITE; 1004 } 1005 if (tlb->mas7_3 & MAS3_SX) { 1006 prot2 |= PAGE_EXEC; 1007 } 1008 } 1009 1010 /* Check the address space and permissions */ 1011 if (access_type == MMU_INST_FETCH) { 1012 /* There is no way to fetch code using epid load */ 1013 assert(!use_epid); 1014 as = msr_ir; 1015 } 1016 1017 if (as != ((tlb->mas1 & MAS1_TS) >> MAS1_TS_SHIFT)) { 1018 LOG_SWTLB("%s: AS doesn't match\n", __func__); 1019 return -1; 1020 } 1021 1022 *prot = prot2; 1023 if (prot2 & prot_for_access_type(access_type)) { 1024 LOG_SWTLB("%s: good TLB!\n", __func__); 1025 return 0; 1026 } 1027 1028 LOG_SWTLB("%s: no prot match: %x\n", __func__, prot2); 1029 return access_type == MMU_INST_FETCH ? 
-3 : -2; 1030 } 1031 1032 static int mmubooke206_get_physical_address(CPUPPCState *env, mmu_ctx_t *ctx, 1033 target_ulong address, 1034 MMUAccessType access_type, 1035 int mmu_idx) 1036 { 1037 ppcmas_tlb_t *tlb; 1038 hwaddr raddr; 1039 int i, j, ret; 1040 1041 ret = -1; 1042 raddr = (hwaddr)-1ULL; 1043 1044 for (i = 0; i < BOOKE206_MAX_TLBN; i++) { 1045 int ways = booke206_tlb_ways(env, i); 1046 1047 for (j = 0; j < ways; j++) { 1048 tlb = booke206_get_tlbm(env, i, address, j); 1049 if (!tlb) { 1050 continue; 1051 } 1052 ret = mmubooke206_check_tlb(env, tlb, &raddr, &ctx->prot, address, 1053 access_type, mmu_idx); 1054 if (ret != -1) { 1055 goto found_tlb; 1056 } 1057 } 1058 } 1059 1060 found_tlb: 1061 1062 if (ret >= 0) { 1063 ctx->raddr = raddr; 1064 LOG_SWTLB("%s: access granted " TARGET_FMT_lx " => " TARGET_FMT_plx 1065 " %d %d\n", __func__, address, ctx->raddr, ctx->prot, 1066 ret); 1067 } else { 1068 LOG_SWTLB("%s: access refused " TARGET_FMT_lx " => " TARGET_FMT_plx 1069 " %d %d\n", __func__, address, raddr, ctx->prot, ret); 1070 } 1071 1072 return ret; 1073 } 1074 1075 static const char *book3e_tsize_to_str[32] = { 1076 "1K", "2K", "4K", "8K", "16K", "32K", "64K", "128K", "256K", "512K", 1077 "1M", "2M", "4M", "8M", "16M", "32M", "64M", "128M", "256M", "512M", 1078 "1G", "2G", "4G", "8G", "16G", "32G", "64G", "128G", "256G", "512G", 1079 "1T", "2T" 1080 }; 1081 1082 static void mmubooke_dump_mmu(CPUPPCState *env) 1083 { 1084 ppcemb_tlb_t *entry; 1085 int i; 1086 1087 if (kvm_enabled() && !env->kvm_sw_tlb) { 1088 qemu_printf("Cannot access KVM TLB\n"); 1089 return; 1090 } 1091 1092 qemu_printf("\nTLB:\n"); 1093 qemu_printf("Effective Physical Size PID Prot " 1094 "Attr\n"); 1095 1096 entry = &env->tlb.tlbe[0]; 1097 for (i = 0; i < env->nb_tlb; i++, entry++) { 1098 hwaddr ea, pa; 1099 target_ulong mask; 1100 uint64_t size = (uint64_t)entry->size; 1101 char size_buf[20]; 1102 1103 /* Check valid flag */ 1104 if (!(entry->prot & PAGE_VALID)) { 1105 continue; 1106 } 1107 1108 mask = ~(entry->size - 1); 1109 ea = entry->EPN & mask; 1110 pa = entry->RPN & mask; 1111 /* Extend the physical address to 36 bits */ 1112 pa |= (hwaddr)(entry->RPN & 0xF) << 32; 1113 if (size >= 1 * MiB) { 1114 snprintf(size_buf, sizeof(size_buf), "%3" PRId64 "M", size / MiB); 1115 } else { 1116 snprintf(size_buf, sizeof(size_buf), "%3" PRId64 "k", size / KiB); 1117 } 1118 qemu_printf("0x%016" PRIx64 " 0x%016" PRIx64 " %s %-5u %08x %08x\n", 1119 (uint64_t)ea, (uint64_t)pa, size_buf, (uint32_t)entry->PID, 1120 entry->prot, entry->attr); 1121 } 1122 1123 } 1124 1125 static void mmubooke206_dump_one_tlb(CPUPPCState *env, int tlbn, int offset, 1126 int tlbsize) 1127 { 1128 ppcmas_tlb_t *entry; 1129 int i; 1130 1131 qemu_printf("\nTLB%d:\n", tlbn); 1132 qemu_printf("Effective Physical Size TID TS SRWX" 1133 " URWX WIMGE U0123\n"); 1134 1135 entry = &env->tlb.tlbm[offset]; 1136 for (i = 0; i < tlbsize; i++, entry++) { 1137 hwaddr ea, pa, size; 1138 int tsize; 1139 1140 if (!(entry->mas1 & MAS1_VALID)) { 1141 continue; 1142 } 1143 1144 tsize = (entry->mas1 & MAS1_TSIZE_MASK) >> MAS1_TSIZE_SHIFT; 1145 size = 1024ULL << tsize; 1146 ea = entry->mas2 & ~(size - 1); 1147 pa = entry->mas7_3 & ~(size - 1); 1148 1149 qemu_printf("0x%016" PRIx64 " 0x%016" PRIx64 " %4s %-5u %1u S%c%c%c" 1150 "U%c%c%c %c%c%c%c%c U%c%c%c%c\n", 1151 (uint64_t)ea, (uint64_t)pa, 1152 book3e_tsize_to_str[tsize], 1153 (entry->mas1 & MAS1_TID_MASK) >> MAS1_TID_SHIFT, 1154 (entry->mas1 & MAS1_TS) >> MAS1_TS_SHIFT, 1155 entry->mas7_3 & MAS3_SR ? 
'R' : '-', 1156 entry->mas7_3 & MAS3_SW ? 'W' : '-', 1157 entry->mas7_3 & MAS3_SX ? 'X' : '-', 1158 entry->mas7_3 & MAS3_UR ? 'R' : '-', 1159 entry->mas7_3 & MAS3_UW ? 'W' : '-', 1160 entry->mas7_3 & MAS3_UX ? 'X' : '-', 1161 entry->mas2 & MAS2_W ? 'W' : '-', 1162 entry->mas2 & MAS2_I ? 'I' : '-', 1163 entry->mas2 & MAS2_M ? 'M' : '-', 1164 entry->mas2 & MAS2_G ? 'G' : '-', 1165 entry->mas2 & MAS2_E ? 'E' : '-', 1166 entry->mas7_3 & MAS3_U0 ? '0' : '-', 1167 entry->mas7_3 & MAS3_U1 ? '1' : '-', 1168 entry->mas7_3 & MAS3_U2 ? '2' : '-', 1169 entry->mas7_3 & MAS3_U3 ? '3' : '-'); 1170 } 1171 } 1172 1173 static void mmubooke206_dump_mmu(CPUPPCState *env) 1174 { 1175 int offset = 0; 1176 int i; 1177 1178 if (kvm_enabled() && !env->kvm_sw_tlb) { 1179 qemu_printf("Cannot access KVM TLB\n"); 1180 return; 1181 } 1182 1183 for (i = 0; i < BOOKE206_MAX_TLBN; i++) { 1184 int size = booke206_tlb_size(env, i); 1185 1186 if (size == 0) { 1187 continue; 1188 } 1189 1190 mmubooke206_dump_one_tlb(env, i, offset, size); 1191 offset += size; 1192 } 1193 } 1194 1195 static void mmu6xx_dump_BATs(CPUPPCState *env, int type) 1196 { 1197 target_ulong *BATlt, *BATut, *BATu, *BATl; 1198 target_ulong BEPIl, BEPIu, bl; 1199 int i; 1200 1201 switch (type) { 1202 case ACCESS_CODE: 1203 BATlt = env->IBAT[1]; 1204 BATut = env->IBAT[0]; 1205 break; 1206 default: 1207 BATlt = env->DBAT[1]; 1208 BATut = env->DBAT[0]; 1209 break; 1210 } 1211 1212 for (i = 0; i < env->nb_BATs; i++) { 1213 BATu = &BATut[i]; 1214 BATl = &BATlt[i]; 1215 BEPIu = *BATu & 0xF0000000; 1216 BEPIl = *BATu & 0x0FFE0000; 1217 bl = (*BATu & 0x00001FFC) << 15; 1218 qemu_printf("%s BAT%d BATu " TARGET_FMT_lx 1219 " BATl " TARGET_FMT_lx "\n\t" TARGET_FMT_lx " " 1220 TARGET_FMT_lx " " TARGET_FMT_lx "\n", 1221 type == ACCESS_CODE ? "code" : "data", i, 1222 *BATu, *BATl, BEPIu, BEPIl, bl); 1223 } 1224 } 1225 1226 static void mmu6xx_dump_mmu(CPUPPCState *env) 1227 { 1228 PowerPCCPU *cpu = env_archcpu(env); 1229 ppc6xx_tlb_t *tlb; 1230 target_ulong sr; 1231 int type, way, entry, i; 1232 1233 qemu_printf("HTAB base = 0x%"HWADDR_PRIx"\n", ppc_hash32_hpt_base(cpu)); 1234 qemu_printf("HTAB mask = 0x%"HWADDR_PRIx"\n", ppc_hash32_hpt_mask(cpu)); 1235 1236 qemu_printf("\nSegment registers:\n"); 1237 for (i = 0; i < 32; i++) { 1238 sr = env->sr[i]; 1239 if (sr & 0x80000000) { 1240 qemu_printf("%02d T=%d Ks=%d Kp=%d BUID=0x%03x " 1241 "CNTLR_SPEC=0x%05x\n", i, 1242 sr & 0x80000000 ? 1 : 0, sr & 0x40000000 ? 1 : 0, 1243 sr & 0x20000000 ? 1 : 0, (uint32_t)((sr >> 20) & 0x1FF), 1244 (uint32_t)(sr & 0xFFFFF)); 1245 } else { 1246 qemu_printf("%02d T=%d Ks=%d Kp=%d N=%d VSID=0x%06x\n", i, 1247 sr & 0x80000000 ? 1 : 0, sr & 0x40000000 ? 1 : 0, 1248 sr & 0x20000000 ? 1 : 0, sr & 0x10000000 ? 1 : 0, 1249 (uint32_t)(sr & 0x00FFFFFF)); 1250 } 1251 } 1252 1253 qemu_printf("\nBATs:\n"); 1254 mmu6xx_dump_BATs(env, ACCESS_INT); 1255 mmu6xx_dump_BATs(env, ACCESS_CODE); 1256 1257 if (env->id_tlbs != 1) { 1258 qemu_printf("ERROR: 6xx MMU should have separated TLB" 1259 " for code and data\n"); 1260 } 1261 1262 qemu_printf("\nTLBs [EPN EPN + SIZE]\n"); 1263 1264 for (type = 0; type < 2; type++) { 1265 for (way = 0; way < env->nb_ways; way++) { 1266 for (entry = env->nb_tlb * type + env->tlb_per_way * way; 1267 entry < (env->nb_tlb * type + env->tlb_per_way * (way + 1)); 1268 entry++) { 1269 1270 tlb = &env->tlb.tlb6[entry]; 1271 qemu_printf("%s TLB %02d/%02d way:%d %s [" 1272 TARGET_FMT_lx " " TARGET_FMT_lx "]\n", 1273 type ? 
"code" : "data", entry % env->nb_tlb, 1274 env->nb_tlb, way, 1275 pte_is_valid(tlb->pte0) ? "valid" : "inval", 1276 tlb->EPN, tlb->EPN + TARGET_PAGE_SIZE); 1277 } 1278 } 1279 } 1280 } 1281 1282 void dump_mmu(CPUPPCState *env) 1283 { 1284 switch (env->mmu_model) { 1285 case POWERPC_MMU_BOOKE: 1286 mmubooke_dump_mmu(env); 1287 break; 1288 case POWERPC_MMU_BOOKE206: 1289 mmubooke206_dump_mmu(env); 1290 break; 1291 case POWERPC_MMU_SOFT_6xx: 1292 case POWERPC_MMU_SOFT_74xx: 1293 mmu6xx_dump_mmu(env); 1294 break; 1295 #if defined(TARGET_PPC64) 1296 case POWERPC_MMU_64B: 1297 case POWERPC_MMU_2_03: 1298 case POWERPC_MMU_2_06: 1299 case POWERPC_MMU_2_07: 1300 dump_slb(env_archcpu(env)); 1301 break; 1302 case POWERPC_MMU_3_00: 1303 if (ppc64_v3_radix(env_archcpu(env))) { 1304 qemu_log_mask(LOG_UNIMP, "%s: the PPC64 MMU is unsupported\n", 1305 __func__); 1306 } else { 1307 dump_slb(env_archcpu(env)); 1308 } 1309 break; 1310 #endif 1311 default: 1312 qemu_log_mask(LOG_UNIMP, "%s: unimplemented\n", __func__); 1313 } 1314 } 1315 1316 static int check_physical(CPUPPCState *env, mmu_ctx_t *ctx, target_ulong eaddr, 1317 MMUAccessType access_type) 1318 { 1319 int in_plb, ret; 1320 1321 ctx->raddr = eaddr; 1322 ctx->prot = PAGE_READ | PAGE_EXEC; 1323 ret = 0; 1324 switch (env->mmu_model) { 1325 case POWERPC_MMU_SOFT_6xx: 1326 case POWERPC_MMU_SOFT_74xx: 1327 case POWERPC_MMU_SOFT_4xx: 1328 case POWERPC_MMU_REAL: 1329 case POWERPC_MMU_BOOKE: 1330 ctx->prot |= PAGE_WRITE; 1331 break; 1332 1333 case POWERPC_MMU_SOFT_4xx_Z: 1334 if (unlikely(msr_pe != 0)) { 1335 /* 1336 * 403 family add some particular protections, using 1337 * PBL/PBU registers for accesses with no translation. 1338 */ 1339 in_plb = 1340 /* Check PLB validity */ 1341 (env->pb[0] < env->pb[1] && 1342 /* and address in plb area */ 1343 eaddr >= env->pb[0] && eaddr < env->pb[1]) || 1344 (env->pb[2] < env->pb[3] && 1345 eaddr >= env->pb[2] && eaddr < env->pb[3]) ? 
1 : 0; 1346 if (in_plb ^ msr_px) { 1347 /* Access in protected area */ 1348 if (access_type == MMU_DATA_STORE) { 1349 /* Access is not allowed */ 1350 ret = -2; 1351 } 1352 } else { 1353 /* Read-write access is allowed */ 1354 ctx->prot |= PAGE_WRITE; 1355 } 1356 } 1357 break; 1358 1359 default: 1360 /* Caller's checks mean we should never get here for other models */ 1361 abort(); 1362 return -1; 1363 } 1364 1365 return ret; 1366 } 1367 1368 static int get_physical_address_wtlb(CPUPPCState *env, mmu_ctx_t *ctx, 1369 target_ulong eaddr, 1370 MMUAccessType access_type, int type, 1371 int mmu_idx) 1372 { 1373 int ret = -1; 1374 bool real_mode = (type == ACCESS_CODE && msr_ir == 0) 1375 || (type != ACCESS_CODE && msr_dr == 0); 1376 1377 switch (env->mmu_model) { 1378 case POWERPC_MMU_SOFT_6xx: 1379 case POWERPC_MMU_SOFT_74xx: 1380 if (real_mode) { 1381 ret = check_physical(env, ctx, eaddr, access_type); 1382 } else { 1383 /* Try to find a BAT */ 1384 if (env->nb_BATs != 0) { 1385 ret = get_bat_6xx_tlb(env, ctx, eaddr, access_type); 1386 } 1387 if (ret < 0) { 1388 /* We didn't match any BAT entry or don't have BATs */ 1389 ret = get_segment_6xx_tlb(env, ctx, eaddr, access_type, type); 1390 } 1391 } 1392 break; 1393 1394 case POWERPC_MMU_SOFT_4xx: 1395 case POWERPC_MMU_SOFT_4xx_Z: 1396 if (real_mode) { 1397 ret = check_physical(env, ctx, eaddr, access_type); 1398 } else { 1399 ret = mmu40x_get_physical_address(env, ctx, eaddr, access_type); 1400 } 1401 break; 1402 case POWERPC_MMU_BOOKE: 1403 ret = mmubooke_get_physical_address(env, ctx, eaddr, access_type); 1404 break; 1405 case POWERPC_MMU_BOOKE206: 1406 ret = mmubooke206_get_physical_address(env, ctx, eaddr, access_type, 1407 mmu_idx); 1408 break; 1409 case POWERPC_MMU_MPC8xx: 1410 /* XXX: TODO */ 1411 cpu_abort(env_cpu(env), "MPC8xx MMU model is not implemented\n"); 1412 break; 1413 case POWERPC_MMU_REAL: 1414 if (real_mode) { 1415 ret = check_physical(env, ctx, eaddr, access_type); 1416 } else { 1417 cpu_abort(env_cpu(env), 1418 "PowerPC in real mode do not do any translation\n"); 1419 } 1420 return -1; 1421 default: 1422 cpu_abort(env_cpu(env), "Unknown or invalid MMU model\n"); 1423 return -1; 1424 } 1425 1426 return ret; 1427 } 1428 1429 #ifdef CONFIG_TCG 1430 static int get_physical_address(CPUPPCState *env, mmu_ctx_t *ctx, 1431 target_ulong eaddr, MMUAccessType access_type, 1432 int type) 1433 { 1434 return get_physical_address_wtlb(env, ctx, eaddr, access_type, type, 0); 1435 } 1436 #endif 1437 1438 hwaddr ppc_cpu_get_phys_page_debug(CPUState *cs, vaddr addr) 1439 { 1440 PowerPCCPU *cpu = POWERPC_CPU(cs); 1441 CPUPPCState *env = &cpu->env; 1442 mmu_ctx_t ctx; 1443 1444 switch (env->mmu_model) { 1445 #if defined(TARGET_PPC64) 1446 case POWERPC_MMU_64B: 1447 case POWERPC_MMU_2_03: 1448 case POWERPC_MMU_2_06: 1449 case POWERPC_MMU_2_07: 1450 return ppc_hash64_get_phys_page_debug(cpu, addr); 1451 case POWERPC_MMU_3_00: 1452 return ppc64_v3_get_phys_page_debug(cpu, addr); 1453 #endif 1454 1455 case POWERPC_MMU_32B: 1456 case POWERPC_MMU_601: 1457 return ppc_hash32_get_phys_page_debug(cpu, addr); 1458 1459 default: 1460 ; 1461 } 1462 1463 if (unlikely(get_physical_address(env, &ctx, addr, MMU_DATA_LOAD, 1464 ACCESS_INT) != 0)) { 1465 1466 /* 1467 * Some MMUs have separate TLBs for code and data. If we only 1468 * try an ACCESS_INT, we may not be able to read instructions 1469 * mapped by code TLBs, so we also try a ACCESS_CODE. 
1470 */ 1471 if (unlikely(get_physical_address(env, &ctx, addr, MMU_INST_FETCH, 1472 ACCESS_CODE) != 0)) { 1473 return -1; 1474 } 1475 } 1476 1477 return ctx.raddr & TARGET_PAGE_MASK; 1478 } 1479 1480 static void booke206_update_mas_tlb_miss(CPUPPCState *env, target_ulong address, 1481 MMUAccessType access_type, int mmu_idx) 1482 { 1483 uint32_t epid; 1484 bool as, pr; 1485 uint32_t missed_tid = 0; 1486 bool use_epid = mmubooke206_get_as(env, mmu_idx, &epid, &as, &pr); 1487 1488 if (access_type == MMU_INST_FETCH) { 1489 as = msr_ir; 1490 } 1491 env->spr[SPR_BOOKE_MAS0] = env->spr[SPR_BOOKE_MAS4] & MAS4_TLBSELD_MASK; 1492 env->spr[SPR_BOOKE_MAS1] = env->spr[SPR_BOOKE_MAS4] & MAS4_TSIZED_MASK; 1493 env->spr[SPR_BOOKE_MAS2] = env->spr[SPR_BOOKE_MAS4] & MAS4_WIMGED_MASK; 1494 env->spr[SPR_BOOKE_MAS3] = 0; 1495 env->spr[SPR_BOOKE_MAS6] = 0; 1496 env->spr[SPR_BOOKE_MAS7] = 0; 1497 1498 /* AS */ 1499 if (as) { 1500 env->spr[SPR_BOOKE_MAS1] |= MAS1_TS; 1501 env->spr[SPR_BOOKE_MAS6] |= MAS6_SAS; 1502 } 1503 1504 env->spr[SPR_BOOKE_MAS1] |= MAS1_VALID; 1505 env->spr[SPR_BOOKE_MAS2] |= address & MAS2_EPN_MASK; 1506 1507 if (!use_epid) { 1508 switch (env->spr[SPR_BOOKE_MAS4] & MAS4_TIDSELD_PIDZ) { 1509 case MAS4_TIDSELD_PID0: 1510 missed_tid = env->spr[SPR_BOOKE_PID]; 1511 break; 1512 case MAS4_TIDSELD_PID1: 1513 missed_tid = env->spr[SPR_BOOKE_PID1]; 1514 break; 1515 case MAS4_TIDSELD_PID2: 1516 missed_tid = env->spr[SPR_BOOKE_PID2]; 1517 break; 1518 } 1519 env->spr[SPR_BOOKE_MAS6] |= env->spr[SPR_BOOKE_PID] << 16; 1520 } else { 1521 missed_tid = epid; 1522 env->spr[SPR_BOOKE_MAS6] |= missed_tid << 16; 1523 } 1524 env->spr[SPR_BOOKE_MAS1] |= (missed_tid << MAS1_TID_SHIFT); 1525 1526 1527 /* next victim logic */ 1528 env->spr[SPR_BOOKE_MAS0] |= env->last_way << MAS0_ESEL_SHIFT; 1529 env->last_way++; 1530 env->last_way &= booke206_tlb_ways(env, 0) - 1; 1531 env->spr[SPR_BOOKE_MAS0] |= env->last_way << MAS0_NV_SHIFT; 1532 } 1533 1534 /* Perform address translation */ 1535 static int cpu_ppc_handle_mmu_fault(CPUPPCState *env, target_ulong address, 1536 MMUAccessType access_type, int mmu_idx) 1537 { 1538 CPUState *cs = env_cpu(env); 1539 PowerPCCPU *cpu = POWERPC_CPU(cs); 1540 mmu_ctx_t ctx; 1541 int type; 1542 int ret = 0; 1543 1544 if (access_type == MMU_INST_FETCH) { 1545 /* code access */ 1546 type = ACCESS_CODE; 1547 } else { 1548 /* data access */ 1549 type = env->access_type; 1550 } 1551 ret = get_physical_address_wtlb(env, &ctx, address, access_type, 1552 type, mmu_idx); 1553 if (ret == 0) { 1554 tlb_set_page(cs, address & TARGET_PAGE_MASK, 1555 ctx.raddr & TARGET_PAGE_MASK, ctx.prot, 1556 mmu_idx, TARGET_PAGE_SIZE); 1557 ret = 0; 1558 } else if (ret < 0) { 1559 LOG_MMU_STATE(cs); 1560 if (type == ACCESS_CODE) { 1561 switch (ret) { 1562 case -1: 1563 /* No matches in page tables or TLB */ 1564 switch (env->mmu_model) { 1565 case POWERPC_MMU_SOFT_6xx: 1566 cs->exception_index = POWERPC_EXCP_IFTLB; 1567 env->error_code = 1 << 18; 1568 env->spr[SPR_IMISS] = address; 1569 env->spr[SPR_ICMP] = 0x80000000 | ctx.ptem; 1570 goto tlb_miss; 1571 case POWERPC_MMU_SOFT_74xx: 1572 cs->exception_index = POWERPC_EXCP_IFTLB; 1573 goto tlb_miss_74xx; 1574 case POWERPC_MMU_SOFT_4xx: 1575 case POWERPC_MMU_SOFT_4xx_Z: 1576 cs->exception_index = POWERPC_EXCP_ITLB; 1577 env->error_code = 0; 1578 env->spr[SPR_40x_DEAR] = address; 1579 env->spr[SPR_40x_ESR] = 0x00000000; 1580 break; 1581 case POWERPC_MMU_BOOKE206: 1582 booke206_update_mas_tlb_miss(env, address, 2, mmu_idx); 1583 /* fall through */ 1584 case 
POWERPC_MMU_BOOKE: 1585 cs->exception_index = POWERPC_EXCP_ITLB; 1586 env->error_code = 0; 1587 env->spr[SPR_BOOKE_DEAR] = address; 1588 env->spr[SPR_BOOKE_ESR] = mmubooke206_esr(mmu_idx, MMU_DATA_LOAD); 1589 return -1; 1590 case POWERPC_MMU_MPC8xx: 1591 /* XXX: TODO */ 1592 cpu_abort(cs, "MPC8xx MMU model is not implemented\n"); 1593 break; 1594 case POWERPC_MMU_REAL: 1595 cpu_abort(cs, "PowerPC in real mode should never raise " 1596 "any MMU exceptions\n"); 1597 return -1; 1598 default: 1599 cpu_abort(cs, "Unknown or invalid MMU model\n"); 1600 return -1; 1601 } 1602 break; 1603 case -2: 1604 /* Access rights violation */ 1605 cs->exception_index = POWERPC_EXCP_ISI; 1606 env->error_code = 0x08000000; 1607 break; 1608 case -3: 1609 /* No execute protection violation */ 1610 if ((env->mmu_model == POWERPC_MMU_BOOKE) || 1611 (env->mmu_model == POWERPC_MMU_BOOKE206)) { 1612 env->spr[SPR_BOOKE_ESR] = 0x00000000; 1613 } 1614 cs->exception_index = POWERPC_EXCP_ISI; 1615 env->error_code = 0x10000000; 1616 break; 1617 case -4: 1618 /* Direct store exception */ 1619 /* No code fetch is allowed in direct-store areas */ 1620 cs->exception_index = POWERPC_EXCP_ISI; 1621 env->error_code = 0x10000000; 1622 break; 1623 } 1624 } else { 1625 switch (ret) { 1626 case -1: 1627 /* No matches in page tables or TLB */ 1628 switch (env->mmu_model) { 1629 case POWERPC_MMU_SOFT_6xx: 1630 if (access_type == MMU_DATA_STORE) { 1631 cs->exception_index = POWERPC_EXCP_DSTLB; 1632 env->error_code = 1 << 16; 1633 } else { 1634 cs->exception_index = POWERPC_EXCP_DLTLB; 1635 env->error_code = 0; 1636 } 1637 env->spr[SPR_DMISS] = address; 1638 env->spr[SPR_DCMP] = 0x80000000 | ctx.ptem; 1639 tlb_miss: 1640 env->error_code |= ctx.key << 19; 1641 env->spr[SPR_HASH1] = ppc_hash32_hpt_base(cpu) + 1642 get_pteg_offset32(cpu, ctx.hash[0]); 1643 env->spr[SPR_HASH2] = ppc_hash32_hpt_base(cpu) + 1644 get_pteg_offset32(cpu, ctx.hash[1]); 1645 break; 1646 case POWERPC_MMU_SOFT_74xx: 1647 if (access_type == MMU_DATA_STORE) { 1648 cs->exception_index = POWERPC_EXCP_DSTLB; 1649 } else { 1650 cs->exception_index = POWERPC_EXCP_DLTLB; 1651 } 1652 tlb_miss_74xx: 1653 /* Implement LRU algorithm */ 1654 env->error_code = ctx.key << 19; 1655 env->spr[SPR_TLBMISS] = (address & ~((target_ulong)0x3)) | 1656 ((env->last_way + 1) & (env->nb_ways - 1)); 1657 env->spr[SPR_PTEHI] = 0x80000000 | ctx.ptem; 1658 break; 1659 case POWERPC_MMU_SOFT_4xx: 1660 case POWERPC_MMU_SOFT_4xx_Z: 1661 cs->exception_index = POWERPC_EXCP_DTLB; 1662 env->error_code = 0; 1663 env->spr[SPR_40x_DEAR] = address; 1664 if (access_type == MMU_DATA_STORE) { 1665 env->spr[SPR_40x_ESR] = 0x00800000; 1666 } else { 1667 env->spr[SPR_40x_ESR] = 0x00000000; 1668 } 1669 break; 1670 case POWERPC_MMU_MPC8xx: 1671 /* XXX: TODO */ 1672 cpu_abort(cs, "MPC8xx MMU model is not implemented\n"); 1673 break; 1674 case POWERPC_MMU_BOOKE206: 1675 booke206_update_mas_tlb_miss(env, address, access_type, mmu_idx); 1676 /* fall through */ 1677 case POWERPC_MMU_BOOKE: 1678 cs->exception_index = POWERPC_EXCP_DTLB; 1679 env->error_code = 0; 1680 env->spr[SPR_BOOKE_DEAR] = address; 1681 env->spr[SPR_BOOKE_ESR] = mmubooke206_esr(mmu_idx, access_type); 1682 return -1; 1683 case POWERPC_MMU_REAL: 1684 cpu_abort(cs, "PowerPC in real mode should never raise " 1685 "any MMU exceptions\n"); 1686 return -1; 1687 default: 1688 cpu_abort(cs, "Unknown or invalid MMU model\n"); 1689 return -1; 1690 } 1691 break; 1692 case -2: 1693 /* Access rights violation */ 1694 cs->exception_index = POWERPC_EXCP_DSI; 1695 
env->error_code = 0; 1696 if (env->mmu_model == POWERPC_MMU_SOFT_4xx 1697 || env->mmu_model == POWERPC_MMU_SOFT_4xx_Z) { 1698 env->spr[SPR_40x_DEAR] = address; 1699 if (access_type == MMU_DATA_STORE) { 1700 env->spr[SPR_40x_ESR] |= 0x00800000; 1701 } 1702 } else if ((env->mmu_model == POWERPC_MMU_BOOKE) || 1703 (env->mmu_model == POWERPC_MMU_BOOKE206)) { 1704 env->spr[SPR_BOOKE_DEAR] = address; 1705 env->spr[SPR_BOOKE_ESR] = mmubooke206_esr(mmu_idx, access_type); 1706 } else { 1707 env->spr[SPR_DAR] = address; 1708 if (access_type == MMU_DATA_STORE) { 1709 env->spr[SPR_DSISR] = 0x0A000000; 1710 } else { 1711 env->spr[SPR_DSISR] = 0x08000000; 1712 } 1713 } 1714 break; 1715 case -4: 1716 /* Direct store exception */ 1717 switch (type) { 1718 case ACCESS_FLOAT: 1719 /* Floating point load/store */ 1720 cs->exception_index = POWERPC_EXCP_ALIGN; 1721 env->error_code = POWERPC_EXCP_ALIGN_FP; 1722 env->spr[SPR_DAR] = address; 1723 break; 1724 case ACCESS_RES: 1725 /* lwarx, ldarx or stwcx. */ 1726 cs->exception_index = POWERPC_EXCP_DSI; 1727 env->error_code = 0; 1728 env->spr[SPR_DAR] = address; 1729 if (access_type == MMU_DATA_STORE) { 1730 env->spr[SPR_DSISR] = 0x06000000; 1731 } else { 1732 env->spr[SPR_DSISR] = 0x04000000; 1733 } 1734 break; 1735 case ACCESS_EXT: 1736 /* eciwx or ecowx */ 1737 cs->exception_index = POWERPC_EXCP_DSI; 1738 env->error_code = 0; 1739 env->spr[SPR_DAR] = address; 1740 if (access_type == MMU_DATA_STORE) { 1741 env->spr[SPR_DSISR] = 0x06100000; 1742 } else { 1743 env->spr[SPR_DSISR] = 0x04100000; 1744 } 1745 break; 1746 default: 1747 printf("DSI: invalid exception (%d)\n", ret); 1748 cs->exception_index = POWERPC_EXCP_PROGRAM; 1749 env->error_code = 1750 POWERPC_EXCP_INVAL | POWERPC_EXCP_INVAL_INVAL; 1751 env->spr[SPR_DAR] = address; 1752 break; 1753 } 1754 break; 1755 } 1756 } 1757 ret = 1; 1758 } 1759 1760 return ret; 1761 } 1762 1763 #ifdef CONFIG_TCG 1764 /*****************************************************************************/ 1765 /* BATs management */ 1766 #if !defined(FLUSH_ALL_TLBS) 1767 static inline void do_invalidate_BAT(CPUPPCState *env, target_ulong BATu, 1768 target_ulong mask) 1769 { 1770 CPUState *cs = env_cpu(env); 1771 target_ulong base, end, page; 1772 1773 base = BATu & ~0x0001FFFF; 1774 end = base + mask + 0x00020000; 1775 if (((end - base) >> TARGET_PAGE_BITS) > 1024) { 1776 /* Flushing 1024 4K pages is slower than a complete flush */ 1777 LOG_BATS("Flush all BATs\n"); 1778 tlb_flush(cs); 1779 LOG_BATS("Flush done\n"); 1780 return; 1781 } 1782 LOG_BATS("Flush BAT from " TARGET_FMT_lx " to " TARGET_FMT_lx " (" 1783 TARGET_FMT_lx ")\n", base, end, mask); 1784 for (page = base; page != end; page += TARGET_PAGE_SIZE) { 1785 tlb_flush_page(cs, page); 1786 } 1787 LOG_BATS("Flush done\n"); 1788 } 1789 #endif 1790 1791 static inline void dump_store_bat(CPUPPCState *env, char ID, int ul, int nr, 1792 target_ulong value) 1793 { 1794 LOG_BATS("Set %cBAT%d%c to " TARGET_FMT_lx " (" TARGET_FMT_lx ")\n", ID, 1795 nr, ul == 0 ? 
'u' : 'l', value, env->nip); 1796 } 1797 1798 void helper_store_ibatu(CPUPPCState *env, uint32_t nr, target_ulong value) 1799 { 1800 target_ulong mask; 1801 #if defined(FLUSH_ALL_TLBS) 1802 PowerPCCPU *cpu = env_archcpu(env); 1803 #endif 1804 1805 dump_store_bat(env, 'I', 0, nr, value); 1806 if (env->IBAT[0][nr] != value) { 1807 mask = (value << 15) & 0x0FFE0000UL; 1808 #if !defined(FLUSH_ALL_TLBS) 1809 do_invalidate_BAT(env, env->IBAT[0][nr], mask); 1810 #endif 1811 /* 1812 * When storing valid upper BAT, mask BEPI and BRPN and 1813 * invalidate all TLBs covered by this BAT 1814 */ 1815 mask = (value << 15) & 0x0FFE0000UL; 1816 env->IBAT[0][nr] = (value & 0x00001FFFUL) | 1817 (value & ~0x0001FFFFUL & ~mask); 1818 env->IBAT[1][nr] = (env->IBAT[1][nr] & 0x0000007B) | 1819 (env->IBAT[1][nr] & ~0x0001FFFF & ~mask); 1820 #if !defined(FLUSH_ALL_TLBS) 1821 do_invalidate_BAT(env, env->IBAT[0][nr], mask); 1822 #else 1823 tlb_flush(env_cpu(env)); 1824 #endif 1825 } 1826 } 1827 1828 void helper_store_ibatl(CPUPPCState *env, uint32_t nr, target_ulong value) 1829 { 1830 dump_store_bat(env, 'I', 1, nr, value); 1831 env->IBAT[1][nr] = value; 1832 } 1833 1834 void helper_store_dbatu(CPUPPCState *env, uint32_t nr, target_ulong value) 1835 { 1836 target_ulong mask; 1837 #if defined(FLUSH_ALL_TLBS) 1838 PowerPCCPU *cpu = env_archcpu(env); 1839 #endif 1840 1841 dump_store_bat(env, 'D', 0, nr, value); 1842 if (env->DBAT[0][nr] != value) { 1843 /* 1844 * When storing valid upper BAT, mask BEPI and BRPN and 1845 * invalidate all TLBs covered by this BAT 1846 */ 1847 mask = (value << 15) & 0x0FFE0000UL; 1848 #if !defined(FLUSH_ALL_TLBS) 1849 do_invalidate_BAT(env, env->DBAT[0][nr], mask); 1850 #endif 1851 mask = (value << 15) & 0x0FFE0000UL; 1852 env->DBAT[0][nr] = (value & 0x00001FFFUL) | 1853 (value & ~0x0001FFFFUL & ~mask); 1854 env->DBAT[1][nr] = (env->DBAT[1][nr] & 0x0000007B) | 1855 (env->DBAT[1][nr] & ~0x0001FFFF & ~mask); 1856 #if !defined(FLUSH_ALL_TLBS) 1857 do_invalidate_BAT(env, env->DBAT[0][nr], mask); 1858 #else 1859 tlb_flush(env_cpu(env)); 1860 #endif 1861 } 1862 } 1863 1864 void helper_store_dbatl(CPUPPCState *env, uint32_t nr, target_ulong value) 1865 { 1866 dump_store_bat(env, 'D', 1, nr, value); 1867 env->DBAT[1][nr] = value; 1868 } 1869 1870 void helper_store_601_batu(CPUPPCState *env, uint32_t nr, target_ulong value) 1871 { 1872 target_ulong mask; 1873 #if defined(FLUSH_ALL_TLBS) 1874 PowerPCCPU *cpu = env_archcpu(env); 1875 int do_inval; 1876 #endif 1877 1878 dump_store_bat(env, 'I', 0, nr, value); 1879 if (env->IBAT[0][nr] != value) { 1880 #if defined(FLUSH_ALL_TLBS) 1881 do_inval = 0; 1882 #endif 1883 mask = (env->IBAT[1][nr] << 17) & 0x0FFE0000UL; 1884 if (env->IBAT[1][nr] & 0x40) { 1885 /* Invalidate BAT only if it is valid */ 1886 #if !defined(FLUSH_ALL_TLBS) 1887 do_invalidate_BAT(env, env->IBAT[0][nr], mask); 1888 #else 1889 do_inval = 1; 1890 #endif 1891 } 1892 /* 1893 * When storing valid upper BAT, mask BEPI and BRPN and 1894 * invalidate all TLBs covered by this BAT 1895 */ 1896 env->IBAT[0][nr] = (value & 0x00001FFFUL) | 1897 (value & ~0x0001FFFFUL & ~mask); 1898 env->DBAT[0][nr] = env->IBAT[0][nr]; 1899 if (env->IBAT[1][nr] & 0x40) { 1900 #if !defined(FLUSH_ALL_TLBS) 1901 do_invalidate_BAT(env, env->IBAT[0][nr], mask); 1902 #else 1903 do_inval = 1; 1904 #endif 1905 } 1906 #if defined(FLUSH_ALL_TLBS) 1907 if (do_inval) { 1908 tlb_flush(env_cpu(env)); 1909 } 1910 #endif 1911 } 1912 } 1913 1914 void helper_store_601_batl(CPUPPCState *env, uint32_t nr, target_ulong value) 1915 { 
1916 #if !defined(FLUSH_ALL_TLBS) 1917 target_ulong mask; 1918 #else 1919 PowerPCCPU *cpu = env_archcpu(env); 1920 int do_inval; 1921 #endif 1922 1923 dump_store_bat(env, 'I', 1, nr, value); 1924 if (env->IBAT[1][nr] != value) { 1925 #if defined(FLUSH_ALL_TLBS) 1926 do_inval = 0; 1927 #endif 1928 if (env->IBAT[1][nr] & 0x40) { 1929 #if !defined(FLUSH_ALL_TLBS) 1930 mask = (env->IBAT[1][nr] << 17) & 0x0FFE0000UL; 1931 do_invalidate_BAT(env, env->IBAT[0][nr], mask); 1932 #else 1933 do_inval = 1; 1934 #endif 1935 } 1936 if (value & 0x40) { 1937 #if !defined(FLUSH_ALL_TLBS) 1938 mask = (value << 17) & 0x0FFE0000UL; 1939 do_invalidate_BAT(env, env->IBAT[0][nr], mask); 1940 #else 1941 do_inval = 1; 1942 #endif 1943 } 1944 env->IBAT[1][nr] = value; 1945 env->DBAT[1][nr] = value; 1946 #if defined(FLUSH_ALL_TLBS) 1947 if (do_inval) { 1948 tlb_flush(env_cpu(env)); 1949 } 1950 #endif 1951 } 1952 } 1953 #endif 1954 1955 /*****************************************************************************/ 1956 /* TLB management */ 1957 void ppc_tlb_invalidate_all(CPUPPCState *env) 1958 { 1959 #if defined(TARGET_PPC64) 1960 if (mmu_is_64bit(env->mmu_model)) { 1961 env->tlb_need_flush = 0; 1962 tlb_flush(env_cpu(env)); 1963 } else 1964 #endif /* defined(TARGET_PPC64) */ 1965 switch (env->mmu_model) { 1966 case POWERPC_MMU_SOFT_6xx: 1967 case POWERPC_MMU_SOFT_74xx: 1968 ppc6xx_tlb_invalidate_all(env); 1969 break; 1970 case POWERPC_MMU_SOFT_4xx: 1971 case POWERPC_MMU_SOFT_4xx_Z: 1972 ppc4xx_tlb_invalidate_all(env); 1973 break; 1974 case POWERPC_MMU_REAL: 1975 cpu_abort(env_cpu(env), "No TLB for PowerPC 4xx in real mode\n"); 1976 break; 1977 case POWERPC_MMU_MPC8xx: 1978 /* XXX: TODO */ 1979 cpu_abort(env_cpu(env), "MPC8xx MMU model is not implemented\n"); 1980 break; 1981 case POWERPC_MMU_BOOKE: 1982 tlb_flush(env_cpu(env)); 1983 break; 1984 case POWERPC_MMU_BOOKE206: 1985 booke206_flush_tlb(env, -1, 0); 1986 break; 1987 case POWERPC_MMU_32B: 1988 case POWERPC_MMU_601: 1989 env->tlb_need_flush = 0; 1990 tlb_flush(env_cpu(env)); 1991 break; 1992 default: 1993 /* XXX: TODO */ 1994 cpu_abort(env_cpu(env), "Unknown MMU model %x\n", env->mmu_model); 1995 break; 1996 } 1997 } 1998 1999 #ifdef CONFIG_TCG 2000 void ppc_tlb_invalidate_one(CPUPPCState *env, target_ulong addr) 2001 { 2002 #if !defined(FLUSH_ALL_TLBS) 2003 addr &= TARGET_PAGE_MASK; 2004 #if defined(TARGET_PPC64) 2005 if (mmu_is_64bit(env->mmu_model)) { 2006 /* tlbie invalidate TLBs for all segments */ 2007 /* 2008 * XXX: given the fact that there are too many segments to invalidate, 2009 * and we still don't have a tlb_flush_mask(env, n, mask) in QEMU, 2010 * we just invalidate all TLBs 2011 */ 2012 env->tlb_need_flush |= TLB_NEED_LOCAL_FLUSH; 2013 } else 2014 #endif /* defined(TARGET_PPC64) */ 2015 switch (env->mmu_model) { 2016 case POWERPC_MMU_SOFT_6xx: 2017 case POWERPC_MMU_SOFT_74xx: 2018 ppc6xx_tlb_invalidate_virt(env, addr, 0); 2019 if (env->id_tlbs == 1) { 2020 ppc6xx_tlb_invalidate_virt(env, addr, 1); 2021 } 2022 break; 2023 case POWERPC_MMU_32B: 2024 case POWERPC_MMU_601: 2025 /* 2026 * Actual CPUs invalidate entire congruence classes based on 2027 * the geometry of their TLBs and some OSes take that into 2028 * account, we just mark the TLB to be flushed later (context 2029 * synchronizing event or sync instruction on 32-bit). 
#ifdef CONFIG_TCG
void ppc_tlb_invalidate_one(CPUPPCState *env, target_ulong addr)
{
#if !defined(FLUSH_ALL_TLBS)
    addr &= TARGET_PAGE_MASK;
#if defined(TARGET_PPC64)
    if (mmu_is_64bit(env->mmu_model)) {
        /* tlbie invalidates TLBs for all segments */
        /*
         * XXX: there are too many segments to invalidate one by one,
         * and we still don't have a tlb_flush_mask(env, n, mask) in QEMU,
         * so we just invalidate all TLBs
         */
        env->tlb_need_flush |= TLB_NEED_LOCAL_FLUSH;
    } else
#endif /* defined(TARGET_PPC64) */
    switch (env->mmu_model) {
    case POWERPC_MMU_SOFT_6xx:
    case POWERPC_MMU_SOFT_74xx:
        ppc6xx_tlb_invalidate_virt(env, addr, 0);
        if (env->id_tlbs == 1) {
            ppc6xx_tlb_invalidate_virt(env, addr, 1);
        }
        break;
    case POWERPC_MMU_32B:
    case POWERPC_MMU_601:
        /*
         * Actual CPUs invalidate entire congruence classes based on
         * the geometry of their TLBs, and some OSes take that into
         * account; we just mark the TLB to be flushed later (context
         * synchronizing event or sync instruction on 32-bit).
         */
        env->tlb_need_flush |= TLB_NEED_LOCAL_FLUSH;
        break;
    default:
        /* Should never reach here with other MMU models */
        assert(0);
    }
#else
    ppc_tlb_invalidate_all(env);
#endif
}

/*****************************************************************************/
/* Special registers manipulation */

/* Segment registers load and store */
target_ulong helper_load_sr(CPUPPCState *env, target_ulong sr_num)
{
#if defined(TARGET_PPC64)
    if (mmu_is_64bit(env->mmu_model)) {
        /* XXX */
        return 0;
    }
#endif
    return env->sr[sr_num];
}

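/*
 * On 64-bit hash MMUs, storing a segment register is emulated by
 * synthesizing the equivalent SLB entry (see the TARGET_PPC64 branch
 * below): the SR number becomes the ESID (with the valid bit set), and
 * the SR's VSID field and upper flag bits are repacked into the SLB
 * VSID doubleword before being handed to ppc_store_slb().
 */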
void helper_store_sr(CPUPPCState *env, target_ulong srnum, target_ulong value)
{
    qemu_log_mask(CPU_LOG_MMU,
                  "%s: reg=%d " TARGET_FMT_lx " " TARGET_FMT_lx "\n", __func__,
                  (int)srnum, value, env->sr[srnum]);
#if defined(TARGET_PPC64)
    if (mmu_is_64bit(env->mmu_model)) {
        PowerPCCPU *cpu = env_archcpu(env);
        uint64_t esid, vsid;

        /* ESID = srnum */
        esid = ((uint64_t)(srnum & 0xf) << 28) | SLB_ESID_V;

        /* VSID = VSID */
        vsid = (value & 0xfffffff) << 12;
        /* flags = flags */
        vsid |= ((value >> 27) & 0xf) << 8;

        ppc_store_slb(cpu, srnum, esid, vsid);
    } else
#endif
    if (env->sr[srnum] != value) {
        env->sr[srnum] = value;
        /*
         * Invalidating 256MB of virtual memory in 4kB pages takes much
         * longer than flushing the whole TLB.
         */
#if !defined(FLUSH_ALL_TLBS) && 0
        {
            target_ulong page, end;
            /* Invalidate 256 MB of virtual memory */
            page = (16 << 20) * srnum;
            end = page + (16 << 20);
            for (; page != end; page += TARGET_PAGE_SIZE) {
                tlb_flush_page(env_cpu(env), page);
            }
        }
#else
        env->tlb_need_flush |= TLB_NEED_LOCAL_FLUSH;
#endif
    }
}

/* TLB management */
void helper_tlbia(CPUPPCState *env)
{
    ppc_tlb_invalidate_all(env);
}

void helper_tlbie(CPUPPCState *env, target_ulong addr)
{
    ppc_tlb_invalidate_one(env, addr);
}

void helper_tlbiva(CPUPPCState *env, target_ulong addr)
{
    /* tlbiva instruction only exists on BookE */
    assert(env->mmu_model == POWERPC_MMU_BOOKE);
    /* XXX: TODO */
    cpu_abort(env_cpu(env), "BookE MMU model is not implemented\n");
}

/* Software driven TLBs management */
/* PowerPC 602/603 software TLB load instructions helpers */
static void do_6xx_tlb(CPUPPCState *env, target_ulong new_EPN, int is_code)
{
    target_ulong RPN, CMP, EPN;
    int way;

    RPN = env->spr[SPR_RPA];
    if (is_code) {
        CMP = env->spr[SPR_ICMP];
        EPN = env->spr[SPR_IMISS];
    } else {
        CMP = env->spr[SPR_DCMP];
        EPN = env->spr[SPR_DMISS];
    }
    way = (env->spr[SPR_SRR1] >> 17) & 1;
    (void)EPN; /* avoid a compiler warning */
    LOG_SWTLB("%s: EPN " TARGET_FMT_lx " " TARGET_FMT_lx " PTE0 " TARGET_FMT_lx
              " PTE1 " TARGET_FMT_lx " way %d\n", __func__, new_EPN, EPN, CMP,
              RPN, way);
    /* Store this TLB */
    ppc6xx_tlb_store(env, (uint32_t)(new_EPN & TARGET_PAGE_MASK),
                     way, is_code, CMP, RPN);
}

void helper_6xx_tlbd(CPUPPCState *env, target_ulong EPN)
{
    do_6xx_tlb(env, EPN, 0);
}

void helper_6xx_tlbi(CPUPPCState *env, target_ulong EPN)
{
    do_6xx_tlb(env, EPN, 1);
}

/* PowerPC 74xx software TLB load instructions helpers */
static void do_74xx_tlb(CPUPPCState *env, target_ulong new_EPN, int is_code)
{
    target_ulong RPN, CMP, EPN;
    int way;

    RPN = env->spr[SPR_PTELO];
    CMP = env->spr[SPR_PTEHI];
    EPN = env->spr[SPR_TLBMISS] & ~0x3;
    way = env->spr[SPR_TLBMISS] & 0x3;
    (void)EPN; /* avoid a compiler warning */
    LOG_SWTLB("%s: EPN " TARGET_FMT_lx " " TARGET_FMT_lx " PTE0 " TARGET_FMT_lx
              " PTE1 " TARGET_FMT_lx " way %d\n", __func__, new_EPN, EPN, CMP,
              RPN, way);
    /* Store this TLB */
    ppc6xx_tlb_store(env, (uint32_t)(new_EPN & TARGET_PAGE_MASK),
                     way, is_code, CMP, RPN);
}

void helper_74xx_tlbd(CPUPPCState *env, target_ulong EPN)
{
    do_74xx_tlb(env, EPN, 0);
}

void helper_74xx_tlbi(CPUPPCState *env, target_ulong EPN)
{
    do_74xx_tlb(env, EPN, 1);
}

/*****************************************************************************/
/* PowerPC 601 specific instructions (POWER bridge) */

target_ulong helper_rac(CPUPPCState *env, target_ulong addr)
{
    mmu_ctx_t ctx;
    int nb_BATs;
    target_ulong ret = 0;

    /*
     * We don't have to generate many instances of this instruction,
     * as rac is supervisor only.
     *
     * XXX: FIX THIS: Pretend we have no BAT
     */
    nb_BATs = env->nb_BATs;
    env->nb_BATs = 0;
    if (get_physical_address(env, &ctx, addr, 0, ACCESS_INT) == 0) {
        ret = ctx.raddr;
    }
    env->nb_BATs = nb_BATs;
    return ret;
}

static inline target_ulong booke_tlb_to_page_size(int size)
{
    return 1024 << (2 * size);
}

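/*
 * TSIZE encoding used by the helper above and its inverse below:
 * page size = 1KB << (2 * TSIZE), i.e. 0 -> 1KB, 1 -> 4KB, 2 -> 16KB,
 * 3 -> 64KB, ... 9 -> 256MB, 0xA -> 1GB (and up to 0xF -> 1TB with
 * TARGET_PPC64).
 */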
static inline int booke_page_size_to_tlb(target_ulong page_size)
{
    int size;

    switch (page_size) {
    case 0x00000400UL:
        size = 0x0;
        break;
    case 0x00001000UL:
        size = 0x1;
        break;
    case 0x00004000UL:
        size = 0x2;
        break;
    case 0x00010000UL:
        size = 0x3;
        break;
    case 0x00040000UL:
        size = 0x4;
        break;
    case 0x00100000UL:
        size = 0x5;
        break;
    case 0x00400000UL:
        size = 0x6;
        break;
    case 0x01000000UL:
        size = 0x7;
        break;
    case 0x04000000UL:
        size = 0x8;
        break;
    case 0x10000000UL:
        size = 0x9;
        break;
    case 0x40000000UL:
        size = 0xA;
        break;
#if defined(TARGET_PPC64)
    case 0x000100000000ULL:
        size = 0xB;
        break;
    case 0x000400000000ULL:
        size = 0xC;
        break;
    case 0x001000000000ULL:
        size = 0xD;
        break;
    case 0x004000000000ULL:
        size = 0xE;
        break;
    case 0x010000000000ULL:
        size = 0xF;
        break;
#endif
    default:
        size = -1;
        break;
    }

    return size;
}

/* Helpers for 4xx TLB management */
#define PPC4XX_TLB_ENTRY_MASK       0x0000003f  /* Mask for 64 TLB entries */

#define PPC4XX_TLBHI_V              0x00000040
#define PPC4XX_TLBHI_E              0x00000020
#define PPC4XX_TLBHI_SIZE_MIN       0
#define PPC4XX_TLBHI_SIZE_MAX       7
#define PPC4XX_TLBHI_SIZE_DEFAULT   1
#define PPC4XX_TLBHI_SIZE_SHIFT     7
#define PPC4XX_TLBHI_SIZE_MASK      0x00000007

#define PPC4XX_TLBLO_EX             0x00000200
#define PPC4XX_TLBLO_WR             0x00000100
#define PPC4XX_TLBLO_ATTR_MASK      0x000000FF
#define PPC4XX_TLBLO_RPN_MASK       0xFFFFFC00

target_ulong helper_4xx_tlbre_hi(CPUPPCState *env, target_ulong entry)
{
    ppcemb_tlb_t *tlb;
    target_ulong ret;
    int size;

    entry &= PPC4XX_TLB_ENTRY_MASK;
    tlb = &env->tlb.tlbe[entry];
    ret = tlb->EPN;
    if (tlb->prot & PAGE_VALID) {
        ret |= PPC4XX_TLBHI_V;
    }
    size = booke_page_size_to_tlb(tlb->size);
    if (size < PPC4XX_TLBHI_SIZE_MIN || size > PPC4XX_TLBHI_SIZE_MAX) {
        size = PPC4XX_TLBHI_SIZE_DEFAULT;
    }
    ret |= size << PPC4XX_TLBHI_SIZE_SHIFT;
    env->spr[SPR_40x_PID] = tlb->PID;
    return ret;
}

target_ulong helper_4xx_tlbre_lo(CPUPPCState *env, target_ulong entry)
{
    ppcemb_tlb_t *tlb;
    target_ulong ret;

    entry &= PPC4XX_TLB_ENTRY_MASK;
    tlb = &env->tlb.tlbe[entry];
    ret = tlb->RPN;
    if (tlb->prot & PAGE_EXEC) {
        ret |= PPC4XX_TLBLO_EX;
    }
    if (tlb->prot & PAGE_WRITE) {
        ret |= PPC4XX_TLBLO_WR;
    }
    return ret;
}

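/*
 * 4xx TLBHI word layout assumed by the write helper below, matching the
 * PPC4XX_TLBHI_* masks above: the EPN occupies the upper address bits,
 * SIZE sits in bits 7..9, V (valid) is 0x40 and E (endianness) is 0x20.
 */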
void helper_4xx_tlbwe_hi(CPUPPCState *env, target_ulong entry,
                         target_ulong val)
{
    CPUState *cs = env_cpu(env);
    ppcemb_tlb_t *tlb;
    target_ulong page, end;

    LOG_SWTLB("%s entry %d val " TARGET_FMT_lx "\n", __func__, (int)entry,
              val);
    entry &= PPC4XX_TLB_ENTRY_MASK;
    tlb = &env->tlb.tlbe[entry];
    /* Invalidate previous TLB (if it's valid) */
    if (tlb->prot & PAGE_VALID) {
        end = tlb->EPN + tlb->size;
        LOG_SWTLB("%s: invalidate old TLB %d start " TARGET_FMT_lx " end "
                  TARGET_FMT_lx "\n", __func__, (int)entry, tlb->EPN, end);
        for (page = tlb->EPN; page < end; page += TARGET_PAGE_SIZE) {
            tlb_flush_page(cs, page);
        }
    }
    tlb->size = booke_tlb_to_page_size((val >> PPC4XX_TLBHI_SIZE_SHIFT)
                                       & PPC4XX_TLBHI_SIZE_MASK);
    /*
     * We cannot handle TLB size < TARGET_PAGE_SIZE.
     * If this ever occurs, we should implement TARGET_PAGE_BITS_VARY
     */
    if ((val & PPC4XX_TLBHI_V) && tlb->size < TARGET_PAGE_SIZE) {
        cpu_abort(cs, "TLB size " TARGET_FMT_lu " < %u "
                  "is not supported (%d)\n"
                  "Please implement TARGET_PAGE_BITS_VARY\n",
                  tlb->size, TARGET_PAGE_SIZE, (int)((val >> 7) & 0x7));
    }
    tlb->EPN = val & ~(tlb->size - 1);
    if (val & PPC4XX_TLBHI_V) {
        tlb->prot |= PAGE_VALID;
        if (val & PPC4XX_TLBHI_E) {
            /* XXX: TO BE FIXED */
            cpu_abort(cs,
                      "Little-endian TLB entries are not supported for now\n");
        }
    } else {
        tlb->prot &= ~PAGE_VALID;
    }
    tlb->PID = env->spr[SPR_40x_PID]; /* PID */
    LOG_SWTLB("%s: set up TLB %d RPN " TARGET_FMT_plx " EPN " TARGET_FMT_lx
              " size " TARGET_FMT_lx " prot %c%c%c%c PID %d\n", __func__,
              (int)entry, tlb->RPN, tlb->EPN, tlb->size,
              tlb->prot & PAGE_READ ? 'r' : '-',
              tlb->prot & PAGE_WRITE ? 'w' : '-',
              tlb->prot & PAGE_EXEC ? 'x' : '-',
              tlb->prot & PAGE_VALID ? 'v' : '-', (int)tlb->PID);
    /* Invalidate new TLB (if valid) */
    if (tlb->prot & PAGE_VALID) {
        end = tlb->EPN + tlb->size;
        LOG_SWTLB("%s: invalidate TLB %d start " TARGET_FMT_lx " end "
                  TARGET_FMT_lx "\n", __func__, (int)entry, tlb->EPN, end);
        for (page = tlb->EPN; page < end; page += TARGET_PAGE_SIZE) {
            tlb_flush_page(cs, page);
        }
    }
}

void helper_4xx_tlbwe_lo(CPUPPCState *env, target_ulong entry,
                         target_ulong val)
{
    ppcemb_tlb_t *tlb;

    LOG_SWTLB("%s entry %i val " TARGET_FMT_lx "\n", __func__, (int)entry,
              val);
    entry &= PPC4XX_TLB_ENTRY_MASK;
    tlb = &env->tlb.tlbe[entry];
    tlb->attr = val & PPC4XX_TLBLO_ATTR_MASK;
    tlb->RPN = val & PPC4XX_TLBLO_RPN_MASK;
    tlb->prot = PAGE_READ;
    if (val & PPC4XX_TLBLO_EX) {
        tlb->prot |= PAGE_EXEC;
    }
    if (val & PPC4XX_TLBLO_WR) {
        tlb->prot |= PAGE_WRITE;
    }
    LOG_SWTLB("%s: set up TLB %d RPN " TARGET_FMT_plx " EPN " TARGET_FMT_lx
              " size " TARGET_FMT_lx " prot %c%c%c%c PID %d\n", __func__,
              (int)entry, tlb->RPN, tlb->EPN, tlb->size,
              tlb->prot & PAGE_READ ? 'r' : '-',
              tlb->prot & PAGE_WRITE ? 'w' : '-',
              tlb->prot & PAGE_EXEC ? 'x' : '-',
              tlb->prot & PAGE_VALID ? 'v' : '-', (int)tlb->PID);
}

target_ulong helper_4xx_tlbsx(CPUPPCState *env, target_ulong address)
{
    return ppcemb_tlb_search(env, address, env->spr[SPR_40x_PID]);
}

/* PowerPC 440 TLB management */
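/*
 * The 440 exposes each TLB entry as three words (the 'word' argument of
 * the helpers below): word 0 carries the EPN, page size and valid bit,
 * word 1 the RPN, and word 2 the storage attributes plus separate
 * user/supervisor R/W/X permission bits, mirroring the decoding done in
 * helper_440_tlbwe() and helper_440_tlbre().
 */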
void helper_440_tlbwe(CPUPPCState *env, uint32_t word, target_ulong entry,
                      target_ulong value)
{
    ppcemb_tlb_t *tlb;
    target_ulong EPN, RPN, size;
    int do_flush_tlbs;

    LOG_SWTLB("%s word %d entry %d value " TARGET_FMT_lx "\n",
              __func__, word, (int)entry, value);
    do_flush_tlbs = 0;
    entry &= 0x3F;
    tlb = &env->tlb.tlbe[entry];
    switch (word) {
    default:
        /* Just here to please gcc */
    case 0:
        EPN = value & 0xFFFFFC00;
        if ((tlb->prot & PAGE_VALID) && EPN != tlb->EPN) {
            do_flush_tlbs = 1;
        }
        tlb->EPN = EPN;
        size = booke_tlb_to_page_size((value >> 4) & 0xF);
        if ((tlb->prot & PAGE_VALID) && tlb->size < size) {
            do_flush_tlbs = 1;
        }
        tlb->size = size;
        tlb->attr &= ~0x1;
        tlb->attr |= (value >> 8) & 1;
        if (value & 0x200) {
            tlb->prot |= PAGE_VALID;
        } else {
            if (tlb->prot & PAGE_VALID) {
                tlb->prot &= ~PAGE_VALID;
                do_flush_tlbs = 1;
            }
        }
        tlb->PID = env->spr[SPR_440_MMUCR] & 0x000000FF;
        if (do_flush_tlbs) {
            tlb_flush(env_cpu(env));
        }
        break;
    case 1:
        RPN = value & 0xFFFFFC0F;
        if ((tlb->prot & PAGE_VALID) && tlb->RPN != RPN) {
            tlb_flush(env_cpu(env));
        }
        tlb->RPN = RPN;
        break;
    case 2:
        tlb->attr = (tlb->attr & 0x1) | (value & 0x0000FF00);
        tlb->prot = tlb->prot & PAGE_VALID;
        if (value & 0x1) {
            tlb->prot |= PAGE_READ << 4;
        }
        if (value & 0x2) {
            tlb->prot |= PAGE_WRITE << 4;
        }
        if (value & 0x4) {
            tlb->prot |= PAGE_EXEC << 4;
        }
        if (value & 0x8) {
            tlb->prot |= PAGE_READ;
        }
        if (value & 0x10) {
            tlb->prot |= PAGE_WRITE;
        }
        if (value & 0x20) {
            tlb->prot |= PAGE_EXEC;
        }
        break;
    }
}

target_ulong helper_440_tlbre(CPUPPCState *env, uint32_t word,
                              target_ulong entry)
{
    ppcemb_tlb_t *tlb;
    target_ulong ret;
    int size;

    entry &= 0x3F;
    tlb = &env->tlb.tlbe[entry];
    switch (word) {
    default:
        /* Just here to please gcc */
    case 0:
        ret = tlb->EPN;
        size = booke_page_size_to_tlb(tlb->size);
        if (size < 0 || size > 0xF) {
            size = 1;
        }
        ret |= size << 4;
        if (tlb->attr & 0x1) {
            ret |= 0x100;
        }
        if (tlb->prot & PAGE_VALID) {
            ret |= 0x200;
        }
        env->spr[SPR_440_MMUCR] &= ~0x000000FF;
        env->spr[SPR_440_MMUCR] |= tlb->PID;
        break;
    case 1:
        ret = tlb->RPN;
        break;
    case 2:
        ret = tlb->attr & ~0x1;
        if (tlb->prot & (PAGE_READ << 4)) {
            ret |= 0x1;
        }
        if (tlb->prot & (PAGE_WRITE << 4)) {
            ret |= 0x2;
        }
        if (tlb->prot & (PAGE_EXEC << 4)) {
            ret |= 0x4;
        }
        if (tlb->prot & PAGE_READ) {
            ret |= 0x8;
        }
        if (tlb->prot & PAGE_WRITE) {
            ret |= 0x10;
        }
        if (tlb->prot & PAGE_EXEC) {
            ret |= 0x20;
        }
        break;
    }
    return ret;
}

target_ulong helper_440_tlbsx(CPUPPCState *env, target_ulong address)
{
    return ppcemb_tlb_search(env, address, env->spr[SPR_440_MMUCR] & 0xFF);
}

/* PowerPC BookE 2.06 TLB management */

static ppcmas_tlb_t *booke206_cur_tlb(CPUPPCState *env)
{
    uint32_t tlbncfg = 0;
    int esel = (env->spr[SPR_BOOKE_MAS0] & MAS0_ESEL_MASK) >> MAS0_ESEL_SHIFT;
    int ea = (env->spr[SPR_BOOKE_MAS2] & MAS2_EPN_MASK);
    int tlb;

    tlb = (env->spr[SPR_BOOKE_MAS0] & MAS0_TLBSEL_MASK) >> MAS0_TLBSEL_SHIFT;
    tlbncfg = env->spr[SPR_BOOKE_TLB0CFG + tlb];

    if ((tlbncfg & TLBnCFG_HES) && (env->spr[SPR_BOOKE_MAS0] & MAS0_HES)) {
        cpu_abort(env_cpu(env), "we don't support HES yet\n");
    }

    return booke206_get_tlbm(env, tlb, ea, esel);
}

void helper_booke_setpid(CPUPPCState *env, uint32_t pidn, target_ulong pid)
{
    env->spr[pidn] = pid;
    /* changing PIDs means we're in a different address space now */
    tlb_flush(env_cpu(env));
}

void helper_booke_set_eplc(CPUPPCState *env, target_ulong val)
{
    env->spr[SPR_BOOKE_EPLC] = val & EPID_MASK;
    tlb_flush_by_mmuidx(env_cpu(env), 1 << PPC_TLB_EPID_LOAD);
}

void helper_booke_set_epsc(CPUPPCState *env, target_ulong val)
{
    env->spr[SPR_BOOKE_EPSC] = val & EPID_MASK;
    tlb_flush_by_mmuidx(env_cpu(env), 1 << PPC_TLB_EPID_STORE);
}

static inline void flush_page(CPUPPCState *env, ppcmas_tlb_t *tlb)
{
    if (booke206_tlb_to_page_size(env, tlb) == TARGET_PAGE_SIZE) {
        tlb_flush_page(env_cpu(env), tlb->mas2 & MAS2_EPN_MASK);
    } else {
        tlb_flush(env_cpu(env));
    }
}

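/*
 * The tlbwe/tlbre/tlbsx helpers below operate on the MAS registers:
 * MAS0 selects the TLB array and entry, MAS1 holds valid/IPROT/TID/TSIZE,
 * MAS2 the EPN plus WIMGE attributes, and MAS3/MAS7 the RPN and
 * permission bits (combined into mas7_3).
 */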
void helper_booke206_tlbwe(CPUPPCState *env)
{
    uint32_t tlbncfg, tlbn;
    ppcmas_tlb_t *tlb;
    uint32_t size_tlb, size_ps;
    target_ulong mask;

    switch (env->spr[SPR_BOOKE_MAS0] & MAS0_WQ_MASK) {
    case MAS0_WQ_ALWAYS:
        /* good to go, write that entry */
        break;
    case MAS0_WQ_COND:
        /* XXX check if reserved */
        if (0) {
            return;
        }
        break;
    case MAS0_WQ_CLR_RSRV:
        /* XXX clear entry */
        return;
    default:
        /* no idea what to do */
        return;
    }

    if (((env->spr[SPR_BOOKE_MAS0] & MAS0_ATSEL) == MAS0_ATSEL_LRAT) &&
        !msr_gs) {
        /* XXX we don't support direct LRAT setting yet */
        fprintf(stderr, "cpu: don't support LRAT setting yet\n");
        return;
    }

    tlbn = (env->spr[SPR_BOOKE_MAS0] & MAS0_TLBSEL_MASK) >> MAS0_TLBSEL_SHIFT;
    tlbncfg = env->spr[SPR_BOOKE_TLB0CFG + tlbn];

    tlb = booke206_cur_tlb(env);

    if (!tlb) {
        raise_exception_err_ra(env, POWERPC_EXCP_PROGRAM,
                               POWERPC_EXCP_INVAL |
                               POWERPC_EXCP_INVAL_INVAL, GETPC());
    }

    /* check that we support the targeted size */
    size_tlb = (env->spr[SPR_BOOKE_MAS1] & MAS1_TSIZE_MASK) >> MAS1_TSIZE_SHIFT;
    size_ps = booke206_tlbnps(env, tlbn);
    if ((env->spr[SPR_BOOKE_MAS1] & MAS1_VALID) && (tlbncfg & TLBnCFG_AVAIL) &&
        !(size_ps & (1 << size_tlb))) {
        raise_exception_err_ra(env, POWERPC_EXCP_PROGRAM,
                               POWERPC_EXCP_INVAL |
                               POWERPC_EXCP_INVAL_INVAL, GETPC());
    }

    if (msr_gs) {
        cpu_abort(env_cpu(env), "missing HV implementation\n");
    }

    if (tlb->mas1 & MAS1_VALID) {
        /*
         * Invalidate the page in QEMU TLB if it was a valid entry.
         *
         * In "PowerPC e500 Core Family Reference Manual, Rev. 1",
         * Section "12.4.2 TLB Write Entry (tlbwe) Instruction":
         * (https://www.nxp.com/docs/en/reference-manual/E500CORERM.pdf)
         *
         * "Note that when an L2 TLB entry is written, it may be displacing an
         * already valid entry in the same L2 TLB location (a victim). If a
         * valid L1 TLB entry corresponds to the L2 MMU victim entry, that L1
         * TLB entry is automatically invalidated."
         */
        flush_page(env, tlb);
    }

    tlb->mas7_3 = ((uint64_t)env->spr[SPR_BOOKE_MAS7] << 32) |
        env->spr[SPR_BOOKE_MAS3];
    tlb->mas1 = env->spr[SPR_BOOKE_MAS1];

    if ((env->spr[SPR_MMUCFG] & MMUCFG_MAVN) == MMUCFG_MAVN_V2) {
        /* For a TLB which has a fixed size, TSIZE is ignored with MAV2 */
        booke206_fixed_size_tlbn(env, tlbn, tlb);
    } else {
        if (!(tlbncfg & TLBnCFG_AVAIL)) {
            /* force !AVAIL TLB entries to correct page size */
            tlb->mas1 &= ~MAS1_TSIZE_MASK;
            /* XXX can be configured in MMUCSR0 */
            tlb->mas1 |= (tlbncfg & TLBnCFG_MINSIZE) >> 12;
        }
    }

    /* Make a mask from TLB size to discard invalid bits in EPN field */
    mask = ~(booke206_tlb_to_page_size(env, tlb) - 1);
    /* Add a mask for page attributes */
    mask |= MAS2_ACM | MAS2_VLE | MAS2_W | MAS2_I | MAS2_M | MAS2_G | MAS2_E;

    if (!msr_cm) {
        /*
         * Executing a tlbwe instruction in 32-bit mode will set bits
         * 0:31 of the TLB EPN field to zero.
         */
        mask &= 0xffffffff;
    }

    tlb->mas2 = env->spr[SPR_BOOKE_MAS2] & mask;

    if (!(tlbncfg & TLBnCFG_IPROT)) {
        /* no IPROT supported by TLB */
        tlb->mas1 &= ~MAS1_IPROT;
    }

    flush_page(env, tlb);
}

static inline void booke206_tlb_to_mas(CPUPPCState *env, ppcmas_tlb_t *tlb)
{
    int tlbn = booke206_tlbm_to_tlbn(env, tlb);
    int way = booke206_tlbm_to_way(env, tlb);

    env->spr[SPR_BOOKE_MAS0] = tlbn << MAS0_TLBSEL_SHIFT;
    env->spr[SPR_BOOKE_MAS0] |= way << MAS0_ESEL_SHIFT;
    env->spr[SPR_BOOKE_MAS0] |= env->last_way << MAS0_NV_SHIFT;

    env->spr[SPR_BOOKE_MAS1] = tlb->mas1;
    env->spr[SPR_BOOKE_MAS2] = tlb->mas2;
    env->spr[SPR_BOOKE_MAS3] = tlb->mas7_3;
    env->spr[SPR_BOOKE_MAS7] = tlb->mas7_3 >> 32;
}

void helper_booke206_tlbre(CPUPPCState *env)
{
    ppcmas_tlb_t *tlb = NULL;

    tlb = booke206_cur_tlb(env);
    if (!tlb) {
        env->spr[SPR_BOOKE_MAS1] = 0;
    } else {
        booke206_tlb_to_mas(env, tlb);
    }
}

void helper_booke206_tlbsx(CPUPPCState *env, target_ulong address)
{
    ppcmas_tlb_t *tlb = NULL;
    int i, j;
    hwaddr raddr;
    uint32_t spid, sas;

    spid = (env->spr[SPR_BOOKE_MAS6] & MAS6_SPID_MASK) >> MAS6_SPID_SHIFT;
    sas = env->spr[SPR_BOOKE_MAS6] & MAS6_SAS;

    for (i = 0; i < BOOKE206_MAX_TLBN; i++) {
        int ways = booke206_tlb_ways(env, i);

        for (j = 0; j < ways; j++) {
            tlb = booke206_get_tlbm(env, i, address, j);

            if (!tlb) {
                continue;
            }

            if (ppcmas_tlb_check(env, tlb, &raddr, address, spid)) {
                continue;
            }

            if (sas != ((tlb->mas1 & MAS1_TS) >> MAS1_TS_SHIFT)) {
                continue;
            }

            booke206_tlb_to_mas(env, tlb);
            return;
        }
    }

    /* no entry found, fill with defaults */
    env->spr[SPR_BOOKE_MAS0] = env->spr[SPR_BOOKE_MAS4] & MAS4_TLBSELD_MASK;
    env->spr[SPR_BOOKE_MAS1] = env->spr[SPR_BOOKE_MAS4] & MAS4_TSIZED_MASK;
    env->spr[SPR_BOOKE_MAS2] = env->spr[SPR_BOOKE_MAS4] & MAS4_WIMGED_MASK;
    env->spr[SPR_BOOKE_MAS3] = 0;
    env->spr[SPR_BOOKE_MAS7] = 0;

    if (env->spr[SPR_BOOKE_MAS6] & MAS6_SAS) {
        env->spr[SPR_BOOKE_MAS1] |= MAS1_TS;
    }

    env->spr[SPR_BOOKE_MAS1] |= (env->spr[SPR_BOOKE_MAS6] >> 16)
        << MAS1_TID_SHIFT;

    /* next victim logic */
    env->spr[SPR_BOOKE_MAS0] |= env->last_way << MAS0_ESEL_SHIFT;
    env->last_way++;
    env->last_way &= booke206_tlb_ways(env, 0) - 1;
    env->spr[SPR_BOOKE_MAS0] |= env->last_way << MAS0_NV_SHIFT;
}

static inline void booke206_invalidate_ea_tlb(CPUPPCState *env, int tlbn,
                                              uint32_t ea)
{
    int i;
    int ways = booke206_tlb_ways(env, tlbn);
    target_ulong mask;

    for (i = 0; i < ways; i++) {
        ppcmas_tlb_t *tlb = booke206_get_tlbm(env, tlbn, ea, i);
        if (!tlb) {
            continue;
        }
        mask = ~(booke206_tlb_to_page_size(env, tlb) - 1);
        if (((tlb->mas2 & MAS2_EPN_MASK) == (ea & mask)) &&
            !(tlb->mas1 & MAS1_IPROT)) {
            tlb->mas1 &= ~MAS1_VALID;
        }
    }
}

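/*
 * tlbivax effective-address encoding as decoded below: bit 0x4 requests
 * a full flush of the selected TLB, bit 0x8 selects TLB1 instead of
 * TLB0; otherwise only entries matching the EPN bits are invalidated.
 */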
void helper_booke206_tlbivax(CPUPPCState *env, target_ulong address)
{
    CPUState *cs;

    if (address & 0x4) {
        /* flush all entries */
        if (address & 0x8) {
            /* flush all of TLB1 */
            booke206_flush_tlb(env, BOOKE206_FLUSH_TLB1, 1);
        } else {
            /* flush all of TLB0 */
            booke206_flush_tlb(env, BOOKE206_FLUSH_TLB0, 0);
        }
        return;
    }

    if (address & 0x8) {
        /* flush TLB1 entries */
        booke206_invalidate_ea_tlb(env, 1, address);
        CPU_FOREACH(cs) {
            tlb_flush(cs);
        }
    } else {
        /* flush TLB0 entries */
        booke206_invalidate_ea_tlb(env, 0, address);
        CPU_FOREACH(cs) {
            tlb_flush_page(cs, address & MAS2_EPN_MASK);
        }
    }
}

void helper_booke206_tlbilx0(CPUPPCState *env, target_ulong address)
{
    /* XXX missing LPID handling */
    booke206_flush_tlb(env, -1, 1);
}

void helper_booke206_tlbilx1(CPUPPCState *env, target_ulong address)
{
    int i, j;
    int tid = (env->spr[SPR_BOOKE_MAS6] & MAS6_SPID);
    ppcmas_tlb_t *tlb = env->tlb.tlbm;
    int tlb_size;

    /* XXX missing LPID handling */
    for (i = 0; i < BOOKE206_MAX_TLBN; i++) {
        tlb_size = booke206_tlb_size(env, i);
        for (j = 0; j < tlb_size; j++) {
            if (!(tlb[j].mas1 & MAS1_IPROT) &&
                ((tlb[j].mas1 & MAS1_TID_MASK) == tid)) {
                tlb[j].mas1 &= ~MAS1_VALID;
            }
        }
        tlb += booke206_tlb_size(env, i);
    }
    tlb_flush(env_cpu(env));
}

void helper_booke206_tlbilx3(CPUPPCState *env, target_ulong address)
{
    int i, j;
    ppcmas_tlb_t *tlb;
    int tid = (env->spr[SPR_BOOKE_MAS6] & MAS6_SPID);
    int pid = tid >> MAS6_SPID_SHIFT;
    int sgs = env->spr[SPR_BOOKE_MAS5] & MAS5_SGS;
    int ind = (env->spr[SPR_BOOKE_MAS6] & MAS6_SIND) ? MAS1_IND : 0;
    /* XXX check for unsupported isize and raise an invalid opcode if so */
    int size = env->spr[SPR_BOOKE_MAS6] & MAS6_ISIZE_MASK;
    /* XXX implement MAV2 handling */
    bool mav2 = false;

    /* XXX missing LPID handling */
    /* flush by pid and ea */
    for (i = 0; i < BOOKE206_MAX_TLBN; i++) {
        int ways = booke206_tlb_ways(env, i);

        for (j = 0; j < ways; j++) {
            tlb = booke206_get_tlbm(env, i, address, j);
            if (!tlb) {
                continue;
            }
            if ((ppcmas_tlb_check(env, tlb, NULL, address, pid) != 0) ||
                (tlb->mas1 & MAS1_IPROT) ||
                ((tlb->mas1 & MAS1_IND) != ind) ||
                ((tlb->mas8 & MAS8_TGS) != sgs)) {
                continue;
            }
            if (mav2 && ((tlb->mas1 & MAS1_TSIZE_MASK) != size)) {
                /* XXX only check when MMUCFG[TWC] || TLBnCFG[HES] */
                continue;
            }
            /* XXX e500mc doesn't match SAS, but other cores might */
            tlb->mas1 &= ~MAS1_VALID;
        }
    }
    tlb_flush(env_cpu(env));
}

void helper_booke206_tlbflush(CPUPPCState *env, target_ulong type)
{
    int flags = 0;

    if (type & 2) {
        flags |= BOOKE206_FLUSH_TLB1;
    }

    if (type & 4) {
        flags |= BOOKE206_FLUSH_TLB0;
    }

    booke206_flush_tlb(env, flags, 1);
}

void helper_check_tlb_flush_local(CPUPPCState *env)
{
    check_tlb_flush(env, false);
}

void helper_check_tlb_flush_global(CPUPPCState *env)
{
    check_tlb_flush(env, true);
}
#endif /* CONFIG_TCG */

/*****************************************************************************/

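/*
 * TCG tlb_fill hook: translate through the CPU-class fault handler when
 * one is registered, otherwise fall back to the generic handler, and on
 * failure either report the miss (probe) or raise the exception the
 * handler recorded in the CPU state.
 */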
bool ppc_cpu_tlb_fill(CPUState *cs, vaddr addr, int size,
                      MMUAccessType access_type, int mmu_idx,
                      bool probe, uintptr_t retaddr)
{
    PowerPCCPU *cpu = POWERPC_CPU(cs);
    PowerPCCPUClass *pcc = POWERPC_CPU_GET_CLASS(cs);
    CPUPPCState *env = &cpu->env;
    int ret;

    if (pcc->handle_mmu_fault) {
        ret = pcc->handle_mmu_fault(cpu, addr, access_type, mmu_idx);
    } else {
        ret = cpu_ppc_handle_mmu_fault(env, addr, access_type, mmu_idx);
    }
    if (unlikely(ret != 0)) {
        if (probe) {
            return false;
        }
        raise_exception_err_ra(env, cs->exception_index, env->error_code,
                               retaddr);
    }
    return true;
}