/*
 * PowerPC MMU, TLB, SLB and BAT emulation helpers for QEMU.
 *
 * Copyright (c) 2003-2007 Jocelyn Mayer
 *
 * This library is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License as published by the Free Software Foundation; either
 * version 2.1 of the License, or (at your option) any later version.
 *
 * This library is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with this library; if not, see <http://www.gnu.org/licenses/>.
 */

#include "qemu/osdep.h"
#include "qemu/units.h"
#include "cpu.h"
#include "sysemu/kvm.h"
#include "kvm_ppc.h"
#include "mmu-hash64.h"
#include "mmu-hash32.h"
#include "exec/exec-all.h"
#include "exec/log.h"
#include "helper_regs.h"
#include "qemu/error-report.h"
#include "qemu/main-loop.h"
#include "qemu/qemu-print.h"
#include "internal.h"
#include "mmu-book3s-v3.h"
#include "mmu-radix64.h"
#include "exec/helper-proto.h"
#include "exec/cpu_ldst.h"

/* #define FLUSH_ALL_TLBS */

/*****************************************************************************/
/* PowerPC MMU emulation */

/* Software driven TLB helpers */
static inline void ppc6xx_tlb_invalidate_all(CPUPPCState *env)
{
    ppc6xx_tlb_t *tlb;
    int nr, max;

    /* LOG_SWTLB("Invalidate all TLBs\n"); */
    /* Invalidate all defined software TLBs */
    max = env->nb_tlb;
    if (env->id_tlbs == 1) {
        max *= 2;
    }
    for (nr = 0; nr < max; nr++) {
        tlb = &env->tlb.tlb6[nr];
        pte_invalidate(&tlb->pte0);
    }
    tlb_flush(env_cpu(env));
}

static inline void ppc6xx_tlb_invalidate_virt2(CPUPPCState *env,
                                               target_ulong eaddr,
                                               int is_code, int match_epn)
{
#if !defined(FLUSH_ALL_TLBS)
    CPUState *cs = env_cpu(env);
    ppc6xx_tlb_t *tlb;
    int way, nr;

    /* Invalidate ITLB + DTLB, all ways */
    for (way = 0; way < env->nb_ways; way++) {
        nr = ppc6xx_tlb_getnum(env, eaddr, way, is_code);
        tlb = &env->tlb.tlb6[nr];
        if (pte_is_valid(tlb->pte0) && (match_epn == 0 || eaddr == tlb->EPN)) {
            qemu_log_mask(CPU_LOG_MMU, "TLB invalidate %d/%d "
                          TARGET_FMT_lx "\n", nr, env->nb_tlb, eaddr);
            pte_invalidate(&tlb->pte0);
            tlb_flush_page(cs, tlb->EPN);
        }
    }
#else
    /* XXX: the PowerPC specification says this is valid as well */
    ppc6xx_tlb_invalidate_all(env);
#endif
}

static inline void ppc6xx_tlb_invalidate_virt(CPUPPCState *env,
                                              target_ulong eaddr, int is_code)
{
    ppc6xx_tlb_invalidate_virt2(env, eaddr, is_code, 0);
}

static void ppc6xx_tlb_store(CPUPPCState *env, target_ulong EPN, int way,
                             int is_code, target_ulong pte0, target_ulong pte1)
{
    ppc6xx_tlb_t *tlb;
    int nr;

    nr = ppc6xx_tlb_getnum(env, EPN, way, is_code);
    tlb = &env->tlb.tlb6[nr];
    qemu_log_mask(CPU_LOG_MMU, "Set TLB %d/%d EPN " TARGET_FMT_lx " PTE0 "
                  TARGET_FMT_lx " PTE1 " TARGET_FMT_lx "\n", nr, env->nb_tlb,
                  EPN, pte0, pte1);
    /* Invalidate any pending reference in QEMU for this virtual address */
    ppc6xx_tlb_invalidate_virt2(env, EPN, is_code, 1);
    tlb->pte0 = pte0;
    tlb->pte1 = pte1;
    tlb->EPN = EPN;
    /* Store last way for LRU mechanism */
    env->last_way = way;
}
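
/*
 * Informal note on the helpers shared by the embedded MMU families
 * (40x, 440, BookE): ppcemb_tlb_search() below walks the software TLB
 * array and returns the index of the entry matching the effective
 * address and PID, or -1 on a miss.  It backs the 40x and 440 tlbsx
 * helpers further down; the per-entry comparison itself is done by
 * ppcemb_tlb_check(), which is defined outside this file.
 */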
/* Generic TLB search function for PowerPC embedded implementations */
static int ppcemb_tlb_search(CPUPPCState *env, target_ulong address,
                             uint32_t pid)
{
    ppcemb_tlb_t *tlb;
    hwaddr raddr;
    int i, ret;

    /* Default return value is no match */
    ret = -1;
    for (i = 0; i < env->nb_tlb; i++) {
        tlb = &env->tlb.tlbe[i];
        if (ppcemb_tlb_check(env, tlb, &raddr, address, pid, 0, i) == 0) {
            ret = i;
            break;
        }
    }

    return ret;
}

/* Helpers specific to PowerPC 40x implementations */
static inline void ppc4xx_tlb_invalidate_all(CPUPPCState *env)
{
    ppcemb_tlb_t *tlb;
    int i;

    for (i = 0; i < env->nb_tlb; i++) {
        tlb = &env->tlb.tlbe[i];
        tlb->prot &= ~PAGE_VALID;
    }
    tlb_flush(env_cpu(env));
}

static void booke206_flush_tlb(CPUPPCState *env, int flags,
                               const int check_iprot)
{
    int tlb_size;
    int i, j;
    ppcmas_tlb_t *tlb = env->tlb.tlbm;

    for (i = 0; i < BOOKE206_MAX_TLBN; i++) {
        if (flags & (1 << i)) {
            tlb_size = booke206_tlb_size(env, i);
            for (j = 0; j < tlb_size; j++) {
                if (!check_iprot || !(tlb[j].mas1 & MAS1_IPROT)) {
                    tlb[j].mas1 &= ~MAS1_VALID;
                }
            }
        }
        tlb += booke206_tlb_size(env, i);
    }

    tlb_flush(env_cpu(env));
}

static int get_physical_address(CPUPPCState *env, mmu_ctx_t *ctx,
                                target_ulong eaddr, MMUAccessType access_type,
                                int type)
{
    return get_physical_address_wtlb(env, ctx, eaddr, access_type, type, 0);
}

/*****************************************************************************/
/* BATs management */
#if !defined(FLUSH_ALL_TLBS)
static inline void do_invalidate_BAT(CPUPPCState *env, target_ulong BATu,
                                     target_ulong mask)
{
    CPUState *cs = env_cpu(env);
    target_ulong base, end, page;

    base = BATu & ~0x0001FFFF;
    end = base + mask + 0x00020000;
    if (((end - base) >> TARGET_PAGE_BITS) > 1024) {
        /* Flushing 1024 4K pages is slower than a complete flush */
        qemu_log_mask(CPU_LOG_MMU, "Flush all BATs\n");
        tlb_flush(cs);
        qemu_log_mask(CPU_LOG_MMU, "Flush done\n");
        return;
    }
    qemu_log_mask(CPU_LOG_MMU, "Flush BAT from " TARGET_FMT_lx
                  " to " TARGET_FMT_lx " (" TARGET_FMT_lx ")\n",
                  base, end, mask);
    for (page = base; page != end; page += TARGET_PAGE_SIZE) {
        tlb_flush_page(cs, page);
    }
    qemu_log_mask(CPU_LOG_MMU, "Flush done\n");
}
#endif

static inline void dump_store_bat(CPUPPCState *env, char ID, int ul, int nr,
                                  target_ulong value)
{
    qemu_log_mask(CPU_LOG_MMU, "Set %cBAT%d%c to " TARGET_FMT_lx " ("
                  TARGET_FMT_lx ")\n", ID, nr, ul == 0 ? 'u' : 'l',
                  value, env->nip);
}
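
/*
 * Brief, informal reminder of the classic 32-bit BAT layout this code
 * relies on: the upper BAT word holds BEPI (the block effective page
 * index) in its top bits and the 11-bit BL (block length) field just
 * above the Vs/Vp valid bits.  Shifting the register value left by 15
 * and masking with 0x0FFE0000 turns BL into an effective-address mask
 * for the block, from 128KB (BL = 0) up to 256MB (BL = 0x7FF); the
 * helpers below use that mask both to invalidate the pages covered by
 * the old mapping and to clear the low bits of BEPI/BRPN when a new
 * upper BAT value is stored.
 */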
void helper_store_ibatu(CPUPPCState *env, uint32_t nr, target_ulong value)
{
    target_ulong mask;

    dump_store_bat(env, 'I', 0, nr, value);
    if (env->IBAT[0][nr] != value) {
        mask = (value << 15) & 0x0FFE0000UL;
#if !defined(FLUSH_ALL_TLBS)
        do_invalidate_BAT(env, env->IBAT[0][nr], mask);
#endif
        /*
         * When storing valid upper BAT, mask BEPI and BRPN and
         * invalidate all TLBs covered by this BAT
         */
        mask = (value << 15) & 0x0FFE0000UL;
        env->IBAT[0][nr] = (value & 0x00001FFFUL) |
            (value & ~0x0001FFFFUL & ~mask);
        env->IBAT[1][nr] = (env->IBAT[1][nr] & 0x0000007B) |
            (env->IBAT[1][nr] & ~0x0001FFFF & ~mask);
#if !defined(FLUSH_ALL_TLBS)
        do_invalidate_BAT(env, env->IBAT[0][nr], mask);
#else
        tlb_flush(env_cpu(env));
#endif
    }
}

void helper_store_ibatl(CPUPPCState *env, uint32_t nr, target_ulong value)
{
    dump_store_bat(env, 'I', 1, nr, value);
    env->IBAT[1][nr] = value;
}

void helper_store_dbatu(CPUPPCState *env, uint32_t nr, target_ulong value)
{
    target_ulong mask;

    dump_store_bat(env, 'D', 0, nr, value);
    if (env->DBAT[0][nr] != value) {
        /*
         * When storing valid upper BAT, mask BEPI and BRPN and
         * invalidate all TLBs covered by this BAT
         */
        mask = (value << 15) & 0x0FFE0000UL;
#if !defined(FLUSH_ALL_TLBS)
        do_invalidate_BAT(env, env->DBAT[0][nr], mask);
#endif
        mask = (value << 15) & 0x0FFE0000UL;
        env->DBAT[0][nr] = (value & 0x00001FFFUL) |
            (value & ~0x0001FFFFUL & ~mask);
        env->DBAT[1][nr] = (env->DBAT[1][nr] & 0x0000007B) |
            (env->DBAT[1][nr] & ~0x0001FFFF & ~mask);
#if !defined(FLUSH_ALL_TLBS)
        do_invalidate_BAT(env, env->DBAT[0][nr], mask);
#else
        tlb_flush(env_cpu(env));
#endif
    }
}

void helper_store_dbatl(CPUPPCState *env, uint32_t nr, target_ulong value)
{
    dump_store_bat(env, 'D', 1, nr, value);
    env->DBAT[1][nr] = value;
}

/*****************************************************************************/
/* TLB management */
void ppc_tlb_invalidate_all(CPUPPCState *env)
{
#if defined(TARGET_PPC64)
    if (mmu_is_64bit(env->mmu_model)) {
        env->tlb_need_flush = 0;
        tlb_flush(env_cpu(env));
    } else
#endif /* defined(TARGET_PPC64) */
    switch (env->mmu_model) {
    case POWERPC_MMU_SOFT_6xx:
        ppc6xx_tlb_invalidate_all(env);
        break;
    case POWERPC_MMU_SOFT_4xx:
        ppc4xx_tlb_invalidate_all(env);
        break;
    case POWERPC_MMU_REAL:
        cpu_abort(env_cpu(env), "No TLB for PowerPC 4xx in real mode\n");
        break;
    case POWERPC_MMU_MPC8xx:
        /* XXX: TODO */
        cpu_abort(env_cpu(env), "MPC8xx MMU model is not implemented\n");
        break;
    case POWERPC_MMU_BOOKE:
        tlb_flush(env_cpu(env));
        break;
    case POWERPC_MMU_BOOKE206:
        booke206_flush_tlb(env, -1, 0);
        break;
    case POWERPC_MMU_32B:
        env->tlb_need_flush = 0;
        tlb_flush(env_cpu(env));
        break;
    default:
        /* XXX: TODO */
        cpu_abort(env_cpu(env), "Unknown MMU model %x\n", env->mmu_model);
        break;
    }
}
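
/*
 * Note on the deferred-flush scheme used by several helpers below: rather
 * than flushing QEMU's TLB immediately, they set TLB_NEED_LOCAL_FLUSH in
 * env->tlb_need_flush (a matching TLB_NEED_GLOBAL_FLUSH exists for the
 * broadcast forms, handled elsewhere); the flush is then performed by
 * check_tlb_flush(), reached through the helper_check_tlb_flush_* helpers
 * at the end of this file on a context synchronizing event.
 */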
void ppc_tlb_invalidate_one(CPUPPCState *env, target_ulong addr)
{
#if !defined(FLUSH_ALL_TLBS)
    addr &= TARGET_PAGE_MASK;
#if defined(TARGET_PPC64)
    if (mmu_is_64bit(env->mmu_model)) {
        /* tlbie invalidates TLBs for all segments */
        /*
         * XXX: given that there are too many segments to invalidate,
         * and we still don't have a tlb_flush_mask(env, n, mask) in QEMU,
         * we just invalidate all TLBs
         */
        env->tlb_need_flush |= TLB_NEED_LOCAL_FLUSH;
    } else
#endif /* defined(TARGET_PPC64) */
    switch (env->mmu_model) {
    case POWERPC_MMU_SOFT_6xx:
        ppc6xx_tlb_invalidate_virt(env, addr, 0);
        if (env->id_tlbs == 1) {
            ppc6xx_tlb_invalidate_virt(env, addr, 1);
        }
        break;
    case POWERPC_MMU_32B:
        /*
         * Actual CPUs invalidate entire congruence classes based on
         * the geometry of their TLBs and some OSes take that into
         * account, we just mark the TLB to be flushed later (context
         * synchronizing event or sync instruction on 32-bit).
         */
        env->tlb_need_flush |= TLB_NEED_LOCAL_FLUSH;
        break;
    default:
        /* Should never reach here with other MMU models */
        assert(0);
    }
#else
    ppc_tlb_invalidate_all(env);
#endif
}

/*****************************************************************************/
/* Special registers manipulation */

/* Segment registers load and store */
target_ulong helper_load_sr(CPUPPCState *env, target_ulong sr_num)
{
#if defined(TARGET_PPC64)
    if (mmu_is_64bit(env->mmu_model)) {
        /* XXX */
        return 0;
    }
#endif
    return env->sr[sr_num];
}

void helper_store_sr(CPUPPCState *env, target_ulong srnum, target_ulong value)
{
    qemu_log_mask(CPU_LOG_MMU,
                  "%s: reg=%d " TARGET_FMT_lx " " TARGET_FMT_lx "\n", __func__,
                  (int)srnum, value, env->sr[srnum]);
#if defined(TARGET_PPC64)
    if (mmu_is_64bit(env->mmu_model)) {
        PowerPCCPU *cpu = env_archcpu(env);
        uint64_t esid, vsid;

        /* ESID = srnum */
        esid = ((uint64_t)(srnum & 0xf) << 28) | SLB_ESID_V;

        /* VSID = VSID */
        vsid = (value & 0xfffffff) << 12;
        /* flags = flags */
        vsid |= ((value >> 27) & 0xf) << 8;

        ppc_store_slb(cpu, srnum, esid, vsid);
    } else
#endif
    if (env->sr[srnum] != value) {
        env->sr[srnum] = value;
        /*
         * Invalidating 256MB of virtual memory in 4kB pages takes way
         * longer than flushing the whole TLB.
         */
#if !defined(FLUSH_ALL_TLBS) && 0
        {
            target_ulong page, end;
            /* Invalidate 256 MB of virtual memory */
            page = (16 << 20) * srnum;
            end = page + (16 << 20);
            for (; page != end; page += TARGET_PAGE_SIZE) {
                tlb_flush_page(env_cpu(env), page);
            }
        }
#else
        env->tlb_need_flush |= TLB_NEED_LOCAL_FLUSH;
#endif
    }
}

/* TLB management */
void helper_tlbia(CPUPPCState *env)
{
    ppc_tlb_invalidate_all(env);
}

void helper_tlbie(CPUPPCState *env, target_ulong addr)
{
    ppc_tlb_invalidate_one(env, addr);
}

void helper_tlbiva(CPUPPCState *env, target_ulong addr)
{
    /* tlbiva instruction only exists on BookE */
    assert(env->mmu_model == POWERPC_MMU_BOOKE);
    /* XXX: TODO */
    cpu_abort(env_cpu(env), "BookE MMU model is not implemented\n");
}

/* Software driven TLBs management */
/* PowerPC 602/603 software TLB load instructions helpers */
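/*
 * Rough sketch of how the 602/603 software TLB reload works (the real
 * work happens in the guest's miss handler, not here): on a TLB miss the
 * hardware latches the faulting address in IMISS or DMISS, the compare
 * value in ICMP or DCMP, and flags the replacement way in SRR1; the miss
 * handler locates the PTE, loads it into RPA and issues tlbli or tlbld,
 * which lands in the helpers below where the SPR contents are assembled
 * into a software TLB entry.
 */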
static void do_6xx_tlb(CPUPPCState *env, target_ulong new_EPN, int is_code)
{
    target_ulong RPN, CMP, EPN;
    int way;

    RPN = env->spr[SPR_RPA];
    if (is_code) {
        CMP = env->spr[SPR_ICMP];
        EPN = env->spr[SPR_IMISS];
    } else {
        CMP = env->spr[SPR_DCMP];
        EPN = env->spr[SPR_DMISS];
    }
    way = (env->spr[SPR_SRR1] >> 17) & 1;
    (void)EPN; /* avoid a compiler warning */
    qemu_log_mask(CPU_LOG_MMU, "%s: EPN " TARGET_FMT_lx " " TARGET_FMT_lx
                  " PTE0 " TARGET_FMT_lx " PTE1 " TARGET_FMT_lx " way %d\n",
                  __func__, new_EPN, EPN, CMP, RPN, way);
    /* Store this TLB */
    ppc6xx_tlb_store(env, (uint32_t)(new_EPN & TARGET_PAGE_MASK),
                     way, is_code, CMP, RPN);
}

void helper_6xx_tlbd(CPUPPCState *env, target_ulong EPN)
{
    do_6xx_tlb(env, EPN, 0);
}

void helper_6xx_tlbi(CPUPPCState *env, target_ulong EPN)
{
    do_6xx_tlb(env, EPN, 1);
}

/*****************************************************************************/
/* PowerPC 601 specific instructions (POWER bridge) */

target_ulong helper_rac(CPUPPCState *env, target_ulong addr)
{
    mmu_ctx_t ctx;
    int nb_BATs;
    target_ulong ret = 0;

    /*
     * We don't have to generate many instances of this instruction,
     * as rac is supervisor only.
     *
     * XXX: FIX THIS: Pretend we have no BAT
     */
    nb_BATs = env->nb_BATs;
    env->nb_BATs = 0;
    if (get_physical_address(env, &ctx, addr, 0, ACCESS_INT) == 0) {
        ret = ctx.raddr;
    }
    env->nb_BATs = nb_BATs;
    return ret;
}

static inline target_ulong booke_tlb_to_page_size(int size)
{
    return 1024 << (2 * size);
}

static inline int booke_page_size_to_tlb(target_ulong page_size)
{
    int size;

    switch (page_size) {
    case 0x00000400UL:
        size = 0x0;
        break;
    case 0x00001000UL:
        size = 0x1;
        break;
    case 0x00004000UL:
        size = 0x2;
        break;
    case 0x00010000UL:
        size = 0x3;
        break;
    case 0x00040000UL:
        size = 0x4;
        break;
    case 0x00100000UL:
        size = 0x5;
        break;
    case 0x00400000UL:
        size = 0x6;
        break;
    case 0x01000000UL:
        size = 0x7;
        break;
    case 0x04000000UL:
        size = 0x8;
        break;
    case 0x10000000UL:
        size = 0x9;
        break;
    case 0x40000000UL:
        size = 0xA;
        break;
#if defined(TARGET_PPC64)
    case 0x000100000000ULL:
        size = 0xB;
        break;
    case 0x000400000000ULL:
        size = 0xC;
        break;
    case 0x001000000000ULL:
        size = 0xD;
        break;
    case 0x004000000000ULL:
        size = 0xE;
        break;
    case 0x010000000000ULL:
        size = 0xF;
        break;
#endif
    default:
        size = -1;
        break;
    }

    return size;
}

/* Helpers for 4xx TLB management */
#define PPC4XX_TLB_ENTRY_MASK       0x0000003f  /* Mask for 64 TLB entries */

#define PPC4XX_TLBHI_V              0x00000040
#define PPC4XX_TLBHI_E              0x00000020
#define PPC4XX_TLBHI_SIZE_MIN       0
#define PPC4XX_TLBHI_SIZE_MAX       7
#define PPC4XX_TLBHI_SIZE_DEFAULT   1
#define PPC4XX_TLBHI_SIZE_SHIFT     7
#define PPC4XX_TLBHI_SIZE_MASK      0x00000007

#define PPC4XX_TLBLO_EX             0x00000200
#define PPC4XX_TLBLO_WR             0x00000100
#define PPC4XX_TLBLO_ATTR_MASK      0x000000FF
#define PPC4XX_TLBLO_RPN_MASK       0xFFFFFC00

void helper_store_40x_pid(CPUPPCState *env, target_ulong val)
{
    if (env->spr[SPR_40x_PID] != val) {
        env->spr[SPR_40x_PID] = val;
        env->tlb_need_flush |= TLB_NEED_LOCAL_FLUSH;
    }
}

target_ulong helper_4xx_tlbre_hi(CPUPPCState *env, target_ulong entry)
{
    ppcemb_tlb_t *tlb;
    target_ulong ret;
    int size;

    entry &= PPC4XX_TLB_ENTRY_MASK;
    tlb = &env->tlb.tlbe[entry];
    ret = tlb->EPN;
    if (tlb->prot & PAGE_VALID) {
        ret |= PPC4XX_TLBHI_V;
    }
    size = booke_page_size_to_tlb(tlb->size);
    if (size < PPC4XX_TLBHI_SIZE_MIN || size > PPC4XX_TLBHI_SIZE_MAX) {
        size = PPC4XX_TLBHI_SIZE_DEFAULT;
    }
    ret |= size << PPC4XX_TLBHI_SIZE_SHIFT;
    helper_store_40x_pid(env, tlb->PID);
    return ret;
}

target_ulong helper_4xx_tlbre_lo(CPUPPCState *env, target_ulong entry)
{
    ppcemb_tlb_t *tlb;
    target_ulong ret;

    entry &= PPC4XX_TLB_ENTRY_MASK;
    tlb = &env->tlb.tlbe[entry];
    ret = tlb->RPN;
    if (tlb->prot & PAGE_EXEC) {
        ret |= PPC4XX_TLBLO_EX;
    }
    if (tlb->prot & PAGE_WRITE) {
        ret |= PPC4XX_TLBLO_WR;
    }
    return ret;
}

void helper_4xx_tlbwe_hi(CPUPPCState *env, target_ulong entry,
                         target_ulong val)
{
    CPUState *cs = env_cpu(env);
    ppcemb_tlb_t *tlb;
    target_ulong page, end;

    qemu_log_mask(CPU_LOG_MMU, "%s entry %d val " TARGET_FMT_lx "\n",
                  __func__, (int)entry, val);
    entry &= PPC4XX_TLB_ENTRY_MASK;
    tlb = &env->tlb.tlbe[entry];
    /* Invalidate previous TLB (if it's valid) */
    if (tlb->prot & PAGE_VALID) {
        end = tlb->EPN + tlb->size;
        qemu_log_mask(CPU_LOG_MMU, "%s: invalidate old TLB %d start "
                      TARGET_FMT_lx " end " TARGET_FMT_lx "\n", __func__,
                      (int)entry, tlb->EPN, end);
        for (page = tlb->EPN; page < end; page += TARGET_PAGE_SIZE) {
            tlb_flush_page(cs, page);
        }
    }
    tlb->size = booke_tlb_to_page_size((val >> PPC4XX_TLBHI_SIZE_SHIFT)
                                       & PPC4XX_TLBHI_SIZE_MASK);
    /*
     * We cannot handle TLB size < TARGET_PAGE_SIZE.
     * If this ever occurs, we should implement TARGET_PAGE_BITS_VARY
     */
    if ((val & PPC4XX_TLBHI_V) && tlb->size < TARGET_PAGE_SIZE) {
        cpu_abort(cs, "TLB size " TARGET_FMT_lu " < %u "
                  "is not supported (%d)\n"
                  "Please implement TARGET_PAGE_BITS_VARY\n",
                  tlb->size, TARGET_PAGE_SIZE, (int)((val >> 7) & 0x7));
    }
    tlb->EPN = val & ~(tlb->size - 1);
    if (val & PPC4XX_TLBHI_V) {
        tlb->prot |= PAGE_VALID;
        if (val & PPC4XX_TLBHI_E) {
            /* XXX: TO BE FIXED */
            cpu_abort(cs,
                      "Little-endian TLB entries are not supported for now\n");
        }
    } else {
        tlb->prot &= ~PAGE_VALID;
    }
    tlb->PID = env->spr[SPR_40x_PID]; /* PID */
    qemu_log_mask(CPU_LOG_MMU, "%s: set up TLB %d RPN " TARGET_FMT_plx
                  " EPN " TARGET_FMT_lx " size " TARGET_FMT_lx
                  " prot %c%c%c%c PID %d\n", __func__,
                  (int)entry, tlb->RPN, tlb->EPN, tlb->size,
                  tlb->prot & PAGE_READ ? 'r' : '-',
                  tlb->prot & PAGE_WRITE ? 'w' : '-',
                  tlb->prot & PAGE_EXEC ? 'x' : '-',
                  tlb->prot & PAGE_VALID ? 'v' : '-', (int)tlb->PID);
    /* Invalidate new TLB (if valid) */
    if (tlb->prot & PAGE_VALID) {
        end = tlb->EPN + tlb->size;
        qemu_log_mask(CPU_LOG_MMU, "%s: invalidate TLB %d start "
                      TARGET_FMT_lx " end " TARGET_FMT_lx "\n", __func__,
                      (int)entry, tlb->EPN, end);
        for (page = tlb->EPN; page < end; page += TARGET_PAGE_SIZE) {
            tlb_flush_page(cs, page);
        }
    }
}

void helper_4xx_tlbwe_lo(CPUPPCState *env, target_ulong entry,
                         target_ulong val)
{
    ppcemb_tlb_t *tlb;

    qemu_log_mask(CPU_LOG_MMU, "%s entry %i val " TARGET_FMT_lx "\n",
                  __func__, (int)entry, val);
    entry &= PPC4XX_TLB_ENTRY_MASK;
    tlb = &env->tlb.tlbe[entry];
    tlb->attr = val & PPC4XX_TLBLO_ATTR_MASK;
    tlb->RPN = val & PPC4XX_TLBLO_RPN_MASK;
    tlb->prot = PAGE_READ;
    if (val & PPC4XX_TLBLO_EX) {
        tlb->prot |= PAGE_EXEC;
    }
    if (val & PPC4XX_TLBLO_WR) {
        tlb->prot |= PAGE_WRITE;
    }
    qemu_log_mask(CPU_LOG_MMU, "%s: set up TLB %d RPN " TARGET_FMT_plx
                  " EPN " TARGET_FMT_lx
                  " size " TARGET_FMT_lx " prot %c%c%c%c PID %d\n", __func__,
                  (int)entry, tlb->RPN, tlb->EPN, tlb->size,
                  tlb->prot & PAGE_READ ? 'r' : '-',
                  tlb->prot & PAGE_WRITE ? 'w' : '-',
                  tlb->prot & PAGE_EXEC ? 'x' : '-',
                  tlb->prot & PAGE_VALID ? 'v' : '-', (int)tlb->PID);

    env->tlb_need_flush |= TLB_NEED_LOCAL_FLUSH;
}

target_ulong helper_4xx_tlbsx(CPUPPCState *env, target_ulong address)
{
    return ppcemb_tlb_search(env, address, env->spr[SPR_40x_PID]);
}

/* PowerPC 440 TLB management */
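/*
 * Loose summary of the three-word 440 TLB entry format handled below
 * (see the 440 user's manual for the authoritative layout): word 0
 * carries the EPN, the page size field and the valid bit, word 1 the
 * RPN, and word 2 the storage attributes plus the user and supervisor
 * access-control bits.  The helper keeps one of the two permission sets
 * shifted left by 4 inside tlb->prot so that both fit in a single field.
 */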
void helper_440_tlbwe(CPUPPCState *env, uint32_t word, target_ulong entry,
                      target_ulong value)
{
    ppcemb_tlb_t *tlb;
    target_ulong EPN, RPN, size;
    int do_flush_tlbs;

    qemu_log_mask(CPU_LOG_MMU, "%s word %d entry %d value " TARGET_FMT_lx "\n",
                  __func__, word, (int)entry, value);
    do_flush_tlbs = 0;
    entry &= 0x3F;
    tlb = &env->tlb.tlbe[entry];
    switch (word) {
    default:
        /* Just here to please gcc */
    case 0:
        EPN = value & 0xFFFFFC00;
        if ((tlb->prot & PAGE_VALID) && EPN != tlb->EPN) {
            do_flush_tlbs = 1;
        }
        tlb->EPN = EPN;
        size = booke_tlb_to_page_size((value >> 4) & 0xF);
        if ((tlb->prot & PAGE_VALID) && tlb->size < size) {
            do_flush_tlbs = 1;
        }
        tlb->size = size;
        tlb->attr &= ~0x1;
        tlb->attr |= (value >> 8) & 1;
        if (value & 0x200) {
            tlb->prot |= PAGE_VALID;
        } else {
            if (tlb->prot & PAGE_VALID) {
                tlb->prot &= ~PAGE_VALID;
                do_flush_tlbs = 1;
            }
        }
        tlb->PID = env->spr[SPR_440_MMUCR] & 0x000000FF;
        if (do_flush_tlbs) {
            tlb_flush(env_cpu(env));
        }
        break;
    case 1:
        RPN = value & 0xFFFFFC0F;
        if ((tlb->prot & PAGE_VALID) && tlb->RPN != RPN) {
            tlb_flush(env_cpu(env));
        }
        tlb->RPN = RPN;
        break;
    case 2:
        tlb->attr = (tlb->attr & 0x1) | (value & 0x0000FF00);
        tlb->prot = tlb->prot & PAGE_VALID;
        if (value & 0x1) {
            tlb->prot |= PAGE_READ << 4;
        }
        if (value & 0x2) {
            tlb->prot |= PAGE_WRITE << 4;
        }
        if (value & 0x4) {
            tlb->prot |= PAGE_EXEC << 4;
        }
        if (value & 0x8) {
            tlb->prot |= PAGE_READ;
        }
        if (value & 0x10) {
            tlb->prot |= PAGE_WRITE;
        }
        if (value & 0x20) {
            tlb->prot |= PAGE_EXEC;
        }
        break;
    }
}

target_ulong helper_440_tlbre(CPUPPCState *env, uint32_t word,
                              target_ulong entry)
{
    ppcemb_tlb_t *tlb;
    target_ulong ret;
    int size;

    entry &= 0x3F;
    tlb = &env->tlb.tlbe[entry];
    switch (word) {
    default:
        /* Just here to please gcc */
    case 0:
        ret = tlb->EPN;
        size = booke_page_size_to_tlb(tlb->size);
        if (size < 0 || size > 0xF) {
            size = 1;
        }
        ret |= size << 4;
        if (tlb->attr & 0x1) {
            ret |= 0x100;
        }
        if (tlb->prot & PAGE_VALID) {
            ret |= 0x200;
        }
        env->spr[SPR_440_MMUCR] &= ~0x000000FF;
        env->spr[SPR_440_MMUCR] |= tlb->PID;
        break;
    case 1:
        ret = tlb->RPN;
        break;
    case 2:
        ret = tlb->attr & ~0x1;
        if (tlb->prot & (PAGE_READ << 4)) {
            ret |= 0x1;
        }
        if (tlb->prot & (PAGE_WRITE << 4)) {
            ret |= 0x2;
        }
        if (tlb->prot & (PAGE_EXEC << 4)) {
            ret |= 0x4;
        }
        if (tlb->prot & PAGE_READ) {
            ret |= 0x8;
        }
        if (tlb->prot & PAGE_WRITE) {
            ret |= 0x10;
        }
        if (tlb->prot & PAGE_EXEC) {
            ret |= 0x20;
        }
        break;
    }
    return ret;
}

target_ulong helper_440_tlbsx(CPUPPCState *env, target_ulong address)
{
    return ppcemb_tlb_search(env, address, env->spr[SPR_440_MMUCR] & 0xFF);
}

/* PowerPC BookE 2.06 TLB management */
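
/*
 * Informal recap of the MAS registers used below (the e500 core manual
 * is the authoritative reference): MAS0 selects the TLB array (TLBSEL)
 * and the entry/way within it (ESEL, with NV holding the next victim),
 * MAS1 holds the valid and IPROT bits plus the TID and TSIZE fields,
 * MAS2 holds the EPN and the WIMGE attributes, MAS3 the low RPN bits and
 * the permission bits, and MAS7 the upper bits of the RPN.  tlbwe and
 * tlbre move entries between these registers and env->tlb.tlbm.
 */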

static ppcmas_tlb_t *booke206_cur_tlb(CPUPPCState *env)
{
    uint32_t tlbncfg = 0;
    int esel = (env->spr[SPR_BOOKE_MAS0] & MAS0_ESEL_MASK) >> MAS0_ESEL_SHIFT;
    int ea = (env->spr[SPR_BOOKE_MAS2] & MAS2_EPN_MASK);
    int tlb;

    tlb = (env->spr[SPR_BOOKE_MAS0] & MAS0_TLBSEL_MASK) >> MAS0_TLBSEL_SHIFT;
    tlbncfg = env->spr[SPR_BOOKE_TLB0CFG + tlb];

    if ((tlbncfg & TLBnCFG_HES) && (env->spr[SPR_BOOKE_MAS0] & MAS0_HES)) {
        cpu_abort(env_cpu(env), "we don't support HES yet\n");
    }

    return booke206_get_tlbm(env, tlb, ea, esel);
}

void helper_booke_setpid(CPUPPCState *env, uint32_t pidn, target_ulong pid)
{
    env->spr[pidn] = pid;
    /* changing PIDs means we're in a different address space now */
    tlb_flush(env_cpu(env));
}

void helper_booke_set_eplc(CPUPPCState *env, target_ulong val)
{
    env->spr[SPR_BOOKE_EPLC] = val & EPID_MASK;
    tlb_flush_by_mmuidx(env_cpu(env), 1 << PPC_TLB_EPID_LOAD);
}

void helper_booke_set_epsc(CPUPPCState *env, target_ulong val)
{
    env->spr[SPR_BOOKE_EPSC] = val & EPID_MASK;
    tlb_flush_by_mmuidx(env_cpu(env), 1 << PPC_TLB_EPID_STORE);
}

static inline void flush_page(CPUPPCState *env, ppcmas_tlb_t *tlb)
{
    if (booke206_tlb_to_page_size(env, tlb) == TARGET_PAGE_SIZE) {
        tlb_flush_page(env_cpu(env), tlb->mas2 & MAS2_EPN_MASK);
    } else {
        tlb_flush(env_cpu(env));
    }
}

void helper_booke206_tlbwe(CPUPPCState *env)
{
    uint32_t tlbncfg, tlbn;
    ppcmas_tlb_t *tlb;
    uint32_t size_tlb, size_ps;
    target_ulong mask;

    switch (env->spr[SPR_BOOKE_MAS0] & MAS0_WQ_MASK) {
    case MAS0_WQ_ALWAYS:
        /* good to go, write that entry */
        break;
    case MAS0_WQ_COND:
        /* XXX check if reserved */
        if (0) {
            return;
        }
        break;
    case MAS0_WQ_CLR_RSRV:
        /* XXX clear entry */
        return;
    default:
        /* no idea what to do */
        return;
    }

    if (((env->spr[SPR_BOOKE_MAS0] & MAS0_ATSEL) == MAS0_ATSEL_LRAT) &&
        !FIELD_EX64(env->msr, MSR, GS)) {
        /* XXX we don't support direct LRAT setting yet */
        fprintf(stderr, "cpu: don't support LRAT setting yet\n");
        return;
    }

    tlbn = (env->spr[SPR_BOOKE_MAS0] & MAS0_TLBSEL_MASK) >> MAS0_TLBSEL_SHIFT;
    tlbncfg = env->spr[SPR_BOOKE_TLB0CFG + tlbn];

    tlb = booke206_cur_tlb(env);

    if (!tlb) {
        raise_exception_err_ra(env, POWERPC_EXCP_PROGRAM,
                               POWERPC_EXCP_INVAL |
                               POWERPC_EXCP_INVAL_INVAL, GETPC());
    }

    /* check that we support the targeted size */
    size_tlb = (env->spr[SPR_BOOKE_MAS1] & MAS1_TSIZE_MASK) >> MAS1_TSIZE_SHIFT;
    size_ps = booke206_tlbnps(env, tlbn);
    if ((env->spr[SPR_BOOKE_MAS1] & MAS1_VALID) && (tlbncfg & TLBnCFG_AVAIL) &&
        !(size_ps & (1 << size_tlb))) {
        raise_exception_err_ra(env, POWERPC_EXCP_PROGRAM,
                               POWERPC_EXCP_INVAL |
                               POWERPC_EXCP_INVAL_INVAL, GETPC());
    }

    if (FIELD_EX64(env->msr, MSR, GS)) {
        cpu_abort(env_cpu(env), "missing HV implementation\n");
    }

    if (tlb->mas1 & MAS1_VALID) {
        /*
         * Invalidate the page in QEMU TLB if it was a valid entry.
         *
         * In "PowerPC e500 Core Family Reference Manual, Rev. 1",
         * Section "12.4.2 TLB Write Entry (tlbwe) Instruction":
         * (https://www.nxp.com/docs/en/reference-manual/E500CORERM.pdf)
         *
         * "Note that when an L2 TLB entry is written, it may be displacing an
         * already valid entry in the same L2 TLB location (a victim). If a
         * valid L1 TLB entry corresponds to the L2 MMU victim entry, that L1
         * TLB entry is automatically invalidated."
         */
        flush_page(env, tlb);
    }

    tlb->mas7_3 = ((uint64_t)env->spr[SPR_BOOKE_MAS7] << 32) |
                  env->spr[SPR_BOOKE_MAS3];
    tlb->mas1 = env->spr[SPR_BOOKE_MAS1];

    if ((env->spr[SPR_MMUCFG] & MMUCFG_MAVN) == MMUCFG_MAVN_V2) {
        /* For a TLB that has a fixed size, TSIZE is ignored with MAV2 */
        booke206_fixed_size_tlbn(env, tlbn, tlb);
    } else {
        if (!(tlbncfg & TLBnCFG_AVAIL)) {
            /* force !AVAIL TLB entries to correct page size */
            tlb->mas1 &= ~MAS1_TSIZE_MASK;
            /* XXX can be configured in MMUCSR0 */
            tlb->mas1 |= (tlbncfg & TLBnCFG_MINSIZE) >> 12;
        }
    }

    /* Make a mask from TLB size to discard invalid bits in EPN field */
    mask = ~(booke206_tlb_to_page_size(env, tlb) - 1);
    /* Add a mask for page attributes */
    mask |= MAS2_ACM | MAS2_VLE | MAS2_W | MAS2_I | MAS2_M | MAS2_G | MAS2_E;

    if (!FIELD_EX64(env->msr, MSR, CM)) {
        /*
         * Executing a tlbwe instruction in 32-bit mode will set bits
         * 0:31 of the TLB EPN field to zero.
         */
        mask &= 0xffffffff;
    }

    tlb->mas2 = env->spr[SPR_BOOKE_MAS2] & mask;

    if (!(tlbncfg & TLBnCFG_IPROT)) {
        /* no IPROT supported by TLB */
        tlb->mas1 &= ~MAS1_IPROT;
    }

    flush_page(env, tlb);
}

static inline void booke206_tlb_to_mas(CPUPPCState *env, ppcmas_tlb_t *tlb)
{
    int tlbn = booke206_tlbm_to_tlbn(env, tlb);
    int way = booke206_tlbm_to_way(env, tlb);

    env->spr[SPR_BOOKE_MAS0] = tlbn << MAS0_TLBSEL_SHIFT;
    env->spr[SPR_BOOKE_MAS0] |= way << MAS0_ESEL_SHIFT;
    env->spr[SPR_BOOKE_MAS0] |= env->last_way << MAS0_NV_SHIFT;

    env->spr[SPR_BOOKE_MAS1] = tlb->mas1;
    env->spr[SPR_BOOKE_MAS2] = tlb->mas2;
    env->spr[SPR_BOOKE_MAS3] = tlb->mas7_3;
    env->spr[SPR_BOOKE_MAS7] = tlb->mas7_3 >> 32;
}

void helper_booke206_tlbre(CPUPPCState *env)
{
    ppcmas_tlb_t *tlb = NULL;

    tlb = booke206_cur_tlb(env);
    if (!tlb) {
        env->spr[SPR_BOOKE_MAS1] = 0;
    } else {
        booke206_tlb_to_mas(env, tlb);
    }
}

void helper_booke206_tlbsx(CPUPPCState *env, target_ulong address)
{
    ppcmas_tlb_t *tlb = NULL;
    int i, j;
    hwaddr raddr;
    uint32_t spid, sas;

    spid = (env->spr[SPR_BOOKE_MAS6] & MAS6_SPID_MASK) >> MAS6_SPID_SHIFT;
    sas = env->spr[SPR_BOOKE_MAS6] & MAS6_SAS;

    for (i = 0; i < BOOKE206_MAX_TLBN; i++) {
        int ways = booke206_tlb_ways(env, i);

        for (j = 0; j < ways; j++) {
            tlb = booke206_get_tlbm(env, i, address, j);

            if (!tlb) {
                continue;
            }

            if (ppcmas_tlb_check(env, tlb, &raddr, address, spid)) {
                continue;
            }

            if (sas != ((tlb->mas1 & MAS1_TS) >> MAS1_TS_SHIFT)) {
                continue;
            }

            booke206_tlb_to_mas(env, tlb);
            return;
        }
    }

    /* no entry found, fill with defaults */
    env->spr[SPR_BOOKE_MAS0] = env->spr[SPR_BOOKE_MAS4] & MAS4_TLBSELD_MASK;
    env->spr[SPR_BOOKE_MAS1] = env->spr[SPR_BOOKE_MAS4] & MAS4_TSIZED_MASK;
    env->spr[SPR_BOOKE_MAS2] = env->spr[SPR_BOOKE_MAS4] & MAS4_WIMGED_MASK;
    env->spr[SPR_BOOKE_MAS3] = 0;
    env->spr[SPR_BOOKE_MAS7] = 0;

    if (env->spr[SPR_BOOKE_MAS6] & MAS6_SAS) {
        env->spr[SPR_BOOKE_MAS1] |= MAS1_TS;
    }

    env->spr[SPR_BOOKE_MAS1] |= (env->spr[SPR_BOOKE_MAS6] >> 16)
                                << MAS1_TID_SHIFT;
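
    /*
     * On a miss the MAS registers are left pointing at a replacement
     * candidate: last_way advances round-robin over the ways of TLB0 and
     * is reported both as ESEL and as the next-victim (NV) hint below.
     */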
    /* next victim logic */
    env->spr[SPR_BOOKE_MAS0] |= env->last_way << MAS0_ESEL_SHIFT;
    env->last_way++;
    env->last_way &= booke206_tlb_ways(env, 0) - 1;
    env->spr[SPR_BOOKE_MAS0] |= env->last_way << MAS0_NV_SHIFT;
}

static inline void booke206_invalidate_ea_tlb(CPUPPCState *env, int tlbn,
                                              vaddr ea)
{
    int i;
    int ways = booke206_tlb_ways(env, tlbn);
    target_ulong mask;

    for (i = 0; i < ways; i++) {
        ppcmas_tlb_t *tlb = booke206_get_tlbm(env, tlbn, ea, i);
        if (!tlb) {
            continue;
        }
        mask = ~(booke206_tlb_to_page_size(env, tlb) - 1);
        if (((tlb->mas2 & MAS2_EPN_MASK) == (ea & mask)) &&
            !(tlb->mas1 & MAS1_IPROT)) {
            tlb->mas1 &= ~MAS1_VALID;
        }
    }
}

void helper_booke206_tlbivax(CPUPPCState *env, target_ulong address)
{
    CPUState *cs;

    if (address & 0x4) {
        /* flush all entries */
        if (address & 0x8) {
            /* flush all of TLB1 */
            booke206_flush_tlb(env, BOOKE206_FLUSH_TLB1, 1);
        } else {
            /* flush all of TLB0 */
            booke206_flush_tlb(env, BOOKE206_FLUSH_TLB0, 0);
        }
        return;
    }

    if (address & 0x8) {
        /* flush TLB1 entries */
        booke206_invalidate_ea_tlb(env, 1, address);
        CPU_FOREACH(cs) {
            tlb_flush(cs);
        }
    } else {
        /* flush TLB0 entries */
        booke206_invalidate_ea_tlb(env, 0, address);
        CPU_FOREACH(cs) {
            tlb_flush_page(cs, address & MAS2_EPN_MASK);
        }
    }
}

void helper_booke206_tlbilx0(CPUPPCState *env, target_ulong address)
{
    /* XXX missing LPID handling */
    booke206_flush_tlb(env, -1, 1);
}

void helper_booke206_tlbilx1(CPUPPCState *env, target_ulong address)
{
    int i, j;
    int tid = (env->spr[SPR_BOOKE_MAS6] & MAS6_SPID);
    ppcmas_tlb_t *tlb = env->tlb.tlbm;
    int tlb_size;

    /* XXX missing LPID handling */
    for (i = 0; i < BOOKE206_MAX_TLBN; i++) {
        tlb_size = booke206_tlb_size(env, i);
        for (j = 0; j < tlb_size; j++) {
            if (!(tlb[j].mas1 & MAS1_IPROT) &&
                ((tlb[j].mas1 & MAS1_TID_MASK) == tid)) {
                tlb[j].mas1 &= ~MAS1_VALID;
            }
        }
        tlb += booke206_tlb_size(env, i);
    }
    tlb_flush(env_cpu(env));
}

void helper_booke206_tlbilx3(CPUPPCState *env, target_ulong address)
{
    int i, j;
    ppcmas_tlb_t *tlb;
    int tid = (env->spr[SPR_BOOKE_MAS6] & MAS6_SPID);
    int pid = tid >> MAS6_SPID_SHIFT;
    int sgs = env->spr[SPR_BOOKE_MAS5] & MAS5_SGS;
    int ind = (env->spr[SPR_BOOKE_MAS6] & MAS6_SIND) ? MAS1_IND : 0;
    /* XXX check for unsupported isize and raise an invalid opcode then */
    int size = env->spr[SPR_BOOKE_MAS6] & MAS6_ISIZE_MASK;
    /* XXX implement MAV2 handling */
    bool mav2 = false;

    /* XXX missing LPID handling */
    /* flush by pid and ea */
    for (i = 0; i < BOOKE206_MAX_TLBN; i++) {
        int ways = booke206_tlb_ways(env, i);

        for (j = 0; j < ways; j++) {
            tlb = booke206_get_tlbm(env, i, address, j);
            if (!tlb) {
                continue;
            }
            if ((ppcmas_tlb_check(env, tlb, NULL, address, pid) != 0) ||
                (tlb->mas1 & MAS1_IPROT) ||
                ((tlb->mas1 & MAS1_IND) != ind) ||
                ((tlb->mas8 & MAS8_TGS) != sgs)) {
                continue;
            }
            if (mav2 && ((tlb->mas1 & MAS1_TSIZE_MASK) != size)) {
                /* XXX only check when MMUCFG[TWC] || TLBnCFG[HES] */
                continue;
            }
            /* XXX e500mc doesn't match SAS, but other cores might */
            tlb->mas1 &= ~MAS1_VALID;
        }
    }
    tlb_flush(env_cpu(env));
}

void helper_booke206_tlbflush(CPUPPCState *env, target_ulong type)
{
    int flags = 0;

    if (type & 2) {
        flags |= BOOKE206_FLUSH_TLB1;
    }

    if (type & 4) {
        flags |= BOOKE206_FLUSH_TLB0;
    }

    booke206_flush_tlb(env, flags, 1);
}

void helper_check_tlb_flush_local(CPUPPCState *env)
{
    check_tlb_flush(env, false);
}

void helper_check_tlb_flush_global(CPUPPCState *env)
{
    check_tlb_flush(env, true);
}

bool ppc_cpu_tlb_fill(CPUState *cs, vaddr eaddr, int size,
                      MMUAccessType access_type, int mmu_idx,
                      bool probe, uintptr_t retaddr)
{
    PowerPCCPU *cpu = POWERPC_CPU(cs);
    hwaddr raddr;
    int page_size, prot;

    if (ppc_xlate(cpu, eaddr, access_type, &raddr,
                  &page_size, &prot, mmu_idx, !probe)) {
        tlb_set_page(cs, eaddr & TARGET_PAGE_MASK, raddr & TARGET_PAGE_MASK,
                     prot, mmu_idx, 1UL << page_size);
        return true;
    }
    if (probe) {
        return false;
    }
    raise_exception_err_ra(&cpu->env, cs->exception_index,
                           cpu->env.error_code, retaddr);
}