/*
 * PowerPC MMU, TLB, SLB and BAT emulation helpers for QEMU.
 *
 * Copyright (c) 2003-2007 Jocelyn Mayer
 * Copyright (c) 2013 David Gibson, IBM Corporation
 *
 * This library is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License as published by the Free Software Foundation; either
 * version 2.1 of the License, or (at your option) any later version.
 *
 * This library is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with this library; if not, see <http://www.gnu.org/licenses/>.
 */
#include "qemu/osdep.h"
#include "qemu/units.h"
#include "cpu.h"
#include "exec/exec-all.h"
#include "exec/helper-proto.h"
#include "qemu/error-report.h"
#include "qemu/qemu-print.h"
#include "sysemu/hw_accel.h"
#include "kvm_ppc.h"
#include "mmu-hash64.h"
#include "exec/log.h"
#include "hw/hw.h"
#include "internal.h"
#include "mmu-book3s-v3.h"
#include "helper_regs.h"

/* #define DEBUG_SLB */

#ifdef DEBUG_SLB
#  define LOG_SLB(...) qemu_log_mask(CPU_LOG_MMU, __VA_ARGS__)
#else
#  define LOG_SLB(...) do { } while (0)
#endif

/*
 * SLB handling
 */

static ppc_slb_t *slb_lookup(PowerPCCPU *cpu, target_ulong eaddr)
{
    CPUPPCState *env = &cpu->env;
    uint64_t esid_256M, esid_1T;
    int n;

    LOG_SLB("%s: eaddr " TARGET_FMT_lx "\n", __func__, eaddr);

    esid_256M = (eaddr & SEGMENT_MASK_256M) | SLB_ESID_V;
    esid_1T = (eaddr & SEGMENT_MASK_1T) | SLB_ESID_V;

    for (n = 0; n < cpu->hash64_opts->slb_size; n++) {
        ppc_slb_t *slb = &env->slb[n];

        LOG_SLB("%s: slot %d %016" PRIx64 " %016"
                PRIx64 "\n", __func__, n, slb->esid, slb->vsid);
        /*
         * We check for 1T matches on all MMUs here - if the MMU
         * doesn't have 1T segment support, we will have prevented 1T
         * entries from being inserted in the slbmte code.
         */
        if (((slb->esid == esid_256M) &&
             ((slb->vsid & SLB_VSID_B) == SLB_VSID_B_256M))
            || ((slb->esid == esid_1T) &&
                ((slb->vsid & SLB_VSID_B) == SLB_VSID_B_1T))) {
            return slb;
        }
    }

    return NULL;
}
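
/*
 * Dump every valid SLB entry; slots whose ESID and VSID are both zero
 * are skipped.  (Intended for debug/monitor output.)
 */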
void dump_slb(PowerPCCPU *cpu)
{
    CPUPPCState *env = &cpu->env;
    int i;
    uint64_t slbe, slbv;

    cpu_synchronize_state(CPU(cpu));

    qemu_printf("SLB\tESID\t\t\tVSID\n");
    for (i = 0; i < cpu->hash64_opts->slb_size; i++) {
        slbe = env->slb[i].esid;
        slbv = env->slb[i].vsid;
        if (slbe == 0 && slbv == 0) {
            continue;
        }
        qemu_printf("%d\t0x%016" PRIx64 "\t0x%016" PRIx64 "\n",
                    i, slbe, slbv);
    }
}

void helper_slbia(CPUPPCState *env, uint32_t ih)
{
    PowerPCCPU *cpu = env_archcpu(env);
    int starting_entry;
    int n;

    /*
     * slbia must always flush all TLB (which is equivalent to ERAT in ppc
     * architecture). Matching on SLB_ESID_V is not good enough, because slbmte
     * can overwrite a valid SLB without flushing its lookaside information.
     *
     * It would be possible to keep the TLB in synch with the SLB by flushing
     * when a valid entry is overwritten by slbmte, and therefore slbia would
     * not have to flush unless it evicts a valid SLB entry. However it is
     * expected that slbmte is more common than slbia, and slbia is usually
     * going to evict valid SLB entries, so that tradeoff is unlikely to be a
     * good one.
     *
     * ISA v2.05 introduced IH field with values 0,1,2,6. These all invalidate
     * the same SLB entries (everything but entry 0), but differ in what
     * "lookaside information" is invalidated. TCG can ignore this and flush
     * everything.
     *
     * ISA v3.0 introduced additional values 3,4,7, which change what SLBs are
     * invalidated.
     */

    env->tlb_need_flush |= TLB_NEED_LOCAL_FLUSH;

    starting_entry = 1; /* default for IH=0,1,2,6 */

    if (env->mmu_model == POWERPC_MMU_3_00) {
        switch (ih) {
        case 0x7:
            /* invalidate no SLBs, but all lookaside information */
            return;

        case 0x3:
        case 0x4:
            /* also considers SLB entry 0 */
            starting_entry = 0;
            break;

        case 0x5:
            /* treat undefined values as ih==0, and warn */
            qemu_log_mask(LOG_GUEST_ERROR,
                          "slbia undefined IH field %u.\n", ih);
            break;

        default:
            /* 0,1,2,6 */
            break;
        }
    }

    for (n = starting_entry; n < cpu->hash64_opts->slb_size; n++) {
        ppc_slb_t *slb = &env->slb[n];

        if (!(slb->esid & SLB_ESID_V)) {
            continue;
        }
        if (env->mmu_model == POWERPC_MMU_3_00) {
            if (ih == 0x3 && (slb->vsid & SLB_VSID_C) == 0) {
                /* preserves entries with a class value of 0 */
                continue;
            }
        }

        slb->esid &= ~SLB_ESID_V;
    }
}
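
/*
 * Common implementation of slbie and slbieg: invalidate the SLB entry
 * that translates @addr (if any) and schedule a local or global TLB
 * flush accordingly.
 */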
static void __helper_slbie(CPUPPCState *env, target_ulong addr,
                           target_ulong global)
{
    PowerPCCPU *cpu = env_archcpu(env);
    ppc_slb_t *slb;

    slb = slb_lookup(cpu, addr);
    if (!slb) {
        return;
    }

    if (slb->esid & SLB_ESID_V) {
        slb->esid &= ~SLB_ESID_V;

        /*
         * XXX: given the fact that segment size is 256 MB or 1TB,
         * and we still don't have a tlb_flush_mask(env, n, mask)
         * in QEMU, we just invalidate all TLBs
         */
        env->tlb_need_flush |=
            (global == false ? TLB_NEED_LOCAL_FLUSH : TLB_NEED_GLOBAL_FLUSH);
    }
}

void helper_slbie(CPUPPCState *env, target_ulong addr)
{
    __helper_slbie(env, addr, false);
}

void helper_slbieg(CPUPPCState *env, target_ulong addr)
{
    __helper_slbie(env, addr, true);
}

int ppc_store_slb(PowerPCCPU *cpu, target_ulong slot,
                  target_ulong esid, target_ulong vsid)
{
    CPUPPCState *env = &cpu->env;
    ppc_slb_t *slb = &env->slb[slot];
    const PPCHash64SegmentPageSizes *sps = NULL;
    int i;

    if (slot >= cpu->hash64_opts->slb_size) {
        return -1; /* Bad slot number */
    }
    if (esid & ~(SLB_ESID_ESID | SLB_ESID_V)) {
        return -1; /* Reserved bits set */
    }
    if (vsid & (SLB_VSID_B & ~SLB_VSID_B_1T)) {
        return -1; /* Bad segment size */
    }
    if ((vsid & SLB_VSID_B) && !(ppc_hash64_has(cpu, PPC_HASH64_1TSEG))) {
        return -1; /* 1T segment on MMU that doesn't support it */
    }

    for (i = 0; i < PPC_PAGE_SIZES_MAX_SZ; i++) {
        const PPCHash64SegmentPageSizes *sps1 = &cpu->hash64_opts->sps[i];

        if (!sps1->page_shift) {
            break;
        }

        if ((vsid & SLB_VSID_LLP_MASK) == sps1->slb_enc) {
            sps = sps1;
            break;
        }
    }

    if (!sps) {
        error_report("Bad page size encoding in SLB store: slot "TARGET_FMT_lu
                     " esid 0x"TARGET_FMT_lx" vsid 0x"TARGET_FMT_lx,
                     slot, esid, vsid);
        return -1;
    }

    slb->esid = esid;
    slb->vsid = vsid;
    slb->sps = sps;

    LOG_SLB("%s: " TARGET_FMT_lu " " TARGET_FMT_lx " - " TARGET_FMT_lx
            " => %016" PRIx64 " %016" PRIx64 "\n", __func__, slot, esid, vsid,
            slb->esid, slb->vsid);

    return 0;
}

static int ppc_load_slb_esid(PowerPCCPU *cpu, target_ulong rb,
                             target_ulong *rt)
{
    CPUPPCState *env = &cpu->env;
    int slot = rb & 0xfff;
    ppc_slb_t *slb = &env->slb[slot];

    if (slot >= cpu->hash64_opts->slb_size) {
        return -1;
    }

    *rt = slb->esid;
    return 0;
}

static int ppc_load_slb_vsid(PowerPCCPU *cpu, target_ulong rb,
                             target_ulong *rt)
{
    CPUPPCState *env = &cpu->env;
    int slot = rb & 0xfff;
    ppc_slb_t *slb = &env->slb[slot];

    if (slot >= cpu->hash64_opts->slb_size) {
        return -1;
    }

    *rt = slb->vsid;
    return 0;
}

static int ppc_find_slb_vsid(PowerPCCPU *cpu, target_ulong rb,
                             target_ulong *rt)
{
    CPUPPCState *env = &cpu->env;
    ppc_slb_t *slb;

    if (!msr_is_64bit(env, env->msr)) {
        rb &= 0xffffffff;
    }
    slb = slb_lookup(cpu, rb);
    if (slb == NULL) {
        *rt = (target_ulong)-1ul;
    } else {
        *rt = slb->vsid;
    }
    return 0;
}

void helper_store_slb(CPUPPCState *env, target_ulong rb, target_ulong rs)
{
    PowerPCCPU *cpu = env_archcpu(env);

    if (ppc_store_slb(cpu, rb & 0xfff, rb & ~0xfffULL, rs) < 0) {
        raise_exception_err_ra(env, POWERPC_EXCP_PROGRAM,
                               POWERPC_EXCP_INVAL, GETPC());
    }
}

target_ulong helper_load_slb_esid(CPUPPCState *env, target_ulong rb)
{
    PowerPCCPU *cpu = env_archcpu(env);
    target_ulong rt = 0;

    if (ppc_load_slb_esid(cpu, rb, &rt) < 0) {
        raise_exception_err_ra(env, POWERPC_EXCP_PROGRAM,
                               POWERPC_EXCP_INVAL, GETPC());
    }
    return rt;
}
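
/*
 * slbfee-style lookup: return in RT the VSID of the SLB entry (if any)
 * that translates the effective address in RB; RT is set to all ones
 * when no valid entry matches.
 */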
target_ulong helper_find_slb_vsid(CPUPPCState *env, target_ulong rb)
{
    PowerPCCPU *cpu = env_archcpu(env);
    target_ulong rt = 0;

    if (ppc_find_slb_vsid(cpu, rb, &rt) < 0) {
        raise_exception_err_ra(env, POWERPC_EXCP_PROGRAM,
                               POWERPC_EXCP_INVAL, GETPC());
    }
    return rt;
}

target_ulong helper_load_slb_vsid(CPUPPCState *env, target_ulong rb)
{
    PowerPCCPU *cpu = env_archcpu(env);
    target_ulong rt = 0;

    if (ppc_load_slb_vsid(cpu, rb, &rt) < 0) {
        raise_exception_err_ra(env, POWERPC_EXCP_PROGRAM,
                               POWERPC_EXCP_INVAL, GETPC());
    }
    return rt;
}

/* Check No-Execute or Guarded Storage */
static inline int ppc_hash64_pte_noexec_guard(PowerPCCPU *cpu,
                                              ppc_hash_pte64_t pte)
{
    /* Exec permissions CANNOT take away read or write permissions */
    return (pte.pte1 & HPTE64_R_N) || (pte.pte1 & HPTE64_R_G) ?
        PAGE_READ | PAGE_WRITE : PAGE_READ | PAGE_WRITE | PAGE_EXEC;
}

/* Check Basic Storage Protection */
static int ppc_hash64_pte_prot(PowerPCCPU *cpu,
                               ppc_slb_t *slb, ppc_hash_pte64_t pte)
{
    CPUPPCState *env = &cpu->env;
    unsigned pp, key;
    /*
     * Some pp bit combinations have undefined behaviour, so default
     * to no access in those cases
     */
    int prot = 0;

    key = !!(msr_pr ? (slb->vsid & SLB_VSID_KP)
             : (slb->vsid & SLB_VSID_KS));
    pp = (pte.pte1 & HPTE64_R_PP) | ((pte.pte1 & HPTE64_R_PP0) >> 61);

    if (key == 0) {
        switch (pp) {
        case 0x0:
        case 0x1:
        case 0x2:
            prot = PAGE_READ | PAGE_WRITE | PAGE_EXEC;
            break;

        case 0x3:
        case 0x6:
            prot = PAGE_READ | PAGE_EXEC;
            break;
        }
    } else {
        switch (pp) {
        case 0x0:
        case 0x6:
            break;

        case 0x1:
        case 0x3:
            prot = PAGE_READ | PAGE_EXEC;
            break;

        case 0x2:
            prot = PAGE_READ | PAGE_WRITE | PAGE_EXEC;
            break;
        }
    }

    return prot;
}

/* Check the instruction access permissions specified in the IAMR */
static int ppc_hash64_iamr_prot(PowerPCCPU *cpu, int key)
{
    CPUPPCState *env = &cpu->env;
    int iamr_bits = (env->spr[SPR_IAMR] >> 2 * (31 - key)) & 0x3;

    /*
     * An instruction fetch is permitted if the IAMR bit is 0.
     * If the bit is set, return PAGE_READ | PAGE_WRITE because this bit
     * can only take away EXEC permissions not READ or WRITE permissions.
     * If bit is cleared return PAGE_READ | PAGE_WRITE | PAGE_EXEC since
     * EXEC permissions are allowed.
     */
    return (iamr_bits & 0x1) ? PAGE_READ | PAGE_WRITE :
                               PAGE_READ | PAGE_WRITE | PAGE_EXEC;
}
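
/*
 * Compute the access permissions allowed by Virtual Page Class Key
 * protection (AMR, plus IAMR from ISA v2.07 on) for the key encoded in
 * the PTE.  MMUs without AMR support allow everything.
 */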
static int ppc_hash64_amr_prot(PowerPCCPU *cpu, ppc_hash_pte64_t pte)
{
    CPUPPCState *env = &cpu->env;
    int key, amrbits;
    int prot = PAGE_READ | PAGE_WRITE | PAGE_EXEC;

    /* Only recent MMUs implement Virtual Page Class Key Protection */
    if (!ppc_hash64_has(cpu, PPC_HASH64_AMR)) {
        return prot;
    }

    key = HPTE64_R_KEY(pte.pte1);
    amrbits = (env->spr[SPR_AMR] >> 2 * (31 - key)) & 0x3;

    /* fprintf(stderr, "AMR protection: key=%d AMR=0x%" PRIx64 "\n", key, */
    /*         env->spr[SPR_AMR]); */

    /*
     * A store is permitted if the AMR bit is 0. Remove write
     * protection if it is set.
     */
    if (amrbits & 0x2) {
        prot &= ~PAGE_WRITE;
    }
    /*
     * A load is permitted if the AMR bit is 0. Remove read
     * protection if it is set.
     */
    if (amrbits & 0x1) {
        prot &= ~PAGE_READ;
    }

    switch (env->mmu_model) {
    /*
     * MMU version 2.07 and later also support the IAMR.  Mask prot with
     * what the IAMR allows: ppc_hash64_iamr_prot() omits PAGE_EXEC when
     * the key's IAMR bit denies instruction fetch (clearing it from prot
     * here) and includes it otherwise (leaving prot unchanged).
     */
    case POWERPC_MMU_2_07:
    case POWERPC_MMU_3_00:
        prot &= ppc_hash64_iamr_prot(cpu, key);
        break;
    default:
        break;
    }

    return prot;
}

const ppc_hash_pte64_t *ppc_hash64_map_hptes(PowerPCCPU *cpu,
                                             hwaddr ptex, int n)
{
    hwaddr pte_offset = ptex * HASH_PTE_SIZE_64;
    hwaddr base;
    hwaddr plen = n * HASH_PTE_SIZE_64;
    const ppc_hash_pte64_t *hptes;

    if (cpu->vhyp) {
        PPCVirtualHypervisorClass *vhc =
            PPC_VIRTUAL_HYPERVISOR_GET_CLASS(cpu->vhyp);
        return vhc->map_hptes(cpu->vhyp, ptex, n);
    }
    base = ppc_hash64_hpt_base(cpu);

    if (!base) {
        return NULL;
    }

    hptes = address_space_map(CPU(cpu)->as, base + pte_offset, &plen, false,
                              MEMTXATTRS_UNSPECIFIED);
    if (plen < (n * HASH_PTE_SIZE_64)) {
        hw_error("%s: Unable to map all requested HPTEs\n", __func__);
    }
    return hptes;
}

void ppc_hash64_unmap_hptes(PowerPCCPU *cpu, const ppc_hash_pte64_t *hptes,
                            hwaddr ptex, int n)
{
    if (cpu->vhyp) {
        PPCVirtualHypervisorClass *vhc =
            PPC_VIRTUAL_HYPERVISOR_GET_CLASS(cpu->vhyp);
        vhc->unmap_hptes(cpu->vhyp, hptes, ptex, n);
        return;
    }

    address_space_unmap(CPU(cpu)->as, (void *)hptes, n * HASH_PTE_SIZE_64,
                        false, n * HASH_PTE_SIZE_64);
}

static unsigned hpte_page_shift(const PPCHash64SegmentPageSizes *sps,
                                uint64_t pte0, uint64_t pte1)
{
    int i;

    if (!(pte0 & HPTE64_V_LARGE)) {
        if (sps->page_shift != 12) {
            /* 4kiB page in a non 4kiB segment */
            return 0;
        }
        /* Normal 4kiB page */
        return 12;
    }

    for (i = 0; i < PPC_PAGE_SIZES_MAX_SZ; i++) {
        const PPCHash64PageSize *ps = &sps->enc[i];
        uint64_t mask;

        if (!ps->page_shift) {
            break;
        }

        if (ps->page_shift == 12) {
            /* L bit is set so this can't be a 4kiB page */
            continue;
        }

        mask = ((1ULL << ps->page_shift) - 1) & HPTE64_R_RPN;

        if ((pte1 & mask) == ((uint64_t)ps->pte_enc << HPTE64_R_RPN_SHIFT)) {
            return ps->page_shift;
        }
    }

    return 0; /* Bad page size encoding */
}

static void ppc64_v3_new_to_old_hpte(target_ulong *pte0, target_ulong *pte1)
{
    /* Insert B into pte0 */
    *pte0 = (*pte0 & HPTE64_V_COMMON_BITS) |
            ((*pte1 & HPTE64_R_3_0_SSIZE_MASK) <<
             (HPTE64_V_SSIZE_SHIFT - HPTE64_R_3_0_SSIZE_SHIFT));

    /* Remove B from pte1 */
    *pte1 = *pte1 & ~HPTE64_R_3_0_SSIZE_MASK;
}
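
/*
 * Scan one PTE group for an entry whose first doubleword matches @ptem.
 * On a match, the PTE is copied to *pte, its actual page shift is stored
 * in *pshift and the global PTE index is returned; -1 means no slot in
 * the group matched.
 */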
static hwaddr ppc_hash64_pteg_search(PowerPCCPU *cpu, hwaddr hash,
                                     const PPCHash64SegmentPageSizes *sps,
                                     target_ulong ptem,
                                     ppc_hash_pte64_t *pte, unsigned *pshift)
{
    int i;
    const ppc_hash_pte64_t *pteg;
    target_ulong pte0, pte1;
    target_ulong ptex;

    ptex = (hash & ppc_hash64_hpt_mask(cpu)) * HPTES_PER_GROUP;
    pteg = ppc_hash64_map_hptes(cpu, ptex, HPTES_PER_GROUP);
    if (!pteg) {
        return -1;
    }
    for (i = 0; i < HPTES_PER_GROUP; i++) {
        pte0 = ppc_hash64_hpte0(cpu, pteg, i);
        /*
         * pte0 contains the valid bit and must be read before pte1,
         * otherwise we might see an old pte1 with a new valid bit and
         * thus an inconsistent hpte value
         */
        smp_rmb();
        pte1 = ppc_hash64_hpte1(cpu, pteg, i);

        /* Convert format if necessary */
        if (cpu->env.mmu_model == POWERPC_MMU_3_00 && !cpu->vhyp) {
            ppc64_v3_new_to_old_hpte(&pte0, &pte1);
        }

        /* This compares V, B, H (secondary) and the AVPN */
        if (HPTE64_V_COMPARE(pte0, ptem)) {
            *pshift = hpte_page_shift(sps, pte0, pte1);
            /*
             * If there is no match, ignore the PTE, it could simply
             * be for a different segment size encoding and the
             * architecture specifies we should not match. Linux will
             * potentially leave behind PTEs for the wrong base page
             * size when demoting segments.
             */
            if (*pshift == 0) {
                continue;
            }
            /*
             * We don't do anything with pshift yet as qemu TLB only
             * deals with 4K pages anyway
             */
            pte->pte0 = pte0;
            pte->pte1 = pte1;
            ppc_hash64_unmap_hptes(cpu, pteg, ptex, HPTES_PER_GROUP);
            return ptex + i;
        }
    }
    ppc_hash64_unmap_hptes(cpu, pteg, ptex, HPTES_PER_GROUP);
    /*
     * We didn't find a valid entry.
     */
    return -1;
}

static hwaddr ppc_hash64_htab_lookup(PowerPCCPU *cpu,
                                     ppc_slb_t *slb, target_ulong eaddr,
                                     ppc_hash_pte64_t *pte, unsigned *pshift)
{
    CPUPPCState *env = &cpu->env;
    hwaddr hash, ptex;
    uint64_t vsid, epnmask, epn, ptem;
    const PPCHash64SegmentPageSizes *sps = slb->sps;

    /*
     * The SLB store path should prevent any bad page size encodings
     * getting in there, so:
     */
    assert(sps);

    /* If ISL is set in LPCR we need to clamp the page size to 4K */
    if (env->spr[SPR_LPCR] & LPCR_ISL) {
        /* We assume that when using TCG, 4k is first entry of SPS */
        sps = &cpu->hash64_opts->sps[0];
        assert(sps->page_shift == 12);
    }

    epnmask = ~((1ULL << sps->page_shift) - 1);

    if (slb->vsid & SLB_VSID_B) {
        /* 1TB segment */
        vsid = (slb->vsid & SLB_VSID_VSID) >> SLB_VSID_SHIFT_1T;
        epn = (eaddr & ~SEGMENT_MASK_1T) & epnmask;
        hash = vsid ^ (vsid << 25) ^ (epn >> sps->page_shift);
    } else {
        /* 256M segment */
        vsid = (slb->vsid & SLB_VSID_VSID) >> SLB_VSID_SHIFT;
        epn = (eaddr & ~SEGMENT_MASK_256M) & epnmask;
        hash = vsid ^ (epn >> sps->page_shift);
    }
    ptem = (slb->vsid & SLB_VSID_PTEM) | ((epn >> 16) & HPTE64_V_AVPN);
    ptem |= HPTE64_V_VALID;

    /* Page address translation */
    qemu_log_mask(CPU_LOG_MMU,
                  "htab_base " TARGET_FMT_plx " htab_mask " TARGET_FMT_plx
                  " hash " TARGET_FMT_plx "\n",
                  ppc_hash64_hpt_base(cpu), ppc_hash64_hpt_mask(cpu), hash);

    /* Primary PTEG lookup */
    qemu_log_mask(CPU_LOG_MMU,
                  "0 htab=" TARGET_FMT_plx "/" TARGET_FMT_plx
                  " vsid=" TARGET_FMT_lx " ptem=" TARGET_FMT_lx
                  " hash=" TARGET_FMT_plx "\n",
                  ppc_hash64_hpt_base(cpu), ppc_hash64_hpt_mask(cpu),
                  vsid, ptem, hash);
    ptex = ppc_hash64_pteg_search(cpu, hash, sps, ptem, pte, pshift);

    if (ptex == -1) {
        /* Secondary PTEG lookup */
        ptem |= HPTE64_V_SECONDARY;
        qemu_log_mask(CPU_LOG_MMU,
                      "1 htab=" TARGET_FMT_plx "/" TARGET_FMT_plx
                      " vsid=" TARGET_FMT_lx " api=" TARGET_FMT_lx
                      " hash=" TARGET_FMT_plx "\n", ppc_hash64_hpt_base(cpu),
                      ppc_hash64_hpt_mask(cpu), vsid, ptem, ~hash);

        ptex = ppc_hash64_pteg_search(cpu, ~hash, sps, ptem, pte, pshift);
    }

    return ptex;
}
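
/*
 * Work out the page size of an HPTE without knowing which segment it
 * belongs to, by trying every segment page-size encoding the CPU
 * supports.  Returns 0 if no encoding matches.
 */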
unsigned ppc_hash64_hpte_page_shift_noslb(PowerPCCPU *cpu,
                                          uint64_t pte0, uint64_t pte1)
{
    int i;

    if (!(pte0 & HPTE64_V_LARGE)) {
        return 12;
    }

    /*
     * The encodings in env->sps need to be carefully chosen so that
     * this gives an unambiguous result.
     */
    for (i = 0; i < PPC_PAGE_SIZES_MAX_SZ; i++) {
        const PPCHash64SegmentPageSizes *sps = &cpu->hash64_opts->sps[i];
        unsigned shift;

        if (!sps->page_shift) {
            break;
        }

        shift = hpte_page_shift(sps, pte0, pte1);
        if (shift) {
            return shift;
        }
    }

    return 0;
}

static bool ppc_hash64_use_vrma(CPUPPCState *env)
{
    switch (env->mmu_model) {
    case POWERPC_MMU_3_00:
        /*
         * ISAv3.0 (POWER9) always uses VRMA, the VPM0 field and RMOR
         * register no longer exist
         */
        return true;

    default:
        return !!(env->spr[SPR_LPCR] & LPCR_VPM0);
    }
}

static void ppc_hash64_set_isi(CPUState *cs, uint64_t error_code)
{
    CPUPPCState *env = &POWERPC_CPU(cs)->env;
    bool vpm;

    if (msr_ir) {
        vpm = !!(env->spr[SPR_LPCR] & LPCR_VPM1);
    } else {
        vpm = ppc_hash64_use_vrma(env);
    }
    if (vpm && !msr_hv) {
        cs->exception_index = POWERPC_EXCP_HISI;
    } else {
        cs->exception_index = POWERPC_EXCP_ISI;
    }
    env->error_code = error_code;
}

static void ppc_hash64_set_dsi(CPUState *cs, uint64_t dar, uint64_t dsisr)
{
    CPUPPCState *env = &POWERPC_CPU(cs)->env;
    bool vpm;

    if (msr_dr) {
        vpm = !!(env->spr[SPR_LPCR] & LPCR_VPM1);
    } else {
        vpm = ppc_hash64_use_vrma(env);
    }
    if (vpm && !msr_hv) {
        cs->exception_index = POWERPC_EXCP_HDSI;
        env->spr[SPR_HDAR] = dar;
        env->spr[SPR_HDSISR] = dsisr;
    } else {
        cs->exception_index = POWERPC_EXCP_DSI;
        env->spr[SPR_DAR] = dar;
        env->spr[SPR_DSISR] = dsisr;
    }
    env->error_code = 0;
}

static void ppc_hash64_set_r(PowerPCCPU *cpu, hwaddr ptex, uint64_t pte1)
{
    /* The R bit lives in byte 14 of the big-endian HPTE (bits 15:8 of pte1) */
    hwaddr base, offset = ptex * HASH_PTE_SIZE_64 + 14;

    if (cpu->vhyp) {
        PPCVirtualHypervisorClass *vhc =
            PPC_VIRTUAL_HYPERVISOR_GET_CLASS(cpu->vhyp);
        vhc->hpte_set_r(cpu->vhyp, ptex, pte1);
        return;
    }
    base = ppc_hash64_hpt_base(cpu);

    /* The HW performs a non-atomic byte update */
    stb_phys(CPU(cpu)->as, base + offset, ((pte1 >> 8) & 0xff) | 0x01);
}

static void ppc_hash64_set_c(PowerPCCPU *cpu, hwaddr ptex, uint64_t pte1)
{
    /* The C bit lives in byte 15 of the big-endian HPTE (bits 7:0 of pte1) */
    hwaddr base, offset = ptex * HASH_PTE_SIZE_64 + 15;

    if (cpu->vhyp) {
        PPCVirtualHypervisorClass *vhc =
            PPC_VIRTUAL_HYPERVISOR_GET_CLASS(cpu->vhyp);
        vhc->hpte_set_c(cpu->vhyp, ptex, pte1);
        return;
    }
    base = ppc_hash64_hpt_base(cpu);

    /* The HW performs a non-atomic byte update */
    stb_phys(CPU(cpu)->as, base + offset, (pte1 & 0xff) | 0x80);
}
static target_ulong rmls_limit(PowerPCCPU *cpu)
{
    CPUPPCState *env = &cpu->env;
    /*
     * In theory the meanings of RMLS values are implementation
     * dependent. In practice, this seems to have been the set from
     * POWER4+..POWER8, and RMLS is no longer supported in POWER9.
     *
     * Unsupported values mean the OS has shot itself in the
     * foot. Return a 0-sized RMA in this case, which we expect
     * to trigger an immediate DSI or ISI
     */
    static const target_ulong rma_sizes[16] = {
        [0] = 256 * GiB,
        [1] = 16 * GiB,
        [2] = 1 * GiB,
        [3] = 64 * MiB,
        [4] = 256 * MiB,
        [7] = 128 * MiB,
        [8] = 32 * MiB,
    };
    target_ulong rmls = (env->spr[SPR_LPCR] & LPCR_RMLS) >> LPCR_RMLS_SHIFT;

    return rma_sizes[rmls];
}

static int build_vrma_slbe(PowerPCCPU *cpu, ppc_slb_t *slb)
{
    CPUPPCState *env = &cpu->env;
    target_ulong lpcr = env->spr[SPR_LPCR];
    uint32_t vrmasd = (lpcr & LPCR_VRMASD) >> LPCR_VRMASD_SHIFT;
    target_ulong vsid = SLB_VSID_VRMA | ((vrmasd << 4) & SLB_VSID_LLP_MASK);
    int i;

    for (i = 0; i < PPC_PAGE_SIZES_MAX_SZ; i++) {
        const PPCHash64SegmentPageSizes *sps = &cpu->hash64_opts->sps[i];

        if (!sps->page_shift) {
            break;
        }

        if ((vsid & SLB_VSID_LLP_MASK) == sps->slb_enc) {
            slb->esid = SLB_ESID_V;
            slb->vsid = vsid;
            slb->sps = sps;
            return 0;
        }
    }

    error_report("Bad page size encoding in LPCR[VRMASD]; LPCR=0x"
                 TARGET_FMT_lx, lpcr);

    return -1;
}
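
/*
 * Full hash-MMU translation for a guest access: handle real mode (HRMOR,
 * VRMA or RMLS-bounded RMO), otherwise resolve the segment via the SLB,
 * locate the HPTE, apply the protection checks, update the R/C bits and
 * install the translation in the QEMU TLB.  Returns 0 on success, or 1
 * after raising the appropriate interrupt.
 */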
int ppc_hash64_handle_mmu_fault(PowerPCCPU *cpu, vaddr eaddr,
                                int rwx, int mmu_idx)
{
    CPUState *cs = CPU(cpu);
    CPUPPCState *env = &cpu->env;
    ppc_slb_t vrma_slbe;
    ppc_slb_t *slb;
    unsigned apshift;
    hwaddr ptex;
    ppc_hash_pte64_t pte;
    int exec_prot, pp_prot, amr_prot, prot;
    MMUAccessType access_type;
    int need_prot;
    hwaddr raddr;

    assert((rwx == 0) || (rwx == 1) || (rwx == 2));
    access_type = rwx;

    /*
     * Note on LPCR usage: 970 uses HID4, but our special variant of
     * store_spr copies relevant fields into env->spr[SPR_LPCR].
     * Similarly we filter unimplemented bits when storing into LPCR
     * depending on the MMU version. This code can thus just use the
     * LPCR "as-is".
     */

    /* 1. Handle real mode accesses */
    if (access_type == MMU_INST_FETCH ? !msr_ir : !msr_dr) {
        /*
         * Translation is supposedly "off", but in real mode the top 4
         * effective address bits are (mostly) ignored
         */
        raddr = eaddr & 0x0FFFFFFFFFFFFFFFULL;

        if (cpu->vhyp) {
            /*
             * In virtual hypervisor mode, there's nothing to do:
             * EA == GPA == qemu guest address
             */
        } else if (msr_hv || !env->has_hv_mode) {
            /* In HV mode, add HRMOR if top EA bit is clear */
            if (!(eaddr >> 63)) {
                raddr |= env->spr[SPR_HRMOR];
            }
        } else if (ppc_hash64_use_vrma(env)) {
            /* Emulated VRMA mode */
            slb = &vrma_slbe;
            if (build_vrma_slbe(cpu, slb) != 0) {
                /* Invalid VRMA setup, machine check */
                cs->exception_index = POWERPC_EXCP_MCHECK;
                env->error_code = 0;
                return 1;
            }

            goto skip_slb_search;
        } else {
            target_ulong limit = rmls_limit(cpu);

            /* Emulated old-style RMO mode, bounds check against RMLS */
            if (raddr >= limit) {
                switch (access_type) {
                case MMU_INST_FETCH:
                    ppc_hash64_set_isi(cs, SRR1_PROTFAULT);
                    break;
                case MMU_DATA_LOAD:
                    ppc_hash64_set_dsi(cs, eaddr, DSISR_PROTFAULT);
                    break;
                case MMU_DATA_STORE:
                    ppc_hash64_set_dsi(cs, eaddr,
                                       DSISR_PROTFAULT | DSISR_ISSTORE);
                    break;
                default:
                    g_assert_not_reached();
                }
                return 1;
            }

            raddr |= env->spr[SPR_RMOR];
        }
        tlb_set_page(cs, eaddr & TARGET_PAGE_MASK, raddr & TARGET_PAGE_MASK,
                     PAGE_READ | PAGE_WRITE | PAGE_EXEC, mmu_idx,
                     TARGET_PAGE_SIZE);
        return 0;
    }

    /* 2. Translation is on, so look up the SLB */
    slb = slb_lookup(cpu, eaddr);
    if (!slb) {
        /* No entry found, check if in-memory segment tables are in use */
        if (ppc64_use_proc_tbl(cpu)) {
            /* TODO - Unsupported */
            error_report("Segment Table Support Unimplemented");
            exit(1);
        }
        /* Segment still not found, generate the appropriate interrupt */
        switch (access_type) {
        case MMU_INST_FETCH:
            cs->exception_index = POWERPC_EXCP_ISEG;
            env->error_code = 0;
            break;
        case MMU_DATA_LOAD:
        case MMU_DATA_STORE:
            cs->exception_index = POWERPC_EXCP_DSEG;
            env->error_code = 0;
            env->spr[SPR_DAR] = eaddr;
            break;
        default:
            g_assert_not_reached();
        }
        return 1;
    }

 skip_slb_search:

    /* 3. Check for segment level no-execute violation */
    if (access_type == MMU_INST_FETCH && (slb->vsid & SLB_VSID_N)) {
        ppc_hash64_set_isi(cs, SRR1_NOEXEC_GUARD);
        return 1;
    }

    /* 4. Locate the PTE in the hash table */
    ptex = ppc_hash64_htab_lookup(cpu, slb, eaddr, &pte, &apshift);
    if (ptex == -1) {
        switch (access_type) {
        case MMU_INST_FETCH:
            ppc_hash64_set_isi(cs, SRR1_NOPTE);
            break;
        case MMU_DATA_LOAD:
            ppc_hash64_set_dsi(cs, eaddr, DSISR_NOPTE);
            break;
        case MMU_DATA_STORE:
            ppc_hash64_set_dsi(cs, eaddr, DSISR_NOPTE | DSISR_ISSTORE);
            break;
        default:
            g_assert_not_reached();
        }
        return 1;
    }
    qemu_log_mask(CPU_LOG_MMU,
                  "found PTE at index %08" HWADDR_PRIx "\n", ptex);
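
    /*
     * The effective permissions are the intersection of what the
     * no-execute/guard bits, the basic PP protection and the AMR/IAMR
     * key protection each allow.
     */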
    /* 5. Check access permissions */

    exec_prot = ppc_hash64_pte_noexec_guard(cpu, pte);
    pp_prot = ppc_hash64_pte_prot(cpu, slb, pte);
    amr_prot = ppc_hash64_amr_prot(cpu, pte);
    prot = exec_prot & pp_prot & amr_prot;

    need_prot = prot_for_access_type(access_type);
    if (need_prot & ~prot) {
        /* Access right violation */
        qemu_log_mask(CPU_LOG_MMU, "PTE access rejected\n");
        if (access_type == MMU_INST_FETCH) {
            int srr1 = 0;
            if (PAGE_EXEC & ~exec_prot) {
                srr1 |= SRR1_NOEXEC_GUARD; /* Access violates noexec or guard */
            } else if (PAGE_EXEC & ~pp_prot) {
                srr1 |= SRR1_PROTFAULT; /* Access violates access authority */
            }
            if (PAGE_EXEC & ~amr_prot) {
                srr1 |= SRR1_IAMR; /* Access violates virt pg class key prot */
            }
            ppc_hash64_set_isi(cs, srr1);
        } else {
            int dsisr = 0;
            if (need_prot & ~pp_prot) {
                dsisr |= DSISR_PROTFAULT;
            }
            if (access_type == MMU_DATA_STORE) {
                dsisr |= DSISR_ISSTORE;
            }
            if (need_prot & ~amr_prot) {
                dsisr |= DSISR_AMR;
            }
            ppc_hash64_set_dsi(cs, eaddr, dsisr);
        }
        return 1;
    }

    qemu_log_mask(CPU_LOG_MMU, "PTE access granted !\n");

    /* 6. Update PTE referenced and changed bits if necessary */

    if (!(pte.pte1 & HPTE64_R_R)) {
        ppc_hash64_set_r(cpu, ptex, pte.pte1);
    }
    if (!(pte.pte1 & HPTE64_R_C)) {
        if (access_type == MMU_DATA_STORE) {
            ppc_hash64_set_c(cpu, ptex, pte.pte1);
        } else {
            /*
             * Treat the page as read-only for now, so that a later write
             * will pass through this function again to set the C bit
             */
            prot &= ~PAGE_WRITE;
        }
    }

    /* 7. Determine the real address from the PTE */

    raddr = deposit64(pte.pte1 & HPTE64_R_RPN, 0, apshift, eaddr);

    tlb_set_page(cs, eaddr & TARGET_PAGE_MASK, raddr & TARGET_PAGE_MASK,
                 prot, mmu_idx, 1ULL << apshift);

    return 0;
}

hwaddr ppc_hash64_get_phys_page_debug(PowerPCCPU *cpu, target_ulong addr)
{
    CPUPPCState *env = &cpu->env;
    ppc_slb_t vrma_slbe;
    ppc_slb_t *slb;
    hwaddr ptex, raddr;
    ppc_hash_pte64_t pte;
    unsigned apshift;

    /* Handle real mode */
    if (msr_dr == 0) {
        /* In real mode the top 4 effective address bits are ignored */
        raddr = addr & 0x0FFFFFFFFFFFFFFFULL;

        if (cpu->vhyp) {
            /*
             * In virtual hypervisor mode, there's nothing to do:
             * EA == GPA == qemu guest address
             */
            return raddr;
        } else if ((msr_hv || !env->has_hv_mode) && !(addr >> 63)) {
            /* In HV mode, add HRMOR if top EA bit is clear */
            return raddr | env->spr[SPR_HRMOR];
        } else if (ppc_hash64_use_vrma(env)) {
            /* Emulated VRMA mode */
            slb = &vrma_slbe;
            if (build_vrma_slbe(cpu, slb) != 0) {
                return -1;
            }
        } else {
            target_ulong limit = rmls_limit(cpu);

            /* Emulated old-style RMO mode, bounds check against RMLS */
            if (raddr >= limit) {
                return -1;
            }
            return raddr | env->spr[SPR_RMOR];
        }
    } else {
        slb = slb_lookup(cpu, addr);
        if (!slb) {
            return -1;
        }
    }

    ptex = ppc_hash64_htab_lookup(cpu, slb, addr, &pte, &apshift);
    if (ptex == -1) {
        return -1;
    }

    return deposit64(pte.pte1 & HPTE64_R_RPN, 0, apshift, addr)
        & TARGET_PAGE_MASK;
}
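
/*
 * Invalidate cached translations after a guest HPTE update.  Since we
 * cannot flush by segment, simply schedule a full TLB flush.
 */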
void ppc_hash64_tlb_flush_hpte(PowerPCCPU *cpu, target_ulong ptex,
                               target_ulong pte0, target_ulong pte1)
{
    /*
     * XXX: given the fact that there are too many segments to
     * invalidate, and we still don't have a tlb_flush_mask(env, n,
     * mask) in QEMU, we just invalidate all TLBs
     */
    cpu->env.tlb_need_flush = TLB_NEED_GLOBAL_FLUSH | TLB_NEED_LOCAL_FLUSH;
}

void helper_store_lpcr(CPUPPCState *env, target_ulong val)
{
    PowerPCCPU *cpu = env_archcpu(env);

    ppc_store_lpcr(cpu, val);
}

void ppc_hash64_init(PowerPCCPU *cpu)
{
    CPUPPCState *env = &cpu->env;
    PowerPCCPUClass *pcc = POWERPC_CPU_GET_CLASS(cpu);

    if (!pcc->hash64_opts) {
        assert(!mmu_is_64bit(env->mmu_model));
        return;
    }

    cpu->hash64_opts = g_memdup(pcc->hash64_opts, sizeof(*cpu->hash64_opts));
}

void ppc_hash64_finalize(PowerPCCPU *cpu)
{
    g_free(cpu->hash64_opts);
}

const PPCHash64Options ppc_hash64_opts_basic = {
    .flags = 0,
    .slb_size = 64,
    .sps = {
        { .page_shift = 12, /* 4K */
          .slb_enc = 0,
          .enc = { { .page_shift = 12, .pte_enc = 0 } }
        },
        { .page_shift = 24, /* 16M */
          .slb_enc = 0x100,
          .enc = { { .page_shift = 24, .pte_enc = 0 } }
        },
    },
};

const PPCHash64Options ppc_hash64_opts_POWER7 = {
    .flags = PPC_HASH64_1TSEG | PPC_HASH64_AMR | PPC_HASH64_CI_LARGEPAGE,
    .slb_size = 32,
    .sps = {
        {
            .page_shift = 12, /* 4K */
            .slb_enc = 0,
            .enc = { { .page_shift = 12, .pte_enc = 0 },
                     { .page_shift = 16, .pte_enc = 0x7 },
                     { .page_shift = 24, .pte_enc = 0x38 }, },
        },
        {
            .page_shift = 16, /* 64K */
            .slb_enc = SLB_VSID_64K,
            .enc = { { .page_shift = 16, .pte_enc = 0x1 },
                     { .page_shift = 24, .pte_enc = 0x8 }, },
        },
        {
            .page_shift = 24, /* 16M */
            .slb_enc = SLB_VSID_16M,
            .enc = { { .page_shift = 24, .pte_enc = 0 }, },
        },
        {
            .page_shift = 34, /* 16G */
            .slb_enc = SLB_VSID_16G,
            .enc = { { .page_shift = 34, .pte_enc = 0x3 }, },
        },
    }
};