/*
 * PowerPC MMU, TLB, SLB and BAT emulation helpers for QEMU.
 *
 * Copyright (c) 2003-2007 Jocelyn Mayer
 * Copyright (c) 2013 David Gibson, IBM Corporation
 *
 * This library is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License as published by the Free Software Foundation; either
 * version 2.1 of the License, or (at your option) any later version.
 *
 * This library is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with this library; if not, see <http://www.gnu.org/licenses/>.
 */
#include "qemu/osdep.h"
#include "qemu/units.h"
#include "cpu.h"
#include "exec/exec-all.h"
#include "qemu/error-report.h"
#include "qemu/qemu-print.h"
#include "sysemu/hw_accel.h"
#include "kvm_ppc.h"
#include "mmu-hash64.h"
#include "exec/log.h"
#include "hw/hw.h"
#include "internal.h"
#include "mmu-book3s-v3.h"
#include "helper_regs.h"

#ifdef CONFIG_TCG
#include "exec/helper-proto.h"
#endif

/* #define DEBUG_SLB */

#ifdef DEBUG_SLB
#  define LOG_SLB(...) qemu_log_mask(CPU_LOG_MMU, __VA_ARGS__)
#else
#  define LOG_SLB(...) do { } while (0)
#endif

/*
 * SLB handling
 */

static ppc_slb_t *slb_lookup(PowerPCCPU *cpu, target_ulong eaddr)
{
    CPUPPCState *env = &cpu->env;
    uint64_t esid_256M, esid_1T;
    int n;

    LOG_SLB("%s: eaddr " TARGET_FMT_lx "\n", __func__, eaddr);

    esid_256M = (eaddr & SEGMENT_MASK_256M) | SLB_ESID_V;
    esid_1T = (eaddr & SEGMENT_MASK_1T) | SLB_ESID_V;

    for (n = 0; n < cpu->hash64_opts->slb_size; n++) {
        ppc_slb_t *slb = &env->slb[n];

        LOG_SLB("%s: slot %d %016" PRIx64 " %016"
                PRIx64 "\n", __func__, n, slb->esid, slb->vsid);
        /*
         * We check for 1T matches on all MMUs here - if the MMU
         * doesn't have 1T segment support, we will have prevented 1T
         * entries from being inserted in the slbmte code.
         */
        if (((slb->esid == esid_256M) &&
             ((slb->vsid & SLB_VSID_B) == SLB_VSID_B_256M))
            || ((slb->esid == esid_1T) &&
                ((slb->vsid & SLB_VSID_B) == SLB_VSID_B_1T))) {
            return slb;
        }
    }

    return NULL;
}

void dump_slb(PowerPCCPU *cpu)
{
    CPUPPCState *env = &cpu->env;
    int i;
    uint64_t slbe, slbv;

    cpu_synchronize_state(CPU(cpu));

    qemu_printf("SLB\tESID\t\t\tVSID\n");
    for (i = 0; i < cpu->hash64_opts->slb_size; i++) {
        slbe = env->slb[i].esid;
        slbv = env->slb[i].vsid;
        if (slbe == 0 && slbv == 0) {
            continue;
        }
        qemu_printf("%d\t0x%016" PRIx64 "\t0x%016" PRIx64 "\n",
                    i, slbe, slbv);
    }
}

#ifdef CONFIG_TCG
void helper_SLBIA(CPUPPCState *env, uint32_t ih)
{
    PowerPCCPU *cpu = env_archcpu(env);
    int starting_entry;
    int n;

    /*
     * slbia must always flush all TLB (which is equivalent to ERAT in ppc
     * architecture). Matching on SLB_ESID_V is not good enough, because slbmte
     * can overwrite a valid SLB without flushing its lookaside information.
     *
     * It would be possible to keep the TLB in synch with the SLB by flushing
     * when a valid entry is overwritten by slbmte, and therefore slbia would
     * not have to flush unless it evicts a valid SLB entry. However it is
     * expected that slbmte is more common than slbia, and slbia is usually
     * going to evict valid SLB entries, so that tradeoff is unlikely to be a
     * good one.
     *
     * ISA v2.05 introduced IH field with values 0,1,2,6. These all invalidate
     * the same SLB entries (everything but entry 0), but differ in what
     * "lookaside information" is invalidated. TCG can ignore this and flush
     * everything.
     *
     * ISA v3.0 introduced additional values 3,4,7, which change what SLBs are
     * invalidated.
     */

    env->tlb_need_flush |= TLB_NEED_LOCAL_FLUSH;

    starting_entry = 1; /* default for IH=0,1,2,6 */

    if (env->mmu_model == POWERPC_MMU_3_00) {
        switch (ih) {
        case 0x7:
            /* invalidate no SLBs, but all lookaside information */
            return;

        case 0x3:
        case 0x4:
            /* also considers SLB entry 0 */
            starting_entry = 0;
            break;

        case 0x5:
            /* treat undefined values as ih==0, and warn */
            qemu_log_mask(LOG_GUEST_ERROR,
                          "slbia undefined IH field %u.\n", ih);
            break;

        default:
            /* 0,1,2,6 */
            break;
        }
    }

    for (n = starting_entry; n < cpu->hash64_opts->slb_size; n++) {
        ppc_slb_t *slb = &env->slb[n];

        if (!(slb->esid & SLB_ESID_V)) {
            continue;
        }
        if (env->mmu_model == POWERPC_MMU_3_00) {
            if (ih == 0x3 && (slb->vsid & SLB_VSID_C) == 0) {
                /* preserves entries with a class value of 0 */
                continue;
            }
        }

        slb->esid &= ~SLB_ESID_V;
    }
}

#if defined(TARGET_PPC64)
void helper_SLBIAG(CPUPPCState *env, target_ulong rs, uint32_t l)
{
    PowerPCCPU *cpu = env_archcpu(env);
    int n;

    /*
     * slbiag must always flush all TLB (which is equivalent to ERAT in ppc
     * architecture). Matching on SLB_ESID_V is not good enough, because slbmte
     * can overwrite a valid SLB without flushing its lookaside information.
     *
     * It would be possible to keep the TLB in synch with the SLB by flushing
     * when a valid entry is overwritten by slbmte, and therefore slbiag would
     * not have to flush unless it evicts a valid SLB entry. However it is
     * expected that slbmte is more common than slbiag, and slbiag is usually
     * going to evict valid SLB entries, so that tradeoff is unlikely to be a
     * good one.
     */
    env->tlb_need_flush |= TLB_NEED_LOCAL_FLUSH;

    for (n = 0; n < cpu->hash64_opts->slb_size; n++) {
        ppc_slb_t *slb = &env->slb[n];
        slb->esid &= ~SLB_ESID_V;
    }
}
#endif

static void __helper_slbie(CPUPPCState *env, target_ulong addr,
                           target_ulong global)
{
    PowerPCCPU *cpu = env_archcpu(env);
    ppc_slb_t *slb;

    slb = slb_lookup(cpu, addr);
    if (!slb) {
        return;
    }

    if (slb->esid & SLB_ESID_V) {
        slb->esid &= ~SLB_ESID_V;

        /*
         * XXX: given the fact that segment size is 256 MB or 1TB,
         * and we still don't have a tlb_flush_mask(env, n, mask)
         * in QEMU, we just invalidate all TLBs
         */
        env->tlb_need_flush |=
            (global == false ? TLB_NEED_LOCAL_FLUSH : TLB_NEED_GLOBAL_FLUSH);
    }
}

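/*
 * slbie invalidates the SLB entry locally, whereas slbieg is the global
 * form; the only difference in TCG is whether a local or a global TLB
 * flush is requested from __helper_slbie() above.
 */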
void helper_SLBIE(CPUPPCState *env, target_ulong addr)
{
    __helper_slbie(env, addr, false);
}

void helper_SLBIEG(CPUPPCState *env, target_ulong addr)
{
    __helper_slbie(env, addr, true);
}
#endif

int ppc_store_slb(PowerPCCPU *cpu, target_ulong slot,
                  target_ulong esid, target_ulong vsid)
{
    CPUPPCState *env = &cpu->env;
    ppc_slb_t *slb = &env->slb[slot];
    const PPCHash64SegmentPageSizes *sps = NULL;
    int i;

    if (slot >= cpu->hash64_opts->slb_size) {
        return -1; /* Bad slot number */
    }
    if (esid & ~(SLB_ESID_ESID | SLB_ESID_V)) {
        return -1; /* Reserved bits set */
    }
    if (vsid & (SLB_VSID_B & ~SLB_VSID_B_1T)) {
        return -1; /* Bad segment size */
    }
    if ((vsid & SLB_VSID_B) && !(ppc_hash64_has(cpu, PPC_HASH64_1TSEG))) {
        return -1; /* 1T segment on MMU that doesn't support it */
    }

    for (i = 0; i < PPC_PAGE_SIZES_MAX_SZ; i++) {
        const PPCHash64SegmentPageSizes *sps1 = &cpu->hash64_opts->sps[i];

        if (!sps1->page_shift) {
            break;
        }

        if ((vsid & SLB_VSID_LLP_MASK) == sps1->slb_enc) {
            sps = sps1;
            break;
        }
    }

    if (!sps) {
        error_report("Bad page size encoding in SLB store: slot "TARGET_FMT_lu
                     " esid 0x"TARGET_FMT_lx" vsid 0x"TARGET_FMT_lx,
                     slot, esid, vsid);
        return -1;
    }

    slb->esid = esid;
    slb->vsid = vsid;
    slb->sps = sps;

    LOG_SLB("%s: " TARGET_FMT_lu " " TARGET_FMT_lx " - " TARGET_FMT_lx
            " => %016" PRIx64 " %016" PRIx64 "\n", __func__, slot, esid, vsid,
            slb->esid, slb->vsid);

    return 0;
}

#ifdef CONFIG_TCG
static int ppc_load_slb_esid(PowerPCCPU *cpu, target_ulong rb,
                             target_ulong *rt)
{
    CPUPPCState *env = &cpu->env;
    int slot = rb & 0xfff;
    ppc_slb_t *slb = &env->slb[slot];

    if (slot >= cpu->hash64_opts->slb_size) {
        return -1;
    }

    *rt = slb->esid;
    return 0;
}

static int ppc_load_slb_vsid(PowerPCCPU *cpu, target_ulong rb,
                             target_ulong *rt)
{
    CPUPPCState *env = &cpu->env;
    int slot = rb & 0xfff;
    ppc_slb_t *slb = &env->slb[slot];

    if (slot >= cpu->hash64_opts->slb_size) {
        return -1;
    }

    *rt = slb->vsid;
    return 0;
}

static int ppc_find_slb_vsid(PowerPCCPU *cpu, target_ulong rb,
                             target_ulong *rt)
{
    CPUPPCState *env = &cpu->env;
    ppc_slb_t *slb;

    if (!msr_is_64bit(env, env->msr)) {
        rb &= 0xffffffff;
    }
    slb = slb_lookup(cpu, rb);
    if (slb == NULL) {
        *rt = (target_ulong)-1ul;
    } else {
        *rt = slb->vsid;
    }
    return 0;
}

void helper_SLBMTE(CPUPPCState *env, target_ulong rb, target_ulong rs)
{
    PowerPCCPU *cpu = env_archcpu(env);

    if (ppc_store_slb(cpu, rb & 0xfff, rb & ~0xfffULL, rs) < 0) {
        raise_exception_err_ra(env, POWERPC_EXCP_PROGRAM,
                               POWERPC_EXCP_INVAL, GETPC());
    }
}

target_ulong helper_SLBMFEE(CPUPPCState *env, target_ulong rb)
{
    PowerPCCPU *cpu = env_archcpu(env);
    target_ulong rt = 0;

    if (ppc_load_slb_esid(cpu, rb, &rt) < 0) {
        raise_exception_err_ra(env, POWERPC_EXCP_PROGRAM,
                               POWERPC_EXCP_INVAL, GETPC());
    }
    return rt;
}

target_ulong helper_SLBFEE(CPUPPCState *env, target_ulong rb)
{
    PowerPCCPU *cpu = env_archcpu(env);
    target_ulong rt = 0;

    if (ppc_find_slb_vsid(cpu, rb, &rt) < 0) {
        raise_exception_err_ra(env, POWERPC_EXCP_PROGRAM,
                               POWERPC_EXCP_INVAL, GETPC());
    }
    return rt;
}

target_ulong helper_SLBMFEV(CPUPPCState *env, target_ulong rb)
{
    PowerPCCPU *cpu = env_archcpu(env);
    target_ulong rt = 0;

    if (ppc_load_slb_vsid(cpu, rb, &rt) < 0) {
        raise_exception_err_ra(env, POWERPC_EXCP_PROGRAM,
                               POWERPC_EXCP_INVAL, GETPC());
    }
    return rt;
}
#endif

/* Check No-Execute or Guarded Storage */
static inline int ppc_hash64_pte_noexec_guard(PowerPCCPU *cpu,
                                              ppc_hash_pte64_t pte)
{
    /* Exec permissions CANNOT take away read or write permissions */
    return (pte.pte1 & HPTE64_R_N) || (pte.pte1 & HPTE64_R_G) ?
            PAGE_READ | PAGE_WRITE : PAGE_READ | PAGE_WRITE | PAGE_EXEC;
}

/* Check Basic Storage Protection */
static int ppc_hash64_pte_prot(int mmu_idx,
                               ppc_slb_t *slb, ppc_hash_pte64_t pte)
{
    unsigned pp, key;
    /*
     * Some pp bit combinations have undefined behaviour, so default
     * to no access in those cases
     */
    int prot = 0;

    key = !!(mmuidx_pr(mmu_idx) ? (slb->vsid & SLB_VSID_KP)
             : (slb->vsid & SLB_VSID_KS));
    pp = (pte.pte1 & HPTE64_R_PP) | ((pte.pte1 & HPTE64_R_PP0) >> 61);

    if (key == 0) {
        switch (pp) {
        case 0x0:
        case 0x1:
        case 0x2:
            prot = PAGE_READ | PAGE_WRITE | PAGE_EXEC;
            break;

        case 0x3:
        case 0x6:
            prot = PAGE_READ | PAGE_EXEC;
            break;
        }
    } else {
        switch (pp) {
        case 0x0:
        case 0x6:
            break;

        case 0x1:
        case 0x3:
            prot = PAGE_READ | PAGE_EXEC;
            break;

        case 0x2:
            prot = PAGE_READ | PAGE_WRITE | PAGE_EXEC;
            break;
        }
    }

    return prot;
}

/* Check the instruction access permissions specified in the IAMR */
static int ppc_hash64_iamr_prot(PowerPCCPU *cpu, int key)
{
    CPUPPCState *env = &cpu->env;
    int iamr_bits = (env->spr[SPR_IAMR] >> 2 * (31 - key)) & 0x3;

    /*
     * An instruction fetch is permitted if the IAMR bit is 0.
     * If the bit is set, return PAGE_READ | PAGE_WRITE because this bit
     * can only take away EXEC permissions not READ or WRITE permissions.
     * If bit is cleared return PAGE_READ | PAGE_WRITE | PAGE_EXEC since
     * EXEC permissions are allowed.
     */
    return (iamr_bits & 0x1) ? PAGE_READ | PAGE_WRITE :
                               PAGE_READ | PAGE_WRITE | PAGE_EXEC;
}

static int ppc_hash64_amr_prot(PowerPCCPU *cpu, ppc_hash_pte64_t pte)
{
    CPUPPCState *env = &cpu->env;
    int key, amrbits;
    int prot = PAGE_READ | PAGE_WRITE | PAGE_EXEC;

    /* Only recent MMUs implement Virtual Page Class Key Protection */
    if (!ppc_hash64_has(cpu, PPC_HASH64_AMR)) {
        return prot;
    }

    key = HPTE64_R_KEY(pte.pte1);
    amrbits = (env->spr[SPR_AMR] >> 2 * (31 - key)) & 0x3;

    /* fprintf(stderr, "AMR protection: key=%d AMR=0x%" PRIx64 "\n", key, */
    /*         env->spr[SPR_AMR]); */

    /*
     * A store is permitted if the AMR bit is 0. Remove write
     * protection if it is set.
     */
    if (amrbits & 0x2) {
        prot &= ~PAGE_WRITE;
    }
    /*
     * A load is permitted if the AMR bit is 0. Remove read
     * protection if it is set.
     */
    if (amrbits & 0x1) {
        prot &= ~PAGE_READ;
    }

    switch (env->mmu_model) {
    /*
     * MMU version 2.07 and later support IAMR.
     * If the IAMR does not allow the instruction access, the AND below
     * clears PAGE_EXEC from prot; otherwise prot is left unchanged,
     * indicating execution is permitted.
     */
    case POWERPC_MMU_2_07:
    case POWERPC_MMU_3_00:
        prot &= ppc_hash64_iamr_prot(cpu, key);
        break;
    default:
        break;
    }

    return prot;
}

const ppc_hash_pte64_t *ppc_hash64_map_hptes(PowerPCCPU *cpu,
                                             hwaddr ptex, int n)
{
    hwaddr pte_offset = ptex * HASH_PTE_SIZE_64;
    hwaddr base;
    hwaddr plen = n * HASH_PTE_SIZE_64;
    const ppc_hash_pte64_t *hptes;

    if (cpu->vhyp) {
        PPCVirtualHypervisorClass *vhc =
            PPC_VIRTUAL_HYPERVISOR_GET_CLASS(cpu->vhyp);
        return vhc->map_hptes(cpu->vhyp, ptex, n);
    }
    base = ppc_hash64_hpt_base(cpu);

    if (!base) {
        return NULL;
    }

    hptes = address_space_map(CPU(cpu)->as, base + pte_offset, &plen, false,
                              MEMTXATTRS_UNSPECIFIED);
    if (plen < (n * HASH_PTE_SIZE_64)) {
        hw_error("%s: Unable to map all requested HPTEs\n", __func__);
    }
    return hptes;
}

void ppc_hash64_unmap_hptes(PowerPCCPU *cpu, const ppc_hash_pte64_t *hptes,
                            hwaddr ptex, int n)
{
    if (cpu->vhyp) {
        PPCVirtualHypervisorClass *vhc =
            PPC_VIRTUAL_HYPERVISOR_GET_CLASS(cpu->vhyp);
        vhc->unmap_hptes(cpu->vhyp, hptes, ptex, n);
        return;
    }

    address_space_unmap(CPU(cpu)->as, (void *)hptes, n * HASH_PTE_SIZE_64,
                        false, n * HASH_PTE_SIZE_64);
}

static unsigned hpte_page_shift(const PPCHash64SegmentPageSizes *sps,
                                uint64_t pte0, uint64_t pte1)
{
    int i;

    if (!(pte0 & HPTE64_V_LARGE)) {
        if (sps->page_shift != 12) {
            /* 4kiB page in a non 4kiB segment */
            return 0;
        }
        /* Normal 4kiB page */
        return 12;
    }

    for (i = 0; i < PPC_PAGE_SIZES_MAX_SZ; i++) {
        const PPCHash64PageSize *ps = &sps->enc[i];
        uint64_t mask;

        if (!ps->page_shift) {
            break;
        }

        if (ps->page_shift == 12) {
            /* L bit is set so this can't be a 4kiB page */
            continue;
        }

        mask = ((1ULL << ps->page_shift) - 1) & HPTE64_R_RPN;

        if ((pte1 & mask) == ((uint64_t)ps->pte_enc << HPTE64_R_RPN_SHIFT)) {
            return ps->page_shift;
        }
    }

    return 0; /* Bad page size encoding */
}

static void ppc64_v3_new_to_old_hpte(target_ulong *pte0, target_ulong *pte1)
{
    /* Insert B into pte0 */
    *pte0 = (*pte0 & HPTE64_V_COMMON_BITS) |
            ((*pte1 & HPTE64_R_3_0_SSIZE_MASK) <<
             (HPTE64_V_SSIZE_SHIFT - HPTE64_R_3_0_SSIZE_SHIFT));

    /* Remove B from pte1 */
    *pte1 = *pte1 & ~HPTE64_R_3_0_SSIZE_MASK;
}


static hwaddr ppc_hash64_pteg_search(PowerPCCPU *cpu, hwaddr hash,
                                     const PPCHash64SegmentPageSizes *sps,
                                     target_ulong ptem,
                                     ppc_hash_pte64_t *pte, unsigned *pshift)
{
    int i;
    const ppc_hash_pte64_t *pteg;
    target_ulong pte0, pte1;
    target_ulong ptex;

    ptex = (hash & ppc_hash64_hpt_mask(cpu)) * HPTES_PER_GROUP;
    pteg = ppc_hash64_map_hptes(cpu, ptex, HPTES_PER_GROUP);
    if (!pteg) {
        return -1;
    }
    for (i = 0; i < HPTES_PER_GROUP; i++) {
        pte0 = ppc_hash64_hpte0(cpu, pteg, i);
        /*
         * pte0 contains the valid bit and must be read before pte1,
         * otherwise we might see an old pte1 with a new valid bit and
         * thus an inconsistent hpte value
         */
        smp_rmb();
        pte1 = ppc_hash64_hpte1(cpu, pteg, i);

        /* Convert format if necessary */
        if (cpu->env.mmu_model == POWERPC_MMU_3_00 && !cpu->vhyp) {
            ppc64_v3_new_to_old_hpte(&pte0, &pte1);
        }

        /* This compares V, B, H (secondary) and the AVPN */
        if (HPTE64_V_COMPARE(pte0, ptem)) {
            *pshift = hpte_page_shift(sps, pte0, pte1);
            /*
             * If there is no match, ignore the PTE, it could simply
             * be for a different segment size encoding and the
             * architecture specifies we should not match. Linux will
             * potentially leave behind PTEs for the wrong base page
             * size when demoting segments.
             */
            if (*pshift == 0) {
                continue;
            }
            /*
             * We don't do anything with pshift yet as qemu TLB only
             * deals with 4K pages anyway
             */
            pte->pte0 = pte0;
            pte->pte1 = pte1;
            ppc_hash64_unmap_hptes(cpu, pteg, ptex, HPTES_PER_GROUP);
            return ptex + i;
        }
    }
    ppc_hash64_unmap_hptes(cpu, pteg, ptex, HPTES_PER_GROUP);
    /*
     * We didn't find a valid entry.
     */
    return -1;
}

static hwaddr ppc_hash64_htab_lookup(PowerPCCPU *cpu,
                                     ppc_slb_t *slb, target_ulong eaddr,
                                     ppc_hash_pte64_t *pte, unsigned *pshift)
{
    CPUPPCState *env = &cpu->env;
    hwaddr hash, ptex;
    uint64_t vsid, epnmask, epn, ptem;
    const PPCHash64SegmentPageSizes *sps = slb->sps;

    /*
     * The SLB store path should prevent any bad page size encodings
     * getting in there, so:
     */
    assert(sps);

    /* If ISL is set in LPCR we need to clamp the page size to 4K */
    if (env->spr[SPR_LPCR] & LPCR_ISL) {
        /* We assume that when using TCG, 4k is first entry of SPS */
        sps = &cpu->hash64_opts->sps[0];
        assert(sps->page_shift == 12);
    }

    epnmask = ~((1ULL << sps->page_shift) - 1);

    if (slb->vsid & SLB_VSID_B) {
        /* 1TB segment */
        vsid = (slb->vsid & SLB_VSID_VSID) >> SLB_VSID_SHIFT_1T;
        epn = (eaddr & ~SEGMENT_MASK_1T) & epnmask;
        hash = vsid ^ (vsid << 25) ^ (epn >> sps->page_shift);
    } else {
        /* 256M segment */
        vsid = (slb->vsid & SLB_VSID_VSID) >> SLB_VSID_SHIFT;
        epn = (eaddr & ~SEGMENT_MASK_256M) & epnmask;
        hash = vsid ^ (epn >> sps->page_shift);
    }
    ptem = (slb->vsid & SLB_VSID_PTEM) | ((epn >> 16) & HPTE64_V_AVPN);
    ptem |= HPTE64_V_VALID;

    /* Page address translation */
    qemu_log_mask(CPU_LOG_MMU,
                  "htab_base " HWADDR_FMT_plx " htab_mask " HWADDR_FMT_plx
                  " hash " HWADDR_FMT_plx "\n",
                  ppc_hash64_hpt_base(cpu), ppc_hash64_hpt_mask(cpu), hash);

    /* Primary PTEG lookup */
    qemu_log_mask(CPU_LOG_MMU,
                  "0 htab=" HWADDR_FMT_plx "/" HWADDR_FMT_plx
                  " vsid=" TARGET_FMT_lx " ptem=" TARGET_FMT_lx
                  " hash=" HWADDR_FMT_plx "\n",
                  ppc_hash64_hpt_base(cpu), ppc_hash64_hpt_mask(cpu),
                  vsid, ptem, hash);
    ptex = ppc_hash64_pteg_search(cpu, hash, sps, ptem, pte, pshift);

    if (ptex == -1) {
        /* Secondary PTEG lookup */
        ptem |= HPTE64_V_SECONDARY;
        qemu_log_mask(CPU_LOG_MMU,
                      "1 htab=" HWADDR_FMT_plx "/" HWADDR_FMT_plx
                      " vsid=" TARGET_FMT_lx " api=" TARGET_FMT_lx
                      " hash=" HWADDR_FMT_plx "\n", ppc_hash64_hpt_base(cpu),
                      ppc_hash64_hpt_mask(cpu), vsid, ptem, ~hash);

        ptex = ppc_hash64_pteg_search(cpu, ~hash, sps, ptem, pte, pshift);
    }

    return ptex;
}

unsigned ppc_hash64_hpte_page_shift_noslb(PowerPCCPU *cpu,
                                          uint64_t pte0, uint64_t pte1)
{
    int i;

    if (!(pte0 & HPTE64_V_LARGE)) {
        return 12;
    }

    /*
     * The encodings in env->sps need to be carefully chosen so that
     * this gives an unambiguous result.
     */
    for (i = 0; i < PPC_PAGE_SIZES_MAX_SZ; i++) {
        const PPCHash64SegmentPageSizes *sps = &cpu->hash64_opts->sps[i];
        unsigned shift;

        if (!sps->page_shift) {
            break;
        }

        shift = hpte_page_shift(sps, pte0, pte1);
        if (shift) {
            return shift;
        }
    }

    return 0;
}

static bool ppc_hash64_use_vrma(CPUPPCState *env)
{
    switch (env->mmu_model) {
    case POWERPC_MMU_3_00:
        /*
         * ISAv3.0 (POWER9) always uses VRMA, the VPM0 field and RMOR
         * register no longer exist
         */
        return true;

    default:
        return !!(env->spr[SPR_LPCR] & LPCR_VPM0);
    }
}

static void ppc_hash64_set_isi(CPUState *cs, int mmu_idx, uint64_t slb_vsid,
                               uint64_t error_code)
{
    CPUPPCState *env = &POWERPC_CPU(cs)->env;
    bool vpm;

    if (!mmuidx_real(mmu_idx)) {
        vpm = !!(env->spr[SPR_LPCR] & LPCR_VPM1);
    } else {
        vpm = ppc_hash64_use_vrma(env);
    }
    if (vpm && !mmuidx_hv(mmu_idx)) {
        cs->exception_index = POWERPC_EXCP_HISI;
        env->spr[SPR_ASDR] = slb_vsid;
    } else {
        cs->exception_index = POWERPC_EXCP_ISI;
    }
    env->error_code = error_code;
}

static void ppc_hash64_set_dsi(CPUState *cs, int mmu_idx, uint64_t slb_vsid,
                               uint64_t dar, uint64_t dsisr)
{
    CPUPPCState *env = &POWERPC_CPU(cs)->env;
    bool vpm;

    if (!mmuidx_real(mmu_idx)) {
        vpm = !!(env->spr[SPR_LPCR] & LPCR_VPM1);
    } else {
        vpm = ppc_hash64_use_vrma(env);
    }
    if (vpm && !mmuidx_hv(mmu_idx)) {
        cs->exception_index = POWERPC_EXCP_HDSI;
        env->spr[SPR_HDAR] = dar;
        env->spr[SPR_HDSISR] = dsisr;
        env->spr[SPR_ASDR] = slb_vsid;
    } else {
        cs->exception_index = POWERPC_EXCP_DSI;
        env->spr[SPR_DAR] = dar;
        env->spr[SPR_DSISR] = dsisr;
    }
    env->error_code = 0;
}


static void ppc_hash64_set_r(PowerPCCPU *cpu, hwaddr ptex, uint64_t pte1)
{
    hwaddr base, offset = ptex * HASH_PTE_SIZE_64 + HPTE64_DW1_R;

    if (cpu->vhyp) {
        PPCVirtualHypervisorClass *vhc =
            PPC_VIRTUAL_HYPERVISOR_GET_CLASS(cpu->vhyp);
        vhc->hpte_set_r(cpu->vhyp, ptex, pte1);
        return;
    }
    base = ppc_hash64_hpt_base(cpu);


    /* The HW performs a non-atomic byte update */
    stb_phys(CPU(cpu)->as, base + offset, ((pte1 >> 8) & 0xff) | 0x01);
}

static void ppc_hash64_set_c(PowerPCCPU *cpu, hwaddr ptex, uint64_t pte1)
{
    hwaddr base, offset = ptex * HASH_PTE_SIZE_64 + HPTE64_DW1_C;

    if (cpu->vhyp) {
        PPCVirtualHypervisorClass *vhc =
            PPC_VIRTUAL_HYPERVISOR_GET_CLASS(cpu->vhyp);
        vhc->hpte_set_c(cpu->vhyp, ptex, pte1);
        return;
    }
    base = ppc_hash64_hpt_base(cpu);

    /* The HW performs a non-atomic byte update */
    stb_phys(CPU(cpu)->as, base + offset, (pte1 & 0xff) | 0x80);
}

static target_ulong rmls_limit(PowerPCCPU *cpu)
{
    CPUPPCState *env = &cpu->env;
    /*
     * In theory the meanings of RMLS values are implementation
     * dependent. In practice, this seems to have been the set from
     * POWER4+..POWER8, and RMLS is no longer supported in POWER9.
     *
     * Unsupported values mean the OS has shot itself in the
     * foot. Return a 0-sized RMA in this case, which we expect
     * to trigger an immediate DSI or ISI
     */
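    /*
     * RMLS encodings not listed below are reserved; their table entries
     * stay zero, which yields the 0-sized RMA described above.
     */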
    static const target_ulong rma_sizes[16] = {
        [0] = 256 * GiB,
        [1] = 16 * GiB,
        [2] = 1 * GiB,
        [3] = 64 * MiB,
        [4] = 256 * MiB,
        [7] = 128 * MiB,
        [8] = 32 * MiB,
    };
    target_ulong rmls = (env->spr[SPR_LPCR] & LPCR_RMLS) >> LPCR_RMLS_SHIFT;

    return rma_sizes[rmls];
}

/* Return the LLP in SLB_VSID format */
static uint64_t get_vrma_llp(PowerPCCPU *cpu)
{
    CPUPPCState *env = &cpu->env;
    uint64_t llp;

    if (env->mmu_model == POWERPC_MMU_3_00) {
        ppc_v3_pate_t pate;
        uint64_t ps, l, lp;

        /*
         * ISA v3.0 removes the LPCR[VRMASD] field and puts the VRMA base
         * page size (L||LP equivalent) in the PS field in the HPT partition
         * table entry.
         */
        if (!ppc64_v3_get_pate(cpu, cpu->env.spr[SPR_LPIDR], &pate)) {
            error_report("Bad VRMA with no partition table entry");
            return 0;
        }
        ps = PATE0_GET_PS(pate.dw0);
        /* PS has L||LP in 3 consecutive bits, put them into SLB LLP format */
        l = (ps >> 2) & 0x1;
        lp = ps & 0x3;
        llp = (l << SLB_VSID_L_SHIFT) | (lp << SLB_VSID_LP_SHIFT);

    } else {
        uint64_t lpcr = env->spr[SPR_LPCR];
        target_ulong vrmasd = (lpcr & LPCR_VRMASD) >> LPCR_VRMASD_SHIFT;

        /* VRMASD LLP matches SLB format, just shift and mask it */
        llp = (vrmasd << SLB_VSID_LP_SHIFT) & SLB_VSID_LLP_MASK;
    }

    return llp;
}

static int build_vrma_slbe(PowerPCCPU *cpu, ppc_slb_t *slb)
{
    uint64_t llp = get_vrma_llp(cpu);
    target_ulong vsid = SLB_VSID_VRMA | llp;
    int i;

    for (i = 0; i < PPC_PAGE_SIZES_MAX_SZ; i++) {
        const PPCHash64SegmentPageSizes *sps = &cpu->hash64_opts->sps[i];

        if (!sps->page_shift) {
            break;
        }

        if ((vsid & SLB_VSID_LLP_MASK) == sps->slb_enc) {
            slb->esid = SLB_ESID_V;
            slb->vsid = vsid;
            slb->sps = sps;
            return 0;
        }
    }

    error_report("Bad VRMA page size encoding 0x" TARGET_FMT_lx, llp);

    return -1;
}

bool ppc_hash64_xlate(PowerPCCPU *cpu, vaddr eaddr, MMUAccessType access_type,
                      hwaddr *raddrp, int *psizep, int *protp, int mmu_idx,
                      bool guest_visible)
{
    CPUState *cs = CPU(cpu);
    CPUPPCState *env = &cpu->env;
    ppc_slb_t vrma_slbe;
    ppc_slb_t *slb;
    unsigned apshift;
    hwaddr ptex;
    ppc_hash_pte64_t pte;
    int exec_prot, pp_prot, amr_prot, prot;
    int need_prot;
    hwaddr raddr;

    /*
     * Note on LPCR usage: 970 uses HID4, but our special variant of
     * store_spr copies relevant fields into env->spr[SPR_LPCR].
     * Similarly we filter unimplemented bits when storing into LPCR
     * depending on the MMU version. This code can thus just use the
     * LPCR "as-is".
     */

    /* 1. Handle real mode accesses */
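    /*
     * Depending on the configuration this is either a direct mapping
     * (virtual hypervisor), a hypervisor access offset by HRMOR, an
     * emulated VRMA translated via a synthetic SLB entry, or an
     * old-style RMO region bounded by RMLS.
     */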
    if (mmuidx_real(mmu_idx)) {
        /*
         * Translation is supposedly "off", but in real mode the top 4
         * effective address bits are (mostly) ignored
         */
        raddr = eaddr & 0x0FFFFFFFFFFFFFFFULL;

        if (cpu->vhyp) {
            /*
             * In virtual hypervisor mode, there's nothing to do:
             * EA == GPA == qemu guest address
             */
        } else if (mmuidx_hv(mmu_idx) || !env->has_hv_mode) {
            /* In HV mode, add HRMOR if top EA bit is clear */
            if (!(eaddr >> 63)) {
                raddr |= env->spr[SPR_HRMOR];
            }
        } else if (ppc_hash64_use_vrma(env)) {
            /* Emulated VRMA mode */
            slb = &vrma_slbe;
            if (build_vrma_slbe(cpu, slb) != 0) {
                /* Invalid VRMA setup, machine check */
                if (guest_visible) {
                    cs->exception_index = POWERPC_EXCP_MCHECK;
                    env->error_code = 0;
                }
                return false;
            }

            goto skip_slb_search;
        } else {
            target_ulong limit = rmls_limit(cpu);

            /* Emulated old-style RMO mode, bounds check against RMLS */
            if (raddr >= limit) {
                if (!guest_visible) {
                    return false;
                }
                switch (access_type) {
                case MMU_INST_FETCH:
                    ppc_hash64_set_isi(cs, mmu_idx, 0, SRR1_PROTFAULT);
                    break;
                case MMU_DATA_LOAD:
                    ppc_hash64_set_dsi(cs, mmu_idx, 0, eaddr, DSISR_PROTFAULT);
                    break;
                case MMU_DATA_STORE:
                    ppc_hash64_set_dsi(cs, mmu_idx, 0, eaddr,
                                       DSISR_PROTFAULT | DSISR_ISSTORE);
                    break;
                default:
                    g_assert_not_reached();
                }
                return false;
            }

            raddr |= env->spr[SPR_RMOR];
        }

        *raddrp = raddr;
        *protp = PAGE_READ | PAGE_WRITE | PAGE_EXEC;
        *psizep = TARGET_PAGE_BITS;
        return true;
    }

    /* 2. Translation is on, so look up the SLB */
    slb = slb_lookup(cpu, eaddr);
    if (!slb) {
        /* No entry found, check if in-memory segment tables are in use */
        if (ppc64_use_proc_tbl(cpu)) {
            /* TODO - Unsupported */
            error_report("Segment Table Support Unimplemented");
            exit(1);
        }
        /* Segment still not found, generate the appropriate interrupt */
        if (!guest_visible) {
            return false;
        }
        switch (access_type) {
        case MMU_INST_FETCH:
            cs->exception_index = POWERPC_EXCP_ISEG;
            env->error_code = 0;
            break;
        case MMU_DATA_LOAD:
        case MMU_DATA_STORE:
            cs->exception_index = POWERPC_EXCP_DSEG;
            env->error_code = 0;
            env->spr[SPR_DAR] = eaddr;
            break;
        default:
            g_assert_not_reached();
        }
        return false;
    }

 skip_slb_search:

    /* 3. Check for segment level no-execute violation */
    if (access_type == MMU_INST_FETCH && (slb->vsid & SLB_VSID_N)) {
        if (guest_visible) {
            ppc_hash64_set_isi(cs, mmu_idx, slb->vsid, SRR1_NOEXEC_GUARD);
        }
        return false;
    }

    /* 4. Locate the PTE in the hash table */
    ptex = ppc_hash64_htab_lookup(cpu, slb, eaddr, &pte, &apshift);
    if (ptex == -1) {
        if (!guest_visible) {
            return false;
        }
        switch (access_type) {
        case MMU_INST_FETCH:
            ppc_hash64_set_isi(cs, mmu_idx, slb->vsid, SRR1_NOPTE);
            break;
        case MMU_DATA_LOAD:
            ppc_hash64_set_dsi(cs, mmu_idx, slb->vsid, eaddr, DSISR_NOPTE);
            break;
        case MMU_DATA_STORE:
            ppc_hash64_set_dsi(cs, mmu_idx, slb->vsid, eaddr,
                               DSISR_NOPTE | DSISR_ISSTORE);
            break;
        default:
            g_assert_not_reached();
        }
        return false;
    }
    qemu_log_mask(CPU_LOG_MMU,
                  "found PTE at index %08" HWADDR_PRIx "\n", ptex);

    /* 5. Check access permissions */

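    /*
     * The access is allowed only if every protection mechanism permits
     * it: the no-execute/guard bits, the PP bits combined with the
     * storage key, and the AMR/IAMR authority masks.
     */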
    exec_prot = ppc_hash64_pte_noexec_guard(cpu, pte);
    pp_prot = ppc_hash64_pte_prot(mmu_idx, slb, pte);
    amr_prot = ppc_hash64_amr_prot(cpu, pte);
    prot = exec_prot & pp_prot & amr_prot;

    need_prot = prot_for_access_type(access_type);
    if (need_prot & ~prot) {
        /* Access right violation */
        qemu_log_mask(CPU_LOG_MMU, "PTE access rejected\n");
        if (!guest_visible) {
            return false;
        }
        if (access_type == MMU_INST_FETCH) {
            int srr1 = 0;
            if (PAGE_EXEC & ~exec_prot) {
                srr1 |= SRR1_NOEXEC_GUARD; /* Access violates noexec or guard */
            } else if (PAGE_EXEC & ~pp_prot) {
                srr1 |= SRR1_PROTFAULT; /* Access violates access authority */
            }
            if (PAGE_EXEC & ~amr_prot) {
                srr1 |= SRR1_IAMR; /* Access violates virt pg class key prot */
            }
            ppc_hash64_set_isi(cs, mmu_idx, slb->vsid, srr1);
        } else {
            int dsisr = 0;
            if (need_prot & ~pp_prot) {
                dsisr |= DSISR_PROTFAULT;
            }
            if (access_type == MMU_DATA_STORE) {
                dsisr |= DSISR_ISSTORE;
            }
            if (need_prot & ~amr_prot) {
                dsisr |= DSISR_AMR;
            }
            ppc_hash64_set_dsi(cs, mmu_idx, slb->vsid, eaddr, dsisr);
        }
        return false;
    }

    qemu_log_mask(CPU_LOG_MMU, "PTE access granted !\n");

    /* 6. Update PTE referenced and changed bits if necessary */

    if (!(pte.pte1 & HPTE64_R_R)) {
        ppc_hash64_set_r(cpu, ptex, pte.pte1);
    }
    if (!(pte.pte1 & HPTE64_R_C)) {
        if (access_type == MMU_DATA_STORE) {
            ppc_hash64_set_c(cpu, ptex, pte.pte1);
        } else {
            /*
             * Treat the page as read-only for now, so that a later write
             * will pass through this function again to set the C bit
             */
            prot &= ~PAGE_WRITE;
        }
    }

    /* 7. Determine the real address from the PTE */

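    /*
     * deposit64() replaces the low apshift bits of the RPN with the
     * offset within the actual page taken from the effective address.
     */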
    *raddrp = deposit64(pte.pte1 & HPTE64_R_RPN, 0, apshift, eaddr);
    *protp = prot;
    *psizep = apshift;
    return true;
}

void ppc_hash64_tlb_flush_hpte(PowerPCCPU *cpu, target_ulong ptex,
                               target_ulong pte0, target_ulong pte1)
{
    /*
     * XXX: given the fact that there are too many segments to
     * invalidate, and we still don't have a tlb_flush_mask(env, n,
     * mask) in QEMU, we just invalidate all TLBs
     */
    cpu->env.tlb_need_flush = TLB_NEED_GLOBAL_FLUSH | TLB_NEED_LOCAL_FLUSH;
}

#ifdef CONFIG_TCG
void helper_store_lpcr(CPUPPCState *env, target_ulong val)
{
    PowerPCCPU *cpu = env_archcpu(env);

    ppc_store_lpcr(cpu, val);
}
#endif

void ppc_hash64_init(PowerPCCPU *cpu)
{
    CPUPPCState *env = &cpu->env;
    PowerPCCPUClass *pcc = POWERPC_CPU_GET_CLASS(cpu);

    if (!pcc->hash64_opts) {
        assert(!mmu_is_64bit(env->mmu_model));
        return;
    }

    cpu->hash64_opts = g_memdup(pcc->hash64_opts, sizeof(*cpu->hash64_opts));
}

void ppc_hash64_finalize(PowerPCCPU *cpu)
{
    g_free(cpu->hash64_opts);
}

const PPCHash64Options ppc_hash64_opts_basic = {
    .flags = 0,
    .slb_size = 64,
    .sps = {
        { .page_shift = 12, /* 4K */
          .slb_enc = 0,
          .enc = { { .page_shift = 12, .pte_enc = 0 } }
        },
        { .page_shift = 24, /* 16M */
          .slb_enc = 0x100,
          .enc = { { .page_shift = 24, .pte_enc = 0 } }
        },
    },
};

const PPCHash64Options ppc_hash64_opts_POWER7 = {
    .flags = PPC_HASH64_1TSEG | PPC_HASH64_AMR | PPC_HASH64_CI_LARGEPAGE,
    .slb_size = 32,
    .sps = {
        {
            .page_shift = 12, /* 4K */
            .slb_enc = 0,
            .enc = { { .page_shift = 12, .pte_enc = 0 },
                     { .page_shift = 16, .pte_enc = 0x7 },
                     { .page_shift = 24, .pte_enc = 0x38 }, },
        },
        {
            .page_shift = 16, /* 64K */
            .slb_enc = SLB_VSID_64K,
            .enc = { { .page_shift = 16, .pte_enc = 0x1 },
                     { .page_shift = 24, .pte_enc = 0x8 }, },
        },
        {
            .page_shift = 24, /* 16M */
            .slb_enc = SLB_VSID_16M,
            .enc = { { .page_shift = 24, .pte_enc = 0 }, },
        },
        {
            .page_shift = 34, /* 16G */
            .slb_enc = SLB_VSID_16G,
            .enc = { { .page_shift = 34, .pte_enc = 0x3 }, },
        },
    }
};