/*
 * PowerPC MMU, TLB, SLB and BAT emulation helpers for QEMU.
 *
 * Copyright (c) 2003-2007 Jocelyn Mayer
 * Copyright (c) 2013 David Gibson, IBM Corporation
 *
 * This library is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License as published by the Free Software Foundation; either
 * version 2.1 of the License, or (at your option) any later version.
 *
 * This library is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with this library; if not, see <http://www.gnu.org/licenses/>.
 */
#include "qemu/osdep.h"
#include "qemu/units.h"
#include "cpu.h"
#include "exec/exec-all.h"
#include "exec/page-protection.h"
#include "qemu/error-report.h"
#include "qemu/qemu-print.h"
#include "sysemu/hw_accel.h"
#include "kvm_ppc.h"
#include "mmu-hash64.h"
#include "exec/log.h"
#include "hw/hw.h"
#include "internal.h"
#include "mmu-book3s-v3.h"
#include "helper_regs.h"

#ifdef CONFIG_TCG
#include "exec/helper-proto.h"
#endif

/* #define DEBUG_SLB */

#ifdef DEBUG_SLB
#  define LOG_SLB(...) qemu_log_mask(CPU_LOG_MMU, __VA_ARGS__)
#else
#  define LOG_SLB(...) do { } while (0)
#endif

/*
 * SLB handling
 */

static ppc_slb_t *slb_lookup(PowerPCCPU *cpu, target_ulong eaddr)
{
    CPUPPCState *env = &cpu->env;
    uint64_t esid_256M, esid_1T;
    int n;

    LOG_SLB("%s: eaddr " TARGET_FMT_lx "\n", __func__, eaddr);

    esid_256M = (eaddr & SEGMENT_MASK_256M) | SLB_ESID_V;
    esid_1T = (eaddr & SEGMENT_MASK_1T) | SLB_ESID_V;

    for (n = 0; n < cpu->hash64_opts->slb_size; n++) {
        ppc_slb_t *slb = &env->slb[n];

        LOG_SLB("%s: slot %d %016" PRIx64 " %016"
                PRIx64 "\n", __func__, n, slb->esid, slb->vsid);
        /*
         * We check for 1T matches on all MMUs here - if the MMU
         * doesn't have 1T segment support, we will have prevented 1T
         * entries from being inserted in the slbmte code.
         */
        if (((slb->esid == esid_256M) &&
             ((slb->vsid & SLB_VSID_B) == SLB_VSID_B_256M))
            || ((slb->esid == esid_1T) &&
                ((slb->vsid & SLB_VSID_B) == SLB_VSID_B_1T))) {
            return slb;
        }
    }

    return NULL;
}

void dump_slb(PowerPCCPU *cpu)
{
    CPUPPCState *env = &cpu->env;
    int i;
    uint64_t slbe, slbv;

    cpu_synchronize_state(CPU(cpu));

    qemu_printf("SLB\tESID\t\t\tVSID\n");
    for (i = 0; i < cpu->hash64_opts->slb_size; i++) {
        slbe = env->slb[i].esid;
        slbv = env->slb[i].vsid;
        if (slbe == 0 && slbv == 0) {
            continue;
        }
        qemu_printf("%d\t0x%016" PRIx64 "\t0x%016" PRIx64 "\n",
                    i, slbe, slbv);
    }
}

#ifdef CONFIG_TCG
void helper_SLBIA(CPUPPCState *env, uint32_t ih)
{
    PowerPCCPU *cpu = env_archcpu(env);
    int starting_entry;
    int n;

    /*
     * slbia must always flush all TLB (which is equivalent to ERAT in ppc
     * architecture). Matching on SLB_ESID_V is not good enough, because slbmte
     * can overwrite a valid SLB without flushing its lookaside information.
     *
     * It would be possible to keep the TLB in synch with the SLB by flushing
     * when a valid entry is overwritten by slbmte, and therefore slbia would
     * not have to flush unless it evicts a valid SLB entry. However it is
     * expected that slbmte is more common than slbia, and slbia is usually
     * going to evict valid SLB entries, so that tradeoff is unlikely to be a
     * good one.
     *
     * ISA v2.05 introduced IH field with values 0,1,2,6. These all invalidate
     * the same SLB entries (everything but entry 0), but differ in what
     * "lookaside information" is invalidated. TCG can ignore this and flush
     * everything.
     *
     * ISA v3.0 introduced additional values 3,4,7, which change what SLBs are
     * invalidated.
     */

    env->tlb_need_flush |= TLB_NEED_LOCAL_FLUSH;

    starting_entry = 1; /* default for IH=0,1,2,6 */

    if (env->mmu_model == POWERPC_MMU_3_00) {
        switch (ih) {
        case 0x7:
            /* invalidate no SLBs, but all lookaside information */
            return;

        case 0x3:
        case 0x4:
            /* also considers SLB entry 0 */
            starting_entry = 0;
            break;

        case 0x5:
            /* treat undefined values as ih==0, and warn */
            qemu_log_mask(LOG_GUEST_ERROR,
                          "slbia undefined IH field %u.\n", ih);
            break;

        default:
            /* 0,1,2,6 */
            break;
        }
    }

    for (n = starting_entry; n < cpu->hash64_opts->slb_size; n++) {
        ppc_slb_t *slb = &env->slb[n];

        if (!(slb->esid & SLB_ESID_V)) {
            continue;
        }
        if (env->mmu_model == POWERPC_MMU_3_00) {
            if (ih == 0x3 && (slb->vsid & SLB_VSID_C) == 0) {
                /* preserves entries with a class value of 0 */
                continue;
            }
        }

        slb->esid &= ~SLB_ESID_V;
    }
}

#if defined(TARGET_PPC64)
void helper_SLBIAG(CPUPPCState *env, target_ulong rs, uint32_t l)
{
    PowerPCCPU *cpu = env_archcpu(env);
    int n;

    /*
     * slbiag must always flush all TLB (which is equivalent to ERAT in ppc
     * architecture). Matching on SLB_ESID_V is not good enough, because slbmte
     * can overwrite a valid SLB without flushing its lookaside information.
     *
     * It would be possible to keep the TLB in synch with the SLB by flushing
     * when a valid entry is overwritten by slbmte, and therefore slbiag would
     * not have to flush unless it evicts a valid SLB entry. However it is
     * expected that slbmte is more common than slbiag, and slbiag is usually
     * going to evict valid SLB entries, so that tradeoff is unlikely to be a
     * good one.
     */
    env->tlb_need_flush |= TLB_NEED_LOCAL_FLUSH;

    for (n = 0; n < cpu->hash64_opts->slb_size; n++) {
        ppc_slb_t *slb = &env->slb[n];
        slb->esid &= ~SLB_ESID_V;
    }
}
#endif

static void __helper_slbie(CPUPPCState *env, target_ulong addr,
                           target_ulong global)
{
    PowerPCCPU *cpu = env_archcpu(env);
    ppc_slb_t *slb;

    slb = slb_lookup(cpu, addr);
    if (!slb) {
        return;
    }

    if (slb->esid & SLB_ESID_V) {
        slb->esid &= ~SLB_ESID_V;

        /*
         * XXX: given the fact that segment size is 256 MB or 1TB,
         *      and we still don't have a tlb_flush_mask(env, n, mask)
         *      in QEMU, we just invalidate all TLBs
         */
        env->tlb_need_flush |=
            (global == false ? TLB_NEED_LOCAL_FLUSH : TLB_NEED_GLOBAL_FLUSH);
    }
}

void helper_SLBIE(CPUPPCState *env, target_ulong addr)
{
    __helper_slbie(env, addr, false);
}

void helper_SLBIEG(CPUPPCState *env, target_ulong addr)
{
    __helper_slbie(env, addr, true);
}
#endif

int ppc_store_slb(PowerPCCPU *cpu, target_ulong slot,
                  target_ulong esid, target_ulong vsid)
{
    CPUPPCState *env = &cpu->env;
    ppc_slb_t *slb = &env->slb[slot];
    const PPCHash64SegmentPageSizes *sps = NULL;
    int i;

    if (slot >= cpu->hash64_opts->slb_size) {
        return -1; /* Bad slot number */
    }
    if (esid & ~(SLB_ESID_ESID | SLB_ESID_V)) {
        return -1; /* Reserved bits set */
    }
    if (vsid & (SLB_VSID_B & ~SLB_VSID_B_1T)) {
        return -1; /* Bad segment size */
    }
    if ((vsid & SLB_VSID_B) && !(ppc_hash64_has(cpu, PPC_HASH64_1TSEG))) {
        return -1; /* 1T segment on MMU that doesn't support it */
    }

    for (i = 0; i < PPC_PAGE_SIZES_MAX_SZ; i++) {
        const PPCHash64SegmentPageSizes *sps1 = &cpu->hash64_opts->sps[i];

        if (!sps1->page_shift) {
            break;
        }

        if ((vsid & SLB_VSID_LLP_MASK) == sps1->slb_enc) {
            sps = sps1;
            break;
        }
    }

    if (!sps) {
        error_report("Bad page size encoding in SLB store: slot "TARGET_FMT_lu
                     " esid 0x"TARGET_FMT_lx" vsid 0x"TARGET_FMT_lx,
                     slot, esid, vsid);
        return -1;
    }

    slb->esid = esid;
    slb->vsid = vsid;
    slb->sps = sps;

    LOG_SLB("%s: " TARGET_FMT_lu " " TARGET_FMT_lx " - " TARGET_FMT_lx
            " => %016" PRIx64 " %016" PRIx64 "\n", __func__, slot, esid, vsid,
            slb->esid, slb->vsid);

    return 0;
}

#ifdef CONFIG_TCG
static int ppc_load_slb_esid(PowerPCCPU *cpu, target_ulong rb,
                             target_ulong *rt)
{
    CPUPPCState *env = &cpu->env;
    int slot = rb & 0xfff;
    ppc_slb_t *slb = &env->slb[slot];

    if (slot >= cpu->hash64_opts->slb_size) {
        return -1;
    }

    *rt = slb->esid;
    return 0;
}

static int ppc_load_slb_vsid(PowerPCCPU *cpu, target_ulong rb,
                             target_ulong *rt)
{
    CPUPPCState *env = &cpu->env;
    int slot = rb & 0xfff;
    ppc_slb_t *slb = &env->slb[slot];

    if (slot >= cpu->hash64_opts->slb_size) {
        return -1;
    }

    *rt = slb->vsid;
    return 0;
}

static int ppc_find_slb_vsid(PowerPCCPU *cpu, target_ulong rb,
                             target_ulong *rt)
{
    CPUPPCState *env = &cpu->env;
    ppc_slb_t *slb;

    if (!msr_is_64bit(env, env->msr)) {
        rb &= 0xffffffff;
    }
    slb = slb_lookup(cpu, rb);
    if (slb == NULL) {
        *rt = (target_ulong)-1ul;
    } else {
        *rt = slb->vsid;
    }
    return 0;
}

void helper_SLBMTE(CPUPPCState *env, target_ulong rb, target_ulong rs)
{
    PowerPCCPU *cpu = env_archcpu(env);

    if (ppc_store_slb(cpu, rb & 0xfff, rb & ~0xfffULL, rs) < 0) {
        raise_exception_err_ra(env, POWERPC_EXCP_PROGRAM,
                               POWERPC_EXCP_INVAL, GETPC());
    }
}

target_ulong helper_SLBMFEE(CPUPPCState *env, target_ulong rb)
{
    PowerPCCPU *cpu = env_archcpu(env);
    target_ulong rt = 0;

    if (ppc_load_slb_esid(cpu, rb, &rt) < 0) {
        raise_exception_err_ra(env, POWERPC_EXCP_PROGRAM,
                               POWERPC_EXCP_INVAL, GETPC());
    }
    return rt;
}

target_ulong helper_SLBFEE(CPUPPCState *env, target_ulong rb)
{
    PowerPCCPU *cpu = env_archcpu(env);
    target_ulong rt = 0;

    if (ppc_find_slb_vsid(cpu, rb, &rt) < 0) {
        raise_exception_err_ra(env, POWERPC_EXCP_PROGRAM,
                               POWERPC_EXCP_INVAL, GETPC());
    }
    return rt;
}

target_ulong helper_SLBMFEV(CPUPPCState *env, target_ulong rb)
{
    PowerPCCPU *cpu = env_archcpu(env);
    target_ulong rt = 0;

    if (ppc_load_slb_vsid(cpu, rb, &rt) < 0) {
        raise_exception_err_ra(env, POWERPC_EXCP_PROGRAM,
                               POWERPC_EXCP_INVAL, GETPC());
    }
    return rt;
}
#endif
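
/*
 * Editorial note on the SLB helpers above (informal summary of the code,
 * not a restatement of the ISA text): helper_SLBMTE splits its RB operand
 * exactly as ppc_store_slb() expects, i.e. the low 12 bits (rb & 0xfff)
 * select the SLB slot while the remaining bits carry the ESID together with
 * the SLB_ESID_V valid bit; the RS operand is stored as the VSID word, so it
 * carries the B (segment size), Ks/Kp, N, C and L||LP fields that the
 * protection and page-size code below inspects.  helper_SLBMFEE/SLBMFEV read
 * an entry back by the same low-12-bit slot index, while helper_SLBFEE does
 * an associative lookup by effective address via slb_lookup().
 */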

/* Check No-Execute or Guarded Storage */
static inline int ppc_hash64_pte_noexec_guard(PowerPCCPU *cpu,
                                              ppc_hash_pte64_t pte)
{
    /* Exec permissions CANNOT take away read or write permissions */
    return (pte.pte1 & HPTE64_R_N) || (pte.pte1 & HPTE64_R_G) ?
            PAGE_READ | PAGE_WRITE : PAGE_READ | PAGE_WRITE | PAGE_EXEC;
}

/* Check Basic Storage Protection */
static int ppc_hash64_pte_prot(int mmu_idx,
                               ppc_slb_t *slb, ppc_hash_pte64_t pte)
{
    unsigned pp, key;
    /*
     * Some pp bit combinations have undefined behaviour, so default
     * to no access in those cases
     */
    int prot = 0;

    key = !!(mmuidx_pr(mmu_idx) ? (slb->vsid & SLB_VSID_KP)
             : (slb->vsid & SLB_VSID_KS));
    pp = (pte.pte1 & HPTE64_R_PP) | ((pte.pte1 & HPTE64_R_PP0) >> 61);

    if (key == 0) {
        switch (pp) {
        case 0x0:
        case 0x1:
        case 0x2:
            prot = PAGE_READ | PAGE_WRITE | PAGE_EXEC;
            break;

        case 0x3:
        case 0x6:
            prot = PAGE_READ | PAGE_EXEC;
            break;
        }
    } else {
        switch (pp) {
        case 0x0:
        case 0x6:
            break;

        case 0x1:
        case 0x3:
            prot = PAGE_READ | PAGE_EXEC;
            break;

        case 0x2:
            prot = PAGE_READ | PAGE_WRITE | PAGE_EXEC;
            break;
        }
    }

    return prot;
}

/* Check the instruction access permissions specified in the IAMR */
static int ppc_hash64_iamr_prot(PowerPCCPU *cpu, int key)
{
    CPUPPCState *env = &cpu->env;
    int iamr_bits = (env->spr[SPR_IAMR] >> 2 * (31 - key)) & 0x3;

    /*
     * An instruction fetch is permitted if the IAMR bit is 0.
     * If the bit is set, return PAGE_READ | PAGE_WRITE because this bit
     * can only take away EXEC permissions not READ or WRITE permissions.
     * If bit is cleared return PAGE_READ | PAGE_WRITE | PAGE_EXEC since
     * EXEC permissions are allowed.
     */
    return (iamr_bits & 0x1) ? PAGE_READ | PAGE_WRITE :
                               PAGE_READ | PAGE_WRITE | PAGE_EXEC;
}

static int ppc_hash64_amr_prot(PowerPCCPU *cpu, ppc_hash_pte64_t pte)
{
    CPUPPCState *env = &cpu->env;
    int key, amrbits;
    int prot = PAGE_READ | PAGE_WRITE | PAGE_EXEC;

    /* Only recent MMUs implement Virtual Page Class Key Protection */
    if (!ppc_hash64_has(cpu, PPC_HASH64_AMR)) {
        return prot;
    }

    key = HPTE64_R_KEY(pte.pte1);
    amrbits = (env->spr[SPR_AMR] >> 2 * (31 - key)) & 0x3;

    /* fprintf(stderr, "AMR protection: key=%d AMR=0x%" PRIx64 "\n", key, */
    /*         env->spr[SPR_AMR]); */

    /*
     * A store is permitted if the AMR bit is 0. Remove write
     * protection if it is set.
     */
    if (amrbits & 0x2) {
        prot &= ~PAGE_WRITE;
    }
    /*
     * A load is permitted if the AMR bit is 0. Remove read
     * protection if it is set.
     */
    if (amrbits & 0x1) {
        prot &= ~PAGE_READ;
    }

    switch (env->mmu_model) {
    /*
     * MMU version 2.07 and later support IAMR
     * Check if the IAMR allows the instruction access - it will return
     * PAGE_EXEC if it doesn't (and thus that bit will be cleared) or 0
     * if it does (and prot will be unchanged indicating execution support).
     */
    case POWERPC_MMU_2_07:
    case POWERPC_MMU_3_00:
        prot &= ppc_hash64_iamr_prot(cpu, key);
        break;
    default:
        break;
    }

    return prot;
}

const ppc_hash_pte64_t *ppc_hash64_map_hptes(PowerPCCPU *cpu,
                                             hwaddr ptex, int n)
{
    hwaddr pte_offset = ptex * HASH_PTE_SIZE_64;
    hwaddr base;
    hwaddr plen = n * HASH_PTE_SIZE_64;
    const ppc_hash_pte64_t *hptes;

    if (cpu->vhyp) {
        return cpu->vhyp_class->map_hptes(cpu->vhyp, ptex, n);
    }
    base = ppc_hash64_hpt_base(cpu);

    if (!base) {
        return NULL;
    }

    hptes = address_space_map(CPU(cpu)->as, base + pte_offset, &plen, false,
                              MEMTXATTRS_UNSPECIFIED);
    if (plen < (n * HASH_PTE_SIZE_64)) {
        hw_error("%s: Unable to map all requested HPTEs\n", __func__);
    }
    return hptes;
}

void ppc_hash64_unmap_hptes(PowerPCCPU *cpu, const ppc_hash_pte64_t *hptes,
                            hwaddr ptex, int n)
{
    if (cpu->vhyp) {
        cpu->vhyp_class->unmap_hptes(cpu->vhyp, hptes, ptex, n);
        return;
    }

    address_space_unmap(CPU(cpu)->as, (void *)hptes, n * HASH_PTE_SIZE_64,
                        false, n * HASH_PTE_SIZE_64);
}

static unsigned hpte_page_shift(const PPCHash64SegmentPageSizes *sps,
                                uint64_t pte0, uint64_t pte1)
{
    int i;

    if (!(pte0 & HPTE64_V_LARGE)) {
        if (sps->page_shift != 12) {
            /* 4kiB page in a non 4kiB segment */
            return 0;
        }
        /* Normal 4kiB page */
        return 12;
    }

    for (i = 0; i < PPC_PAGE_SIZES_MAX_SZ; i++) {
        const PPCHash64PageSize *ps = &sps->enc[i];
        uint64_t mask;

        if (!ps->page_shift) {
            break;
        }

        if (ps->page_shift == 12) {
            /* L bit is set so this can't be a 4kiB page */
            continue;
        }

        mask = ((1ULL << ps->page_shift) - 1) & HPTE64_R_RPN;

        if ((pte1 & mask) == ((uint64_t)ps->pte_enc << HPTE64_R_RPN_SHIFT)) {
            return ps->page_shift;
        }
    }

    return 0; /* Bad page size encoding */
}

static void ppc64_v3_new_to_old_hpte(target_ulong *pte0, target_ulong *pte1)
{
    /* Insert B into pte0 */
    *pte0 = (*pte0 & HPTE64_V_COMMON_BITS) |
            ((*pte1 & HPTE64_R_3_0_SSIZE_MASK) <<
             (HPTE64_V_SSIZE_SHIFT - HPTE64_R_3_0_SSIZE_SHIFT));

    /* Remove B from pte1 */
    *pte1 = *pte1 & ~HPTE64_R_3_0_SSIZE_MASK;
}


static hwaddr ppc_hash64_pteg_search(PowerPCCPU *cpu, hwaddr hash,
                                     const PPCHash64SegmentPageSizes *sps,
                                     target_ulong ptem,
                                     ppc_hash_pte64_t *pte, unsigned *pshift)
{
    int i;
    const ppc_hash_pte64_t *pteg;
    target_ulong pte0, pte1;
    target_ulong ptex;

    ptex = (hash & ppc_hash64_hpt_mask(cpu)) * HPTES_PER_GROUP;
    pteg = ppc_hash64_map_hptes(cpu, ptex, HPTES_PER_GROUP);
    if (!pteg) {
        return -1;
    }
    for (i = 0; i < HPTES_PER_GROUP; i++) {
        pte0 = ppc_hash64_hpte0(cpu, pteg, i);
        /*
         * pte0 contains the valid bit and must be read before pte1,
         * otherwise we might see an old pte1 with a new valid bit and
         * thus an inconsistent hpte value
         */
        smp_rmb();
        pte1 = ppc_hash64_hpte1(cpu, pteg, i);

        /* Convert format if necessary */
        if (cpu->env.mmu_model == POWERPC_MMU_3_00 && !cpu->vhyp) {
            ppc64_v3_new_to_old_hpte(&pte0, &pte1);
        }

        /* This compares V, B, H (secondary) and the AVPN */
        if (HPTE64_V_COMPARE(pte0, ptem)) {
            *pshift = hpte_page_shift(sps, pte0, pte1);
            /*
             * If there is no match, ignore the PTE, it could simply
             * be for a different segment size encoding and the
             * architecture specifies we should not match. Linux will
             * potentially leave behind PTEs for the wrong base page
             * size when demoting segments.
             */
            if (*pshift == 0) {
                continue;
            }
            /*
             * We don't do anything with pshift yet as qemu TLB only
             * deals with 4K pages anyway
             */
            pte->pte0 = pte0;
            pte->pte1 = pte1;
            ppc_hash64_unmap_hptes(cpu, pteg, ptex, HPTES_PER_GROUP);
            return ptex + i;
        }
    }
    ppc_hash64_unmap_hptes(cpu, pteg, ptex, HPTES_PER_GROUP);
    /*
     * We didn't find a valid entry.
     */
    return -1;
}

static hwaddr ppc_hash64_htab_lookup(PowerPCCPU *cpu,
                                     ppc_slb_t *slb, target_ulong eaddr,
                                     ppc_hash_pte64_t *pte, unsigned *pshift)
{
    CPUPPCState *env = &cpu->env;
    hwaddr hash, ptex;
    uint64_t vsid, epnmask, epn, ptem;
    const PPCHash64SegmentPageSizes *sps = slb->sps;

    /*
     * The SLB store path should prevent any bad page size encodings
     * getting in there, so:
     */
    assert(sps);

    /* If ISL is set in LPCR we need to clamp the page size to 4K */
    if (env->spr[SPR_LPCR] & LPCR_ISL) {
        /* We assume that when using TCG, 4k is first entry of SPS */
        sps = &cpu->hash64_opts->sps[0];
        assert(sps->page_shift == 12);
    }

    epnmask = ~((1ULL << sps->page_shift) - 1);

    if (slb->vsid & SLB_VSID_B) {
        /* 1TB segment */
        vsid = (slb->vsid & SLB_VSID_VSID) >> SLB_VSID_SHIFT_1T;
        epn = (eaddr & ~SEGMENT_MASK_1T) & epnmask;
        hash = vsid ^ (vsid << 25) ^ (epn >> sps->page_shift);
    } else {
        /* 256M segment */
        vsid = (slb->vsid & SLB_VSID_VSID) >> SLB_VSID_SHIFT;
        epn = (eaddr & ~SEGMENT_MASK_256M) & epnmask;
        hash = vsid ^ (epn >> sps->page_shift);
    }
    ptem = (slb->vsid & SLB_VSID_PTEM) | ((epn >> 16) & HPTE64_V_AVPN);
    ptem |= HPTE64_V_VALID;

    /* Page address translation */
    qemu_log_mask(CPU_LOG_MMU,
                  "htab_base " HWADDR_FMT_plx " htab_mask " HWADDR_FMT_plx
                  " hash " HWADDR_FMT_plx "\n",
                  ppc_hash64_hpt_base(cpu), ppc_hash64_hpt_mask(cpu), hash);

    /* Primary PTEG lookup */
    qemu_log_mask(CPU_LOG_MMU,
                  "0 htab=" HWADDR_FMT_plx "/" HWADDR_FMT_plx
                  " vsid=" TARGET_FMT_lx " ptem=" TARGET_FMT_lx
                  " hash=" HWADDR_FMT_plx "\n",
                  ppc_hash64_hpt_base(cpu), ppc_hash64_hpt_mask(cpu),
                  vsid, ptem, hash);
    ptex = ppc_hash64_pteg_search(cpu, hash, sps, ptem, pte, pshift);

    if (ptex == -1) {
        /* Secondary PTEG lookup */
        ptem |= HPTE64_V_SECONDARY;
        qemu_log_mask(CPU_LOG_MMU,
                      "1 htab=" HWADDR_FMT_plx "/" HWADDR_FMT_plx
                      " vsid=" TARGET_FMT_lx " api=" TARGET_FMT_lx
                      " hash=" HWADDR_FMT_plx "\n", ppc_hash64_hpt_base(cpu),
                      ppc_hash64_hpt_mask(cpu), vsid, ptem, ~hash);

        ptex = ppc_hash64_pteg_search(cpu, ~hash, sps, ptem, pte, pshift);
    }

    return ptex;
}
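
/*
 * Editorial note summarising the lookup above (the concrete numbers are only
 * an illustration): for a 256M segment the primary hash is
 *     hash = VSID ^ (EPN >> page_shift)
 * and for a 1T segment
 *     hash = VSID ^ (VSID << 25) ^ (EPN >> page_shift)
 * The primary PTEG starts at HPTE index (hash & htab_mask) * HPTES_PER_GROUP;
 * the secondary PTEG uses the ones-complement hash (~hash) and sets
 * HPTE64_V_SECONDARY in the match value.  For example, with 4K pages,
 * VSID 0x123 and an EPN of 0x20000 within the segment, the primary hash
 * would be 0x123 ^ 0x20 = 0x103.
 */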

unsigned ppc_hash64_hpte_page_shift_noslb(PowerPCCPU *cpu,
                                          uint64_t pte0, uint64_t pte1)
{
    int i;

    if (!(pte0 & HPTE64_V_LARGE)) {
        return 12;
    }

    /*
     * The encodings in env->sps need to be carefully chosen so that
     * this gives an unambiguous result.
     */
    for (i = 0; i < PPC_PAGE_SIZES_MAX_SZ; i++) {
        const PPCHash64SegmentPageSizes *sps = &cpu->hash64_opts->sps[i];
        unsigned shift;

        if (!sps->page_shift) {
            break;
        }

        shift = hpte_page_shift(sps, pte0, pte1);
        if (shift) {
            return shift;
        }
    }

    return 0;
}

static bool ppc_hash64_use_vrma(CPUPPCState *env)
{
    switch (env->mmu_model) {
    case POWERPC_MMU_3_00:
        /*
         * ISAv3.0 (POWER9) always uses VRMA, the VPM0 field and RMOR
         * register no longer exist
         */
        return true;

    default:
        return !!(env->spr[SPR_LPCR] & LPCR_VPM0);
    }
}

static void ppc_hash64_set_isi(CPUState *cs, int mmu_idx, uint64_t slb_vsid,
                               uint64_t error_code)
{
    CPUPPCState *env = &POWERPC_CPU(cs)->env;
    bool vpm;

    if (!mmuidx_real(mmu_idx)) {
        vpm = !!(env->spr[SPR_LPCR] & LPCR_VPM1);
    } else {
        vpm = ppc_hash64_use_vrma(env);
    }
    if (vpm && !mmuidx_hv(mmu_idx)) {
        cs->exception_index = POWERPC_EXCP_HISI;
        env->spr[SPR_ASDR] = slb_vsid;
    } else {
        cs->exception_index = POWERPC_EXCP_ISI;
    }
    env->error_code = error_code;
}

static void ppc_hash64_set_dsi(CPUState *cs, int mmu_idx, uint64_t slb_vsid,
                               uint64_t dar, uint64_t dsisr)
{
    CPUPPCState *env = &POWERPC_CPU(cs)->env;
    bool vpm;

    if (!mmuidx_real(mmu_idx)) {
        vpm = !!(env->spr[SPR_LPCR] & LPCR_VPM1);
    } else {
        vpm = ppc_hash64_use_vrma(env);
    }
    if (vpm && !mmuidx_hv(mmu_idx)) {
        cs->exception_index = POWERPC_EXCP_HDSI;
        env->spr[SPR_HDAR] = dar;
        env->spr[SPR_HDSISR] = dsisr;
        env->spr[SPR_ASDR] = slb_vsid;
    } else {
        cs->exception_index = POWERPC_EXCP_DSI;
        env->spr[SPR_DAR] = dar;
        env->spr[SPR_DSISR] = dsisr;
    }
    env->error_code = 0;
}


static void ppc_hash64_set_r(PowerPCCPU *cpu, hwaddr ptex, uint64_t pte1)
{
    hwaddr base, offset = ptex * HASH_PTE_SIZE_64 + HPTE64_DW1_R;

    if (cpu->vhyp) {
        cpu->vhyp_class->hpte_set_r(cpu->vhyp, ptex, pte1);
        return;
    }
    base = ppc_hash64_hpt_base(cpu);


    /* The HW performs a non-atomic byte update */
    stb_phys(CPU(cpu)->as, base + offset, ((pte1 >> 8) & 0xff) | 0x01);
}

static void ppc_hash64_set_c(PowerPCCPU *cpu, hwaddr ptex, uint64_t pte1)
{
    hwaddr base, offset = ptex * HASH_PTE_SIZE_64 + HPTE64_DW1_C;

    if (cpu->vhyp) {
        cpu->vhyp_class->hpte_set_c(cpu->vhyp, ptex, pte1);
        return;
    }
    base = ppc_hash64_hpt_base(cpu);

    /* The HW performs a non-atomic byte update */
    stb_phys(CPU(cpu)->as, base + offset, (pte1 & 0xff) | 0x80);
}

static target_ulong rmls_limit(PowerPCCPU *cpu)
{
    CPUPPCState *env = &cpu->env;
    /*
     * In theory the meanings of RMLS values are implementation
     * dependent. In practice, this seems to have been the set from
     * POWER4+..POWER8, and RMLS is no longer supported in POWER9.
     *
     * Unsupported values mean the OS has shot itself in the
     * foot. Return a 0-sized RMA in this case, which we expect
     * to trigger an immediate DSI or ISI
     */
    static const target_ulong rma_sizes[16] = {
        [0] = 256 * GiB,
        [1] = 16 * GiB,
        [2] = 1 * GiB,
        [3] = 64 * MiB,
        [4] = 256 * MiB,
        [7] = 128 * MiB,
        [8] = 32 * MiB,
    };
    target_ulong rmls = (env->spr[SPR_LPCR] & LPCR_RMLS) >> LPCR_RMLS_SHIFT;

    return rma_sizes[rmls];
}

/* Return the LLP in SLB_VSID format */
static uint64_t get_vrma_llp(PowerPCCPU *cpu)
{
    CPUPPCState *env = &cpu->env;
    uint64_t llp;

    if (env->mmu_model == POWERPC_MMU_3_00) {
        ppc_v3_pate_t pate;
        uint64_t ps, l, lp;

        /*
         * ISA v3.0 removes the LPCR[VRMASD] field and puts the VRMA base
         * page size (L||LP equivalent) in the PS field in the HPT partition
         * table entry.
         */
        if (!ppc64_v3_get_pate(cpu, cpu->env.spr[SPR_LPIDR], &pate)) {
            error_report("Bad VRMA with no partition table entry");
            return 0;
        }
        ps = PATE0_GET_PS(pate.dw0);
        /* PS has L||LP in 3 consecutive bits, put them into SLB LLP format */
        l = (ps >> 2) & 0x1;
        lp = ps & 0x3;
        llp = (l << SLB_VSID_L_SHIFT) | (lp << SLB_VSID_LP_SHIFT);

    } else {
        uint64_t lpcr = env->spr[SPR_LPCR];
        target_ulong vrmasd = (lpcr & LPCR_VRMASD) >> LPCR_VRMASD_SHIFT;

        /* VRMASD LLP matches SLB format, just shift and mask it */
        llp = (vrmasd << SLB_VSID_LP_SHIFT) & SLB_VSID_LLP_MASK;
    }

    return llp;
}

static int build_vrma_slbe(PowerPCCPU *cpu, ppc_slb_t *slb)
{
    uint64_t llp = get_vrma_llp(cpu);
    target_ulong vsid = SLB_VSID_VRMA | llp;
    int i;

    for (i = 0; i < PPC_PAGE_SIZES_MAX_SZ; i++) {
        const PPCHash64SegmentPageSizes *sps = &cpu->hash64_opts->sps[i];

        if (!sps->page_shift) {
            break;
        }

        if ((vsid & SLB_VSID_LLP_MASK) == sps->slb_enc) {
            slb->esid = SLB_ESID_V;
            slb->vsid = vsid;
            slb->sps = sps;
            return 0;
        }
    }

    error_report("Bad VRMA page size encoding 0x" TARGET_FMT_lx, llp);

    return -1;
}
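
/*
 * Editorial overview of ppc_hash64_xlate() below, matching the numbered
 * steps in the function body: (1) real mode accesses bypass translation and
 * are handled via the VRMA/RMLS/(H)RMOR rules, (2) the SLB is searched for a
 * matching segment, (3) segment-level no-execute is checked, (4) the hashed
 * page table is searched (primary then secondary PTEG), (5) the PTE, storage
 * key and AMR/IAMR protections are combined, (6) the Reference and Change
 * bits are updated if necessary, and (7) the real address is formed from the
 * RPN plus the page offset of the effective address.
 */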

bool ppc_hash64_xlate(PowerPCCPU *cpu, vaddr eaddr, MMUAccessType access_type,
                      hwaddr *raddrp, int *psizep, int *protp, int mmu_idx,
                      bool guest_visible)
{
    CPUState *cs = CPU(cpu);
    CPUPPCState *env = &cpu->env;
    ppc_slb_t vrma_slbe;
    ppc_slb_t *slb;
    unsigned apshift;
    hwaddr ptex;
    ppc_hash_pte64_t pte;
    int exec_prot, pp_prot, amr_prot, prot;
    int need_prot;
    hwaddr raddr;

    /*
     * Note on LPCR usage: 970 uses HID4, but our special variant of
     * store_spr copies relevant fields into env->spr[SPR_LPCR].
     * Similarly we filter unimplemented bits when storing into LPCR
     * depending on the MMU version. This code can thus just use the
     * LPCR "as-is".
     */

    /* 1. Handle real mode accesses */
    if (mmuidx_real(mmu_idx)) {
        /*
         * Translation is supposedly "off", but in real mode the top 4
         * effective address bits are (mostly) ignored
         */
        raddr = eaddr & 0x0FFFFFFFFFFFFFFFULL;

        if (cpu->vhyp) {
            /*
             * In virtual hypervisor mode, there's nothing to do:
             * EA == GPA == qemu guest address
             */
        } else if (mmuidx_hv(mmu_idx) || !env->has_hv_mode) {
            /* In HV mode, add HRMOR if top EA bit is clear */
            if (!(eaddr >> 63)) {
                raddr |= env->spr[SPR_HRMOR];
            }
        } else if (ppc_hash64_use_vrma(env)) {
            /* Emulated VRMA mode */
            slb = &vrma_slbe;
            if (build_vrma_slbe(cpu, slb) != 0) {
                /* Invalid VRMA setup, machine check */
                if (guest_visible) {
                    cs->exception_index = POWERPC_EXCP_MCHECK;
                    env->error_code = 0;
                }
                return false;
            }

            goto skip_slb_search;
        } else {
            target_ulong limit = rmls_limit(cpu);

            /* Emulated old-style RMO mode, bounds check against RMLS */
            if (raddr >= limit) {
                if (!guest_visible) {
                    return false;
                }
                switch (access_type) {
                case MMU_INST_FETCH:
                    ppc_hash64_set_isi(cs, mmu_idx, 0, SRR1_PROTFAULT);
                    break;
                case MMU_DATA_LOAD:
                    ppc_hash64_set_dsi(cs, mmu_idx, 0, eaddr, DSISR_PROTFAULT);
                    break;
                case MMU_DATA_STORE:
                    ppc_hash64_set_dsi(cs, mmu_idx, 0, eaddr,
                                       DSISR_PROTFAULT | DSISR_ISSTORE);
                    break;
                default:
                    g_assert_not_reached();
                }
                return false;
            }

            raddr |= env->spr[SPR_RMOR];
        }

        *raddrp = raddr;
        *protp = PAGE_READ | PAGE_WRITE | PAGE_EXEC;
        *psizep = TARGET_PAGE_BITS;
        return true;
    }

    /* 2. Translation is on, so look up the SLB */
    slb = slb_lookup(cpu, eaddr);
    if (!slb) {
        /* No entry found, check if in-memory segment tables are in use */
        if (ppc64_use_proc_tbl(cpu)) {
            /* TODO - Unsupported */
            error_report("Segment Table Support Unimplemented");
            exit(1);
        }
        /* Segment still not found, generate the appropriate interrupt */
        if (!guest_visible) {
            return false;
        }
        switch (access_type) {
        case MMU_INST_FETCH:
            cs->exception_index = POWERPC_EXCP_ISEG;
            env->error_code = 0;
            break;
        case MMU_DATA_LOAD:
        case MMU_DATA_STORE:
            cs->exception_index = POWERPC_EXCP_DSEG;
            env->error_code = 0;
            env->spr[SPR_DAR] = eaddr;
            break;
        default:
            g_assert_not_reached();
        }
        return false;
    }

 skip_slb_search:

    /* 3. Check for segment level no-execute violation */
    if (access_type == MMU_INST_FETCH && (slb->vsid & SLB_VSID_N)) {
        if (guest_visible) {
            ppc_hash64_set_isi(cs, mmu_idx, slb->vsid, SRR1_NOEXEC_GUARD);
        }
        return false;
    }

    /* 4. Locate the PTE in the hash table */
    ptex = ppc_hash64_htab_lookup(cpu, slb, eaddr, &pte, &apshift);
    if (ptex == -1) {
        if (!guest_visible) {
            return false;
        }
        switch (access_type) {
        case MMU_INST_FETCH:
            ppc_hash64_set_isi(cs, mmu_idx, slb->vsid, SRR1_NOPTE);
            break;
        case MMU_DATA_LOAD:
            ppc_hash64_set_dsi(cs, mmu_idx, slb->vsid, eaddr, DSISR_NOPTE);
            break;
        case MMU_DATA_STORE:
            ppc_hash64_set_dsi(cs, mmu_idx, slb->vsid, eaddr,
                               DSISR_NOPTE | DSISR_ISSTORE);
            break;
        default:
            g_assert_not_reached();
        }
        return false;
    }
    qemu_log_mask(CPU_LOG_MMU,
                  "found PTE at index %08" HWADDR_PRIx "\n", ptex);

    /* 5. Check access permissions */

    exec_prot = ppc_hash64_pte_noexec_guard(cpu, pte);
    pp_prot = ppc_hash64_pte_prot(mmu_idx, slb, pte);
    amr_prot = ppc_hash64_amr_prot(cpu, pte);
    prot = exec_prot & pp_prot & amr_prot;

    need_prot = check_prot_access_type(PAGE_RWX, access_type);
    if (need_prot & ~prot) {
        /* Access right violation */
        qemu_log_mask(CPU_LOG_MMU, "PTE access rejected\n");
        if (!guest_visible) {
            return false;
        }
        if (access_type == MMU_INST_FETCH) {
            int srr1 = 0;
            if (PAGE_EXEC & ~exec_prot) {
                srr1 |= SRR1_NOEXEC_GUARD; /* Access violates noexec or guard */
            } else if (PAGE_EXEC & ~pp_prot) {
                srr1 |= SRR1_PROTFAULT; /* Access violates access authority */
            }
            if (PAGE_EXEC & ~amr_prot) {
                srr1 |= SRR1_IAMR; /* Access violates virt pg class key prot */
            }
            ppc_hash64_set_isi(cs, mmu_idx, slb->vsid, srr1);
        } else {
            int dsisr = 0;
            if (need_prot & ~pp_prot) {
                dsisr |= DSISR_PROTFAULT;
            }
            if (access_type == MMU_DATA_STORE) {
                dsisr |= DSISR_ISSTORE;
            }
            if (need_prot & ~amr_prot) {
                dsisr |= DSISR_AMR;
            }
            ppc_hash64_set_dsi(cs, mmu_idx, slb->vsid, eaddr, dsisr);
        }
        return false;
    }

    qemu_log_mask(CPU_LOG_MMU, "PTE access granted !\n");

    /* 6. Update PTE referenced and changed bits if necessary */

    if (!(pte.pte1 & HPTE64_R_R)) {
        ppc_hash64_set_r(cpu, ptex, pte.pte1);
    }
    if (!(pte.pte1 & HPTE64_R_C)) {
        if (access_type == MMU_DATA_STORE) {
            ppc_hash64_set_c(cpu, ptex, pte.pte1);
        } else {
            /*
             * Treat the page as read-only for now, so that a later write
             * will pass through this function again to set the C bit
             */
            prot &= ~PAGE_WRITE;
        }
    }

    /* 7. Determine the real address from the PTE */

    *raddrp = deposit64(pte.pte1 & HPTE64_R_RPN, 0, apshift, eaddr);
    *protp = prot;
    *psizep = apshift;
    return true;
}
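
/*
 * Editorial note on step 7 above: deposit64(pte.pte1 & HPTE64_R_RPN, 0,
 * apshift, eaddr) takes the real page number field from the PTE and replaces
 * its low 'apshift' bits with the corresponding low bits of the effective
 * address, i.e. the byte offset within the actual page.  For a 64K page
 * (apshift == 16), for example, the high bits of the real address come from
 * the RPN and the low 16 bits from the effective address.
 */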

void ppc_hash64_tlb_flush_hpte(PowerPCCPU *cpu, target_ulong ptex,
                               target_ulong pte0, target_ulong pte1)
{
    /*
     * XXX: given the fact that there are too many segments to
     *      invalidate, and we still don't have a tlb_flush_mask(env, n,
     *      mask) in QEMU, we just invalidate all TLBs
     */
    cpu->env.tlb_need_flush = TLB_NEED_GLOBAL_FLUSH | TLB_NEED_LOCAL_FLUSH;
}

#ifdef CONFIG_TCG
void helper_store_lpcr(CPUPPCState *env, target_ulong val)
{
    PowerPCCPU *cpu = env_archcpu(env);

    ppc_store_lpcr(cpu, val);
}
#endif

void ppc_hash64_init(PowerPCCPU *cpu)
{
    CPUPPCState *env = &cpu->env;
    PowerPCCPUClass *pcc = POWERPC_CPU_GET_CLASS(cpu);

    if (!pcc->hash64_opts) {
        assert(!mmu_is_64bit(env->mmu_model));
        return;
    }

    cpu->hash64_opts = g_memdup2(pcc->hash64_opts, sizeof(*cpu->hash64_opts));
}

void ppc_hash64_finalize(PowerPCCPU *cpu)
{
    g_free(cpu->hash64_opts);
}

const PPCHash64Options ppc_hash64_opts_basic = {
    .flags = 0,
    .slb_size = 64,
    .sps = {
        { .page_shift = 12, /* 4K */
          .slb_enc = 0,
          .enc = { { .page_shift = 12, .pte_enc = 0 } }
        },
        { .page_shift = 24, /* 16M */
          .slb_enc = 0x100,
          .enc = { { .page_shift = 24, .pte_enc = 0 } }
        },
    },
};

const PPCHash64Options ppc_hash64_opts_POWER7 = {
    .flags = PPC_HASH64_1TSEG | PPC_HASH64_AMR | PPC_HASH64_CI_LARGEPAGE,
    .slb_size = 32,
    .sps = {
        {
            .page_shift = 12, /* 4K */
            .slb_enc = 0,
            .enc = { { .page_shift = 12, .pte_enc = 0 },
                     { .page_shift = 16, .pte_enc = 0x7 },
                     { .page_shift = 24, .pte_enc = 0x38 }, },
        },
        {
            .page_shift = 16, /* 64K */
            .slb_enc = SLB_VSID_64K,
            .enc = { { .page_shift = 16, .pte_enc = 0x1 },
                     { .page_shift = 24, .pte_enc = 0x8 }, },
        },
        {
            .page_shift = 24, /* 16M */
            .slb_enc = SLB_VSID_16M,
            .enc = { { .page_shift = 24, .pte_enc = 0 }, },
        },
        {
            .page_shift = 34, /* 16G */
            .slb_enc = SLB_VSID_16G,
            .enc = { { .page_shift = 34, .pte_enc = 0x3 }, },
        },
    }
};
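
/*
 * Editorial note on reading the option tables above: each
 * PPCHash64SegmentPageSizes entry describes one segment base page size.
 * 'slb_enc' is the L||LP encoding that the SLB VSID must carry for that
 * segment size, and each 'enc' pair gives an actual page size allowed in
 * such a segment along with the 'pte_enc' value that hpte_page_shift()
 * expects to find in the low bits of the HPTE's RPN field when
 * HPTE64_V_LARGE is set.  For example, in the POWER7-style table a 64K page
 * in a 64K segment is encoded with pte_enc 0x1.
 */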