1 /* 2 * PowerPC MMU, TLB, SLB and BAT emulation helpers for QEMU. 3 * 4 * Copyright (c) 2003-2007 Jocelyn Mayer 5 * Copyright (c) 2013 David Gibson, IBM Corporation 6 * 7 * This library is free software; you can redistribute it and/or 8 * modify it under the terms of the GNU Lesser General Public 9 * License as published by the Free Software Foundation; either 10 * version 2.1 of the License, or (at your option) any later version. 11 * 12 * This library is distributed in the hope that it will be useful, 13 * but WITHOUT ANY WARRANTY; without even the implied warranty of 14 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU 15 * Lesser General Public License for more details. 16 * 17 * You should have received a copy of the GNU Lesser General Public 18 * License along with this library; if not, see <http://www.gnu.org/licenses/>. 19 */ 20 #include "qemu/osdep.h" 21 #include "qemu/units.h" 22 #include "cpu.h" 23 #include "exec/exec-all.h" 24 #include "exec/page-protection.h" 25 #include "qemu/error-report.h" 26 #include "qemu/qemu-print.h" 27 #include "sysemu/hw_accel.h" 28 #include "kvm_ppc.h" 29 #include "mmu-hash64.h" 30 #include "exec/log.h" 31 #include "hw/hw.h" 32 #include "internal.h" 33 #include "mmu-book3s-v3.h" 34 #include "helper_regs.h" 35 36 #ifdef CONFIG_TCG 37 #include "exec/helper-proto.h" 38 #endif 39 40 /* #define DEBUG_SLB */ 41 42 #ifdef DEBUG_SLB 43 # define LOG_SLB(...) qemu_log_mask(CPU_LOG_MMU, __VA_ARGS__) 44 #else 45 # define LOG_SLB(...) do { } while (0) 46 #endif 47 48 /* 49 * SLB handling 50 */ 51 52 static ppc_slb_t *slb_lookup(PowerPCCPU *cpu, target_ulong eaddr) 53 { 54 CPUPPCState *env = &cpu->env; 55 uint64_t esid_256M, esid_1T; 56 int n; 57 58 LOG_SLB("%s: eaddr " TARGET_FMT_lx "\n", __func__, eaddr); 59 60 esid_256M = (eaddr & SEGMENT_MASK_256M) | SLB_ESID_V; 61 esid_1T = (eaddr & SEGMENT_MASK_1T) | SLB_ESID_V; 62 63 for (n = 0; n < cpu->hash64_opts->slb_size; n++) { 64 ppc_slb_t *slb = &env->slb[n]; 65 66 LOG_SLB("%s: slot %d %016" PRIx64 " %016" 67 PRIx64 "\n", __func__, n, slb->esid, slb->vsid); 68 /* 69 * We check for 1T matches on all MMUs here - if the MMU 70 * doesn't have 1T segment support, we will have prevented 1T 71 * entries from being inserted in the slbmte code. 72 */ 73 if (((slb->esid == esid_256M) && 74 ((slb->vsid & SLB_VSID_B) == SLB_VSID_B_256M)) 75 || ((slb->esid == esid_1T) && 76 ((slb->vsid & SLB_VSID_B) == SLB_VSID_B_1T))) { 77 return slb; 78 } 79 } 80 81 return NULL; 82 } 83 84 void dump_slb(PowerPCCPU *cpu) 85 { 86 CPUPPCState *env = &cpu->env; 87 int i; 88 uint64_t slbe, slbv; 89 90 cpu_synchronize_state(CPU(cpu)); 91 92 qemu_printf("SLB\tESID\t\t\tVSID\n"); 93 for (i = 0; i < cpu->hash64_opts->slb_size; i++) { 94 slbe = env->slb[i].esid; 95 slbv = env->slb[i].vsid; 96 if (slbe == 0 && slbv == 0) { 97 continue; 98 } 99 qemu_printf("%d\t0x%016" PRIx64 "\t0x%016" PRIx64 "\n", 100 i, slbe, slbv); 101 } 102 } 103 104 #ifdef CONFIG_TCG 105 void helper_SLBIA(CPUPPCState *env, uint32_t ih) 106 { 107 PowerPCCPU *cpu = env_archcpu(env); 108 int starting_entry; 109 int n; 110 111 /* 112 * slbia must always flush all TLB (which is equivalent to ERAT in ppc 113 * architecture). Matching on SLB_ESID_V is not good enough, because slbmte 114 * can overwrite a valid SLB without flushing its lookaside information. 115 * 116 * It would be possible to keep the TLB in synch with the SLB by flushing 117 * when a valid entry is overwritten by slbmte, and therefore slbia would 118 * not have to flush unless it evicts a valid SLB entry. However it is 119 * expected that slbmte is more common than slbia, and slbia is usually 120 * going to evict valid SLB entries, so that tradeoff is unlikely to be a 121 * good one. 122 * 123 * ISA v2.05 introduced IH field with values 0,1,2,6. These all invalidate 124 * the same SLB entries (everything but entry 0), but differ in what 125 * "lookaside information" is invalidated. TCG can ignore this and flush 126 * everything. 127 * 128 * ISA v3.0 introduced additional values 3,4,7, which change what SLBs are 129 * invalidated. 130 */ 131 132 env->tlb_need_flush |= TLB_NEED_LOCAL_FLUSH; 133 134 starting_entry = 1; /* default for IH=0,1,2,6 */ 135 136 if (env->mmu_model == POWERPC_MMU_3_00) { 137 switch (ih) { 138 case 0x7: 139 /* invalidate no SLBs, but all lookaside information */ 140 return; 141 142 case 0x3: 143 case 0x4: 144 /* also considers SLB entry 0 */ 145 starting_entry = 0; 146 break; 147 148 case 0x5: 149 /* treat undefined values as ih==0, and warn */ 150 qemu_log_mask(LOG_GUEST_ERROR, 151 "slbia undefined IH field %u.\n", ih); 152 break; 153 154 default: 155 /* 0,1,2,6 */ 156 break; 157 } 158 } 159 160 for (n = starting_entry; n < cpu->hash64_opts->slb_size; n++) { 161 ppc_slb_t *slb = &env->slb[n]; 162 163 if (!(slb->esid & SLB_ESID_V)) { 164 continue; 165 } 166 if (env->mmu_model == POWERPC_MMU_3_00) { 167 if (ih == 0x3 && (slb->vsid & SLB_VSID_C) == 0) { 168 /* preserves entries with a class value of 0 */ 169 continue; 170 } 171 } 172 173 slb->esid &= ~SLB_ESID_V; 174 } 175 } 176 177 #if defined(TARGET_PPC64) 178 void helper_SLBIAG(CPUPPCState *env, target_ulong rs, uint32_t l) 179 { 180 PowerPCCPU *cpu = env_archcpu(env); 181 int n; 182 183 /* 184 * slbiag must always flush all TLB (which is equivalent to ERAT in ppc 185 * architecture). Matching on SLB_ESID_V is not good enough, because slbmte 186 * can overwrite a valid SLB without flushing its lookaside information. 187 * 188 * It would be possible to keep the TLB in synch with the SLB by flushing 189 * when a valid entry is overwritten by slbmte, and therefore slbiag would 190 * not have to flush unless it evicts a valid SLB entry. However it is 191 * expected that slbmte is more common than slbiag, and slbiag is usually 192 * going to evict valid SLB entries, so that tradeoff is unlikely to be a 193 * good one. 194 */ 195 env->tlb_need_flush |= TLB_NEED_LOCAL_FLUSH; 196 197 for (n = 0; n < cpu->hash64_opts->slb_size; n++) { 198 ppc_slb_t *slb = &env->slb[n]; 199 slb->esid &= ~SLB_ESID_V; 200 } 201 } 202 #endif 203 204 static void __helper_slbie(CPUPPCState *env, target_ulong addr, 205 target_ulong global) 206 { 207 PowerPCCPU *cpu = env_archcpu(env); 208 ppc_slb_t *slb; 209 210 slb = slb_lookup(cpu, addr); 211 if (!slb) { 212 return; 213 } 214 215 if (slb->esid & SLB_ESID_V) { 216 slb->esid &= ~SLB_ESID_V; 217 218 /* 219 * XXX: given the fact that segment size is 256 MB or 1TB, 220 * and we still don't have a tlb_flush_mask(env, n, mask) 221 * in QEMU, we just invalidate all TLBs 222 */ 223 env->tlb_need_flush |= 224 (global == false ? TLB_NEED_LOCAL_FLUSH : TLB_NEED_GLOBAL_FLUSH); 225 } 226 } 227 228 void helper_SLBIE(CPUPPCState *env, target_ulong addr) 229 { 230 __helper_slbie(env, addr, false); 231 } 232 233 void helper_SLBIEG(CPUPPCState *env, target_ulong addr) 234 { 235 __helper_slbie(env, addr, true); 236 } 237 #endif 238 239 int ppc_store_slb(PowerPCCPU *cpu, target_ulong slot, 240 target_ulong esid, target_ulong vsid) 241 { 242 CPUPPCState *env = &cpu->env; 243 ppc_slb_t *slb = &env->slb[slot]; 244 const PPCHash64SegmentPageSizes *sps = NULL; 245 int i; 246 247 if (slot >= cpu->hash64_opts->slb_size) { 248 return -1; /* Bad slot number */ 249 } 250 if (esid & ~(SLB_ESID_ESID | SLB_ESID_V)) { 251 return -1; /* Reserved bits set */ 252 } 253 if (vsid & (SLB_VSID_B & ~SLB_VSID_B_1T)) { 254 return -1; /* Bad segment size */ 255 } 256 if ((vsid & SLB_VSID_B) && !(ppc_hash64_has(cpu, PPC_HASH64_1TSEG))) { 257 return -1; /* 1T segment on MMU that doesn't support it */ 258 } 259 260 for (i = 0; i < PPC_PAGE_SIZES_MAX_SZ; i++) { 261 const PPCHash64SegmentPageSizes *sps1 = &cpu->hash64_opts->sps[i]; 262 263 if (!sps1->page_shift) { 264 break; 265 } 266 267 if ((vsid & SLB_VSID_LLP_MASK) == sps1->slb_enc) { 268 sps = sps1; 269 break; 270 } 271 } 272 273 if (!sps) { 274 error_report("Bad page size encoding in SLB store: slot "TARGET_FMT_lu 275 " esid 0x"TARGET_FMT_lx" vsid 0x"TARGET_FMT_lx, 276 slot, esid, vsid); 277 return -1; 278 } 279 280 slb->esid = esid; 281 slb->vsid = vsid; 282 slb->sps = sps; 283 284 LOG_SLB("%s: " TARGET_FMT_lu " " TARGET_FMT_lx " - " TARGET_FMT_lx 285 " => %016" PRIx64 " %016" PRIx64 "\n", __func__, slot, esid, vsid, 286 slb->esid, slb->vsid); 287 288 return 0; 289 } 290 291 #ifdef CONFIG_TCG 292 static int ppc_load_slb_esid(PowerPCCPU *cpu, target_ulong rb, 293 target_ulong *rt) 294 { 295 CPUPPCState *env = &cpu->env; 296 int slot = rb & 0xfff; 297 ppc_slb_t *slb = &env->slb[slot]; 298 299 if (slot >= cpu->hash64_opts->slb_size) { 300 return -1; 301 } 302 303 *rt = slb->esid; 304 return 0; 305 } 306 307 static int ppc_load_slb_vsid(PowerPCCPU *cpu, target_ulong rb, 308 target_ulong *rt) 309 { 310 CPUPPCState *env = &cpu->env; 311 int slot = rb & 0xfff; 312 ppc_slb_t *slb = &env->slb[slot]; 313 314 if (slot >= cpu->hash64_opts->slb_size) { 315 return -1; 316 } 317 318 *rt = slb->vsid; 319 return 0; 320 } 321 322 static int ppc_find_slb_vsid(PowerPCCPU *cpu, target_ulong rb, 323 target_ulong *rt) 324 { 325 CPUPPCState *env = &cpu->env; 326 ppc_slb_t *slb; 327 328 if (!msr_is_64bit(env, env->msr)) { 329 rb &= 0xffffffff; 330 } 331 slb = slb_lookup(cpu, rb); 332 if (slb == NULL) { 333 *rt = (target_ulong)-1ul; 334 } else { 335 *rt = slb->vsid; 336 } 337 return 0; 338 } 339 340 void helper_SLBMTE(CPUPPCState *env, target_ulong rb, target_ulong rs) 341 { 342 PowerPCCPU *cpu = env_archcpu(env); 343 344 if (ppc_store_slb(cpu, rb & 0xfff, rb & ~0xfffULL, rs) < 0) { 345 raise_exception_err_ra(env, POWERPC_EXCP_PROGRAM, 346 POWERPC_EXCP_INVAL, GETPC()); 347 } 348 } 349 350 target_ulong helper_SLBMFEE(CPUPPCState *env, target_ulong rb) 351 { 352 PowerPCCPU *cpu = env_archcpu(env); 353 target_ulong rt = 0; 354 355 if (ppc_load_slb_esid(cpu, rb, &rt) < 0) { 356 raise_exception_err_ra(env, POWERPC_EXCP_PROGRAM, 357 POWERPC_EXCP_INVAL, GETPC()); 358 } 359 return rt; 360 } 361 362 target_ulong helper_SLBFEE(CPUPPCState *env, target_ulong rb) 363 { 364 PowerPCCPU *cpu = env_archcpu(env); 365 target_ulong rt = 0; 366 367 if (ppc_find_slb_vsid(cpu, rb, &rt) < 0) { 368 raise_exception_err_ra(env, POWERPC_EXCP_PROGRAM, 369 POWERPC_EXCP_INVAL, GETPC()); 370 } 371 return rt; 372 } 373 374 target_ulong helper_SLBMFEV(CPUPPCState *env, target_ulong rb) 375 { 376 PowerPCCPU *cpu = env_archcpu(env); 377 target_ulong rt = 0; 378 379 if (ppc_load_slb_vsid(cpu, rb, &rt) < 0) { 380 raise_exception_err_ra(env, POWERPC_EXCP_PROGRAM, 381 POWERPC_EXCP_INVAL, GETPC()); 382 } 383 return rt; 384 } 385 #endif 386 387 /* Check No-Execute or Guarded Storage */ 388 static inline int ppc_hash64_pte_noexec_guard(PowerPCCPU *cpu, 389 ppc_hash_pte64_t pte) 390 { 391 /* Exec permissions CANNOT take away read or write permissions */ 392 return (pte.pte1 & HPTE64_R_N) || (pte.pte1 & HPTE64_R_G) ? 393 PAGE_READ | PAGE_WRITE : PAGE_READ | PAGE_WRITE | PAGE_EXEC; 394 } 395 396 /* Check Basic Storage Protection */ 397 static int ppc_hash64_pte_prot(int mmu_idx, 398 ppc_slb_t *slb, ppc_hash_pte64_t pte) 399 { 400 unsigned pp, key; 401 /* 402 * Some pp bit combinations have undefined behaviour, so default 403 * to no access in those cases 404 */ 405 int prot = 0; 406 407 key = !!(mmuidx_pr(mmu_idx) ? (slb->vsid & SLB_VSID_KP) 408 : (slb->vsid & SLB_VSID_KS)); 409 pp = (pte.pte1 & HPTE64_R_PP) | ((pte.pte1 & HPTE64_R_PP0) >> 61); 410 411 if (key == 0) { 412 switch (pp) { 413 case 0x0: 414 case 0x1: 415 case 0x2: 416 prot = PAGE_READ | PAGE_WRITE | PAGE_EXEC; 417 break; 418 419 case 0x3: 420 case 0x6: 421 prot = PAGE_READ | PAGE_EXEC; 422 break; 423 } 424 } else { 425 switch (pp) { 426 case 0x0: 427 case 0x6: 428 break; 429 430 case 0x1: 431 case 0x3: 432 prot = PAGE_READ | PAGE_EXEC; 433 break; 434 435 case 0x2: 436 prot = PAGE_READ | PAGE_WRITE | PAGE_EXEC; 437 break; 438 } 439 } 440 441 return prot; 442 } 443 444 /* Check the instruction access permissions specified in the IAMR */ 445 static int ppc_hash64_iamr_prot(PowerPCCPU *cpu, int key) 446 { 447 CPUPPCState *env = &cpu->env; 448 int iamr_bits = (env->spr[SPR_IAMR] >> 2 * (31 - key)) & 0x3; 449 450 /* 451 * An instruction fetch is permitted if the IAMR bit is 0. 452 * If the bit is set, return PAGE_READ | PAGE_WRITE because this bit 453 * can only take away EXEC permissions not READ or WRITE permissions. 454 * If bit is cleared return PAGE_READ | PAGE_WRITE | PAGE_EXEC since 455 * EXEC permissions are allowed. 456 */ 457 return (iamr_bits & 0x1) ? PAGE_READ | PAGE_WRITE : 458 PAGE_READ | PAGE_WRITE | PAGE_EXEC; 459 } 460 461 static int ppc_hash64_amr_prot(PowerPCCPU *cpu, ppc_hash_pte64_t pte) 462 { 463 CPUPPCState *env = &cpu->env; 464 int key, amrbits; 465 int prot = PAGE_READ | PAGE_WRITE | PAGE_EXEC; 466 467 /* Only recent MMUs implement Virtual Page Class Key Protection */ 468 if (!ppc_hash64_has(cpu, PPC_HASH64_AMR)) { 469 return prot; 470 } 471 472 key = HPTE64_R_KEY(pte.pte1); 473 amrbits = (env->spr[SPR_AMR] >> 2 * (31 - key)) & 0x3; 474 475 /* fprintf(stderr, "AMR protection: key=%d AMR=0x%" PRIx64 "\n", key, */ 476 /* env->spr[SPR_AMR]); */ 477 478 /* 479 * A store is permitted if the AMR bit is 0. Remove write 480 * protection if it is set. 481 */ 482 if (amrbits & 0x2) { 483 prot &= ~PAGE_WRITE; 484 } 485 /* 486 * A load is permitted if the AMR bit is 0. Remove read 487 * protection if it is set. 488 */ 489 if (amrbits & 0x1) { 490 prot &= ~PAGE_READ; 491 } 492 493 switch (env->mmu_model) { 494 /* 495 * MMU version 2.07 and later support IAMR 496 * Check if the IAMR allows the instruction access - it will return 497 * PAGE_EXEC if it doesn't (and thus that bit will be cleared) or 0 498 * if it does (and prot will be unchanged indicating execution support). 499 */ 500 case POWERPC_MMU_2_07: 501 case POWERPC_MMU_3_00: 502 prot &= ppc_hash64_iamr_prot(cpu, key); 503 break; 504 default: 505 break; 506 } 507 508 return prot; 509 } 510 511 static hwaddr ppc_hash64_hpt_base(PowerPCCPU *cpu) 512 { 513 uint64_t base; 514 515 if (cpu->vhyp) { 516 return 0; 517 } 518 if (cpu->env.mmu_model == POWERPC_MMU_3_00) { 519 ppc_v3_pate_t pate; 520 521 if (!ppc64_v3_get_pate(cpu, cpu->env.spr[SPR_LPIDR], &pate)) { 522 return 0; 523 } 524 base = pate.dw0; 525 } else { 526 base = cpu->env.spr[SPR_SDR1]; 527 } 528 return base & SDR_64_HTABORG; 529 } 530 531 static hwaddr ppc_hash64_hpt_mask(PowerPCCPU *cpu) 532 { 533 uint64_t base; 534 535 if (cpu->vhyp) { 536 return cpu->vhyp_class->hpt_mask(cpu->vhyp); 537 } 538 if (cpu->env.mmu_model == POWERPC_MMU_3_00) { 539 ppc_v3_pate_t pate; 540 541 if (!ppc64_v3_get_pate(cpu, cpu->env.spr[SPR_LPIDR], &pate)) { 542 return 0; 543 } 544 base = pate.dw0; 545 } else { 546 base = cpu->env.spr[SPR_SDR1]; 547 } 548 return (1ULL << ((base & SDR_64_HTABSIZE) + 18 - 7)) - 1; 549 } 550 551 const ppc_hash_pte64_t *ppc_hash64_map_hptes(PowerPCCPU *cpu, 552 hwaddr ptex, int n) 553 { 554 hwaddr pte_offset = ptex * HASH_PTE_SIZE_64; 555 hwaddr base; 556 hwaddr plen = n * HASH_PTE_SIZE_64; 557 const ppc_hash_pte64_t *hptes; 558 559 if (cpu->vhyp) { 560 return cpu->vhyp_class->map_hptes(cpu->vhyp, ptex, n); 561 } 562 base = ppc_hash64_hpt_base(cpu); 563 564 if (!base) { 565 return NULL; 566 } 567 568 hptes = address_space_map(CPU(cpu)->as, base + pte_offset, &plen, false, 569 MEMTXATTRS_UNSPECIFIED); 570 if (plen < (n * HASH_PTE_SIZE_64)) { 571 hw_error("%s: Unable to map all requested HPTEs\n", __func__); 572 } 573 return hptes; 574 } 575 576 void ppc_hash64_unmap_hptes(PowerPCCPU *cpu, const ppc_hash_pte64_t *hptes, 577 hwaddr ptex, int n) 578 { 579 if (cpu->vhyp) { 580 cpu->vhyp_class->unmap_hptes(cpu->vhyp, hptes, ptex, n); 581 return; 582 } 583 584 address_space_unmap(CPU(cpu)->as, (void *)hptes, n * HASH_PTE_SIZE_64, 585 false, n * HASH_PTE_SIZE_64); 586 } 587 588 bool ppc_hash64_valid_ptex(PowerPCCPU *cpu, target_ulong ptex) 589 { 590 /* hash value/pteg group index is normalized by HPT mask */ 591 if (((ptex & ~7ULL) / HPTES_PER_GROUP) & ~ppc_hash64_hpt_mask(cpu)) { 592 return false; 593 } 594 return true; 595 } 596 597 static unsigned hpte_page_shift(const PPCHash64SegmentPageSizes *sps, 598 uint64_t pte0, uint64_t pte1) 599 { 600 int i; 601 602 if (!(pte0 & HPTE64_V_LARGE)) { 603 if (sps->page_shift != 12) { 604 /* 4kiB page in a non 4kiB segment */ 605 return 0; 606 } 607 /* Normal 4kiB page */ 608 return 12; 609 } 610 611 for (i = 0; i < PPC_PAGE_SIZES_MAX_SZ; i++) { 612 const PPCHash64PageSize *ps = &sps->enc[i]; 613 uint64_t mask; 614 615 if (!ps->page_shift) { 616 break; 617 } 618 619 if (ps->page_shift == 12) { 620 /* L bit is set so this can't be a 4kiB page */ 621 continue; 622 } 623 624 mask = ((1ULL << ps->page_shift) - 1) & HPTE64_R_RPN; 625 626 if ((pte1 & mask) == ((uint64_t)ps->pte_enc << HPTE64_R_RPN_SHIFT)) { 627 return ps->page_shift; 628 } 629 } 630 631 return 0; /* Bad page size encoding */ 632 } 633 634 static void ppc64_v3_new_to_old_hpte(target_ulong *pte0, target_ulong *pte1) 635 { 636 /* Insert B into pte0 */ 637 *pte0 = (*pte0 & HPTE64_V_COMMON_BITS) | 638 ((*pte1 & HPTE64_R_3_0_SSIZE_MASK) << 639 (HPTE64_V_SSIZE_SHIFT - HPTE64_R_3_0_SSIZE_SHIFT)); 640 641 /* Remove B from pte1 */ 642 *pte1 = *pte1 & ~HPTE64_R_3_0_SSIZE_MASK; 643 } 644 645 646 static hwaddr ppc_hash64_pteg_search(PowerPCCPU *cpu, hwaddr hash, 647 const PPCHash64SegmentPageSizes *sps, 648 target_ulong ptem, 649 ppc_hash_pte64_t *pte, unsigned *pshift) 650 { 651 int i; 652 const ppc_hash_pte64_t *pteg; 653 target_ulong pte0, pte1; 654 target_ulong ptex; 655 656 ptex = (hash & ppc_hash64_hpt_mask(cpu)) * HPTES_PER_GROUP; 657 pteg = ppc_hash64_map_hptes(cpu, ptex, HPTES_PER_GROUP); 658 if (!pteg) { 659 return -1; 660 } 661 for (i = 0; i < HPTES_PER_GROUP; i++) { 662 pte0 = ppc_hash64_hpte0(cpu, pteg, i); 663 /* 664 * pte0 contains the valid bit and must be read before pte1, 665 * otherwise we might see an old pte1 with a new valid bit and 666 * thus an inconsistent hpte value 667 */ 668 smp_rmb(); 669 pte1 = ppc_hash64_hpte1(cpu, pteg, i); 670 671 /* Convert format if necessary */ 672 if (cpu->env.mmu_model == POWERPC_MMU_3_00 && !cpu->vhyp) { 673 ppc64_v3_new_to_old_hpte(&pte0, &pte1); 674 } 675 676 /* This compares V, B, H (secondary) and the AVPN */ 677 if (HPTE64_V_COMPARE(pte0, ptem)) { 678 *pshift = hpte_page_shift(sps, pte0, pte1); 679 /* 680 * If there is no match, ignore the PTE, it could simply 681 * be for a different segment size encoding and the 682 * architecture specifies we should not match. Linux will 683 * potentially leave behind PTEs for the wrong base page 684 * size when demoting segments. 685 */ 686 if (*pshift == 0) { 687 continue; 688 } 689 /* 690 * We don't do anything with pshift yet as qemu TLB only 691 * deals with 4K pages anyway 692 */ 693 pte->pte0 = pte0; 694 pte->pte1 = pte1; 695 ppc_hash64_unmap_hptes(cpu, pteg, ptex, HPTES_PER_GROUP); 696 return ptex + i; 697 } 698 } 699 ppc_hash64_unmap_hptes(cpu, pteg, ptex, HPTES_PER_GROUP); 700 /* 701 * We didn't find a valid entry. 702 */ 703 return -1; 704 } 705 706 static hwaddr ppc_hash64_htab_lookup(PowerPCCPU *cpu, 707 ppc_slb_t *slb, target_ulong eaddr, 708 ppc_hash_pte64_t *pte, unsigned *pshift) 709 { 710 CPUPPCState *env = &cpu->env; 711 hwaddr hash, ptex; 712 uint64_t vsid, epnmask, epn, ptem; 713 const PPCHash64SegmentPageSizes *sps = slb->sps; 714 715 /* 716 * The SLB store path should prevent any bad page size encodings 717 * getting in there, so: 718 */ 719 assert(sps); 720 721 /* If ISL is set in LPCR we need to clamp the page size to 4K */ 722 if (env->spr[SPR_LPCR] & LPCR_ISL) { 723 /* We assume that when using TCG, 4k is first entry of SPS */ 724 sps = &cpu->hash64_opts->sps[0]; 725 assert(sps->page_shift == 12); 726 } 727 728 epnmask = ~((1ULL << sps->page_shift) - 1); 729 730 if (slb->vsid & SLB_VSID_B) { 731 /* 1TB segment */ 732 vsid = (slb->vsid & SLB_VSID_VSID) >> SLB_VSID_SHIFT_1T; 733 epn = (eaddr & ~SEGMENT_MASK_1T) & epnmask; 734 hash = vsid ^ (vsid << 25) ^ (epn >> sps->page_shift); 735 } else { 736 /* 256M segment */ 737 vsid = (slb->vsid & SLB_VSID_VSID) >> SLB_VSID_SHIFT; 738 epn = (eaddr & ~SEGMENT_MASK_256M) & epnmask; 739 hash = vsid ^ (epn >> sps->page_shift); 740 } 741 ptem = (slb->vsid & SLB_VSID_PTEM) | ((epn >> 16) & HPTE64_V_AVPN); 742 ptem |= HPTE64_V_VALID; 743 744 /* Page address translation */ 745 qemu_log_mask(CPU_LOG_MMU, 746 "htab_base " HWADDR_FMT_plx " htab_mask " HWADDR_FMT_plx 747 " hash " HWADDR_FMT_plx "\n", 748 ppc_hash64_hpt_base(cpu), ppc_hash64_hpt_mask(cpu), hash); 749 750 /* Primary PTEG lookup */ 751 qemu_log_mask(CPU_LOG_MMU, 752 "0 htab=" HWADDR_FMT_plx "/" HWADDR_FMT_plx 753 " vsid=" TARGET_FMT_lx " ptem=" TARGET_FMT_lx 754 " hash=" HWADDR_FMT_plx "\n", 755 ppc_hash64_hpt_base(cpu), ppc_hash64_hpt_mask(cpu), 756 vsid, ptem, hash); 757 ptex = ppc_hash64_pteg_search(cpu, hash, sps, ptem, pte, pshift); 758 759 if (ptex == -1) { 760 /* Secondary PTEG lookup */ 761 ptem |= HPTE64_V_SECONDARY; 762 qemu_log_mask(CPU_LOG_MMU, 763 "1 htab=" HWADDR_FMT_plx "/" HWADDR_FMT_plx 764 " vsid=" TARGET_FMT_lx " api=" TARGET_FMT_lx 765 " hash=" HWADDR_FMT_plx "\n", ppc_hash64_hpt_base(cpu), 766 ppc_hash64_hpt_mask(cpu), vsid, ptem, ~hash); 767 768 ptex = ppc_hash64_pteg_search(cpu, ~hash, sps, ptem, pte, pshift); 769 } 770 771 return ptex; 772 } 773 774 unsigned ppc_hash64_hpte_page_shift_noslb(PowerPCCPU *cpu, 775 uint64_t pte0, uint64_t pte1) 776 { 777 int i; 778 779 if (!(pte0 & HPTE64_V_LARGE)) { 780 return 12; 781 } 782 783 /* 784 * The encodings in env->sps need to be carefully chosen so that 785 * this gives an unambiguous result. 786 */ 787 for (i = 0; i < PPC_PAGE_SIZES_MAX_SZ; i++) { 788 const PPCHash64SegmentPageSizes *sps = &cpu->hash64_opts->sps[i]; 789 unsigned shift; 790 791 if (!sps->page_shift) { 792 break; 793 } 794 795 shift = hpte_page_shift(sps, pte0, pte1); 796 if (shift) { 797 return shift; 798 } 799 } 800 801 return 0; 802 } 803 804 static bool ppc_hash64_use_vrma(CPUPPCState *env) 805 { 806 switch (env->mmu_model) { 807 case POWERPC_MMU_3_00: 808 /* 809 * ISAv3.0 (POWER9) always uses VRMA, the VPM0 field and RMOR 810 * register no longer exist 811 */ 812 return true; 813 814 default: 815 return !!(env->spr[SPR_LPCR] & LPCR_VPM0); 816 } 817 } 818 819 static void ppc_hash64_set_isi(CPUState *cs, int mmu_idx, uint64_t slb_vsid, 820 uint64_t error_code) 821 { 822 CPUPPCState *env = &POWERPC_CPU(cs)->env; 823 bool vpm; 824 825 if (!mmuidx_real(mmu_idx)) { 826 vpm = !!(env->spr[SPR_LPCR] & LPCR_VPM1); 827 } else { 828 vpm = ppc_hash64_use_vrma(env); 829 } 830 if (vpm && !mmuidx_hv(mmu_idx)) { 831 cs->exception_index = POWERPC_EXCP_HISI; 832 env->spr[SPR_ASDR] = slb_vsid; 833 } else { 834 cs->exception_index = POWERPC_EXCP_ISI; 835 } 836 env->error_code = error_code; 837 } 838 839 static void ppc_hash64_set_dsi(CPUState *cs, int mmu_idx, uint64_t slb_vsid, 840 uint64_t dar, uint64_t dsisr) 841 { 842 CPUPPCState *env = &POWERPC_CPU(cs)->env; 843 bool vpm; 844 845 if (!mmuidx_real(mmu_idx)) { 846 vpm = !!(env->spr[SPR_LPCR] & LPCR_VPM1); 847 } else { 848 vpm = ppc_hash64_use_vrma(env); 849 } 850 if (vpm && !mmuidx_hv(mmu_idx)) { 851 cs->exception_index = POWERPC_EXCP_HDSI; 852 env->spr[SPR_HDAR] = dar; 853 env->spr[SPR_HDSISR] = dsisr; 854 env->spr[SPR_ASDR] = slb_vsid; 855 } else { 856 cs->exception_index = POWERPC_EXCP_DSI; 857 env->spr[SPR_DAR] = dar; 858 env->spr[SPR_DSISR] = dsisr; 859 } 860 env->error_code = 0; 861 } 862 863 864 static void ppc_hash64_set_r(PowerPCCPU *cpu, hwaddr ptex, uint64_t pte1) 865 { 866 hwaddr base, offset = ptex * HASH_PTE_SIZE_64 + HPTE64_DW1_R; 867 868 if (cpu->vhyp) { 869 cpu->vhyp_class->hpte_set_r(cpu->vhyp, ptex, pte1); 870 return; 871 } 872 base = ppc_hash64_hpt_base(cpu); 873 874 875 /* The HW performs a non-atomic byte update */ 876 stb_phys(CPU(cpu)->as, base + offset, ((pte1 >> 8) & 0xff) | 0x01); 877 } 878 879 static void ppc_hash64_set_c(PowerPCCPU *cpu, hwaddr ptex, uint64_t pte1) 880 { 881 hwaddr base, offset = ptex * HASH_PTE_SIZE_64 + HPTE64_DW1_C; 882 883 if (cpu->vhyp) { 884 cpu->vhyp_class->hpte_set_c(cpu->vhyp, ptex, pte1); 885 return; 886 } 887 base = ppc_hash64_hpt_base(cpu); 888 889 /* The HW performs a non-atomic byte update */ 890 stb_phys(CPU(cpu)->as, base + offset, (pte1 & 0xff) | 0x80); 891 } 892 893 static target_ulong rmls_limit(PowerPCCPU *cpu) 894 { 895 CPUPPCState *env = &cpu->env; 896 /* 897 * In theory the meanings of RMLS values are implementation 898 * dependent. In practice, this seems to have been the set from 899 * POWER4+..POWER8, and RMLS is no longer supported in POWER9. 900 * 901 * Unsupported values mean the OS has shot itself in the 902 * foot. Return a 0-sized RMA in this case, which we expect 903 * to trigger an immediate DSI or ISI 904 */ 905 static const target_ulong rma_sizes[16] = { 906 [0] = 256 * GiB, 907 [1] = 16 * GiB, 908 [2] = 1 * GiB, 909 [3] = 64 * MiB, 910 [4] = 256 * MiB, 911 [7] = 128 * MiB, 912 [8] = 32 * MiB, 913 }; 914 target_ulong rmls = (env->spr[SPR_LPCR] & LPCR_RMLS) >> LPCR_RMLS_SHIFT; 915 916 return rma_sizes[rmls]; 917 } 918 919 /* Return the LLP in SLB_VSID format */ 920 static uint64_t get_vrma_llp(PowerPCCPU *cpu) 921 { 922 CPUPPCState *env = &cpu->env; 923 uint64_t llp; 924 925 if (env->mmu_model == POWERPC_MMU_3_00) { 926 ppc_v3_pate_t pate; 927 uint64_t ps, l, lp; 928 929 /* 930 * ISA v3.0 removes the LPCR[VRMASD] field and puts the VRMA base 931 * page size (L||LP equivalent) in the PS field in the HPT partition 932 * table entry. 933 */ 934 if (!ppc64_v3_get_pate(cpu, cpu->env.spr[SPR_LPIDR], &pate)) { 935 error_report("Bad VRMA with no partition table entry"); 936 return 0; 937 } 938 ps = PATE0_GET_PS(pate.dw0); 939 /* PS has L||LP in 3 consecutive bits, put them into SLB LLP format */ 940 l = (ps >> 2) & 0x1; 941 lp = ps & 0x3; 942 llp = (l << SLB_VSID_L_SHIFT) | (lp << SLB_VSID_LP_SHIFT); 943 944 } else { 945 uint64_t lpcr = env->spr[SPR_LPCR]; 946 target_ulong vrmasd = (lpcr & LPCR_VRMASD) >> LPCR_VRMASD_SHIFT; 947 948 /* VRMASD LLP matches SLB format, just shift and mask it */ 949 llp = (vrmasd << SLB_VSID_LP_SHIFT) & SLB_VSID_LLP_MASK; 950 } 951 952 return llp; 953 } 954 955 static int build_vrma_slbe(PowerPCCPU *cpu, ppc_slb_t *slb) 956 { 957 uint64_t llp = get_vrma_llp(cpu); 958 target_ulong vsid = SLB_VSID_VRMA | llp; 959 int i; 960 961 for (i = 0; i < PPC_PAGE_SIZES_MAX_SZ; i++) { 962 const PPCHash64SegmentPageSizes *sps = &cpu->hash64_opts->sps[i]; 963 964 if (!sps->page_shift) { 965 break; 966 } 967 968 if ((vsid & SLB_VSID_LLP_MASK) == sps->slb_enc) { 969 slb->esid = SLB_ESID_V; 970 slb->vsid = vsid; 971 slb->sps = sps; 972 return 0; 973 } 974 } 975 976 error_report("Bad VRMA page size encoding 0x" TARGET_FMT_lx, llp); 977 978 return -1; 979 } 980 981 bool ppc_hash64_xlate(PowerPCCPU *cpu, vaddr eaddr, MMUAccessType access_type, 982 hwaddr *raddrp, int *psizep, int *protp, int mmu_idx, 983 bool guest_visible) 984 { 985 CPUState *cs = CPU(cpu); 986 CPUPPCState *env = &cpu->env; 987 ppc_slb_t vrma_slbe; 988 ppc_slb_t *slb; 989 unsigned apshift; 990 hwaddr ptex; 991 ppc_hash_pte64_t pte; 992 int exec_prot, pp_prot, amr_prot, prot; 993 int need_prot; 994 hwaddr raddr; 995 996 /* 997 * Note on LPCR usage: 970 uses HID4, but our special variant of 998 * store_spr copies relevant fields into env->spr[SPR_LPCR]. 999 * Similarly we filter unimplemented bits when storing into LPCR 1000 * depending on the MMU version. This code can thus just use the 1001 * LPCR "as-is". 1002 */ 1003 1004 /* 1. Handle real mode accesses */ 1005 if (mmuidx_real(mmu_idx)) { 1006 /* 1007 * Translation is supposedly "off", but in real mode the top 4 1008 * effective address bits are (mostly) ignored 1009 */ 1010 raddr = eaddr & 0x0FFFFFFFFFFFFFFFULL; 1011 1012 if (cpu->vhyp) { 1013 /* 1014 * In virtual hypervisor mode, there's nothing to do: 1015 * EA == GPA == qemu guest address 1016 */ 1017 } else if (mmuidx_hv(mmu_idx) || !env->has_hv_mode) { 1018 /* In HV mode, add HRMOR if top EA bit is clear */ 1019 if (!(eaddr >> 63)) { 1020 raddr |= env->spr[SPR_HRMOR]; 1021 } 1022 } else if (ppc_hash64_use_vrma(env)) { 1023 /* Emulated VRMA mode */ 1024 slb = &vrma_slbe; 1025 if (build_vrma_slbe(cpu, slb) != 0) { 1026 /* Invalid VRMA setup, machine check */ 1027 if (guest_visible) { 1028 cs->exception_index = POWERPC_EXCP_MCHECK; 1029 env->error_code = 0; 1030 } 1031 return false; 1032 } 1033 1034 goto skip_slb_search; 1035 } else { 1036 target_ulong limit = rmls_limit(cpu); 1037 1038 /* Emulated old-style RMO mode, bounds check against RMLS */ 1039 if (raddr >= limit) { 1040 if (!guest_visible) { 1041 return false; 1042 } 1043 switch (access_type) { 1044 case MMU_INST_FETCH: 1045 ppc_hash64_set_isi(cs, mmu_idx, 0, SRR1_PROTFAULT); 1046 break; 1047 case MMU_DATA_LOAD: 1048 ppc_hash64_set_dsi(cs, mmu_idx, 0, eaddr, DSISR_PROTFAULT); 1049 break; 1050 case MMU_DATA_STORE: 1051 ppc_hash64_set_dsi(cs, mmu_idx, 0, eaddr, 1052 DSISR_PROTFAULT | DSISR_ISSTORE); 1053 break; 1054 default: 1055 g_assert_not_reached(); 1056 } 1057 return false; 1058 } 1059 1060 raddr |= env->spr[SPR_RMOR]; 1061 } 1062 1063 *raddrp = raddr; 1064 *protp = PAGE_READ | PAGE_WRITE | PAGE_EXEC; 1065 *psizep = TARGET_PAGE_BITS; 1066 return true; 1067 } 1068 1069 /* 2. Translation is on, so look up the SLB */ 1070 slb = slb_lookup(cpu, eaddr); 1071 if (!slb) { 1072 /* No entry found, check if in-memory segment tables are in use */ 1073 if (ppc64_use_proc_tbl(cpu)) { 1074 /* TODO - Unsupported */ 1075 error_report("Segment Table Support Unimplemented"); 1076 exit(1); 1077 } 1078 /* Segment still not found, generate the appropriate interrupt */ 1079 if (!guest_visible) { 1080 return false; 1081 } 1082 switch (access_type) { 1083 case MMU_INST_FETCH: 1084 cs->exception_index = POWERPC_EXCP_ISEG; 1085 env->error_code = 0; 1086 break; 1087 case MMU_DATA_LOAD: 1088 case MMU_DATA_STORE: 1089 cs->exception_index = POWERPC_EXCP_DSEG; 1090 env->error_code = 0; 1091 env->spr[SPR_DAR] = eaddr; 1092 break; 1093 default: 1094 g_assert_not_reached(); 1095 } 1096 return false; 1097 } 1098 1099 skip_slb_search: 1100 1101 /* 3. Check for segment level no-execute violation */ 1102 if (access_type == MMU_INST_FETCH && (slb->vsid & SLB_VSID_N)) { 1103 if (guest_visible) { 1104 ppc_hash64_set_isi(cs, mmu_idx, slb->vsid, SRR1_NOEXEC_GUARD); 1105 } 1106 return false; 1107 } 1108 1109 /* 4. Locate the PTE in the hash table */ 1110 ptex = ppc_hash64_htab_lookup(cpu, slb, eaddr, &pte, &apshift); 1111 if (ptex == -1) { 1112 if (!guest_visible) { 1113 return false; 1114 } 1115 switch (access_type) { 1116 case MMU_INST_FETCH: 1117 ppc_hash64_set_isi(cs, mmu_idx, slb->vsid, SRR1_NOPTE); 1118 break; 1119 case MMU_DATA_LOAD: 1120 ppc_hash64_set_dsi(cs, mmu_idx, slb->vsid, eaddr, DSISR_NOPTE); 1121 break; 1122 case MMU_DATA_STORE: 1123 ppc_hash64_set_dsi(cs, mmu_idx, slb->vsid, eaddr, 1124 DSISR_NOPTE | DSISR_ISSTORE); 1125 break; 1126 default: 1127 g_assert_not_reached(); 1128 } 1129 return false; 1130 } 1131 qemu_log_mask(CPU_LOG_MMU, 1132 "found PTE at index %08" HWADDR_PRIx "\n", ptex); 1133 1134 /* 5. Check access permissions */ 1135 1136 exec_prot = ppc_hash64_pte_noexec_guard(cpu, pte); 1137 pp_prot = ppc_hash64_pte_prot(mmu_idx, slb, pte); 1138 amr_prot = ppc_hash64_amr_prot(cpu, pte); 1139 prot = exec_prot & pp_prot & amr_prot; 1140 1141 need_prot = check_prot_access_type(PAGE_RWX, access_type); 1142 if (need_prot & ~prot) { 1143 /* Access right violation */ 1144 qemu_log_mask(CPU_LOG_MMU, "PTE access rejected\n"); 1145 if (!guest_visible) { 1146 return false; 1147 } 1148 if (access_type == MMU_INST_FETCH) { 1149 int srr1 = 0; 1150 if (PAGE_EXEC & ~exec_prot) { 1151 srr1 |= SRR1_NOEXEC_GUARD; /* Access violates noexec or guard */ 1152 } else if (PAGE_EXEC & ~pp_prot) { 1153 srr1 |= SRR1_PROTFAULT; /* Access violates access authority */ 1154 } 1155 if (PAGE_EXEC & ~amr_prot) { 1156 srr1 |= SRR1_IAMR; /* Access violates virt pg class key prot */ 1157 } 1158 ppc_hash64_set_isi(cs, mmu_idx, slb->vsid, srr1); 1159 } else { 1160 int dsisr = 0; 1161 if (need_prot & ~pp_prot) { 1162 dsisr |= DSISR_PROTFAULT; 1163 } 1164 if (access_type == MMU_DATA_STORE) { 1165 dsisr |= DSISR_ISSTORE; 1166 } 1167 if (need_prot & ~amr_prot) { 1168 dsisr |= DSISR_AMR; 1169 } 1170 ppc_hash64_set_dsi(cs, mmu_idx, slb->vsid, eaddr, dsisr); 1171 } 1172 return false; 1173 } 1174 1175 qemu_log_mask(CPU_LOG_MMU, "PTE access granted !\n"); 1176 1177 /* 6. Update PTE referenced and changed bits if necessary */ 1178 1179 if (!(pte.pte1 & HPTE64_R_R)) { 1180 ppc_hash64_set_r(cpu, ptex, pte.pte1); 1181 } 1182 if (!(pte.pte1 & HPTE64_R_C)) { 1183 if (access_type == MMU_DATA_STORE) { 1184 ppc_hash64_set_c(cpu, ptex, pte.pte1); 1185 } else { 1186 /* 1187 * Treat the page as read-only for now, so that a later write 1188 * will pass through this function again to set the C bit 1189 */ 1190 prot &= ~PAGE_WRITE; 1191 } 1192 } 1193 1194 /* 7. Determine the real address from the PTE */ 1195 1196 *raddrp = deposit64(pte.pte1 & HPTE64_R_RPN, 0, apshift, eaddr); 1197 *protp = prot; 1198 *psizep = apshift; 1199 return true; 1200 } 1201 1202 void ppc_hash64_tlb_flush_hpte(PowerPCCPU *cpu, target_ulong ptex, 1203 target_ulong pte0, target_ulong pte1) 1204 { 1205 /* 1206 * XXX: given the fact that there are too many segments to 1207 * invalidate, and we still don't have a tlb_flush_mask(env, n, 1208 * mask) in QEMU, we just invalidate all TLBs 1209 */ 1210 cpu->env.tlb_need_flush = TLB_NEED_GLOBAL_FLUSH | TLB_NEED_LOCAL_FLUSH; 1211 } 1212 1213 #ifdef CONFIG_TCG 1214 void helper_store_lpcr(CPUPPCState *env, target_ulong val) 1215 { 1216 PowerPCCPU *cpu = env_archcpu(env); 1217 1218 ppc_store_lpcr(cpu, val); 1219 } 1220 #endif 1221 1222 void ppc_hash64_init(PowerPCCPU *cpu) 1223 { 1224 CPUPPCState *env = &cpu->env; 1225 PowerPCCPUClass *pcc = POWERPC_CPU_GET_CLASS(cpu); 1226 1227 if (!pcc->hash64_opts) { 1228 assert(!mmu_is_64bit(env->mmu_model)); 1229 return; 1230 } 1231 1232 cpu->hash64_opts = g_memdup2(pcc->hash64_opts, sizeof(*cpu->hash64_opts)); 1233 } 1234 1235 void ppc_hash64_finalize(PowerPCCPU *cpu) 1236 { 1237 g_free(cpu->hash64_opts); 1238 } 1239 1240 const PPCHash64Options ppc_hash64_opts_basic = { 1241 .flags = 0, 1242 .slb_size = 64, 1243 .sps = { 1244 { .page_shift = 12, /* 4K */ 1245 .slb_enc = 0, 1246 .enc = { { .page_shift = 12, .pte_enc = 0 } } 1247 }, 1248 { .page_shift = 24, /* 16M */ 1249 .slb_enc = 0x100, 1250 .enc = { { .page_shift = 24, .pte_enc = 0 } } 1251 }, 1252 }, 1253 }; 1254 1255 const PPCHash64Options ppc_hash64_opts_POWER7 = { 1256 .flags = PPC_HASH64_1TSEG | PPC_HASH64_AMR | PPC_HASH64_CI_LARGEPAGE, 1257 .slb_size = 32, 1258 .sps = { 1259 { 1260 .page_shift = 12, /* 4K */ 1261 .slb_enc = 0, 1262 .enc = { { .page_shift = 12, .pte_enc = 0 }, 1263 { .page_shift = 16, .pte_enc = 0x7 }, 1264 { .page_shift = 24, .pte_enc = 0x38 }, }, 1265 }, 1266 { 1267 .page_shift = 16, /* 64K */ 1268 .slb_enc = SLB_VSID_64K, 1269 .enc = { { .page_shift = 16, .pte_enc = 0x1 }, 1270 { .page_shift = 24, .pte_enc = 0x8 }, }, 1271 }, 1272 { 1273 .page_shift = 24, /* 16M */ 1274 .slb_enc = SLB_VSID_16M, 1275 .enc = { { .page_shift = 24, .pte_enc = 0 }, }, 1276 }, 1277 { 1278 .page_shift = 34, /* 16G */ 1279 .slb_enc = SLB_VSID_16G, 1280 .enc = { { .page_shift = 34, .pte_enc = 0x3 }, }, 1281 }, 1282 } 1283 }; 1284 1285 1286