/*
 * PowerPC MMU, TLB, SLB and BAT emulation helpers for QEMU.
 *
 * Copyright (c) 2003-2007 Jocelyn Mayer
 * Copyright (c) 2013 David Gibson, IBM Corporation
 *
 * This library is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License as published by the Free Software Foundation; either
 * version 2.1 of the License, or (at your option) any later version.
 *
 * This library is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with this library; if not, see <http://www.gnu.org/licenses/>.
 */
#include "qemu/osdep.h"
#include "qemu/units.h"
#include "cpu.h"
#include "exec/exec-all.h"
#include "exec/page-protection.h"
#include "qemu/error-report.h"
#include "qemu/qemu-print.h"
#include "sysemu/hw_accel.h"
#include "kvm_ppc.h"
#include "mmu-hash64.h"
#include "exec/log.h"
#include "hw/hw.h"
#include "internal.h"
#include "mmu-book3s-v3.h"
#include "mmu-books.h"
#include "helper_regs.h"

#ifdef CONFIG_TCG
#include "exec/helper-proto.h"
#endif

/* #define DEBUG_SLB */

#ifdef DEBUG_SLB
# define LOG_SLB(...) qemu_log_mask(CPU_LOG_MMU, __VA_ARGS__)
#else
# define LOG_SLB(...) do { } while (0)
#endif

/*
 * SLB handling
 */

static ppc_slb_t *slb_lookup(PowerPCCPU *cpu, target_ulong eaddr)
{
    CPUPPCState *env = &cpu->env;
    uint64_t esid_256M, esid_1T;
    int n;

    LOG_SLB("%s: eaddr " TARGET_FMT_lx "\n", __func__, eaddr);

    esid_256M = (eaddr & SEGMENT_MASK_256M) | SLB_ESID_V;
    esid_1T = (eaddr & SEGMENT_MASK_1T) | SLB_ESID_V;

    for (n = 0; n < cpu->hash64_opts->slb_size; n++) {
        ppc_slb_t *slb = &env->slb[n];

        LOG_SLB("%s: slot %d %016" PRIx64 " %016"
                PRIx64 "\n", __func__, n, slb->esid, slb->vsid);
        /*
         * We check for 1T matches on all MMUs here - if the MMU
         * doesn't have 1T segment support, we will have prevented 1T
         * entries from being inserted in the slbmte code.
         */
        if (((slb->esid == esid_256M) &&
             ((slb->vsid & SLB_VSID_B) == SLB_VSID_B_256M))
            || ((slb->esid == esid_1T) &&
                ((slb->vsid & SLB_VSID_B) == SLB_VSID_B_1T))) {
            return slb;
        }
    }

    return NULL;
}
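/*
 * Illustrative note (added by the editor, not part of the original code):
 * SEGMENT_MASK_256M keeps the effective-address bits above the 28-bit
 * segment offset and SEGMENT_MASK_1T those above the 40-bit offset, so for
 * example
 *
 *     eaddr     = 0xc000000012345678
 *     esid_256M = 0xc000000010000000 | SLB_ESID_V
 *     esid_1T   = 0xc000000000000000 | SLB_ESID_V
 *
 * and a caller typically just does
 *
 *     ppc_slb_t *slb = slb_lookup(cpu, eaddr);
 *     if (!slb) { ... raise a segment interrupt ... }
 */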
void dump_slb(PowerPCCPU *cpu)
{
    CPUPPCState *env = &cpu->env;
    int i;
    uint64_t slbe, slbv;

    cpu_synchronize_state(CPU(cpu));

    qemu_printf("SLB\tESID\t\t\tVSID\n");
    for (i = 0; i < cpu->hash64_opts->slb_size; i++) {
        slbe = env->slb[i].esid;
        slbv = env->slb[i].vsid;
        if (slbe == 0 && slbv == 0) {
            continue;
        }
        qemu_printf("%d\t0x%016" PRIx64 "\t0x%016" PRIx64 "\n",
                    i, slbe, slbv);
    }
}

#ifdef CONFIG_TCG
void helper_SLBIA(CPUPPCState *env, uint32_t ih)
{
    PowerPCCPU *cpu = env_archcpu(env);
    int starting_entry;
    int n;

    /*
     * slbia must always flush all TLB (which is equivalent to ERAT in ppc
     * architecture). Matching on SLB_ESID_V is not good enough, because slbmte
     * can overwrite a valid SLB without flushing its lookaside information.
     *
     * It would be possible to keep the TLB in synch with the SLB by flushing
     * when a valid entry is overwritten by slbmte, and therefore slbia would
     * not have to flush unless it evicts a valid SLB entry. However it is
     * expected that slbmte is more common than slbia, and slbia is usually
     * going to evict valid SLB entries, so that tradeoff is unlikely to be a
     * good one.
     *
     * ISA v2.05 introduced IH field with values 0,1,2,6. These all invalidate
     * the same SLB entries (everything but entry 0), but differ in what
     * "lookaside information" is invalidated. TCG can ignore this and flush
     * everything.
     *
     * ISA v3.0 introduced additional values 3,4,7, which change what SLBs are
     * invalidated.
     */

    env->tlb_need_flush |= TLB_NEED_LOCAL_FLUSH;

    starting_entry = 1; /* default for IH=0,1,2,6 */

    if (env->mmu_model == POWERPC_MMU_3_00) {
        switch (ih) {
        case 0x7:
            /* invalidate no SLBs, but all lookaside information */
            return;

        case 0x3:
        case 0x4:
            /* also considers SLB entry 0 */
            starting_entry = 0;
            break;

        case 0x5:
            /* treat undefined values as ih==0, and warn */
            qemu_log_mask(LOG_GUEST_ERROR,
                          "slbia undefined IH field %u.\n", ih);
            break;

        default:
            /* 0,1,2,6 */
            break;
        }
    }

    for (n = starting_entry; n < cpu->hash64_opts->slb_size; n++) {
        ppc_slb_t *slb = &env->slb[n];

        if (!(slb->esid & SLB_ESID_V)) {
            continue;
        }
        if (env->mmu_model == POWERPC_MMU_3_00) {
            if (ih == 0x3 && (slb->vsid & SLB_VSID_C) == 0) {
                /* preserves entries with a class value of 0 */
                continue;
            }
        }

        slb->esid &= ~SLB_ESID_V;
    }
}

#if defined(TARGET_PPC64)
void helper_SLBIAG(CPUPPCState *env, target_ulong rs, uint32_t l)
{
    PowerPCCPU *cpu = env_archcpu(env);
    int n;

    /*
     * slbiag must always flush all TLB (which is equivalent to ERAT in ppc
     * architecture). Matching on SLB_ESID_V is not good enough, because slbmte
     * can overwrite a valid SLB without flushing its lookaside information.
     *
     * It would be possible to keep the TLB in synch with the SLB by flushing
     * when a valid entry is overwritten by slbmte, and therefore slbiag would
     * not have to flush unless it evicts a valid SLB entry. However it is
     * expected that slbmte is more common than slbiag, and slbiag is usually
     * going to evict valid SLB entries, so that tradeoff is unlikely to be a
     * good one.
     */
    env->tlb_need_flush |= TLB_NEED_LOCAL_FLUSH;

    for (n = 0; n < cpu->hash64_opts->slb_size; n++) {
        ppc_slb_t *slb = &env->slb[n];
        slb->esid &= ~SLB_ESID_V;
    }
}
#endif
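/*
 * Editor's note (illustrative, not from the original source): the IH == 0x3
 * case above keys off SLB_VSID_C, the "class" bit of each SLB entry.  An OS
 * can tag, for instance, user-segment entries as class 1 and its own
 * persistent mappings as class 0, so that an slbia with IH=3 sweeps the
 * former while the latter survive the loop via the continue above.
 */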
static void __helper_slbie(CPUPPCState *env, target_ulong addr,
                           target_ulong global)
{
    PowerPCCPU *cpu = env_archcpu(env);
    ppc_slb_t *slb;

    slb = slb_lookup(cpu, addr);
    if (!slb) {
        return;
    }

    if (slb->esid & SLB_ESID_V) {
        slb->esid &= ~SLB_ESID_V;

        /*
         * XXX: given the fact that segment size is 256 MB or 1TB,
         * and we still don't have a tlb_flush_mask(env, n, mask)
         * in QEMU, we just invalidate all TLBs
         */
        env->tlb_need_flush |=
            (global == false ? TLB_NEED_LOCAL_FLUSH : TLB_NEED_GLOBAL_FLUSH);
    }
}

void helper_SLBIE(CPUPPCState *env, target_ulong addr)
{
    __helper_slbie(env, addr, false);
}

void helper_SLBIEG(CPUPPCState *env, target_ulong addr)
{
    __helper_slbie(env, addr, true);
}
#endif

int ppc_store_slb(PowerPCCPU *cpu, target_ulong slot,
                  target_ulong esid, target_ulong vsid)
{
    CPUPPCState *env = &cpu->env;
    ppc_slb_t *slb = &env->slb[slot];
    const PPCHash64SegmentPageSizes *sps = NULL;
    int i;

    if (slot >= cpu->hash64_opts->slb_size) {
        return -1; /* Bad slot number */
    }
    if (esid & ~(SLB_ESID_ESID | SLB_ESID_V)) {
        return -1; /* Reserved bits set */
    }
    if (vsid & (SLB_VSID_B & ~SLB_VSID_B_1T)) {
        return -1; /* Bad segment size */
    }
    if ((vsid & SLB_VSID_B) && !(ppc_hash64_has(cpu, PPC_HASH64_1TSEG))) {
        return -1; /* 1T segment on MMU that doesn't support it */
    }

    for (i = 0; i < PPC_PAGE_SIZES_MAX_SZ; i++) {
        const PPCHash64SegmentPageSizes *sps1 = &cpu->hash64_opts->sps[i];

        if (!sps1->page_shift) {
            break;
        }

        if ((vsid & SLB_VSID_LLP_MASK) == sps1->slb_enc) {
            sps = sps1;
            break;
        }
    }

    if (!sps) {
        error_report("Bad page size encoding in SLB store: slot "TARGET_FMT_lu
                     " esid 0x"TARGET_FMT_lx" vsid 0x"TARGET_FMT_lx,
                     slot, esid, vsid);
        return -1;
    }

    slb->esid = esid;
    slb->vsid = vsid;
    slb->sps = sps;

    LOG_SLB("%s: " TARGET_FMT_lu " " TARGET_FMT_lx " - " TARGET_FMT_lx
            " => %016" PRIx64 " %016" PRIx64 "\n", __func__, slot, esid, vsid,
            slb->esid, slb->vsid);

    return 0;
}

#ifdef CONFIG_TCG
static int ppc_load_slb_esid(PowerPCCPU *cpu, target_ulong rb,
                             target_ulong *rt)
{
    CPUPPCState *env = &cpu->env;
    int slot = rb & 0xfff;
    ppc_slb_t *slb = &env->slb[slot];

    if (slot >= cpu->hash64_opts->slb_size) {
        return -1;
    }

    *rt = slb->esid;
    return 0;
}

static int ppc_load_slb_vsid(PowerPCCPU *cpu, target_ulong rb,
                             target_ulong *rt)
{
    CPUPPCState *env = &cpu->env;
    int slot = rb & 0xfff;
    ppc_slb_t *slb = &env->slb[slot];

    if (slot >= cpu->hash64_opts->slb_size) {
        return -1;
    }

    *rt = slb->vsid;
    return 0;
}

static int ppc_find_slb_vsid(PowerPCCPU *cpu, target_ulong rb,
                             target_ulong *rt)
{
    CPUPPCState *env = &cpu->env;
    ppc_slb_t *slb;

    if (!msr_is_64bit(env, env->msr)) {
        rb &= 0xffffffff;
    }
    slb = slb_lookup(cpu, rb);
    if (slb == NULL) {
        *rt = (target_ulong)-1ul;
    } else {
        *rt = slb->vsid;
    }
    return 0;
}

void helper_SLBMTE(CPUPPCState *env, target_ulong rb, target_ulong rs)
{
    PowerPCCPU *cpu = env_archcpu(env);

    if (ppc_store_slb(cpu, rb & 0xfff, rb & ~0xfffULL, rs) < 0) {
        raise_exception_err_ra(env, POWERPC_EXCP_PROGRAM,
                               POWERPC_EXCP_INVAL, GETPC());
    }
}
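/*
 * Editor's illustration (not part of the original code): slbmte packs the
 * SLB index into the low 12 bits of RB and the ESID/valid bit into the
 * upper bits, while RS carries the VSID and its flags, hence the split
 *
 *     ppc_store_slb(cpu, rb & 0xfff, rb & ~0xfffULL, rs);
 *
 * A malformed RB/RS pair (reserved ESID bits, unknown LLP encoding, or a
 * 1T segment on a 256M-only MMU) makes ppc_store_slb() return -1 and the
 * helper raises a program interrupt instead of updating the SLB.
 */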
target_ulong helper_SLBMFEE(CPUPPCState *env, target_ulong rb)
{
    PowerPCCPU *cpu = env_archcpu(env);
    target_ulong rt = 0;

    if (ppc_load_slb_esid(cpu, rb, &rt) < 0) {
        raise_exception_err_ra(env, POWERPC_EXCP_PROGRAM,
                               POWERPC_EXCP_INVAL, GETPC());
    }
    return rt;
}

target_ulong helper_SLBFEE(CPUPPCState *env, target_ulong rb)
{
    PowerPCCPU *cpu = env_archcpu(env);
    target_ulong rt = 0;

    if (ppc_find_slb_vsid(cpu, rb, &rt) < 0) {
        raise_exception_err_ra(env, POWERPC_EXCP_PROGRAM,
                               POWERPC_EXCP_INVAL, GETPC());
    }
    return rt;
}

target_ulong helper_SLBMFEV(CPUPPCState *env, target_ulong rb)
{
    PowerPCCPU *cpu = env_archcpu(env);
    target_ulong rt = 0;

    if (ppc_load_slb_vsid(cpu, rb, &rt) < 0) {
        raise_exception_err_ra(env, POWERPC_EXCP_PROGRAM,
                               POWERPC_EXCP_INVAL, GETPC());
    }
    return rt;
}
#endif

/* Check No-Execute or Guarded Storage */
static inline int ppc_hash64_pte_noexec_guard(PowerPCCPU *cpu,
                                              ppc_hash_pte64_t pte)
{
    /* Exec permissions CANNOT take away read or write permissions */
    return (pte.pte1 & HPTE64_R_N) || (pte.pte1 & HPTE64_R_G) ?
            PAGE_READ | PAGE_WRITE : PAGE_READ | PAGE_WRITE | PAGE_EXEC;
}

/* Check Basic Storage Protection */
static int ppc_hash64_pte_prot(int mmu_idx,
                               ppc_slb_t *slb, ppc_hash_pte64_t pte)
{
    unsigned pp, key;
    /*
     * Some pp bit combinations have undefined behaviour, so default
     * to no access in those cases
     */
    int prot = 0;

    key = !!(mmuidx_pr(mmu_idx) ? (slb->vsid & SLB_VSID_KP)
             : (slb->vsid & SLB_VSID_KS));
    pp = (pte.pte1 & HPTE64_R_PP) | ((pte.pte1 & HPTE64_R_PP0) >> 61);

    if (key == 0) {
        switch (pp) {
        case 0x0:
        case 0x1:
        case 0x2:
            prot = PAGE_READ | PAGE_WRITE | PAGE_EXEC;
            break;

        case 0x3:
        case 0x6:
            prot = PAGE_READ | PAGE_EXEC;
            break;
        }
    } else {
        switch (pp) {
        case 0x0:
        case 0x6:
            break;

        case 0x1:
        case 0x3:
            prot = PAGE_READ | PAGE_EXEC;
            break;

        case 0x2:
            prot = PAGE_READ | PAGE_WRITE | PAGE_EXEC;
            break;
        }
    }

    return prot;
}
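/*
 * Editor's summary of the decision above (illustrative, derived from the
 * code): the key is Kp in problem state and Ks otherwise, and the access
 * granted per pp value (PP0||PP) is
 *
 *     pp        :   0    1    2    3    6
 *     key == 0  :  rwx  rwx  rwx  r-x  r-x
 *     key == 1  :  ---  r-x  rwx  r-x  ---
 *
 * with every other pp combination treated as no access.
 */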
/* Check the instruction access permissions specified in the IAMR */
static int ppc_hash64_iamr_prot(PowerPCCPU *cpu, int key)
{
    CPUPPCState *env = &cpu->env;
    int iamr_bits = (env->spr[SPR_IAMR] >> 2 * (31 - key)) & 0x3;

    /*
     * An instruction fetch is permitted if the IAMR bit is 0.
     * If the bit is set, return PAGE_READ | PAGE_WRITE because this bit
     * can only take away EXEC permissions not READ or WRITE permissions.
     * If bit is cleared return PAGE_READ | PAGE_WRITE | PAGE_EXEC since
     * EXEC permissions are allowed.
     */
    return (iamr_bits & 0x1) ? PAGE_READ | PAGE_WRITE :
                               PAGE_READ | PAGE_WRITE | PAGE_EXEC;
}

static int ppc_hash64_amr_prot(PowerPCCPU *cpu, ppc_hash_pte64_t pte)
{
    CPUPPCState *env = &cpu->env;
    int key, amrbits;
    int prot = PAGE_READ | PAGE_WRITE | PAGE_EXEC;

    /* Only recent MMUs implement Virtual Page Class Key Protection */
    if (!ppc_hash64_has(cpu, PPC_HASH64_AMR)) {
        return prot;
    }

    key = HPTE64_R_KEY(pte.pte1);
    amrbits = (env->spr[SPR_AMR] >> 2 * (31 - key)) & 0x3;

    /* fprintf(stderr, "AMR protection: key=%d AMR=0x%" PRIx64 "\n", key, */
    /*         env->spr[SPR_AMR]); */

    /*
     * A store is permitted if the AMR bit is 0. Remove write
     * protection if it is set.
     */
    if (amrbits & 0x2) {
        prot &= ~PAGE_WRITE;
    }
    /*
     * A load is permitted if the AMR bit is 0. Remove read
     * protection if it is set.
     */
    if (amrbits & 0x1) {
        prot &= ~PAGE_READ;
    }

    switch (env->mmu_model) {
    /*
     * MMU version 2.07 and later support IAMR.
     * Check if the IAMR allows the instruction access: when the key denies
     * execution, ppc_hash64_iamr_prot() returns a mask without PAGE_EXEC,
     * so the AND below clears that bit; otherwise it returns
     * PAGE_READ | PAGE_WRITE | PAGE_EXEC and prot is left unchanged,
     * indicating execution support.
     */
    case POWERPC_MMU_2_07:
    case POWERPC_MMU_3_00:
        prot &= ppc_hash64_iamr_prot(cpu, key);
        break;
    default:
        break;
    }

    return prot;
}

static hwaddr ppc_hash64_hpt_base(PowerPCCPU *cpu)
{
    uint64_t base;

    if (cpu->vhyp) {
        return 0;
    }
    if (cpu->env.mmu_model == POWERPC_MMU_3_00) {
        ppc_v3_pate_t pate;

        if (!ppc64_v3_get_pate(cpu, cpu->env.spr[SPR_LPIDR], &pate)) {
            return 0;
        }
        base = pate.dw0;
    } else {
        base = cpu->env.spr[SPR_SDR1];
    }
    return base & SDR_64_HTABORG;
}

static hwaddr ppc_hash64_hpt_mask(PowerPCCPU *cpu)
{
    uint64_t base;

    if (cpu->vhyp) {
        return cpu->vhyp_class->hpt_mask(cpu->vhyp);
    }
    if (cpu->env.mmu_model == POWERPC_MMU_3_00) {
        ppc_v3_pate_t pate;

        if (!ppc64_v3_get_pate(cpu, cpu->env.spr[SPR_LPIDR], &pate)) {
            return 0;
        }
        base = pate.dw0;
    } else {
        base = cpu->env.spr[SPR_SDR1];
    }
    return (1ULL << ((base & SDR_64_HTABSIZE) + 18 - 7)) - 1;
}

const ppc_hash_pte64_t *ppc_hash64_map_hptes(PowerPCCPU *cpu,
                                             hwaddr ptex, int n)
{
    hwaddr pte_offset = ptex * HASH_PTE_SIZE_64;
    hwaddr base;
    hwaddr plen = n * HASH_PTE_SIZE_64;
    const ppc_hash_pte64_t *hptes;

    if (cpu->vhyp) {
        return cpu->vhyp_class->map_hptes(cpu->vhyp, ptex, n);
    }
    base = ppc_hash64_hpt_base(cpu);

    if (!base) {
        return NULL;
    }

    hptes = address_space_map(CPU(cpu)->as, base + pte_offset, &plen, false,
                              MEMTXATTRS_UNSPECIFIED);
    if (plen < (n * HASH_PTE_SIZE_64)) {
        hw_error("%s: Unable to map all requested HPTEs\n", __func__);
    }
    return hptes;
}

void ppc_hash64_unmap_hptes(PowerPCCPU *cpu, const ppc_hash_pte64_t *hptes,
                            hwaddr ptex, int n)
{
    if (cpu->vhyp) {
        cpu->vhyp_class->unmap_hptes(cpu->vhyp, hptes, ptex, n);
        return;
    }

    address_space_unmap(CPU(cpu)->as, (void *)hptes, n * HASH_PTE_SIZE_64,
                        false, n * HASH_PTE_SIZE_64);
}

bool ppc_hash64_valid_ptex(PowerPCCPU *cpu, target_ulong ptex)
{
    /* hash value/pteg group index is normalized by HPT mask */
    if (((ptex & ~7ULL) / HPTES_PER_GROUP) & ~ppc_hash64_hpt_mask(cpu)) {
        return false;
    }
    return true;
}

static unsigned hpte_page_shift(const PPCHash64SegmentPageSizes *sps,
                                uint64_t pte0, uint64_t pte1)
{
    int i;

    if (!(pte0 & HPTE64_V_LARGE)) {
        if (sps->page_shift != 12) {
            /* 4kiB page in a non 4kiB segment */
            return 0;
        }
        /* Normal 4kiB page */
        return 12;
    }

    for (i = 0; i < PPC_PAGE_SIZES_MAX_SZ; i++) {
        const PPCHash64PageSize *ps = &sps->enc[i];
        uint64_t mask;

        if (!ps->page_shift) {
            break;
        }

        if (ps->page_shift == 12) {
            /* L bit is set so this can't be a 4kiB page */
            continue;
        }

        mask = ((1ULL << ps->page_shift) - 1) & HPTE64_R_RPN;

        if ((pte1 & mask) == ((uint64_t)ps->pte_enc << HPTE64_R_RPN_SHIFT)) {
            return ps->page_shift;
        }
    }

    return 0; /* Bad page size encoding */
}
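/*
 * Editor's worked example (illustrative, not from the original source):
 * take the POWER7 64K segment defined at the end of this file
 * (slb_enc = SLB_VSID_64K) and a large HPTE whose encoding is
 * pte_enc 0x1 with page_shift 16.  Then
 *
 *     mask        = ((1ULL << 16) - 1) & HPTE64_R_RPN   // low RPN bits
 *     pte1 & mask == (uint64_t)0x1 << HPTE64_R_RPN_SHIFT
 *
 * so hpte_page_shift() reports 16; an HPTE carrying a different encoding
 * in those bits returns 0 and the PTEG search skips it.
 */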
static void ppc64_v3_new_to_old_hpte(target_ulong *pte0, target_ulong *pte1)
{
    /* Insert B into pte0 */
    *pte0 = (*pte0 & HPTE64_V_COMMON_BITS) |
            ((*pte1 & HPTE64_R_3_0_SSIZE_MASK) <<
             (HPTE64_V_SSIZE_SHIFT - HPTE64_R_3_0_SSIZE_SHIFT));

    /* Remove B from pte1 */
    *pte1 = *pte1 & ~HPTE64_R_3_0_SSIZE_MASK;
}


static hwaddr ppc_hash64_pteg_search(PowerPCCPU *cpu, hwaddr hash,
                                     const PPCHash64SegmentPageSizes *sps,
                                     target_ulong ptem,
                                     ppc_hash_pte64_t *pte, unsigned *pshift)
{
    int i;
    const ppc_hash_pte64_t *pteg;
    target_ulong pte0, pte1;
    target_ulong ptex;

    ptex = (hash & ppc_hash64_hpt_mask(cpu)) * HPTES_PER_GROUP;
    pteg = ppc_hash64_map_hptes(cpu, ptex, HPTES_PER_GROUP);
    if (!pteg) {
        return -1;
    }
    for (i = 0; i < HPTES_PER_GROUP; i++) {
        pte0 = ppc_hash64_hpte0(cpu, pteg, i);
        /*
         * pte0 contains the valid bit and must be read before pte1,
         * otherwise we might see an old pte1 with a new valid bit and
         * thus an inconsistent hpte value
         */
        smp_rmb();
        pte1 = ppc_hash64_hpte1(cpu, pteg, i);

        /* Convert format if necessary */
        if (cpu->env.mmu_model == POWERPC_MMU_3_00 && !cpu->vhyp) {
            ppc64_v3_new_to_old_hpte(&pte0, &pte1);
        }

        /* This compares V, B, H (secondary) and the AVPN */
        if (HPTE64_V_COMPARE(pte0, ptem)) {
            *pshift = hpte_page_shift(sps, pte0, pte1);
            /*
             * If there is no match, ignore the PTE, it could simply
             * be for a different segment size encoding and the
             * architecture specifies we should not match. Linux will
             * potentially leave behind PTEs for the wrong base page
             * size when demoting segments.
             */
            if (*pshift == 0) {
                continue;
            }
            /*
             * We don't do anything with pshift yet as qemu TLB only
             * deals with 4K pages anyway
             */
            pte->pte0 = pte0;
            pte->pte1 = pte1;
            ppc_hash64_unmap_hptes(cpu, pteg, ptex, HPTES_PER_GROUP);
            return ptex + i;
        }
    }
    ppc_hash64_unmap_hptes(cpu, pteg, ptex, HPTES_PER_GROUP);
    /*
     * We didn't find a valid entry.
     */
    return -1;
}
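/*
 * Editor's illustration (not part of the original code): a PTE group holds
 * HPTES_PER_GROUP (8) HPTEs of HASH_PTE_SIZE_64 (16) bytes each, so for a
 * hash value H the search above starts at
 *
 *     ptex        = (H & ppc_hash64_hpt_mask(cpu)) * HPTES_PER_GROUP;
 *     byte offset = ptex * HASH_PTE_SIZE_64;    // 128 bytes per group
 *
 * and ppc_hash64_map_hptes() maps exactly that 128-byte window.
 */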
static hwaddr ppc_hash64_htab_lookup(PowerPCCPU *cpu,
                                     ppc_slb_t *slb, target_ulong eaddr,
                                     ppc_hash_pte64_t *pte, unsigned *pshift)
{
    CPUPPCState *env = &cpu->env;
    hwaddr hash, ptex;
    uint64_t vsid, epnmask, epn, ptem;
    const PPCHash64SegmentPageSizes *sps = slb->sps;

    /*
     * The SLB store path should prevent any bad page size encodings
     * getting in there, so:
     */
    assert(sps);

    /* If ISL is set in LPCR we need to clamp the page size to 4K */
    if (env->spr[SPR_LPCR] & LPCR_ISL) {
        /* We assume that when using TCG, 4k is first entry of SPS */
        sps = &cpu->hash64_opts->sps[0];
        assert(sps->page_shift == 12);
    }

    epnmask = ~((1ULL << sps->page_shift) - 1);

    if (slb->vsid & SLB_VSID_B) {
        /* 1TB segment */
        vsid = (slb->vsid & SLB_VSID_VSID) >> SLB_VSID_SHIFT_1T;
        epn = (eaddr & ~SEGMENT_MASK_1T) & epnmask;
        hash = vsid ^ (vsid << 25) ^ (epn >> sps->page_shift);
    } else {
        /* 256M segment */
        vsid = (slb->vsid & SLB_VSID_VSID) >> SLB_VSID_SHIFT;
        epn = (eaddr & ~SEGMENT_MASK_256M) & epnmask;
        hash = vsid ^ (epn >> sps->page_shift);
    }
    ptem = (slb->vsid & SLB_VSID_PTEM) | ((epn >> 16) & HPTE64_V_AVPN);
    ptem |= HPTE64_V_VALID;

    /* Page address translation */
    qemu_log_mask(CPU_LOG_MMU,
                  "htab_base " HWADDR_FMT_plx " htab_mask " HWADDR_FMT_plx
                  " hash " HWADDR_FMT_plx "\n",
                  ppc_hash64_hpt_base(cpu), ppc_hash64_hpt_mask(cpu), hash);

    /* Primary PTEG lookup */
    qemu_log_mask(CPU_LOG_MMU,
                  "0 htab=" HWADDR_FMT_plx "/" HWADDR_FMT_plx
                  " vsid=" TARGET_FMT_lx " ptem=" TARGET_FMT_lx
                  " hash=" HWADDR_FMT_plx "\n",
                  ppc_hash64_hpt_base(cpu), ppc_hash64_hpt_mask(cpu),
                  vsid, ptem, hash);
    ptex = ppc_hash64_pteg_search(cpu, hash, sps, ptem, pte, pshift);

    if (ptex == -1) {
        /* Secondary PTEG lookup */
        ptem |= HPTE64_V_SECONDARY;
        qemu_log_mask(CPU_LOG_MMU,
                      "1 htab=" HWADDR_FMT_plx "/" HWADDR_FMT_plx
                      " vsid=" TARGET_FMT_lx " api=" TARGET_FMT_lx
                      " hash=" HWADDR_FMT_plx "\n", ppc_hash64_hpt_base(cpu),
                      ppc_hash64_hpt_mask(cpu), vsid, ptem, ~hash);

        ptex = ppc_hash64_pteg_search(cpu, ~hash, sps, ptem, pte, pshift);
    }

    return ptex;
}
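/*
 * Editor's worked example (illustrative): for a 256M segment with 4K pages,
 * an access at segment offset 0x01234567 has epn = 0x01234000, so
 *
 *     primary hash   = vsid ^ (0x01234000 >> 12) = vsid ^ 0x1234
 *     secondary hash = ~primary hash
 *
 * The primary group is probed first; only if no matching HPTE is found is
 * the search repeated on the secondary group with HPTE64_V_SECONDARY set
 * in the expected pte0 value.
 */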
unsigned ppc_hash64_hpte_page_shift_noslb(PowerPCCPU *cpu,
                                          uint64_t pte0, uint64_t pte1)
{
    int i;

    if (!(pte0 & HPTE64_V_LARGE)) {
        return 12;
    }

    /*
     * The encodings in env->sps need to be carefully chosen so that
     * this gives an unambiguous result.
     */
    for (i = 0; i < PPC_PAGE_SIZES_MAX_SZ; i++) {
        const PPCHash64SegmentPageSizes *sps = &cpu->hash64_opts->sps[i];
        unsigned shift;

        if (!sps->page_shift) {
            break;
        }

        shift = hpte_page_shift(sps, pte0, pte1);
        if (shift) {
            return shift;
        }
    }

    return 0;
}

static bool ppc_hash64_use_vrma(CPUPPCState *env)
{
    switch (env->mmu_model) {
    case POWERPC_MMU_3_00:
        /*
         * ISAv3.0 (POWER9) always uses VRMA, the VPM0 field and RMOR
         * register no longer exist
         */
        return true;

    default:
        return !!(env->spr[SPR_LPCR] & LPCR_VPM0);
    }
}

static void ppc_hash64_set_isi(CPUState *cs, int mmu_idx, uint64_t slb_vsid,
                               uint64_t error_code)
{
    CPUPPCState *env = &POWERPC_CPU(cs)->env;
    bool vpm;

    if (!mmuidx_real(mmu_idx)) {
        vpm = !!(env->spr[SPR_LPCR] & LPCR_VPM1);
    } else {
        vpm = ppc_hash64_use_vrma(env);
    }
    if (vpm && !mmuidx_hv(mmu_idx)) {
        cs->exception_index = POWERPC_EXCP_HISI;
        env->spr[SPR_ASDR] = slb_vsid;
    } else {
        cs->exception_index = POWERPC_EXCP_ISI;
    }
    env->error_code = error_code;
}

static void ppc_hash64_set_dsi(CPUState *cs, int mmu_idx, uint64_t slb_vsid,
                               uint64_t dar, uint64_t dsisr)
{
    CPUPPCState *env = &POWERPC_CPU(cs)->env;
    bool vpm;

    if (!mmuidx_real(mmu_idx)) {
        vpm = !!(env->spr[SPR_LPCR] & LPCR_VPM1);
    } else {
        vpm = ppc_hash64_use_vrma(env);
    }
    if (vpm && !mmuidx_hv(mmu_idx)) {
        cs->exception_index = POWERPC_EXCP_HDSI;
        env->spr[SPR_HDAR] = dar;
        env->spr[SPR_HDSISR] = dsisr;
        env->spr[SPR_ASDR] = slb_vsid;
    } else {
        cs->exception_index = POWERPC_EXCP_DSI;
        env->spr[SPR_DAR] = dar;
        env->spr[SPR_DSISR] = dsisr;
    }
    env->error_code = 0;
}


static void ppc_hash64_set_r(PowerPCCPU *cpu, hwaddr ptex, uint64_t pte1)
{
    hwaddr base, offset = ptex * HASH_PTE_SIZE_64 + HPTE64_DW1_R;

    if (cpu->vhyp) {
        cpu->vhyp_class->hpte_set_r(cpu->vhyp, ptex, pte1);
        return;
    }
    base = ppc_hash64_hpt_base(cpu);

    /* The HW performs a non-atomic byte update */
    stb_phys(CPU(cpu)->as, base + offset, ((pte1 >> 8) & 0xff) | 0x01);
}

static void ppc_hash64_set_c(PowerPCCPU *cpu, hwaddr ptex, uint64_t pte1)
{
    hwaddr base, offset = ptex * HASH_PTE_SIZE_64 + HPTE64_DW1_C;

    if (cpu->vhyp) {
        cpu->vhyp_class->hpte_set_c(cpu->vhyp, ptex, pte1);
        return;
    }
    base = ppc_hash64_hpt_base(cpu);

    /* The HW performs a non-atomic byte update */
    stb_phys(CPU(cpu)->as, base + offset, (pte1 & 0xff) | 0x80);
}

static target_ulong rmls_limit(PowerPCCPU *cpu)
{
    CPUPPCState *env = &cpu->env;
    /*
     * In theory the meanings of RMLS values are implementation
     * dependent. In practice, this seems to have been the set from
     * POWER4+..POWER8, and RMLS is no longer supported in POWER9.
     *
     * Unsupported values mean the OS has shot itself in the
     * foot. Return a 0-sized RMA in this case, which we expect
     * to trigger an immediate DSI or ISI
     */
    static const target_ulong rma_sizes[16] = {
        [0] = 256 * GiB,
        [1] = 16 * GiB,
        [2] = 1 * GiB,
        [3] = 64 * MiB,
        [4] = 256 * MiB,
        [7] = 128 * MiB,
        [8] = 32 * MiB,
    };
    target_ulong rmls = (env->spr[SPR_LPCR] & LPCR_RMLS) >> LPCR_RMLS_SHIFT;

    return rma_sizes[rmls];
}
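/*
 * Editor's worked example (illustrative, not from the original source):
 * LPCR[RMLS] = 0x3 selects a 64 MiB real mode area, so in the RMO path of
 * ppc_hash64_xlate() below a real-mode raddr under 0x04000000 passes the
 * bounds check while anything at or above it raises a PROTFAULT DSI/ISI.
 * Values absent from rma_sizes[] (e.g. 5 or 6) yield a 0-sized RMA, making
 * every real-mode access fault as the comment above describes.
 */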
/* Return the LLP in SLB_VSID format */
static uint64_t get_vrma_llp(PowerPCCPU *cpu)
{
    CPUPPCState *env = &cpu->env;
    uint64_t llp;

    if (env->mmu_model == POWERPC_MMU_3_00) {
        ppc_v3_pate_t pate;
        uint64_t ps, l, lp;

        /*
         * ISA v3.0 removes the LPCR[VRMASD] field and puts the VRMA base
         * page size (L||LP equivalent) in the PS field in the HPT partition
         * table entry.
         */
        if (!ppc64_v3_get_pate(cpu, cpu->env.spr[SPR_LPIDR], &pate)) {
            error_report("Bad VRMA with no partition table entry");
            return 0;
        }
        ps = PATE0_GET_PS(pate.dw0);
        /* PS has L||LP in 3 consecutive bits, put them into SLB LLP format */
        l = (ps >> 2) & 0x1;
        lp = ps & 0x3;
        llp = (l << SLB_VSID_L_SHIFT) | (lp << SLB_VSID_LP_SHIFT);

    } else {
        uint64_t lpcr = env->spr[SPR_LPCR];
        target_ulong vrmasd = (lpcr & LPCR_VRMASD) >> LPCR_VRMASD_SHIFT;

        /* VRMASD LLP matches SLB format, just shift and mask it */
        llp = (vrmasd << SLB_VSID_LP_SHIFT) & SLB_VSID_LLP_MASK;
    }

    return llp;
}

static int build_vrma_slbe(PowerPCCPU *cpu, ppc_slb_t *slb)
{
    uint64_t llp = get_vrma_llp(cpu);
    target_ulong vsid = SLB_VSID_VRMA | llp;
    int i;

    for (i = 0; i < PPC_PAGE_SIZES_MAX_SZ; i++) {
        const PPCHash64SegmentPageSizes *sps = &cpu->hash64_opts->sps[i];

        if (!sps->page_shift) {
            break;
        }

        if ((vsid & SLB_VSID_LLP_MASK) == sps->slb_enc) {
            slb->esid = SLB_ESID_V;
            slb->vsid = vsid;
            slb->sps = sps;
            return 0;
        }
    }

    error_report("Bad VRMA page size encoding 0x" TARGET_FMT_lx, llp);

    return -1;
}
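/*
 * Editor's note (illustrative): build_vrma_slbe() fabricates a single valid
 * SLB entry for the VRMA region and accepts it only if the LLP bits taken
 * from the partition table PS field (or LPCR[VRMASD] on pre-v3.0 MMUs)
 * match one of the segment page size encodings in cpu->hash64_opts->sps[];
 * for example an all-zero LLP matches the 4K entry, whose slb_enc is 0.
 * Any other value is reported as a bad VRMA encoding and, in
 * ppc_hash64_xlate() below, turns into a machine check.
 */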
bool ppc_hash64_xlate(PowerPCCPU *cpu, vaddr eaddr, MMUAccessType access_type,
                      hwaddr *raddrp, int *psizep, int *protp, int mmu_idx,
                      bool guest_visible)
{
    CPUState *cs = CPU(cpu);
    CPUPPCState *env = &cpu->env;
    ppc_slb_t vrma_slbe;
    ppc_slb_t *slb;
    unsigned apshift;
    hwaddr ptex;
    ppc_hash_pte64_t pte;
    int exec_prot, pp_prot, amr_prot, prot;
    int need_prot;
    hwaddr raddr;

    /*
     * Note on LPCR usage: 970 uses HID4, but our special variant of
     * store_spr copies relevant fields into env->spr[SPR_LPCR].
     * Similarly we filter unimplemented bits when storing into LPCR
     * depending on the MMU version. This code can thus just use the
     * LPCR "as-is".
     */

    /* 1. Handle real mode accesses */
    if (mmuidx_real(mmu_idx)) {
        /*
         * Translation is supposedly "off", but in real mode the top 4
         * effective address bits are (mostly) ignored
         */
        raddr = eaddr & 0x0FFFFFFFFFFFFFFFULL;

        if (cpu->vhyp) {
            /*
             * In virtual hypervisor mode, there's nothing to do:
             * EA == GPA == qemu guest address
             */
        } else if (mmuidx_hv(mmu_idx) || !env->has_hv_mode) {
            /* In HV mode, add HRMOR if top EA bit is clear */
            if (!(eaddr >> 63)) {
                raddr |= env->spr[SPR_HRMOR];
            }
        } else if (ppc_hash64_use_vrma(env)) {
            /* Emulated VRMA mode */
            slb = &vrma_slbe;
            if (build_vrma_slbe(cpu, slb) != 0) {
                /* Invalid VRMA setup, machine check */
                if (guest_visible) {
                    cs->exception_index = POWERPC_EXCP_MCHECK;
                    env->error_code = 0;
                }
                return false;
            }

            goto skip_slb_search;
        } else {
            target_ulong limit = rmls_limit(cpu);

            /* Emulated old-style RMO mode, bounds check against RMLS */
            if (raddr >= limit) {
                if (!guest_visible) {
                    return false;
                }
                switch (access_type) {
                case MMU_INST_FETCH:
                    ppc_hash64_set_isi(cs, mmu_idx, 0, SRR1_PROTFAULT);
                    break;
                case MMU_DATA_LOAD:
                    ppc_hash64_set_dsi(cs, mmu_idx, 0, eaddr, DSISR_PROTFAULT);
                    break;
                case MMU_DATA_STORE:
                    ppc_hash64_set_dsi(cs, mmu_idx, 0, eaddr,
                                       DSISR_PROTFAULT | DSISR_ISSTORE);
                    break;
                default:
                    g_assert_not_reached();
                }
                return false;
            }

            raddr |= env->spr[SPR_RMOR];
        }

        *raddrp = raddr;
        *protp = PAGE_READ | PAGE_WRITE | PAGE_EXEC;
        *psizep = TARGET_PAGE_BITS;
        return true;
    }

    /* 2. Translation is on, so look up the SLB */
    slb = slb_lookup(cpu, eaddr);
    if (!slb) {
        /* No entry found, check if in-memory segment tables are in use */
        if (ppc64_use_proc_tbl(cpu)) {
            /* TODO - Unsupported */
            error_report("Segment Table Support Unimplemented");
            exit(1);
        }
        /* Segment still not found, generate the appropriate interrupt */
        if (!guest_visible) {
            return false;
        }
        switch (access_type) {
        case MMU_INST_FETCH:
            cs->exception_index = POWERPC_EXCP_ISEG;
            env->error_code = 0;
            break;
        case MMU_DATA_LOAD:
        case MMU_DATA_STORE:
            cs->exception_index = POWERPC_EXCP_DSEG;
            env->error_code = 0;
            env->spr[SPR_DAR] = eaddr;
            break;
        default:
            g_assert_not_reached();
        }
        return false;
    }

skip_slb_search:

    /* 3. Check for segment level no-execute violation */
    if (access_type == MMU_INST_FETCH && (slb->vsid & SLB_VSID_N)) {
        if (guest_visible) {
            ppc_hash64_set_isi(cs, mmu_idx, slb->vsid, SRR1_NOEXEC_GUARD);
        }
        return false;
    }
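    /*
     * Editor's note: both paths converge here with a usable SLB entry -
     * either the entry found by slb_lookup() above or the synthetic VRMA
     * entry built in step 1 - so the hash table walk below always has a
     * valid slb->sps to size the page with.
     */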
    /* 4. Locate the PTE in the hash table */
    ptex = ppc_hash64_htab_lookup(cpu, slb, eaddr, &pte, &apshift);
    if (ptex == -1) {
        if (!guest_visible) {
            return false;
        }
        switch (access_type) {
        case MMU_INST_FETCH:
            ppc_hash64_set_isi(cs, mmu_idx, slb->vsid, SRR1_NOPTE);
            break;
        case MMU_DATA_LOAD:
            ppc_hash64_set_dsi(cs, mmu_idx, slb->vsid, eaddr, DSISR_NOPTE);
            break;
        case MMU_DATA_STORE:
            ppc_hash64_set_dsi(cs, mmu_idx, slb->vsid, eaddr,
                               DSISR_NOPTE | DSISR_ISSTORE);
            break;
        default:
            g_assert_not_reached();
        }
        return false;
    }
    qemu_log_mask(CPU_LOG_MMU,
                  "found PTE at index %08" HWADDR_PRIx "\n", ptex);

    /* 5. Check access permissions */

    exec_prot = ppc_hash64_pte_noexec_guard(cpu, pte);
    pp_prot = ppc_hash64_pte_prot(mmu_idx, slb, pte);
    amr_prot = ppc_hash64_amr_prot(cpu, pte);
    prot = exec_prot & pp_prot & amr_prot;

    need_prot = check_prot_access_type(PAGE_RWX, access_type);
    if (need_prot & ~prot) {
        /* Access right violation */
        qemu_log_mask(CPU_LOG_MMU, "PTE access rejected\n");
        if (!guest_visible) {
            return false;
        }
        if (access_type == MMU_INST_FETCH) {
            int srr1 = 0;
            if (PAGE_EXEC & ~exec_prot) {
                srr1 |= SRR1_NOEXEC_GUARD; /* Access violates noexec or guard */
            } else if (PAGE_EXEC & ~pp_prot) {
                srr1 |= SRR1_PROTFAULT; /* Access violates access authority */
            }
            if (PAGE_EXEC & ~amr_prot) {
                srr1 |= SRR1_IAMR; /* Access violates virt pg class key prot */
            }
            ppc_hash64_set_isi(cs, mmu_idx, slb->vsid, srr1);
        } else {
            int dsisr = 0;
            if (need_prot & ~pp_prot) {
                dsisr |= DSISR_PROTFAULT;
            }
            if (access_type == MMU_DATA_STORE) {
                dsisr |= DSISR_ISSTORE;
            }
            if (need_prot & ~amr_prot) {
                dsisr |= DSISR_AMR;
            }
            ppc_hash64_set_dsi(cs, mmu_idx, slb->vsid, eaddr, dsisr);
        }
        return false;
    }

    qemu_log_mask(CPU_LOG_MMU, "PTE access granted !\n");

    /* 6. Update PTE referenced and changed bits if necessary */

    if (!(pte.pte1 & HPTE64_R_R)) {
        ppc_hash64_set_r(cpu, ptex, pte.pte1);
    }
    if (!(pte.pte1 & HPTE64_R_C)) {
        if (access_type == MMU_DATA_STORE) {
            ppc_hash64_set_c(cpu, ptex, pte.pte1);
        } else {
            /*
             * Treat the page as read-only for now, so that a later write
             * will pass through this function again to set the C bit
             */
            prot &= ~PAGE_WRITE;
        }
    }

    /* 7. Determine the real address from the PTE */

    *raddrp = deposit64(pte.pte1 & HPTE64_R_RPN, 0, apshift, eaddr);
    *protp = prot;
    *psizep = apshift;
    return true;
}
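/*
 * Editor's worked example (illustrative, not from the original source):
 * for a 64K page (apshift == 16) the deposit64() in step 7 keeps the RPN
 * bits of pte1 above the page offset and splices in the low 16 bits of the
 * effective address, i.e. roughly
 *
 *     raddr = (pte.pte1 & HPTE64_R_RPN & ~0xffffULL) | (eaddr & 0xffff);
 *
 * which also discards the LP encoding bits that a large-page HPTE keeps in
 * the low part of its RPN field.
 */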
void ppc_hash64_tlb_flush_hpte(PowerPCCPU *cpu, target_ulong ptex,
                               target_ulong pte0, target_ulong pte1)
{
    /*
     * XXX: given the fact that there are too many segments to
     * invalidate, and we still don't have a tlb_flush_mask(env, n,
     * mask) in QEMU, we just invalidate all TLBs
     */
    cpu->env.tlb_need_flush = TLB_NEED_GLOBAL_FLUSH | TLB_NEED_LOCAL_FLUSH;
}

#ifdef CONFIG_TCG
void helper_store_lpcr(CPUPPCState *env, target_ulong val)
{
    PowerPCCPU *cpu = env_archcpu(env);

    ppc_store_lpcr(cpu, val);
}
#endif

void ppc_hash64_init(PowerPCCPU *cpu)
{
    CPUPPCState *env = &cpu->env;
    PowerPCCPUClass *pcc = POWERPC_CPU_GET_CLASS(cpu);

    if (!pcc->hash64_opts) {
        assert(!mmu_is_64bit(env->mmu_model));
        return;
    }

    cpu->hash64_opts = g_memdup2(pcc->hash64_opts, sizeof(*cpu->hash64_opts));
}

void ppc_hash64_finalize(PowerPCCPU *cpu)
{
    g_free(cpu->hash64_opts);
}

const PPCHash64Options ppc_hash64_opts_basic = {
    .flags = 0,
    .slb_size = 64,
    .sps = {
        { .page_shift = 12, /* 4K */
          .slb_enc = 0,
          .enc = { { .page_shift = 12, .pte_enc = 0 } }
        },
        { .page_shift = 24, /* 16M */
          .slb_enc = 0x100,
          .enc = { { .page_shift = 24, .pte_enc = 0 } }
        },
    },
};

const PPCHash64Options ppc_hash64_opts_POWER7 = {
    .flags = PPC_HASH64_1TSEG | PPC_HASH64_AMR | PPC_HASH64_CI_LARGEPAGE,
    .slb_size = 32,
    .sps = {
        {
            .page_shift = 12, /* 4K */
            .slb_enc = 0,
            .enc = { { .page_shift = 12, .pte_enc = 0 },
                     { .page_shift = 16, .pte_enc = 0x7 },
                     { .page_shift = 24, .pte_enc = 0x38 }, },
        },
        {
            .page_shift = 16, /* 64K */
            .slb_enc = SLB_VSID_64K,
            .enc = { { .page_shift = 16, .pte_enc = 0x1 },
                     { .page_shift = 24, .pte_enc = 0x8 }, },
        },
        {
            .page_shift = 24, /* 16M */
            .slb_enc = SLB_VSID_16M,
            .enc = { { .page_shift = 24, .pte_enc = 0 }, },
        },
        {
            .page_shift = 34, /* 16G */
            .slb_enc = SLB_VSID_16G,
            .enc = { { .page_shift = 34, .pte_enc = 0x3 }, },
        },
    }
};
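/*
 * Editor's note (illustrative, not part of the original code): in the
 * tables above, .slb_enc is the LLP value matched against
 * SLB_VSID_LLP_MASK in ppc_store_slb() and build_vrma_slbe(), while each
 * .enc[] pair gives an actual page shift together with the pte_enc value
 * that hpte_page_shift() expects to find in the low RPN bits of a large
 * HPTE; e.g. on POWER7 a 64K page inside a 64K segment is encoded as
 * pte_enc 0x1 and a 16M page inside a 4K segment as pte_enc 0x38.
 */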