1 /* 2 * PowerPC MMU, TLB, SLB and BAT emulation helpers for QEMU. 3 * 4 * Copyright (c) 2003-2007 Jocelyn Mayer 5 * Copyright (c) 2013 David Gibson, IBM Corporation 6 * 7 * This library is free software; you can redistribute it and/or 8 * modify it under the terms of the GNU Lesser General Public 9 * License as published by the Free Software Foundation; either 10 * version 2 of the License, or (at your option) any later version. 11 * 12 * This library is distributed in the hope that it will be useful, 13 * but WITHOUT ANY WARRANTY; without even the implied warranty of 14 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU 15 * Lesser General Public License for more details. 16 * 17 * You should have received a copy of the GNU Lesser General Public 18 * License along with this library; if not, see <http://www.gnu.org/licenses/>. 19 */ 20 #include "qemu/osdep.h" 21 #include "cpu.h" 22 #include "exec/exec-all.h" 23 #include "exec/helper-proto.h" 24 #include "qemu/error-report.h" 25 #include "qemu/qemu-print.h" 26 #include "sysemu/hw_accel.h" 27 #include "kvm_ppc.h" 28 #include "mmu-hash64.h" 29 #include "exec/log.h" 30 #include "hw/hw.h" 31 #include "mmu-book3s-v3.h" 32 33 //#define DEBUG_SLB 34 35 #ifdef DEBUG_SLB 36 # define LOG_SLB(...) qemu_log_mask(CPU_LOG_MMU, __VA_ARGS__) 37 #else 38 # define LOG_SLB(...) do { } while (0) 39 #endif 40 41 /* 42 * SLB handling 43 */ 44 45 static ppc_slb_t *slb_lookup(PowerPCCPU *cpu, target_ulong eaddr) 46 { 47 CPUPPCState *env = &cpu->env; 48 uint64_t esid_256M, esid_1T; 49 int n; 50 51 LOG_SLB("%s: eaddr " TARGET_FMT_lx "\n", __func__, eaddr); 52 53 esid_256M = (eaddr & SEGMENT_MASK_256M) | SLB_ESID_V; 54 esid_1T = (eaddr & SEGMENT_MASK_1T) | SLB_ESID_V; 55 56 for (n = 0; n < cpu->hash64_opts->slb_size; n++) { 57 ppc_slb_t *slb = &env->slb[n]; 58 59 LOG_SLB("%s: slot %d %016" PRIx64 " %016" 60 PRIx64 "\n", __func__, n, slb->esid, slb->vsid); 61 /* We check for 1T matches on all MMUs here - if the MMU 62 * doesn't have 1T segment support, we will have prevented 1T 63 * entries from being inserted in the slbmte code. */ 64 if (((slb->esid == esid_256M) && 65 ((slb->vsid & SLB_VSID_B) == SLB_VSID_B_256M)) 66 || ((slb->esid == esid_1T) && 67 ((slb->vsid & SLB_VSID_B) == SLB_VSID_B_1T))) { 68 return slb; 69 } 70 } 71 72 return NULL; 73 } 74 75 void dump_slb(PowerPCCPU *cpu) 76 { 77 CPUPPCState *env = &cpu->env; 78 int i; 79 uint64_t slbe, slbv; 80 81 cpu_synchronize_state(CPU(cpu)); 82 83 qemu_printf("SLB\tESID\t\t\tVSID\n"); 84 for (i = 0; i < cpu->hash64_opts->slb_size; i++) { 85 slbe = env->slb[i].esid; 86 slbv = env->slb[i].vsid; 87 if (slbe == 0 && slbv == 0) { 88 continue; 89 } 90 qemu_printf("%d\t0x%016" PRIx64 "\t0x%016" PRIx64 "\n", 91 i, slbe, slbv); 92 } 93 } 94 95 void helper_slbia(CPUPPCState *env) 96 { 97 PowerPCCPU *cpu = ppc_env_get_cpu(env); 98 int n; 99 100 /* XXX: Warning: slbia never invalidates the first segment */ 101 for (n = 1; n < cpu->hash64_opts->slb_size; n++) { 102 ppc_slb_t *slb = &env->slb[n]; 103 104 if (slb->esid & SLB_ESID_V) { 105 slb->esid &= ~SLB_ESID_V; 106 /* XXX: given the fact that segment size is 256 MB or 1TB, 107 * and we still don't have a tlb_flush_mask(env, n, mask) 108 * in QEMU, we just invalidate all TLBs 109 */ 110 env->tlb_need_flush |= TLB_NEED_LOCAL_FLUSH; 111 } 112 } 113 } 114 115 static void __helper_slbie(CPUPPCState *env, target_ulong addr, 116 target_ulong global) 117 { 118 PowerPCCPU *cpu = ppc_env_get_cpu(env); 119 ppc_slb_t *slb; 120 121 slb = slb_lookup(cpu, addr); 122 if (!slb) { 123 return; 124 } 125 126 if (slb->esid & SLB_ESID_V) { 127 slb->esid &= ~SLB_ESID_V; 128 129 /* XXX: given the fact that segment size is 256 MB or 1TB, 130 * and we still don't have a tlb_flush_mask(env, n, mask) 131 * in QEMU, we just invalidate all TLBs 132 */ 133 env->tlb_need_flush |= 134 (global == false ? TLB_NEED_LOCAL_FLUSH : TLB_NEED_GLOBAL_FLUSH); 135 } 136 } 137 138 void helper_slbie(CPUPPCState *env, target_ulong addr) 139 { 140 __helper_slbie(env, addr, false); 141 } 142 143 void helper_slbieg(CPUPPCState *env, target_ulong addr) 144 { 145 __helper_slbie(env, addr, true); 146 } 147 148 int ppc_store_slb(PowerPCCPU *cpu, target_ulong slot, 149 target_ulong esid, target_ulong vsid) 150 { 151 CPUPPCState *env = &cpu->env; 152 ppc_slb_t *slb = &env->slb[slot]; 153 const PPCHash64SegmentPageSizes *sps = NULL; 154 int i; 155 156 if (slot >= cpu->hash64_opts->slb_size) { 157 return -1; /* Bad slot number */ 158 } 159 if (esid & ~(SLB_ESID_ESID | SLB_ESID_V)) { 160 return -1; /* Reserved bits set */ 161 } 162 if (vsid & (SLB_VSID_B & ~SLB_VSID_B_1T)) { 163 return -1; /* Bad segment size */ 164 } 165 if ((vsid & SLB_VSID_B) && !(ppc_hash64_has(cpu, PPC_HASH64_1TSEG))) { 166 return -1; /* 1T segment on MMU that doesn't support it */ 167 } 168 169 for (i = 0; i < PPC_PAGE_SIZES_MAX_SZ; i++) { 170 const PPCHash64SegmentPageSizes *sps1 = &cpu->hash64_opts->sps[i]; 171 172 if (!sps1->page_shift) { 173 break; 174 } 175 176 if ((vsid & SLB_VSID_LLP_MASK) == sps1->slb_enc) { 177 sps = sps1; 178 break; 179 } 180 } 181 182 if (!sps) { 183 error_report("Bad page size encoding in SLB store: slot "TARGET_FMT_lu 184 " esid 0x"TARGET_FMT_lx" vsid 0x"TARGET_FMT_lx, 185 slot, esid, vsid); 186 return -1; 187 } 188 189 slb->esid = esid; 190 slb->vsid = vsid; 191 slb->sps = sps; 192 193 LOG_SLB("%s: " TARGET_FMT_lu " " TARGET_FMT_lx " - " TARGET_FMT_lx 194 " => %016" PRIx64 " %016" PRIx64 "\n", __func__, slot, esid, vsid, 195 slb->esid, slb->vsid); 196 197 return 0; 198 } 199 200 static int ppc_load_slb_esid(PowerPCCPU *cpu, target_ulong rb, 201 target_ulong *rt) 202 { 203 CPUPPCState *env = &cpu->env; 204 int slot = rb & 0xfff; 205 ppc_slb_t *slb = &env->slb[slot]; 206 207 if (slot >= cpu->hash64_opts->slb_size) { 208 return -1; 209 } 210 211 *rt = slb->esid; 212 return 0; 213 } 214 215 static int ppc_load_slb_vsid(PowerPCCPU *cpu, target_ulong rb, 216 target_ulong *rt) 217 { 218 CPUPPCState *env = &cpu->env; 219 int slot = rb & 0xfff; 220 ppc_slb_t *slb = &env->slb[slot]; 221 222 if (slot >= cpu->hash64_opts->slb_size) { 223 return -1; 224 } 225 226 *rt = slb->vsid; 227 return 0; 228 } 229 230 static int ppc_find_slb_vsid(PowerPCCPU *cpu, target_ulong rb, 231 target_ulong *rt) 232 { 233 CPUPPCState *env = &cpu->env; 234 ppc_slb_t *slb; 235 236 if (!msr_is_64bit(env, env->msr)) { 237 rb &= 0xffffffff; 238 } 239 slb = slb_lookup(cpu, rb); 240 if (slb == NULL) { 241 *rt = (target_ulong)-1ul; 242 } else { 243 *rt = slb->vsid; 244 } 245 return 0; 246 } 247 248 void helper_store_slb(CPUPPCState *env, target_ulong rb, target_ulong rs) 249 { 250 PowerPCCPU *cpu = ppc_env_get_cpu(env); 251 252 if (ppc_store_slb(cpu, rb & 0xfff, rb & ~0xfffULL, rs) < 0) { 253 raise_exception_err_ra(env, POWERPC_EXCP_PROGRAM, 254 POWERPC_EXCP_INVAL, GETPC()); 255 } 256 } 257 258 target_ulong helper_load_slb_esid(CPUPPCState *env, target_ulong rb) 259 { 260 PowerPCCPU *cpu = ppc_env_get_cpu(env); 261 target_ulong rt = 0; 262 263 if (ppc_load_slb_esid(cpu, rb, &rt) < 0) { 264 raise_exception_err_ra(env, POWERPC_EXCP_PROGRAM, 265 POWERPC_EXCP_INVAL, GETPC()); 266 } 267 return rt; 268 } 269 270 target_ulong helper_find_slb_vsid(CPUPPCState *env, target_ulong rb) 271 { 272 PowerPCCPU *cpu = ppc_env_get_cpu(env); 273 target_ulong rt = 0; 274 275 if (ppc_find_slb_vsid(cpu, rb, &rt) < 0) { 276 raise_exception_err_ra(env, POWERPC_EXCP_PROGRAM, 277 POWERPC_EXCP_INVAL, GETPC()); 278 } 279 return rt; 280 } 281 282 target_ulong helper_load_slb_vsid(CPUPPCState *env, target_ulong rb) 283 { 284 PowerPCCPU *cpu = ppc_env_get_cpu(env); 285 target_ulong rt = 0; 286 287 if (ppc_load_slb_vsid(cpu, rb, &rt) < 0) { 288 raise_exception_err_ra(env, POWERPC_EXCP_PROGRAM, 289 POWERPC_EXCP_INVAL, GETPC()); 290 } 291 return rt; 292 } 293 294 /* Check No-Execute or Guarded Storage */ 295 static inline int ppc_hash64_pte_noexec_guard(PowerPCCPU *cpu, 296 ppc_hash_pte64_t pte) 297 { 298 /* Exec permissions CANNOT take away read or write permissions */ 299 return (pte.pte1 & HPTE64_R_N) || (pte.pte1 & HPTE64_R_G) ? 300 PAGE_READ | PAGE_WRITE : PAGE_READ | PAGE_WRITE | PAGE_EXEC; 301 } 302 303 /* Check Basic Storage Protection */ 304 static int ppc_hash64_pte_prot(PowerPCCPU *cpu, 305 ppc_slb_t *slb, ppc_hash_pte64_t pte) 306 { 307 CPUPPCState *env = &cpu->env; 308 unsigned pp, key; 309 /* Some pp bit combinations have undefined behaviour, so default 310 * to no access in those cases */ 311 int prot = 0; 312 313 key = !!(msr_pr ? (slb->vsid & SLB_VSID_KP) 314 : (slb->vsid & SLB_VSID_KS)); 315 pp = (pte.pte1 & HPTE64_R_PP) | ((pte.pte1 & HPTE64_R_PP0) >> 61); 316 317 if (key == 0) { 318 switch (pp) { 319 case 0x0: 320 case 0x1: 321 case 0x2: 322 prot = PAGE_READ | PAGE_WRITE | PAGE_EXEC; 323 break; 324 325 case 0x3: 326 case 0x6: 327 prot = PAGE_READ | PAGE_EXEC; 328 break; 329 } 330 } else { 331 switch (pp) { 332 case 0x0: 333 case 0x6: 334 break; 335 336 case 0x1: 337 case 0x3: 338 prot = PAGE_READ | PAGE_EXEC; 339 break; 340 341 case 0x2: 342 prot = PAGE_READ | PAGE_WRITE | PAGE_EXEC; 343 break; 344 } 345 } 346 347 return prot; 348 } 349 350 /* Check the instruction access permissions specified in the IAMR */ 351 static int ppc_hash64_iamr_prot(PowerPCCPU *cpu, int key) 352 { 353 CPUPPCState *env = &cpu->env; 354 int iamr_bits = (env->spr[SPR_IAMR] >> 2 * (31 - key)) & 0x3; 355 356 /* 357 * An instruction fetch is permitted if the IAMR bit is 0. 358 * If the bit is set, return PAGE_READ | PAGE_WRITE because this bit 359 * can only take away EXEC permissions not READ or WRITE permissions. 360 * If bit is cleared return PAGE_READ | PAGE_WRITE | PAGE_EXEC since 361 * EXEC permissions are allowed. 362 */ 363 return (iamr_bits & 0x1) ? PAGE_READ | PAGE_WRITE : 364 PAGE_READ | PAGE_WRITE | PAGE_EXEC; 365 } 366 367 static int ppc_hash64_amr_prot(PowerPCCPU *cpu, ppc_hash_pte64_t pte) 368 { 369 CPUPPCState *env = &cpu->env; 370 int key, amrbits; 371 int prot = PAGE_READ | PAGE_WRITE | PAGE_EXEC; 372 373 /* Only recent MMUs implement Virtual Page Class Key Protection */ 374 if (!ppc_hash64_has(cpu, PPC_HASH64_AMR)) { 375 return prot; 376 } 377 378 key = HPTE64_R_KEY(pte.pte1); 379 amrbits = (env->spr[SPR_AMR] >> 2*(31 - key)) & 0x3; 380 381 /* fprintf(stderr, "AMR protection: key=%d AMR=0x%" PRIx64 "\n", key, */ 382 /* env->spr[SPR_AMR]); */ 383 384 /* 385 * A store is permitted if the AMR bit is 0. Remove write 386 * protection if it is set. 387 */ 388 if (amrbits & 0x2) { 389 prot &= ~PAGE_WRITE; 390 } 391 /* 392 * A load is permitted if the AMR bit is 0. Remove read 393 * protection if it is set. 394 */ 395 if (amrbits & 0x1) { 396 prot &= ~PAGE_READ; 397 } 398 399 switch (env->mmu_model) { 400 /* 401 * MMU version 2.07 and later support IAMR 402 * Check if the IAMR allows the instruction access - it will return 403 * PAGE_EXEC if it doesn't (and thus that bit will be cleared) or 0 404 * if it does (and prot will be unchanged indicating execution support). 405 */ 406 case POWERPC_MMU_2_07: 407 case POWERPC_MMU_3_00: 408 prot &= ppc_hash64_iamr_prot(cpu, key); 409 break; 410 default: 411 break; 412 } 413 414 return prot; 415 } 416 417 const ppc_hash_pte64_t *ppc_hash64_map_hptes(PowerPCCPU *cpu, 418 hwaddr ptex, int n) 419 { 420 hwaddr pte_offset = ptex * HASH_PTE_SIZE_64; 421 hwaddr base; 422 hwaddr plen = n * HASH_PTE_SIZE_64; 423 const ppc_hash_pte64_t *hptes; 424 425 if (cpu->vhyp) { 426 PPCVirtualHypervisorClass *vhc = 427 PPC_VIRTUAL_HYPERVISOR_GET_CLASS(cpu->vhyp); 428 return vhc->map_hptes(cpu->vhyp, ptex, n); 429 } 430 base = ppc_hash64_hpt_base(cpu); 431 432 if (!base) { 433 return NULL; 434 } 435 436 hptes = address_space_map(CPU(cpu)->as, base + pte_offset, &plen, false, 437 MEMTXATTRS_UNSPECIFIED); 438 if (plen < (n * HASH_PTE_SIZE_64)) { 439 hw_error("%s: Unable to map all requested HPTEs\n", __func__); 440 } 441 return hptes; 442 } 443 444 void ppc_hash64_unmap_hptes(PowerPCCPU *cpu, const ppc_hash_pte64_t *hptes, 445 hwaddr ptex, int n) 446 { 447 if (cpu->vhyp) { 448 PPCVirtualHypervisorClass *vhc = 449 PPC_VIRTUAL_HYPERVISOR_GET_CLASS(cpu->vhyp); 450 vhc->unmap_hptes(cpu->vhyp, hptes, ptex, n); 451 return; 452 } 453 454 address_space_unmap(CPU(cpu)->as, (void *)hptes, n * HASH_PTE_SIZE_64, 455 false, n * HASH_PTE_SIZE_64); 456 } 457 458 static unsigned hpte_page_shift(const PPCHash64SegmentPageSizes *sps, 459 uint64_t pte0, uint64_t pte1) 460 { 461 int i; 462 463 if (!(pte0 & HPTE64_V_LARGE)) { 464 if (sps->page_shift != 12) { 465 /* 4kiB page in a non 4kiB segment */ 466 return 0; 467 } 468 /* Normal 4kiB page */ 469 return 12; 470 } 471 472 for (i = 0; i < PPC_PAGE_SIZES_MAX_SZ; i++) { 473 const PPCHash64PageSize *ps = &sps->enc[i]; 474 uint64_t mask; 475 476 if (!ps->page_shift) { 477 break; 478 } 479 480 if (ps->page_shift == 12) { 481 /* L bit is set so this can't be a 4kiB page */ 482 continue; 483 } 484 485 mask = ((1ULL << ps->page_shift) - 1) & HPTE64_R_RPN; 486 487 if ((pte1 & mask) == ((uint64_t)ps->pte_enc << HPTE64_R_RPN_SHIFT)) { 488 return ps->page_shift; 489 } 490 } 491 492 return 0; /* Bad page size encoding */ 493 } 494 495 static void ppc64_v3_new_to_old_hpte(target_ulong *pte0, target_ulong *pte1) 496 { 497 /* Insert B into pte0 */ 498 *pte0 = (*pte0 & HPTE64_V_COMMON_BITS) | 499 ((*pte1 & HPTE64_R_3_0_SSIZE_MASK) << 500 (HPTE64_V_SSIZE_SHIFT - HPTE64_R_3_0_SSIZE_SHIFT)); 501 502 /* Remove B from pte1 */ 503 *pte1 = *pte1 & ~HPTE64_R_3_0_SSIZE_MASK; 504 } 505 506 507 static hwaddr ppc_hash64_pteg_search(PowerPCCPU *cpu, hwaddr hash, 508 const PPCHash64SegmentPageSizes *sps, 509 target_ulong ptem, 510 ppc_hash_pte64_t *pte, unsigned *pshift) 511 { 512 int i; 513 const ppc_hash_pte64_t *pteg; 514 target_ulong pte0, pte1; 515 target_ulong ptex; 516 517 ptex = (hash & ppc_hash64_hpt_mask(cpu)) * HPTES_PER_GROUP; 518 pteg = ppc_hash64_map_hptes(cpu, ptex, HPTES_PER_GROUP); 519 if (!pteg) { 520 return -1; 521 } 522 for (i = 0; i < HPTES_PER_GROUP; i++) { 523 pte0 = ppc_hash64_hpte0(cpu, pteg, i); 524 /* 525 * pte0 contains the valid bit and must be read before pte1, 526 * otherwise we might see an old pte1 with a new valid bit and 527 * thus an inconsistent hpte value 528 */ 529 smp_rmb(); 530 pte1 = ppc_hash64_hpte1(cpu, pteg, i); 531 532 /* Convert format if necessary */ 533 if (cpu->env.mmu_model == POWERPC_MMU_3_00 && !cpu->vhyp) { 534 ppc64_v3_new_to_old_hpte(&pte0, &pte1); 535 } 536 537 /* This compares V, B, H (secondary) and the AVPN */ 538 if (HPTE64_V_COMPARE(pte0, ptem)) { 539 *pshift = hpte_page_shift(sps, pte0, pte1); 540 /* 541 * If there is no match, ignore the PTE, it could simply 542 * be for a different segment size encoding and the 543 * architecture specifies we should not match. Linux will 544 * potentially leave behind PTEs for the wrong base page 545 * size when demoting segments. 546 */ 547 if (*pshift == 0) { 548 continue; 549 } 550 /* We don't do anything with pshift yet as qemu TLB only deals 551 * with 4K pages anyway 552 */ 553 pte->pte0 = pte0; 554 pte->pte1 = pte1; 555 ppc_hash64_unmap_hptes(cpu, pteg, ptex, HPTES_PER_GROUP); 556 return ptex + i; 557 } 558 } 559 ppc_hash64_unmap_hptes(cpu, pteg, ptex, HPTES_PER_GROUP); 560 /* 561 * We didn't find a valid entry. 562 */ 563 return -1; 564 } 565 566 static hwaddr ppc_hash64_htab_lookup(PowerPCCPU *cpu, 567 ppc_slb_t *slb, target_ulong eaddr, 568 ppc_hash_pte64_t *pte, unsigned *pshift) 569 { 570 CPUPPCState *env = &cpu->env; 571 hwaddr hash, ptex; 572 uint64_t vsid, epnmask, epn, ptem; 573 const PPCHash64SegmentPageSizes *sps = slb->sps; 574 575 /* The SLB store path should prevent any bad page size encodings 576 * getting in there, so: */ 577 assert(sps); 578 579 /* If ISL is set in LPCR we need to clamp the page size to 4K */ 580 if (env->spr[SPR_LPCR] & LPCR_ISL) { 581 /* We assume that when using TCG, 4k is first entry of SPS */ 582 sps = &cpu->hash64_opts->sps[0]; 583 assert(sps->page_shift == 12); 584 } 585 586 epnmask = ~((1ULL << sps->page_shift) - 1); 587 588 if (slb->vsid & SLB_VSID_B) { 589 /* 1TB segment */ 590 vsid = (slb->vsid & SLB_VSID_VSID) >> SLB_VSID_SHIFT_1T; 591 epn = (eaddr & ~SEGMENT_MASK_1T) & epnmask; 592 hash = vsid ^ (vsid << 25) ^ (epn >> sps->page_shift); 593 } else { 594 /* 256M segment */ 595 vsid = (slb->vsid & SLB_VSID_VSID) >> SLB_VSID_SHIFT; 596 epn = (eaddr & ~SEGMENT_MASK_256M) & epnmask; 597 hash = vsid ^ (epn >> sps->page_shift); 598 } 599 ptem = (slb->vsid & SLB_VSID_PTEM) | ((epn >> 16) & HPTE64_V_AVPN); 600 ptem |= HPTE64_V_VALID; 601 602 /* Page address translation */ 603 qemu_log_mask(CPU_LOG_MMU, 604 "htab_base " TARGET_FMT_plx " htab_mask " TARGET_FMT_plx 605 " hash " TARGET_FMT_plx "\n", 606 ppc_hash64_hpt_base(cpu), ppc_hash64_hpt_mask(cpu), hash); 607 608 /* Primary PTEG lookup */ 609 qemu_log_mask(CPU_LOG_MMU, 610 "0 htab=" TARGET_FMT_plx "/" TARGET_FMT_plx 611 " vsid=" TARGET_FMT_lx " ptem=" TARGET_FMT_lx 612 " hash=" TARGET_FMT_plx "\n", 613 ppc_hash64_hpt_base(cpu), ppc_hash64_hpt_mask(cpu), 614 vsid, ptem, hash); 615 ptex = ppc_hash64_pteg_search(cpu, hash, sps, ptem, pte, pshift); 616 617 if (ptex == -1) { 618 /* Secondary PTEG lookup */ 619 ptem |= HPTE64_V_SECONDARY; 620 qemu_log_mask(CPU_LOG_MMU, 621 "1 htab=" TARGET_FMT_plx "/" TARGET_FMT_plx 622 " vsid=" TARGET_FMT_lx " api=" TARGET_FMT_lx 623 " hash=" TARGET_FMT_plx "\n", ppc_hash64_hpt_base(cpu), 624 ppc_hash64_hpt_mask(cpu), vsid, ptem, ~hash); 625 626 ptex = ppc_hash64_pteg_search(cpu, ~hash, sps, ptem, pte, pshift); 627 } 628 629 return ptex; 630 } 631 632 unsigned ppc_hash64_hpte_page_shift_noslb(PowerPCCPU *cpu, 633 uint64_t pte0, uint64_t pte1) 634 { 635 int i; 636 637 if (!(pte0 & HPTE64_V_LARGE)) { 638 return 12; 639 } 640 641 /* 642 * The encodings in env->sps need to be carefully chosen so that 643 * this gives an unambiguous result. 644 */ 645 for (i = 0; i < PPC_PAGE_SIZES_MAX_SZ; i++) { 646 const PPCHash64SegmentPageSizes *sps = &cpu->hash64_opts->sps[i]; 647 unsigned shift; 648 649 if (!sps->page_shift) { 650 break; 651 } 652 653 shift = hpte_page_shift(sps, pte0, pte1); 654 if (shift) { 655 return shift; 656 } 657 } 658 659 return 0; 660 } 661 662 static void ppc_hash64_set_isi(CPUState *cs, uint64_t error_code) 663 { 664 CPUPPCState *env = &POWERPC_CPU(cs)->env; 665 bool vpm; 666 667 if (msr_ir) { 668 vpm = !!(env->spr[SPR_LPCR] & LPCR_VPM1); 669 } else { 670 switch (env->mmu_model) { 671 case POWERPC_MMU_3_00: 672 /* Field deprecated in ISAv3.00 - interrupts always go to hyperv */ 673 vpm = true; 674 break; 675 default: 676 vpm = !!(env->spr[SPR_LPCR] & LPCR_VPM0); 677 break; 678 } 679 } 680 if (vpm && !msr_hv) { 681 cs->exception_index = POWERPC_EXCP_HISI; 682 } else { 683 cs->exception_index = POWERPC_EXCP_ISI; 684 } 685 env->error_code = error_code; 686 } 687 688 static void ppc_hash64_set_dsi(CPUState *cs, uint64_t dar, uint64_t dsisr) 689 { 690 CPUPPCState *env = &POWERPC_CPU(cs)->env; 691 bool vpm; 692 693 if (msr_dr) { 694 vpm = !!(env->spr[SPR_LPCR] & LPCR_VPM1); 695 } else { 696 switch (env->mmu_model) { 697 case POWERPC_MMU_3_00: 698 /* Field deprecated in ISAv3.00 - interrupts always go to hyperv */ 699 vpm = true; 700 break; 701 default: 702 vpm = !!(env->spr[SPR_LPCR] & LPCR_VPM0); 703 break; 704 } 705 } 706 if (vpm && !msr_hv) { 707 cs->exception_index = POWERPC_EXCP_HDSI; 708 env->spr[SPR_HDAR] = dar; 709 env->spr[SPR_HDSISR] = dsisr; 710 } else { 711 cs->exception_index = POWERPC_EXCP_DSI; 712 env->spr[SPR_DAR] = dar; 713 env->spr[SPR_DSISR] = dsisr; 714 } 715 env->error_code = 0; 716 } 717 718 719 int ppc_hash64_handle_mmu_fault(PowerPCCPU *cpu, vaddr eaddr, 720 int rwx, int mmu_idx) 721 { 722 CPUState *cs = CPU(cpu); 723 CPUPPCState *env = &cpu->env; 724 ppc_slb_t *slb; 725 unsigned apshift; 726 hwaddr ptex; 727 ppc_hash_pte64_t pte; 728 int exec_prot, pp_prot, amr_prot, prot; 729 uint64_t new_pte1; 730 const int need_prot[] = {PAGE_READ, PAGE_WRITE, PAGE_EXEC}; 731 hwaddr raddr; 732 733 assert((rwx == 0) || (rwx == 1) || (rwx == 2)); 734 735 /* Note on LPCR usage: 970 uses HID4, but our special variant 736 * of store_spr copies relevant fields into env->spr[SPR_LPCR]. 737 * Similarily we filter unimplemented bits when storing into 738 * LPCR depending on the MMU version. This code can thus just 739 * use the LPCR "as-is". 740 */ 741 742 /* 1. Handle real mode accesses */ 743 if (((rwx == 2) && (msr_ir == 0)) || ((rwx != 2) && (msr_dr == 0))) { 744 /* Translation is supposedly "off" */ 745 /* In real mode the top 4 effective address bits are (mostly) ignored */ 746 raddr = eaddr & 0x0FFFFFFFFFFFFFFFULL; 747 748 /* In HV mode, add HRMOR if top EA bit is clear */ 749 if (msr_hv || !env->has_hv_mode) { 750 if (!(eaddr >> 63)) { 751 raddr |= env->spr[SPR_HRMOR]; 752 } 753 } else { 754 /* Otherwise, check VPM for RMA vs VRMA */ 755 if (env->spr[SPR_LPCR] & LPCR_VPM0) { 756 slb = &env->vrma_slb; 757 if (slb->sps) { 758 goto skip_slb_search; 759 } 760 /* Not much else to do here */ 761 cs->exception_index = POWERPC_EXCP_MCHECK; 762 env->error_code = 0; 763 return 1; 764 } else if (raddr < env->rmls) { 765 /* RMA. Check bounds in RMLS */ 766 raddr |= env->spr[SPR_RMOR]; 767 } else { 768 /* The access failed, generate the approriate interrupt */ 769 if (rwx == 2) { 770 ppc_hash64_set_isi(cs, SRR1_PROTFAULT); 771 } else { 772 int dsisr = DSISR_PROTFAULT; 773 if (rwx == 1) { 774 dsisr |= DSISR_ISSTORE; 775 } 776 ppc_hash64_set_dsi(cs, eaddr, dsisr); 777 } 778 return 1; 779 } 780 } 781 tlb_set_page(cs, eaddr & TARGET_PAGE_MASK, raddr & TARGET_PAGE_MASK, 782 PAGE_READ | PAGE_WRITE | PAGE_EXEC, mmu_idx, 783 TARGET_PAGE_SIZE); 784 return 0; 785 } 786 787 /* 2. Translation is on, so look up the SLB */ 788 slb = slb_lookup(cpu, eaddr); 789 if (!slb) { 790 /* No entry found, check if in-memory segment tables are in use */ 791 if (ppc64_use_proc_tbl(cpu)) { 792 /* TODO - Unsupported */ 793 error_report("Segment Table Support Unimplemented"); 794 exit(1); 795 } 796 /* Segment still not found, generate the appropriate interrupt */ 797 if (rwx == 2) { 798 cs->exception_index = POWERPC_EXCP_ISEG; 799 env->error_code = 0; 800 } else { 801 cs->exception_index = POWERPC_EXCP_DSEG; 802 env->error_code = 0; 803 env->spr[SPR_DAR] = eaddr; 804 } 805 return 1; 806 } 807 808 skip_slb_search: 809 810 /* 3. Check for segment level no-execute violation */ 811 if ((rwx == 2) && (slb->vsid & SLB_VSID_N)) { 812 ppc_hash64_set_isi(cs, SRR1_NOEXEC_GUARD); 813 return 1; 814 } 815 816 /* 4. Locate the PTE in the hash table */ 817 ptex = ppc_hash64_htab_lookup(cpu, slb, eaddr, &pte, &apshift); 818 if (ptex == -1) { 819 if (rwx == 2) { 820 ppc_hash64_set_isi(cs, SRR1_NOPTE); 821 } else { 822 int dsisr = DSISR_NOPTE; 823 if (rwx == 1) { 824 dsisr |= DSISR_ISSTORE; 825 } 826 ppc_hash64_set_dsi(cs, eaddr, dsisr); 827 } 828 return 1; 829 } 830 qemu_log_mask(CPU_LOG_MMU, 831 "found PTE at index %08" HWADDR_PRIx "\n", ptex); 832 833 /* 5. Check access permissions */ 834 835 exec_prot = ppc_hash64_pte_noexec_guard(cpu, pte); 836 pp_prot = ppc_hash64_pte_prot(cpu, slb, pte); 837 amr_prot = ppc_hash64_amr_prot(cpu, pte); 838 prot = exec_prot & pp_prot & amr_prot; 839 840 if ((need_prot[rwx] & ~prot) != 0) { 841 /* Access right violation */ 842 qemu_log_mask(CPU_LOG_MMU, "PTE access rejected\n"); 843 if (rwx == 2) { 844 int srr1 = 0; 845 if (PAGE_EXEC & ~exec_prot) { 846 srr1 |= SRR1_NOEXEC_GUARD; /* Access violates noexec or guard */ 847 } else if (PAGE_EXEC & ~pp_prot) { 848 srr1 |= SRR1_PROTFAULT; /* Access violates access authority */ 849 } 850 if (PAGE_EXEC & ~amr_prot) { 851 srr1 |= SRR1_IAMR; /* Access violates virt pg class key prot */ 852 } 853 ppc_hash64_set_isi(cs, srr1); 854 } else { 855 int dsisr = 0; 856 if (need_prot[rwx] & ~pp_prot) { 857 dsisr |= DSISR_PROTFAULT; 858 } 859 if (rwx == 1) { 860 dsisr |= DSISR_ISSTORE; 861 } 862 if (need_prot[rwx] & ~amr_prot) { 863 dsisr |= DSISR_AMR; 864 } 865 ppc_hash64_set_dsi(cs, eaddr, dsisr); 866 } 867 return 1; 868 } 869 870 qemu_log_mask(CPU_LOG_MMU, "PTE access granted !\n"); 871 872 /* 6. Update PTE referenced and changed bits if necessary */ 873 874 new_pte1 = pte.pte1 | HPTE64_R_R; /* set referenced bit */ 875 if (rwx == 1) { 876 new_pte1 |= HPTE64_R_C; /* set changed (dirty) bit */ 877 } else { 878 /* Treat the page as read-only for now, so that a later write 879 * will pass through this function again to set the C bit */ 880 prot &= ~PAGE_WRITE; 881 } 882 883 if (new_pte1 != pte.pte1) { 884 ppc_hash64_store_hpte(cpu, ptex, pte.pte0, new_pte1); 885 } 886 887 /* 7. Determine the real address from the PTE */ 888 889 raddr = deposit64(pte.pte1 & HPTE64_R_RPN, 0, apshift, eaddr); 890 891 tlb_set_page(cs, eaddr & TARGET_PAGE_MASK, raddr & TARGET_PAGE_MASK, 892 prot, mmu_idx, 1ULL << apshift); 893 894 return 0; 895 } 896 897 hwaddr ppc_hash64_get_phys_page_debug(PowerPCCPU *cpu, target_ulong addr) 898 { 899 CPUPPCState *env = &cpu->env; 900 ppc_slb_t *slb; 901 hwaddr ptex, raddr; 902 ppc_hash_pte64_t pte; 903 unsigned apshift; 904 905 /* Handle real mode */ 906 if (msr_dr == 0) { 907 /* In real mode the top 4 effective address bits are ignored */ 908 raddr = addr & 0x0FFFFFFFFFFFFFFFULL; 909 910 /* In HV mode, add HRMOR if top EA bit is clear */ 911 if ((msr_hv || !env->has_hv_mode) && !(addr >> 63)) { 912 return raddr | env->spr[SPR_HRMOR]; 913 } 914 915 /* Otherwise, check VPM for RMA vs VRMA */ 916 if (env->spr[SPR_LPCR] & LPCR_VPM0) { 917 slb = &env->vrma_slb; 918 if (!slb->sps) { 919 return -1; 920 } 921 } else if (raddr < env->rmls) { 922 /* RMA. Check bounds in RMLS */ 923 return raddr | env->spr[SPR_RMOR]; 924 } else { 925 return -1; 926 } 927 } else { 928 slb = slb_lookup(cpu, addr); 929 if (!slb) { 930 return -1; 931 } 932 } 933 934 ptex = ppc_hash64_htab_lookup(cpu, slb, addr, &pte, &apshift); 935 if (ptex == -1) { 936 return -1; 937 } 938 939 return deposit64(pte.pte1 & HPTE64_R_RPN, 0, apshift, addr) 940 & TARGET_PAGE_MASK; 941 } 942 943 void ppc_hash64_store_hpte(PowerPCCPU *cpu, hwaddr ptex, 944 uint64_t pte0, uint64_t pte1) 945 { 946 hwaddr base; 947 hwaddr offset = ptex * HASH_PTE_SIZE_64; 948 949 if (cpu->vhyp) { 950 PPCVirtualHypervisorClass *vhc = 951 PPC_VIRTUAL_HYPERVISOR_GET_CLASS(cpu->vhyp); 952 vhc->store_hpte(cpu->vhyp, ptex, pte0, pte1); 953 return; 954 } 955 base = ppc_hash64_hpt_base(cpu); 956 957 stq_phys(CPU(cpu)->as, base + offset, pte0); 958 stq_phys(CPU(cpu)->as, base + offset + HASH_PTE_SIZE_64 / 2, pte1); 959 } 960 961 void ppc_hash64_tlb_flush_hpte(PowerPCCPU *cpu, target_ulong ptex, 962 target_ulong pte0, target_ulong pte1) 963 { 964 /* 965 * XXX: given the fact that there are too many segments to 966 * invalidate, and we still don't have a tlb_flush_mask(env, n, 967 * mask) in QEMU, we just invalidate all TLBs 968 */ 969 cpu->env.tlb_need_flush = TLB_NEED_GLOBAL_FLUSH | TLB_NEED_LOCAL_FLUSH; 970 } 971 972 static void ppc_hash64_update_rmls(PowerPCCPU *cpu) 973 { 974 CPUPPCState *env = &cpu->env; 975 uint64_t lpcr = env->spr[SPR_LPCR]; 976 977 /* 978 * This is the full 4 bits encoding of POWER8. Previous 979 * CPUs only support a subset of these but the filtering 980 * is done when writing LPCR 981 */ 982 switch ((lpcr & LPCR_RMLS) >> LPCR_RMLS_SHIFT) { 983 case 0x8: /* 32MB */ 984 env->rmls = 0x2000000ull; 985 break; 986 case 0x3: /* 64MB */ 987 env->rmls = 0x4000000ull; 988 break; 989 case 0x7: /* 128MB */ 990 env->rmls = 0x8000000ull; 991 break; 992 case 0x4: /* 256MB */ 993 env->rmls = 0x10000000ull; 994 break; 995 case 0x2: /* 1GB */ 996 env->rmls = 0x40000000ull; 997 break; 998 case 0x1: /* 16GB */ 999 env->rmls = 0x400000000ull; 1000 break; 1001 default: 1002 /* What to do here ??? */ 1003 env->rmls = 0; 1004 } 1005 } 1006 1007 static void ppc_hash64_update_vrma(PowerPCCPU *cpu) 1008 { 1009 CPUPPCState *env = &cpu->env; 1010 const PPCHash64SegmentPageSizes *sps = NULL; 1011 target_ulong esid, vsid, lpcr; 1012 ppc_slb_t *slb = &env->vrma_slb; 1013 uint32_t vrmasd; 1014 int i; 1015 1016 /* First clear it */ 1017 slb->esid = slb->vsid = 0; 1018 slb->sps = NULL; 1019 1020 /* Is VRMA enabled ? */ 1021 lpcr = env->spr[SPR_LPCR]; 1022 if (!(lpcr & LPCR_VPM0)) { 1023 return; 1024 } 1025 1026 /* Make one up. Mostly ignore the ESID which will not be 1027 * needed for translation 1028 */ 1029 vsid = SLB_VSID_VRMA; 1030 vrmasd = (lpcr & LPCR_VRMASD) >> LPCR_VRMASD_SHIFT; 1031 vsid |= (vrmasd << 4) & (SLB_VSID_L | SLB_VSID_LP); 1032 esid = SLB_ESID_V; 1033 1034 for (i = 0; i < PPC_PAGE_SIZES_MAX_SZ; i++) { 1035 const PPCHash64SegmentPageSizes *sps1 = &cpu->hash64_opts->sps[i]; 1036 1037 if (!sps1->page_shift) { 1038 break; 1039 } 1040 1041 if ((vsid & SLB_VSID_LLP_MASK) == sps1->slb_enc) { 1042 sps = sps1; 1043 break; 1044 } 1045 } 1046 1047 if (!sps) { 1048 error_report("Bad page size encoding esid 0x"TARGET_FMT_lx 1049 " vsid 0x"TARGET_FMT_lx, esid, vsid); 1050 return; 1051 } 1052 1053 slb->vsid = vsid; 1054 slb->esid = esid; 1055 slb->sps = sps; 1056 } 1057 1058 void ppc_store_lpcr(PowerPCCPU *cpu, target_ulong val) 1059 { 1060 CPUPPCState *env = &cpu->env; 1061 uint64_t lpcr = 0; 1062 1063 /* Filter out bits */ 1064 switch (env->mmu_model) { 1065 case POWERPC_MMU_64B: /* 970 */ 1066 if (val & 0x40) { 1067 lpcr |= LPCR_LPES0; 1068 } 1069 if (val & 0x8000000000000000ull) { 1070 lpcr |= LPCR_LPES1; 1071 } 1072 if (val & 0x20) { 1073 lpcr |= (0x4ull << LPCR_RMLS_SHIFT); 1074 } 1075 if (val & 0x4000000000000000ull) { 1076 lpcr |= (0x2ull << LPCR_RMLS_SHIFT); 1077 } 1078 if (val & 0x2000000000000000ull) { 1079 lpcr |= (0x1ull << LPCR_RMLS_SHIFT); 1080 } 1081 env->spr[SPR_RMOR] = ((lpcr >> 41) & 0xffffull) << 26; 1082 1083 /* XXX We could also write LPID from HID4 here 1084 * but since we don't tag any translation on it 1085 * it doesn't actually matter 1086 */ 1087 /* XXX For proper emulation of 970 we also need 1088 * to dig HRMOR out of HID5 1089 */ 1090 break; 1091 case POWERPC_MMU_2_03: /* P5p */ 1092 lpcr = val & (LPCR_RMLS | LPCR_ILE | 1093 LPCR_LPES0 | LPCR_LPES1 | 1094 LPCR_RMI | LPCR_HDICE); 1095 break; 1096 case POWERPC_MMU_2_06: /* P7 */ 1097 lpcr = val & (LPCR_VPM0 | LPCR_VPM1 | LPCR_ISL | LPCR_DPFD | 1098 LPCR_VRMASD | LPCR_RMLS | LPCR_ILE | 1099 LPCR_P7_PECE0 | LPCR_P7_PECE1 | LPCR_P7_PECE2 | 1100 LPCR_MER | LPCR_TC | 1101 LPCR_LPES0 | LPCR_LPES1 | LPCR_HDICE); 1102 break; 1103 case POWERPC_MMU_2_07: /* P8 */ 1104 lpcr = val & (LPCR_VPM0 | LPCR_VPM1 | LPCR_ISL | LPCR_KBV | 1105 LPCR_DPFD | LPCR_VRMASD | LPCR_RMLS | LPCR_ILE | 1106 LPCR_AIL | LPCR_ONL | LPCR_P8_PECE0 | LPCR_P8_PECE1 | 1107 LPCR_P8_PECE2 | LPCR_P8_PECE3 | LPCR_P8_PECE4 | 1108 LPCR_MER | LPCR_TC | LPCR_LPES0 | LPCR_HDICE); 1109 break; 1110 case POWERPC_MMU_3_00: /* P9 */ 1111 lpcr = val & (LPCR_VPM1 | LPCR_ISL | LPCR_KBV | LPCR_DPFD | 1112 (LPCR_PECE_U_MASK & LPCR_HVEE) | LPCR_ILE | LPCR_AIL | 1113 LPCR_UPRT | LPCR_EVIRT | LPCR_ONL | LPCR_HR | LPCR_LD | 1114 (LPCR_PECE_L_MASK & (LPCR_PDEE | LPCR_HDEE | LPCR_EEE | 1115 LPCR_DEE | LPCR_OEE)) | LPCR_MER | LPCR_GTSE | LPCR_TC | 1116 LPCR_HEIC | LPCR_LPES0 | LPCR_HVICE | LPCR_HDICE); 1117 /* 1118 * If we have a virtual hypervisor, we need to bring back RMLS. It 1119 * doesn't exist on an actual P9 but that's all we know how to 1120 * configure with softmmu at the moment 1121 */ 1122 if (cpu->vhyp) { 1123 lpcr |= (val & LPCR_RMLS); 1124 } 1125 break; 1126 default: 1127 ; 1128 } 1129 env->spr[SPR_LPCR] = lpcr; 1130 ppc_hash64_update_rmls(cpu); 1131 ppc_hash64_update_vrma(cpu); 1132 } 1133 1134 void helper_store_lpcr(CPUPPCState *env, target_ulong val) 1135 { 1136 PowerPCCPU *cpu = ppc_env_get_cpu(env); 1137 1138 ppc_store_lpcr(cpu, val); 1139 } 1140 1141 void ppc_hash64_init(PowerPCCPU *cpu) 1142 { 1143 CPUPPCState *env = &cpu->env; 1144 PowerPCCPUClass *pcc = POWERPC_CPU_GET_CLASS(cpu); 1145 1146 if (!pcc->hash64_opts) { 1147 assert(!(env->mmu_model & POWERPC_MMU_64)); 1148 return; 1149 } 1150 1151 cpu->hash64_opts = g_memdup(pcc->hash64_opts, sizeof(*cpu->hash64_opts)); 1152 } 1153 1154 void ppc_hash64_finalize(PowerPCCPU *cpu) 1155 { 1156 g_free(cpu->hash64_opts); 1157 } 1158 1159 const PPCHash64Options ppc_hash64_opts_basic = { 1160 .flags = 0, 1161 .slb_size = 64, 1162 .sps = { 1163 { .page_shift = 12, /* 4K */ 1164 .slb_enc = 0, 1165 .enc = { { .page_shift = 12, .pte_enc = 0 } } 1166 }, 1167 { .page_shift = 24, /* 16M */ 1168 .slb_enc = 0x100, 1169 .enc = { { .page_shift = 24, .pte_enc = 0 } } 1170 }, 1171 }, 1172 }; 1173 1174 const PPCHash64Options ppc_hash64_opts_POWER7 = { 1175 .flags = PPC_HASH64_1TSEG | PPC_HASH64_AMR | PPC_HASH64_CI_LARGEPAGE, 1176 .slb_size = 32, 1177 .sps = { 1178 { 1179 .page_shift = 12, /* 4K */ 1180 .slb_enc = 0, 1181 .enc = { { .page_shift = 12, .pte_enc = 0 }, 1182 { .page_shift = 16, .pte_enc = 0x7 }, 1183 { .page_shift = 24, .pte_enc = 0x38 }, }, 1184 }, 1185 { 1186 .page_shift = 16, /* 64K */ 1187 .slb_enc = SLB_VSID_64K, 1188 .enc = { { .page_shift = 16, .pte_enc = 0x1 }, 1189 { .page_shift = 24, .pte_enc = 0x8 }, }, 1190 }, 1191 { 1192 .page_shift = 24, /* 16M */ 1193 .slb_enc = SLB_VSID_16M, 1194 .enc = { { .page_shift = 24, .pte_enc = 0 }, }, 1195 }, 1196 { 1197 .page_shift = 34, /* 16G */ 1198 .slb_enc = SLB_VSID_16G, 1199 .enc = { { .page_shift = 34, .pte_enc = 0x3 }, }, 1200 }, 1201 } 1202 }; 1203 1204 void ppc_hash64_filter_pagesizes(PowerPCCPU *cpu, 1205 bool (*cb)(void *, uint32_t, uint32_t), 1206 void *opaque) 1207 { 1208 PPCHash64Options *opts = cpu->hash64_opts; 1209 int i; 1210 int n = 0; 1211 bool ci_largepage = false; 1212 1213 assert(opts); 1214 1215 n = 0; 1216 for (i = 0; i < ARRAY_SIZE(opts->sps); i++) { 1217 PPCHash64SegmentPageSizes *sps = &opts->sps[i]; 1218 int j; 1219 int m = 0; 1220 1221 assert(n <= i); 1222 1223 if (!sps->page_shift) { 1224 break; 1225 } 1226 1227 for (j = 0; j < ARRAY_SIZE(sps->enc); j++) { 1228 PPCHash64PageSize *ps = &sps->enc[j]; 1229 1230 assert(m <= j); 1231 if (!ps->page_shift) { 1232 break; 1233 } 1234 1235 if (cb(opaque, sps->page_shift, ps->page_shift)) { 1236 if (ps->page_shift >= 16) { 1237 ci_largepage = true; 1238 } 1239 sps->enc[m++] = *ps; 1240 } 1241 } 1242 1243 /* Clear rest of the row */ 1244 for (j = m; j < ARRAY_SIZE(sps->enc); j++) { 1245 memset(&sps->enc[j], 0, sizeof(sps->enc[j])); 1246 } 1247 1248 if (m) { 1249 n++; 1250 } 1251 } 1252 1253 /* Clear the rest of the table */ 1254 for (i = n; i < ARRAY_SIZE(opts->sps); i++) { 1255 memset(&opts->sps[i], 0, sizeof(opts->sps[i])); 1256 } 1257 1258 if (!ci_largepage) { 1259 opts->flags &= ~PPC_HASH64_CI_LARGEPAGE; 1260 } 1261 } 1262