// SPDX-License-Identifier: GPL-2.0-or-later
/*
 * native hashtable management.
 *
 * SMP scalability work:
 *    Copyright (C) 2001 Anton Blanchard <anton@au.ibm.com>, IBM
 */

#undef DEBUG_LOW

#include <linux/spinlock.h>
#include <linux/bitops.h>
#include <linux/of.h>
#include <linux/processor.h>
#include <linux/threads.h>
#include <linux/smp.h>
#include <linux/pgtable.h>

#include <asm/machdep.h>
#include <asm/mmu.h>
#include <asm/mmu_context.h>
#include <asm/trace.h>
#include <asm/tlb.h>
#include <asm/cputable.h>
#include <asm/udbg.h>
#include <asm/kexec.h>
#include <asm/ppc-opcode.h>
#include <asm/feature-fixups.h>

#include <misc/cxl-base.h>

#ifdef DEBUG_LOW
#define DBG_LOW(fmt...) udbg_printf(fmt)
#else
#define DBG_LOW(fmt...)
#endif

#ifdef __BIG_ENDIAN__
#define HPTE_LOCK_BIT 3
#else
#define HPTE_LOCK_BIT (56+3)
#endif

static DEFINE_RAW_SPINLOCK(native_tlbie_lock);

#ifdef CONFIG_LOCKDEP
static struct lockdep_map hpte_lock_map =
	STATIC_LOCKDEP_MAP_INIT("hpte_lock", &hpte_lock_map);

static void acquire_hpte_lock(void)
{
	lock_map_acquire(&hpte_lock_map);
}

static void release_hpte_lock(void)
{
	lock_map_release(&hpte_lock_map);
}
#else
static void acquire_hpte_lock(void)
{
}

static void release_hpte_lock(void)
{
}
#endif

static inline unsigned long ___tlbie(unsigned long vpn, int psize,
				     int apsize, int ssize)
{
	unsigned long va;
	unsigned int penc;
	unsigned long sllp;

	/*
	 * We need 14 to 65 bits of va for a tlbie of a 4K page.
	 * With vpn we ignore the lower VPN_SHIFT bits already.
	 * And the top two bits are already ignored because we can
	 * only accommodate 76 bits in a 64 bit vpn with a VPN_SHIFT
	 * of 12.
	 */
	va = vpn << VPN_SHIFT;
	/*
	 * clear top 16 bits of 64bit va, non SLS segment
	 * Older versions of the architecture (2.02 and earlier) require the
	 * masking of the top 16 bits.
	 */
	if (mmu_has_feature(MMU_FTR_TLBIE_CROP_VA))
		va &= ~(0xffffULL << 48);

	switch (psize) {
	case MMU_PAGE_4K:
		/* clear out bits after (52) [0....52.....63] */
		va &= ~((1ul << (64 - 52)) - 1);
		va |= ssize << 8;
		sllp = get_sllp_encoding(apsize);
		va |= sllp << 5;
		asm volatile(ASM_FTR_IFCLR("tlbie %0,0", PPC_TLBIE(%1,%0), %2)
			     : : "r" (va), "r"(0), "i" (CPU_FTR_ARCH_206)
			     : "memory");
		break;
	default:
		/* We need 14 to 14 + i bits of va */
		penc = mmu_psize_defs[psize].penc[apsize];
		va &= ~((1ul << mmu_psize_defs[apsize].shift) - 1);
		va |= penc << 12;
		va |= ssize << 8;
		/*
		 * AVAL bits:
		 * We don't need all the bits, but the rest of the bits
		 * must be ignored by the processor.
		 * vpn covers up to 65 bits of va (0...65) and we need
		 * bits 58..64 of the va.
		 */
		va |= (vpn & 0xfe); /* AVAL */
		va |= 1; /* L */
		asm volatile(ASM_FTR_IFCLR("tlbie %0,1", PPC_TLBIE(%1,%0), %2)
			     : : "r" (va), "r"(0), "i" (CPU_FTR_ARCH_206)
			     : "memory");
		break;
	}
	return va;
}

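/*
 * Work around POWER9 tlbie errata (CPU_FTR_P9_TLBIE_ERAT_BUG and
 * CPU_FTR_P9_TLBIE_STQ_BUG) by issuing additional invalidations after
 * the original tlbie for the same vpn.
 */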
static inline void fixup_tlbie_vpn(unsigned long vpn, int psize,
				   int apsize, int ssize)
{
	if (cpu_has_feature(CPU_FTR_P9_TLBIE_ERAT_BUG)) {
		/* Radix flush for a hash guest */

		unsigned long rb, rs, prs, r, ric;

		rb = PPC_BIT(52); /* IS = 2 */
		rs = 0;		  /* lpid = 0 */
		prs = 0;	  /* partition scoped */
		r = 1;		  /* radix format */
		ric = 0;	  /* RIC_FLUSH_TLB */

		/*
		 * Need the extra ptesync to make sure we don't
		 * re-order the tlbie
		 */
		asm volatile("ptesync": : :"memory");
		asm volatile(PPC_TLBIE_5(%0, %4, %3, %2, %1)
			     : : "r"(rb), "i"(r), "i"(prs),
			       "i"(ric), "r"(rs) : "memory");
	}

	if (cpu_has_feature(CPU_FTR_P9_TLBIE_STQ_BUG)) {
		/* Need the extra ptesync to ensure we don't reorder tlbie */
		asm volatile("ptesync": : :"memory");
		___tlbie(vpn, psize, apsize, ssize);
	}
}

static inline void __tlbie(unsigned long vpn, int psize, int apsize, int ssize)
{
	unsigned long rb;

	rb = ___tlbie(vpn, psize, apsize, ssize);
	trace_tlbie(0, 0, rb, 0, 0, 0, 0);
}

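/*
 * Local (processor-scoped) form of the invalidation: tlbiel only
 * invalidates the TLB of the executing CPU, so no global lock or
 * tlbsync is required.
 */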
static inline void __tlbiel(unsigned long vpn, int psize, int apsize, int ssize)
{
	unsigned long va;
	unsigned int penc;
	unsigned long sllp;

	/* VPN_SHIFT can be at most 12 */
	va = vpn << VPN_SHIFT;
	/*
	 * clear top 16 bits of 64 bit va, non SLS segment
	 * Older versions of the architecture (2.02 and earlier) require the
	 * masking of the top 16 bits.
	 */
	if (mmu_has_feature(MMU_FTR_TLBIE_CROP_VA))
		va &= ~(0xffffULL << 48);

	switch (psize) {
	case MMU_PAGE_4K:
		/* clear out bits after (52) [0....52.....63] */
		va &= ~((1ul << (64 - 52)) - 1);
		va |= ssize << 8;
		sllp = get_sllp_encoding(apsize);
		va |= sllp << 5;
		asm volatile(ASM_FTR_IFSET("tlbiel %0", PPC_TLBIEL_v205(%0, 0), %1)
			     : : "r" (va), "i" (CPU_FTR_ARCH_206)
			     : "memory");
		break;
	default:
		/* We need 14 to 14 + i bits of va */
		penc = mmu_psize_defs[psize].penc[apsize];
		va &= ~((1ul << mmu_psize_defs[apsize].shift) - 1);
		va |= penc << 12;
		va |= ssize << 8;
		/*
		 * AVAL bits:
		 * We don't need all the bits, but the rest of the bits
		 * must be ignored by the processor.
		 * vpn covers up to 65 bits of va (0...65) and we need
		 * bits 58..64 of the va.
		 */
		va |= (vpn & 0xfe);
		va |= 1; /* L */
		asm volatile(ASM_FTR_IFSET("tlbiel %0", PPC_TLBIEL_v205(%0, 1), %1)
			     : : "r" (va), "i" (CPU_FTR_ARCH_206)
			     : "memory");
		break;
	}
	trace_tlbie(0, 1, va, 0, 0, 0, 0);
}

static inline void tlbie(unsigned long vpn, int psize, int apsize,
			 int ssize, int local)
{
	unsigned int use_local;
	int lock_tlbie = !mmu_has_feature(MMU_FTR_LOCKLESS_TLBIE);

	use_local = local && mmu_has_feature(MMU_FTR_TLBIEL) && !cxl_ctx_in_use();

	if (use_local)
		use_local = mmu_psize_defs[psize].tlbiel;
	if (lock_tlbie && !use_local)
		raw_spin_lock(&native_tlbie_lock);
	asm volatile("ptesync": : :"memory");
	if (use_local) {
		__tlbiel(vpn, psize, apsize, ssize);
		ppc_after_tlbiel_barrier();
	} else {
		__tlbie(vpn, psize, apsize, ssize);
		fixup_tlbie_vpn(vpn, psize, apsize, ssize);
		asm volatile("eieio; tlbsync; ptesync": : :"memory");
	}
	if (lock_tlbie && !use_local)
		raw_spin_unlock(&native_tlbie_lock);
}

static inline void native_lock_hpte(struct hash_pte *hptep)
{
	unsigned long *word = (unsigned long *)&hptep->v;

	acquire_hpte_lock();
	while (1) {
		if (!test_and_set_bit_lock(HPTE_LOCK_BIT, word))
			break;
		spin_begin();
		while (test_bit(HPTE_LOCK_BIT, word))
			spin_cpu_relax();
		spin_end();
	}
}

static inline void native_unlock_hpte(struct hash_pte *hptep)
{
	unsigned long *word = (unsigned long *)&hptep->v;

	release_hpte_lock();
	clear_bit_unlock(HPTE_LOCK_BIT, word);
}

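/*
 * Try to insert an HPTE for vpn into the group starting at hpte_group.
 * Returns the slot index within the group (with bit 3 set when the entry
 * was inserted via the secondary hash, i.e. HPTE_V_SECONDARY in vflags),
 * or -1 if all eight slots in the group are already valid.
 */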
static long native_hpte_insert(unsigned long hpte_group, unsigned long vpn,
			       unsigned long pa, unsigned long rflags,
			       unsigned long vflags, int psize, int apsize, int ssize)
{
	struct hash_pte *hptep = htab_address + hpte_group;
	unsigned long hpte_v, hpte_r;
	unsigned long flags;
	int i;

	local_irq_save(flags);

	if (!(vflags & HPTE_V_BOLTED)) {
		DBG_LOW(" insert(group=%lx, vpn=%016lx, pa=%016lx,"
			" rflags=%lx, vflags=%lx, psize=%d)\n",
			hpte_group, vpn, pa, rflags, vflags, psize);
	}

	for (i = 0; i < HPTES_PER_GROUP; i++) {
		if (!(be64_to_cpu(hptep->v) & HPTE_V_VALID)) {
			/* retry with lock held */
			native_lock_hpte(hptep);
			if (!(be64_to_cpu(hptep->v) & HPTE_V_VALID))
				break;
			native_unlock_hpte(hptep);
		}

		hptep++;
	}

	if (i == HPTES_PER_GROUP) {
		local_irq_restore(flags);
		return -1;
	}

	hpte_v = hpte_encode_v(vpn, psize, apsize, ssize) | vflags | HPTE_V_VALID;
	hpte_r = hpte_encode_r(pa, psize, apsize) | rflags;

	if (!(vflags & HPTE_V_BOLTED)) {
		DBG_LOW(" i=%x hpte_v=%016lx, hpte_r=%016lx\n",
			i, hpte_v, hpte_r);
	}

	if (cpu_has_feature(CPU_FTR_ARCH_300)) {
		hpte_r = hpte_old_to_new_r(hpte_v, hpte_r);
		hpte_v = hpte_old_to_new_v(hpte_v);
	}

	hptep->r = cpu_to_be64(hpte_r);
	/* Guarantee the second dword is visible before the valid bit */
	eieio();
	/*
	 * Now set the first dword including the valid bit
	 * NOTE: this also unlocks the hpte
	 */
	release_hpte_lock();
	hptep->v = cpu_to_be64(hpte_v);

	__asm__ __volatile__ ("ptesync" : : : "memory");

	local_irq_restore(flags);

	return i | (!!(vflags & HPTE_V_SECONDARY) << 3);
}

static long native_hpte_remove(unsigned long hpte_group)
{
	struct hash_pte *hptep;
	int i;
	int slot_offset;
	unsigned long hpte_v;

	DBG_LOW(" remove(group=%lx)\n", hpte_group);

	/* pick a random entry to start at */
	slot_offset = mftb() & 0x7;

	for (i = 0; i < HPTES_PER_GROUP; i++) {
		hptep = htab_address + hpte_group + slot_offset;
		hpte_v = be64_to_cpu(hptep->v);

		if ((hpte_v & HPTE_V_VALID) && !(hpte_v & HPTE_V_BOLTED)) {
			/* retry with lock held */
			native_lock_hpte(hptep);
			hpte_v = be64_to_cpu(hptep->v);
			if ((hpte_v & HPTE_V_VALID)
			    && !(hpte_v & HPTE_V_BOLTED))
				break;
			native_unlock_hpte(hptep);
		}

		slot_offset++;
		slot_offset &= 0x7;
	}

	if (i == HPTES_PER_GROUP)
		return -1;

	/* Invalidate the hpte. NOTE: this also unlocks it */
	release_hpte_lock();
	hptep->v = 0;

	return i;
}

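/*
 * Update the protection bits of an existing HPTE for vpn. Returns 0 if a
 * matching valid entry was found and updated, -1 otherwise. The TLB is
 * always invalidated (unless HPTE_NOHPTE_UPDATE is set in flags) because a
 * miss here may mean the entry was evicted by native_hpte_remove(), which
 * does not flush the TLB.
 */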
static long native_hpte_updatepp(unsigned long slot, unsigned long newpp,
				 unsigned long vpn, int bpsize,
				 int apsize, int ssize, unsigned long flags)
{
	struct hash_pte *hptep = htab_address + slot;
	unsigned long hpte_v, want_v;
	int ret = 0, local = 0;
	unsigned long irqflags;

	local_irq_save(irqflags);

	want_v = hpte_encode_avpn(vpn, bpsize, ssize);

	DBG_LOW(" update(vpn=%016lx, avpnv=%016lx, group=%lx, newpp=%lx)",
		vpn, want_v & HPTE_V_AVPN, slot, newpp);

	hpte_v = hpte_get_old_v(hptep);
	/*
	 * We need to invalidate the TLB always because hpte_remove doesn't do
	 * a tlb invalidate. If a hash bucket gets full, we "evict" a more or
	 * less random entry from it. When we do that we don't invalidate the
	 * TLB (hpte_remove) because we assume the old translation is still
	 * technically "valid".
	 */
	if (!HPTE_V_COMPARE(hpte_v, want_v) || !(hpte_v & HPTE_V_VALID)) {
		DBG_LOW(" -> miss\n");
		ret = -1;
	} else {
		native_lock_hpte(hptep);
		/* recheck with locks held */
		hpte_v = hpte_get_old_v(hptep);
		if (unlikely(!HPTE_V_COMPARE(hpte_v, want_v) ||
			     !(hpte_v & HPTE_V_VALID))) {
			ret = -1;
		} else {
			DBG_LOW(" -> hit\n");
			/* Update the HPTE */
			hptep->r = cpu_to_be64((be64_to_cpu(hptep->r) &
						~(HPTE_R_PPP | HPTE_R_N)) |
					       (newpp & (HPTE_R_PPP | HPTE_R_N |
							 HPTE_R_C)));
		}
		native_unlock_hpte(hptep);
	}

	if (flags & HPTE_LOCAL_UPDATE)
		local = 1;
	/*
	 * Ensure it is out of the tlb too if it is not a nohpte fault
	 */
	if (!(flags & HPTE_NOHPTE_UPDATE))
		tlbie(vpn, bpsize, apsize, ssize, local);

	local_irq_restore(irqflags);

	return ret;
}

static long __native_hpte_find(unsigned long want_v, unsigned long slot)
{
	struct hash_pte *hptep;
	unsigned long hpte_v;
	unsigned long i;

	for (i = 0; i < HPTES_PER_GROUP; i++) {

		hptep = htab_address + slot;
		hpte_v = hpte_get_old_v(hptep);
		if (HPTE_V_COMPARE(hpte_v, want_v) && (hpte_v & HPTE_V_VALID))
			/* HPTE matches */
			return slot;
		++slot;
	}

	return -1;
}

static long native_hpte_find(unsigned long vpn, int psize, int ssize)
{
	unsigned long hpte_group;
	unsigned long want_v;
	unsigned long hash;
	long slot;

	hash = hpt_hash(vpn, mmu_psize_defs[psize].shift, ssize);
	want_v = hpte_encode_avpn(vpn, psize, ssize);

	/*
	 * We try to keep bolted entries always in the primary hash,
	 * but in some cases we can find them in the secondary too.
	 */
	hpte_group = (hash & htab_hash_mask) * HPTES_PER_GROUP;
	slot = __native_hpte_find(want_v, hpte_group);
	if (slot < 0) {
		/* Try in secondary */
		hpte_group = (~hash & htab_hash_mask) * HPTES_PER_GROUP;
		slot = __native_hpte_find(want_v, hpte_group);
		if (slot < 0)
			return -1;
	}

	return slot;
}

/*
 * Update the page protection bits. Intended to be used to create
 * guard pages for kernel data structures on pages which are bolted
 * in the HPT. Assumes pages being operated on will not be stolen.
 *
 * No need to lock here because we should be the only user.
 */
static void native_hpte_updateboltedpp(unsigned long newpp, unsigned long ea,
				       int psize, int ssize)
{
	unsigned long vpn;
	unsigned long vsid;
	long slot;
	struct hash_pte *hptep;
	unsigned long flags;

	local_irq_save(flags);

	vsid = get_kernel_vsid(ea, ssize);
	vpn = hpt_vpn(ea, vsid, ssize);

	slot = native_hpte_find(vpn, psize, ssize);
	if (slot == -1)
		panic("could not find page to bolt\n");
	hptep = htab_address + slot;

	/* Update the HPTE */
	hptep->r = cpu_to_be64((be64_to_cpu(hptep->r) &
				~(HPTE_R_PPP | HPTE_R_N)) |
			       (newpp & (HPTE_R_PPP | HPTE_R_N)));
	/*
	 * Ensure it is out of the tlb too. For bolted entries, the base
	 * and actual page size will be the same.
	 */
	tlbie(vpn, psize, psize, ssize, 0);

	local_irq_restore(flags);
}

/*
 * Remove a bolted kernel entry. Memory hotplug uses this.
 *
 * No need to lock here because we should be the only user.
 */
static int native_hpte_removebolted(unsigned long ea, int psize, int ssize)
{
	unsigned long vpn;
	unsigned long vsid;
	long slot;
	struct hash_pte *hptep;
	unsigned long flags;

	local_irq_save(flags);

	vsid = get_kernel_vsid(ea, ssize);
	vpn = hpt_vpn(ea, vsid, ssize);

	slot = native_hpte_find(vpn, psize, ssize);
	if (slot == -1)
		return -ENOENT;

	hptep = htab_address + slot;

	VM_WARN_ON(!(be64_to_cpu(hptep->v) & HPTE_V_BOLTED));

	/* Invalidate the hpte */
	hptep->v = 0;

	/* Invalidate the TLB */
	tlbie(vpn, psize, psize, ssize, 0);

	local_irq_restore(flags);

	return 0;
}

static void native_hpte_invalidate(unsigned long slot, unsigned long vpn,
				   int bpsize, int apsize, int ssize, int local)
{
	struct hash_pte *hptep = htab_address + slot;
	unsigned long hpte_v;
	unsigned long want_v;
	unsigned long flags;

	local_irq_save(flags);

	DBG_LOW(" invalidate(vpn=%016lx, hash: %lx)\n", vpn, slot);

	want_v = hpte_encode_avpn(vpn, bpsize, ssize);
	hpte_v = hpte_get_old_v(hptep);

	if (HPTE_V_COMPARE(hpte_v, want_v) && (hpte_v & HPTE_V_VALID)) {
		native_lock_hpte(hptep);
		/* recheck with locks held */
		hpte_v = hpte_get_old_v(hptep);

		if (HPTE_V_COMPARE(hpte_v, want_v) && (hpte_v & HPTE_V_VALID)) {
			/* Invalidate the hpte. NOTE: this also unlocks it */
			release_hpte_lock();
			hptep->v = 0;
		} else
			native_unlock_hpte(hptep);
	}
	/*
	 * We need to invalidate the TLB always because hpte_remove doesn't do
	 * a tlb invalidate. If a hash bucket gets full, we "evict" a more or
	 * less random entry from it. When we do that we don't invalidate the
	 * TLB (hpte_remove) because we assume the old translation is still
	 * technically "valid".
	 */
	tlbie(vpn, bpsize, apsize, ssize, local);

	local_irq_restore(flags);
}

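/*
 * Invalidate all HPTEs backing a transparent hugepage. A THP hugepage is
 * mapped with up to PMD_SIZE / page-size individual HPTEs; hpte_slot_array
 * records which of them are valid and which hash bucket each one landed
 * in, so we walk that array and invalidate each entry in turn.
 */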
#ifdef CONFIG_TRANSPARENT_HUGEPAGE
static void native_hugepage_invalidate(unsigned long vsid,
				       unsigned long addr,
				       unsigned char *hpte_slot_array,
				       int psize, int ssize, int local)
{
	int i;
	struct hash_pte *hptep;
	int actual_psize = MMU_PAGE_16M;
	unsigned int max_hpte_count, valid;
	unsigned long flags, s_addr = addr;
	unsigned long hpte_v, want_v, shift;
	unsigned long hidx, vpn = 0, hash, slot;

	shift = mmu_psize_defs[psize].shift;
	max_hpte_count = 1U << (PMD_SHIFT - shift);

	local_irq_save(flags);
	for (i = 0; i < max_hpte_count; i++) {
		valid = hpte_valid(hpte_slot_array, i);
		if (!valid)
			continue;
		hidx = hpte_hash_index(hpte_slot_array, i);

		/* get the vpn */
		addr = s_addr + (i * (1ul << shift));
		vpn = hpt_vpn(addr, vsid, ssize);
		hash = hpt_hash(vpn, shift, ssize);
		if (hidx & _PTEIDX_SECONDARY)
			hash = ~hash;

		slot = (hash & htab_hash_mask) * HPTES_PER_GROUP;
		slot += hidx & _PTEIDX_GROUP_IX;

		hptep = htab_address + slot;
		want_v = hpte_encode_avpn(vpn, psize, ssize);
		hpte_v = hpte_get_old_v(hptep);

		/* Even if we miss, we need to invalidate the TLB */
		if (HPTE_V_COMPARE(hpte_v, want_v) && (hpte_v & HPTE_V_VALID)) {
			/* recheck with locks held */
			native_lock_hpte(hptep);
			hpte_v = hpte_get_old_v(hptep);

			if (HPTE_V_COMPARE(hpte_v, want_v) && (hpte_v & HPTE_V_VALID)) {
				/* Invalidate the hpte. NOTE: this also unlocks it */
				release_hpte_lock();
				hptep->v = 0;
			} else
				native_unlock_hpte(hptep);
		}
		/*
		 * We need to do a TLB invalidate for every address; the tlbie
		 * instruction compares the entry's VA in the TLB with the VA
		 * specified here.
		 */
		tlbie(vpn, psize, actual_psize, ssize, local);
	}
	local_irq_restore(flags);
}
#else
static void native_hugepage_invalidate(unsigned long vsid,
				       unsigned long addr,
				       unsigned char *hpte_slot_array,
				       int psize, int ssize, int local)
{
	WARN(1, "%s called without THP support\n", __func__);
}
#endif

static void hpte_decode(struct hash_pte *hpte, unsigned long slot,
			int *psize, int *apsize, int *ssize, unsigned long *vpn)
{
	unsigned long avpn, pteg, vpi;
	unsigned long hpte_v = be64_to_cpu(hpte->v);
	unsigned long hpte_r = be64_to_cpu(hpte->r);
	unsigned long vsid, seg_off;
	int size, a_size, shift;
	/* Look at the 8 bit LP value */
	unsigned int lp = (hpte_r >> LP_SHIFT) & ((1 << LP_BITS) - 1);

	if (cpu_has_feature(CPU_FTR_ARCH_300)) {
		hpte_v = hpte_new_to_old_v(hpte_v, hpte_r);
		hpte_r = hpte_new_to_old_r(hpte_r);
	}
	if (!(hpte_v & HPTE_V_LARGE)) {
		size = MMU_PAGE_4K;
		a_size = MMU_PAGE_4K;
	} else {
		size = hpte_page_sizes[lp] & 0xf;
		a_size = hpte_page_sizes[lp] >> 4;
	}
	/* This works for all page sizes, and for 256M and 1T segments */
	*ssize = hpte_v >> HPTE_V_SSIZE_SHIFT;
	shift = mmu_psize_defs[size].shift;

	avpn = (HPTE_V_AVPN_VAL(hpte_v) & ~mmu_psize_defs[size].avpnm);
	pteg = slot / HPTES_PER_GROUP;
	if (hpte_v & HPTE_V_SECONDARY)
		pteg = ~pteg;

	switch (*ssize) {
	case MMU_SEGSIZE_256M:
		/* We only have 28 - 23 bits of seg_off in avpn */
		seg_off = (avpn & 0x1f) << 23;
		vsid = avpn >> 5;
		/* We can find more bits from the pteg value */
		if (shift < 23) {
			vpi = (vsid ^ pteg) & htab_hash_mask;
			seg_off |= vpi << shift;
		}
		*vpn = vsid << (SID_SHIFT - VPN_SHIFT) | seg_off >> VPN_SHIFT;
		break;
	case MMU_SEGSIZE_1T:
		/* We only have 40 - 23 bits of seg_off in avpn */
		seg_off = (avpn & 0x1ffff) << 23;
		vsid = avpn >> 17;
		if (shift < 23) {
			vpi = (vsid ^ (vsid << 25) ^ pteg) & htab_hash_mask;
			seg_off |= vpi << shift;
		}
		*vpn = vsid << (SID_SHIFT_1T - VPN_SHIFT) | seg_off >> VPN_SHIFT;
		break;
	default:
		*vpn = size = 0;
	}
	*psize = size;
	*apsize = a_size;
}

/*
 * clear all mappings on kexec.  All cpus are in real mode (or they will
 * be when they isi), and we are the only one left.  We rely on our kernel
 * mapping being 0xC0's and the hardware ignoring those two real bits.
 *
 * This must be called with interrupts disabled.
 *
 * Taking the native_tlbie_lock is unsafe here due to the possibility of
 * lockdep being on. On pre-POWER5 hardware, not taking the lock could
 * cause deadlock. On POWER5 and newer, not taking the lock is fine. This
 * only gets called during boot before secondary CPUs have come up and
 * during crashdump, and all bets are off anyway.
 *
 * TODO: add batching support when enabled.  remember, no dynamic memory here,
 * although there is the control page available...
 */
static notrace void native_hpte_clear(void)
{
	unsigned long vpn = 0;
	unsigned long slot, slots;
	struct hash_pte *hptep = htab_address;
	unsigned long hpte_v;
	unsigned long pteg_count;
	int psize, apsize, ssize;

	pteg_count = htab_hash_mask + 1;

	slots = pteg_count * HPTES_PER_GROUP;

	for (slot = 0; slot < slots; slot++, hptep++) {
		/*
		 * we could lock the pte here, but we are the only cpu
		 * running, right?  and for crash dump, we probably
		 * don't want to wait for a maybe bad cpu.
		 */
		hpte_v = be64_to_cpu(hptep->v);

		/*
		 * Call __tlbie() here rather than tlbie() since we can't
		 * take the native_tlbie_lock.
		 */
		if (hpte_v & HPTE_V_VALID) {
			hpte_decode(hptep, slot, &psize, &apsize, &ssize, &vpn);
			hptep->v = 0;
			___tlbie(vpn, psize, apsize, ssize);
		}
	}

	asm volatile("eieio; tlbsync; ptesync":::"memory");
}

/*
 * Batched hash table flush, we batch the tlbie's to avoid taking/releasing
 * the lock all the time
 */
static void native_flush_hash_range(unsigned long number, int local)
{
	unsigned long vpn = 0;
	unsigned long hash, index, hidx, shift, slot;
	struct hash_pte *hptep;
	unsigned long hpte_v;
	unsigned long want_v;
	unsigned long flags;
	real_pte_t pte;
	struct ppc64_tlb_batch *batch = this_cpu_ptr(&ppc64_tlb_batch);
	unsigned long psize = batch->psize;
	int ssize = batch->ssize;
	int i;
	unsigned int use_local;

	use_local = local && mmu_has_feature(MMU_FTR_TLBIEL) &&
		mmu_psize_defs[psize].tlbiel && !cxl_ctx_in_use();

	local_irq_save(flags);

	for (i = 0; i < number; i++) {
		vpn = batch->vpn[i];
		pte = batch->pte[i];

		pte_iterate_hashed_subpages(pte, psize, vpn, index, shift) {
			hash = hpt_hash(vpn, shift, ssize);
			hidx = __rpte_to_hidx(pte, index);
			if (hidx & _PTEIDX_SECONDARY)
				hash = ~hash;
			slot = (hash & htab_hash_mask) * HPTES_PER_GROUP;
			slot += hidx & _PTEIDX_GROUP_IX;
			hptep = htab_address + slot;
			want_v = hpte_encode_avpn(vpn, psize, ssize);
			hpte_v = hpte_get_old_v(hptep);

			if (!HPTE_V_COMPARE(hpte_v, want_v) || !(hpte_v & HPTE_V_VALID))
				continue;
			/* lock and try again */
			native_lock_hpte(hptep);
			hpte_v = hpte_get_old_v(hptep);

			if (!HPTE_V_COMPARE(hpte_v, want_v) || !(hpte_v & HPTE_V_VALID))
				native_unlock_hpte(hptep);
			else {
				release_hpte_lock();
				hptep->v = 0;
			}

		} pte_iterate_hashed_end();
	}

	if (use_local) {
		asm volatile("ptesync":::"memory");
		for (i = 0; i < number; i++) {
			vpn = batch->vpn[i];
			pte = batch->pte[i];

			pte_iterate_hashed_subpages(pte, psize,
						    vpn, index, shift) {
				__tlbiel(vpn, psize, psize, ssize);
			} pte_iterate_hashed_end();
		}
		ppc_after_tlbiel_barrier();
	} else {
		int lock_tlbie = !mmu_has_feature(MMU_FTR_LOCKLESS_TLBIE);

		if (lock_tlbie)
			raw_spin_lock(&native_tlbie_lock);

		asm volatile("ptesync":::"memory");
		for (i = 0; i < number; i++) {
			vpn = batch->vpn[i];
			pte = batch->pte[i];

			pte_iterate_hashed_subpages(pte, psize,
						    vpn, index, shift) {
				__tlbie(vpn, psize, psize, ssize);
			} pte_iterate_hashed_end();
		}
		/*
		 * Just do one more with the last used values.
		 */
		fixup_tlbie_vpn(vpn, psize, psize, ssize);
		asm volatile("eieio; tlbsync; ptesync":::"memory");

		if (lock_tlbie)
			raw_spin_unlock(&native_tlbie_lock);
	}

	local_irq_restore(flags);
}

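/*
 * Install the native (bare-metal) HPT management ops into mmu_hash_ops;
 * used when the kernel manages the hash table directly rather than via
 * hypervisor calls.
 */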
void __init hpte_init_native(void)
{
	mmu_hash_ops.hpte_invalidate	= native_hpte_invalidate;
	mmu_hash_ops.hpte_updatepp	= native_hpte_updatepp;
	mmu_hash_ops.hpte_updateboltedpp = native_hpte_updateboltedpp;
	mmu_hash_ops.hpte_removebolted	= native_hpte_removebolted;
	mmu_hash_ops.hpte_insert	= native_hpte_insert;
	mmu_hash_ops.hpte_remove	= native_hpte_remove;
	mmu_hash_ops.hpte_clear_all	= native_hpte_clear;
	mmu_hash_ops.flush_hash_range	= native_flush_hash_range;
	mmu_hash_ops.hugepage_invalidate = native_hugepage_invalidate;
}