// SPDX-License-Identifier: GPL-2.0-or-later
/*
 * PowerPC64 port by Mike Corrigan and Dave Engebretsen
 *   {mikejc|engebret}@us.ibm.com
 *
 *    Copyright (c) 2000 Mike Corrigan <mikejc@us.ibm.com>
 *
 * SMP scalability work:
 *    Copyright (C) 2001 Anton Blanchard <anton@au.ibm.com>, IBM
 *
 *    Module name: htab.c
 *
 *    Description:
 *      PowerPC Hashed Page Table functions
 */

#undef DEBUG
#undef DEBUG_LOW

#define pr_fmt(fmt) "hash-mmu: " fmt
#include <linux/spinlock.h>
#include <linux/errno.h>
#include <linux/sched/mm.h>
#include <linux/proc_fs.h>
#include <linux/stat.h>
#include <linux/sysctl.h>
#include <linux/export.h>
#include <linux/ctype.h>
#include <linux/cache.h>
#include <linux/init.h>
#include <linux/signal.h>
#include <linux/memblock.h>
#include <linux/context_tracking.h>
#include <linux/libfdt.h>
#include <linux/pkeys.h>
#include <linux/hugetlb.h>
#include <linux/cpu.h>

#include <asm/debugfs.h>
#include <asm/processor.h>
#include <asm/pgtable.h>
#include <asm/mmu.h>
#include <asm/mmu_context.h>
#include <asm/page.h>
#include <asm/types.h>
#include <linux/uaccess.h>
#include <asm/machdep.h>
#include <asm/prom.h>
#include <asm/io.h>
#include <asm/eeh.h>
#include <asm/tlb.h>
#include <asm/cacheflush.h>
#include <asm/cputable.h>
#include <asm/sections.h>
#include <asm/copro.h>
#include <asm/udbg.h>
#include <asm/code-patching.h>
#include <asm/fadump.h>
#include <asm/firmware.h>
#include <asm/tm.h>
#include <asm/trace.h>
#include <asm/ps3.h>
#include <asm/pte-walk.h>
#include <asm/asm-prototypes.h>
#include <asm/ultravisor.h>

#include <mm/mmu_decl.h>

#ifdef DEBUG
#define DBG(fmt...) udbg_printf(fmt)
#else
#define DBG(fmt...)
#endif

#ifdef DEBUG_LOW
#define DBG_LOW(fmt...) udbg_printf(fmt)
#else
#define DBG_LOW(fmt...)
#endif

#define KB (1024)
#define MB (1024*KB)
#define GB (1024L*MB)

/*
 * Note:  pte   --> Linux PTE
 *        HPTE  --> PowerPC Hashed Page Table Entry
 *
 * Execution context:
 *   htab_initialize is called with the MMU off (of course), but
 *   the kernel has been copied down to zero so it can directly
 *   reference global data.  At this point it is very difficult
 *   to print debug info.
 *
 */

static unsigned long _SDR1;
struct mmu_psize_def mmu_psize_defs[MMU_PAGE_COUNT];
EXPORT_SYMBOL_GPL(mmu_psize_defs);

u8 hpte_page_sizes[1 << LP_BITS];
EXPORT_SYMBOL_GPL(hpte_page_sizes);

struct hash_pte *htab_address;
unsigned long htab_size_bytes;
unsigned long htab_hash_mask;
EXPORT_SYMBOL_GPL(htab_hash_mask);
int mmu_linear_psize = MMU_PAGE_4K;
EXPORT_SYMBOL_GPL(mmu_linear_psize);
int mmu_virtual_psize = MMU_PAGE_4K;
int mmu_vmalloc_psize = MMU_PAGE_4K;
#ifdef CONFIG_SPARSEMEM_VMEMMAP
int mmu_vmemmap_psize = MMU_PAGE_4K;
#endif
int mmu_io_psize = MMU_PAGE_4K;
int mmu_kernel_ssize = MMU_SEGSIZE_256M;
EXPORT_SYMBOL_GPL(mmu_kernel_ssize);
int mmu_highuser_ssize = MMU_SEGSIZE_256M;
u16 mmu_slb_size = 64;
EXPORT_SYMBOL_GPL(mmu_slb_size);
#ifdef CONFIG_PPC_64K_PAGES
int mmu_ci_restrictions;
#endif
#ifdef CONFIG_DEBUG_PAGEALLOC
static u8 *linear_map_hash_slots;
static unsigned long linear_map_hash_count;
static DEFINE_SPINLOCK(linear_map_hash_lock);
#endif /* CONFIG_DEBUG_PAGEALLOC */
struct mmu_hash_ops mmu_hash_ops;
EXPORT_SYMBOL(mmu_hash_ops);

/*
 * These are definitions of page sizes arrays to be used when none
 * is provided by the firmware.
 */

/*
 * Fallback (4k pages only)
 */
static struct mmu_psize_def mmu_psize_defaults[] = {
	[MMU_PAGE_4K] = {
		.shift	= 12,
		.sllp	= 0,
		.penc	= {[MMU_PAGE_4K] = 0, [1 ... MMU_PAGE_COUNT - 1] = -1},
		.avpnm	= 0,
		.tlbiel	= 0,
	},
};

/*
 * POWER4, GPUL, POWER5
 *
 * Support for 16Mb large pages
 */
static struct mmu_psize_def mmu_psize_defaults_gp[] = {
	[MMU_PAGE_4K] = {
		.shift	= 12,
		.sllp	= 0,
		.penc	= {[MMU_PAGE_4K] = 0, [1 ... MMU_PAGE_COUNT - 1] = -1},
		.avpnm	= 0,
		.tlbiel	= 1,
	},
	[MMU_PAGE_16M] = {
		.shift	= 24,
		.sllp	= SLB_VSID_L,
		.penc	= {[0 ... MMU_PAGE_16M - 1] = -1, [MMU_PAGE_16M] = 0,
			   [MMU_PAGE_16M + 1 ... MMU_PAGE_COUNT - 1] = -1 },
		.avpnm	= 0x1UL,
		.tlbiel	= 0,
	},
};

/*
 * 'R' and 'C' update notes:
 *  - Under pHyp or KVM, the updatepp path will not set C, thus it *will*
 *    create writeable HPTEs without C set, because the hcall H_PROTECT
 *    that we use in that case will not update C
 *  - The above is however not a problem, because we also don't do that
 *    fancy "no flush" variant of eviction and we use H_REMOVE which will
 *    do the right thing and thus we don't have the race I described earlier
 *
 *  - Under bare metal, we do have the race, so we need R and C set
 *  - We make sure R is always set and never lost
 *  - C is _PAGE_DIRTY, and *should* always be set for a writeable mapping
 */
unsigned long htab_convert_pte_flags(unsigned long pteflags)
{
	unsigned long rflags = 0;

	/* _PAGE_EXEC -> NOEXEC */
	if ((pteflags & _PAGE_EXEC) == 0)
		rflags |= HPTE_R_N;
	/*
	 * PPP bits:
	 * Linux uses slb key 0 for kernel and 1 for user.
	 * kernel RW areas are mapped with PPP=0b000
	 * User area is mapped with PPP=0b010 for read/write
	 * or PPP=0b011 for read-only (including writeable but clean pages).
	 */
	if (pteflags & _PAGE_PRIVILEGED) {
		/*
		 * Kernel read only mapped with ppp bits 0b110
		 */
		if (!(pteflags & _PAGE_WRITE)) {
			if (mmu_has_feature(MMU_FTR_KERNEL_RO))
				rflags |= (HPTE_R_PP0 | 0x2);
			else
				rflags |= 0x3;
		}
	} else {
		if (pteflags & _PAGE_RWX)
			rflags |= 0x2;
		if (!((pteflags & _PAGE_WRITE) && (pteflags & _PAGE_DIRTY)))
			rflags |= 0x1;
	}
	/*
	 * We can't allow hardware to update hpte bits. Hence always
	 * set 'R' bit and set 'C' if it is a write fault
	 */
	rflags |= HPTE_R_R;

	if (pteflags & _PAGE_DIRTY)
		rflags |= HPTE_R_C;
	/*
	 * Add in WIG bits
	 */

	if ((pteflags & _PAGE_CACHE_CTL) == _PAGE_TOLERANT)
		rflags |= HPTE_R_I;
	else if ((pteflags & _PAGE_CACHE_CTL) == _PAGE_NON_IDEMPOTENT)
		rflags |= (HPTE_R_I | HPTE_R_G);
	else if ((pteflags & _PAGE_CACHE_CTL) == _PAGE_SAO)
		rflags |= (HPTE_R_W | HPTE_R_I | HPTE_R_M);
	else
		/*
		 * Add memory coherence if cache inhibited is not set
		 */
		rflags |= HPTE_R_M;

	rflags |= pte_to_hpte_pkey_bits(pteflags);
	return rflags;
}
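
/*
 * Illustrative example of the conversion above (derived from the logic in
 * this function, not from architecture documents): a clean, writeable,
 * non-executable user mapping (_PAGE_READ | _PAGE_WRITE set, _PAGE_DIRTY
 * and _PAGE_EXEC clear) takes the non-privileged branch and collects
 * 0x2 (some of _PAGE_RWX set) plus 0x1 (write set but dirty clear), i.e.
 * PPP = 0b011, the "writeable but clean" read-only encoding mentioned in
 * the comment above. It also gets HPTE_R_N (no exec), HPTE_R_R (always),
 * HPTE_R_M (normal cacheable memory), and no HPTE_R_C until a store
 * fault marks the page dirty.
 */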

int htab_bolt_mapping(unsigned long vstart, unsigned long vend,
		      unsigned long pstart, unsigned long prot,
		      int psize, int ssize)
{
	unsigned long vaddr, paddr;
	unsigned int step, shift;
	int ret = 0;

	shift = mmu_psize_defs[psize].shift;
	step = 1 << shift;

	prot = htab_convert_pte_flags(prot);

	DBG("htab_bolt_mapping(%lx..%lx -> %lx (%lx,%d,%d)\n",
	    vstart, vend, pstart, prot, psize, ssize);

	for (vaddr = vstart, paddr = pstart; vaddr < vend;
	     vaddr += step, paddr += step) {
		unsigned long hash, hpteg;
		unsigned long vsid = get_kernel_vsid(vaddr, ssize);
		unsigned long vpn  = hpt_vpn(vaddr, vsid, ssize);
		unsigned long tprot = prot;
		bool secondary_hash = false;

		/*
		 * If we hit a bad address return error.
		 */
		if (!vsid)
			return -1;
		/* Make kernel text executable */
		if (overlaps_kernel_text(vaddr, vaddr + step))
			tprot &= ~HPTE_R_N;

		/*
		 * If relocatable, check if it overlaps interrupt vectors that
		 * are copied down to real 0. For relocatable kernel
		 * (e.g. kdump case) we copy interrupt vectors down to real
		 * address 0. Mark that region as executable. This is
		 * because on p8 systems with the relocation on exception
		 * feature enabled, exceptions are raised with MMU (IR=DR=1)
		 * ON. Hence in order to execute the interrupt handlers in
		 * virtual mode the vector region needs to be marked as
		 * executable.
		 */
		if ((PHYSICAL_START > MEMORY_START) &&
		    overlaps_interrupt_vector_text(vaddr, vaddr + step))
			tprot &= ~HPTE_R_N;

		hash = hpt_hash(vpn, shift, ssize);
		hpteg = ((hash & htab_hash_mask) * HPTES_PER_GROUP);

		BUG_ON(!mmu_hash_ops.hpte_insert);
repeat:
		ret = mmu_hash_ops.hpte_insert(hpteg, vpn, paddr, tprot,
					       HPTE_V_BOLTED, psize, psize,
					       ssize);
		if (ret == -1) {
			/*
			 * Try to keep bolted entries in primary.
			 * Remove non bolted entries and try insert again
			 */
			ret = mmu_hash_ops.hpte_remove(hpteg);
			if (ret != -1)
				ret = mmu_hash_ops.hpte_insert(hpteg, vpn, paddr, tprot,
							       HPTE_V_BOLTED, psize, psize,
							       ssize);
			if (ret == -1 && !secondary_hash) {
				secondary_hash = true;
				hpteg = ((~hash & htab_hash_mask) * HPTES_PER_GROUP);
				goto repeat;
			}
		}

		if (ret < 0)
			break;

		cond_resched();
#ifdef CONFIG_DEBUG_PAGEALLOC
		if (debug_pagealloc_enabled() &&
		    (paddr >> PAGE_SHIFT) < linear_map_hash_count)
			linear_map_hash_slots[paddr >> PAGE_SHIFT] = ret | 0x80;
#endif /* CONFIG_DEBUG_PAGEALLOC */
	}
	return ret < 0 ? ret : 0;
}

int htab_remove_mapping(unsigned long vstart, unsigned long vend,
			int psize, int ssize)
{
	unsigned long vaddr;
	unsigned int step, shift;
	int rc;
	int ret = 0;

	shift = mmu_psize_defs[psize].shift;
	step = 1 << shift;

	if (!mmu_hash_ops.hpte_removebolted)
		return -ENODEV;

	for (vaddr = vstart; vaddr < vend; vaddr += step) {
		rc = mmu_hash_ops.hpte_removebolted(vaddr, psize, ssize);
		if (rc == -ENOENT) {
			ret = -ENOENT;
			continue;
		}
		if (rc < 0)
			return rc;
	}

	return ret;
}

static bool disable_1tb_segments = false;

static int __init parse_disable_1tb_segments(char *p)
{
	disable_1tb_segments = true;
	return 0;
}
early_param("disable_1tb_segments", parse_disable_1tb_segments);

static int __init htab_dt_scan_seg_sizes(unsigned long node,
					 const char *uname, int depth,
					 void *data)
{
	const char *type = of_get_flat_dt_prop(node, "device_type", NULL);
	const __be32 *prop;
	int size = 0;

	/* We are scanning "cpu" nodes only */
	if (type == NULL || strcmp(type, "cpu") != 0)
		return 0;

	prop = of_get_flat_dt_prop(node, "ibm,processor-segment-sizes", &size);
	if (prop == NULL)
		return 0;
	for (; size >= 4; size -= 4, ++prop) {
		if (be32_to_cpu(prop[0]) == 40) {
			DBG("1T segment support detected\n");

			if (disable_1tb_segments) {
				DBG("1T segments disabled by command line\n");
				break;
			}

			cur_cpu_spec->mmu_features |= MMU_FTR_1T_SEGMENT;
			return 1;
		}
	}
	cur_cpu_spec->mmu_features &= ~MMU_FTR_NO_SLBIE_B;
	return 0;
}

static int __init get_idx_from_shift(unsigned int shift)
{
	int idx = -1;

	switch (shift) {
	case 0xc:
		idx = MMU_PAGE_4K;
		break;
	case 0x10:
		idx = MMU_PAGE_64K;
		break;
	case 0x14:
		idx = MMU_PAGE_1M;
		break;
	case 0x18:
		idx = MMU_PAGE_16M;
		break;
	case 0x22:
		idx = MMU_PAGE_16G;
		break;
	}
	return idx;
}

static int __init htab_dt_scan_page_sizes(unsigned long node,
					  const char *uname, int depth,
					  void *data)
{
	const char *type = of_get_flat_dt_prop(node, "device_type", NULL);
	const __be32 *prop;
	int size = 0;

	/* We are scanning "cpu" nodes only */
	if (type == NULL || strcmp(type, "cpu") != 0)
		return 0;

	prop = of_get_flat_dt_prop(node, "ibm,segment-page-sizes", &size);
	if (!prop)
		return 0;

	pr_info("Page sizes from device-tree:\n");
	size /= 4;
	cur_cpu_spec->mmu_features &= ~(MMU_FTR_16M_PAGE);
	while (size > 0) {
		unsigned int base_shift = be32_to_cpu(prop[0]);
		unsigned int slbenc = be32_to_cpu(prop[1]);
		unsigned int lpnum = be32_to_cpu(prop[2]);
		struct mmu_psize_def *def;
		int idx, base_idx;

		size -= 3; prop += 3;
		base_idx = get_idx_from_shift(base_shift);
		if (base_idx < 0) {
			/* skip the pte encoding also */
			prop += lpnum * 2; size -= lpnum * 2;
			continue;
		}
		def = &mmu_psize_defs[base_idx];
		if (base_idx == MMU_PAGE_16M)
			cur_cpu_spec->mmu_features |= MMU_FTR_16M_PAGE;

		def->shift = base_shift;
		if (base_shift <= 23)
			def->avpnm = 0;
		else
			def->avpnm = (1 << (base_shift - 23)) - 1;
		def->sllp = slbenc;
		/*
		 * We don't know for sure what's up with tlbiel, so
		 * for now we only set it for 4K and 64K pages
		 */
		if (base_idx == MMU_PAGE_4K || base_idx == MMU_PAGE_64K)
			def->tlbiel = 1;
		else
			def->tlbiel = 0;

		while (size > 0 && lpnum) {
			unsigned int shift = be32_to_cpu(prop[0]);
			int penc = be32_to_cpu(prop[1]);

			prop += 2; size -= 2;
			lpnum--;

			idx = get_idx_from_shift(shift);
			if (idx < 0)
				continue;

			if (penc == -1)
				pr_err("Invalid penc for base_shift=%d "
				       "shift=%d\n", base_shift, shift);

			def->penc[idx] = penc;
			pr_info("base_shift=%d: shift=%d, sllp=0x%04lx,"
				" avpnm=0x%08lx, tlbiel=%d, penc=%d\n",
				base_shift, shift, def->sllp,
				def->avpnm, def->tlbiel, def->penc[idx]);
		}
	}

	return 1;
}

#ifdef CONFIG_HUGETLB_PAGE
/*
 * Scan for 16G memory blocks that have been set aside for huge pages
 * and reserve those blocks for 16G huge pages.
 */
static int __init htab_dt_scan_hugepage_blocks(unsigned long node,
					       const char *uname, int depth,
					       void *data) {
	const char *type = of_get_flat_dt_prop(node, "device_type", NULL);
	const __be64 *addr_prop;
	const __be32 *page_count_prop;
	unsigned int expected_pages;
	long unsigned int phys_addr;
	long unsigned int block_size;

	/* We are scanning "memory" nodes only */
	if (type == NULL || strcmp(type, "memory") != 0)
		return 0;

	/*
	 * This property is the log base 2 of the number of virtual pages that
	 * will represent this memory block.
	 */
	page_count_prop = of_get_flat_dt_prop(node, "ibm,expected#pages", NULL);
	if (page_count_prop == NULL)
		return 0;
	expected_pages = (1 << be32_to_cpu(page_count_prop[0]));
	addr_prop = of_get_flat_dt_prop(node, "reg", NULL);
	if (addr_prop == NULL)
		return 0;
	phys_addr = be64_to_cpu(addr_prop[0]);
	block_size = be64_to_cpu(addr_prop[1]);
	if (block_size != (16 * GB))
		return 0;
	printk(KERN_INFO "Huge page(16GB) memory: "
	       "addr = 0x%lX size = 0x%lX pages = %d\n",
	       phys_addr, block_size, expected_pages);
	if (phys_addr + block_size * expected_pages <= memblock_end_of_DRAM()) {
		memblock_reserve(phys_addr, block_size * expected_pages);
		pseries_add_gpage(phys_addr, block_size, expected_pages);
	}
	return 0;
}
#endif /* CONFIG_HUGETLB_PAGE */

static void mmu_psize_set_default_penc(void)
{
	int bpsize, apsize;
	for (bpsize = 0; bpsize < MMU_PAGE_COUNT; bpsize++)
		for (apsize = 0; apsize < MMU_PAGE_COUNT; apsize++)
			mmu_psize_defs[bpsize].penc[apsize] = -1;
}

#ifdef CONFIG_PPC_64K_PAGES

static bool might_have_hea(void)
{
	/*
	 * The HEA ethernet adapter requires awareness of the
	 * GX bus. Without that awareness we can easily assume
	 * we will never see an HEA ethernet device.
	 */
#ifdef CONFIG_IBMEBUS
	return !cpu_has_feature(CPU_FTR_ARCH_207S) &&
		firmware_has_feature(FW_FEATURE_SPLPAR);
#else
	return false;
#endif
}

#endif /* #ifdef CONFIG_PPC_64K_PAGES */

static void __init htab_scan_page_sizes(void)
{
	int rc;

	/* set the invalid penc to -1 */
	mmu_psize_set_default_penc();

	/* Default to 4K pages only */
	memcpy(mmu_psize_defs, mmu_psize_defaults,
	       sizeof(mmu_psize_defaults));

	/*
	 * Try to find the available page sizes in the device-tree
	 */
	rc = of_scan_flat_dt(htab_dt_scan_page_sizes, NULL);
	if (rc == 0 && early_mmu_has_feature(MMU_FTR_16M_PAGE)) {
		/*
		 * Nothing in the device-tree, but the CPU supports 16M pages,
		 * so let's fall back on a known size list for 16M capable CPUs.
		 */
		memcpy(mmu_psize_defs, mmu_psize_defaults_gp,
		       sizeof(mmu_psize_defaults_gp));
	}

#ifdef CONFIG_HUGETLB_PAGE
	if (!hugetlb_disabled) {
		/* Reserve 16G huge page memory sections for huge pages */
		of_scan_flat_dt(htab_dt_scan_hugepage_blocks, NULL);
	}
#endif /* CONFIG_HUGETLB_PAGE */
}

/*
 * Fill in the hpte_page_sizes[] array.
 * We go through the mmu_psize_defs[] array looking for all the
 * supported base/actual page size combinations.  Each combination
 * has a unique pagesize encoding (penc) value in the low bits of
 * the LP field of the HPTE.  For actual page sizes less than 1MB,
 * some of the upper LP bits are used for RPN bits, meaning that
 * we need to fill in several entries in hpte_page_sizes[].
 *
 * In diagrammatic form, with r = RPN bits and z = page size bits:
 *        PTE LP     actual page size
 *    rrrr rrrz	      >=8KB
 *    rrrr rrzz	      >=16KB
 *    rrrr rzzz	      >=32KB
 *    rrrr zzzz	      >=64KB
 *    ...
 *
 * The zzzz bits are implementation-specific but are chosen so that
 * no encoding for a larger page size uses the same value in its
 * low-order N bits as the encoding for the 2^(12+N) byte page size
 * (if it exists).
 */
static void init_hpte_page_sizes(void)
{
	long int ap, bp;
	long int shift, penc;

	for (bp = 0; bp < MMU_PAGE_COUNT; ++bp) {
		if (!mmu_psize_defs[bp].shift)
			continue;	/* not a supported page size */
		for (ap = bp; ap < MMU_PAGE_COUNT; ++ap) {
			penc = mmu_psize_defs[bp].penc[ap];
			if (penc == -1 || !mmu_psize_defs[ap].shift)
				continue;
			shift = mmu_psize_defs[ap].shift - LP_SHIFT;
			if (shift <= 0)
				continue;	/* should never happen */
			/*
			 * For page sizes less than 1MB, this loop
			 * replicates the entry for all possible values
			 * of the rrrr bits.
			 */
			while (penc < (1 << LP_BITS)) {
				hpte_page_sizes[penc] = (ap << 4) | bp;
				penc += 1 << shift;
			}
		}
	}
}
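
/*
 * Illustrative example of the replication loop above, assuming the usual
 * values of LP_SHIFT (12) and LP_BITS (8), which are defined elsewhere:
 * for a 64K actual page size (shift 16) the step is 1 << (16 - 12) = 16,
 * so a base penc of 1 fills hpte_page_sizes[1], [17], [33], ... once for
 * every possible value of the high-order "rrrr" RPN bits shown in the
 * diagram.
 */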

static void __init htab_init_page_sizes(void)
{
	bool aligned = true;
	init_hpte_page_sizes();

	if (!debug_pagealloc_enabled()) {
		/*
		 * Pick a size for the linear mapping. Currently, we only
		 * support 16M, 1M and 4K which is the default
		 */
		if (IS_ENABLED(CONFIG_STRICT_KERNEL_RWX) &&
		    (unsigned long)_stext % 0x1000000) {
			if (mmu_psize_defs[MMU_PAGE_16M].shift)
				pr_warn("Kernel not 16M aligned, "
					"disabling 16M linear map alignment");
			aligned = false;
		}

		if (mmu_psize_defs[MMU_PAGE_16M].shift && aligned)
			mmu_linear_psize = MMU_PAGE_16M;
		else if (mmu_psize_defs[MMU_PAGE_1M].shift)
			mmu_linear_psize = MMU_PAGE_1M;
	}

#ifdef CONFIG_PPC_64K_PAGES
	/*
	 * Pick a size for the ordinary pages. Default is 4K, we support
	 * 64K for user mappings and vmalloc if supported by the processor.
	 * We only use 64k for ioremap if the processor
	 * (and firmware) support cache-inhibited large pages.
	 * If not, we use 4k and set mmu_ci_restrictions so that
	 * hash_page knows to switch processes that use cache-inhibited
	 * mappings to 4k pages.
	 */
	if (mmu_psize_defs[MMU_PAGE_64K].shift) {
		mmu_virtual_psize = MMU_PAGE_64K;
		mmu_vmalloc_psize = MMU_PAGE_64K;
		if (mmu_linear_psize == MMU_PAGE_4K)
			mmu_linear_psize = MMU_PAGE_64K;
		if (mmu_has_feature(MMU_FTR_CI_LARGE_PAGE)) {
			/*
			 * When running on pSeries using 64k pages for ioremap
			 * would stop us accessing the HEA ethernet. So if we
			 * have the chance of ever seeing one, stay at 4k.
			 */
			if (!might_have_hea())
				mmu_io_psize = MMU_PAGE_64K;
		} else
			mmu_ci_restrictions = 1;
	}
#endif /* CONFIG_PPC_64K_PAGES */

#ifdef CONFIG_SPARSEMEM_VMEMMAP
	/*
	 * We try to use 16M pages for vmemmap if that is supported
	 * and we have at least 1G of RAM at boot
	 */
	if (mmu_psize_defs[MMU_PAGE_16M].shift &&
	    memblock_phys_mem_size() >= 0x40000000)
		mmu_vmemmap_psize = MMU_PAGE_16M;
	else
		mmu_vmemmap_psize = mmu_virtual_psize;
#endif /* CONFIG_SPARSEMEM_VMEMMAP */

	printk(KERN_DEBUG "Page orders: linear mapping = %d, "
	       "virtual = %d, io = %d"
#ifdef CONFIG_SPARSEMEM_VMEMMAP
	       ", vmemmap = %d"
#endif
	       "\n",
	       mmu_psize_defs[mmu_linear_psize].shift,
	       mmu_psize_defs[mmu_virtual_psize].shift,
	       mmu_psize_defs[mmu_io_psize].shift
#ifdef CONFIG_SPARSEMEM_VMEMMAP
	       ,mmu_psize_defs[mmu_vmemmap_psize].shift
#endif
	       );
}

static int __init htab_dt_scan_pftsize(unsigned long node,
				       const char *uname, int depth,
				       void *data)
{
	const char *type = of_get_flat_dt_prop(node, "device_type", NULL);
	const __be32 *prop;

	/* We are scanning "cpu" nodes only */
	if (type == NULL || strcmp(type, "cpu") != 0)
		return 0;

	prop = of_get_flat_dt_prop(node, "ibm,pft-size", NULL);
	if (prop != NULL) {
		/* pft_size[0] is the NUMA CEC cookie */
		ppc64_pft_size = be32_to_cpu(prop[1]);
		return 1;
	}
	return 0;
}

unsigned htab_shift_for_mem_size(unsigned long mem_size)
{
	unsigned memshift = __ilog2(mem_size);
	unsigned pshift = mmu_psize_defs[mmu_virtual_psize].shift;
	unsigned pteg_shift;

	/* round mem_size up to next power of 2 */
	if ((1UL << memshift) < mem_size)
		memshift += 1;

	/* aim for 2 pages / pteg */
	pteg_shift = memshift - (pshift + 1);

	/*
	 * 2^11 PTEGS of 128 bytes each, i.e. 2^18 bytes is the minimum htab
	 * size permitted by the architecture.
	 */
	return max(pteg_shift + 7, 18U);
}
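
/*
 * Illustrative example of the sizing above: for 4GB of RAM with a 64K base
 * page size (pshift = 16), memshift = 32, pteg_shift = 32 - 17 = 15, and
 * the function returns max(15 + 7, 18) = 22, i.e. a 4MB hash table. With
 * 4K base pages the same 4GB gives pteg_shift = 19 and a 2^26 = 64MB HPT.
 */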

static unsigned long __init htab_get_table_size(void)
{
	/*
	 * If hash size isn't already provided by the platform, we try to
	 * retrieve it from the device-tree. If it's not there either, we
	 * calculate it now based on the total RAM size
	 */
	if (ppc64_pft_size == 0)
		of_scan_flat_dt(htab_dt_scan_pftsize, NULL);
	if (ppc64_pft_size)
		return 1UL << ppc64_pft_size;

	return 1UL << htab_shift_for_mem_size(memblock_phys_mem_size());
}

#ifdef CONFIG_MEMORY_HOTPLUG
int resize_hpt_for_hotplug(unsigned long new_mem_size)
{
	unsigned target_hpt_shift;

	if (!mmu_hash_ops.resize_hpt)
		return 0;

	target_hpt_shift = htab_shift_for_mem_size(new_mem_size);

	/*
	 * To avoid lots of HPT resizes if memory size is fluctuating
	 * across a boundary, we deliberately have some hysteresis
	 * here: we immediately increase the HPT size if the target
	 * shift exceeds the current shift, but we won't attempt to
	 * reduce unless the target shift is at least 2 below the
	 * current shift
	 */
	if (target_hpt_shift > ppc64_pft_size ||
	    target_hpt_shift < ppc64_pft_size - 1)
		return mmu_hash_ops.resize_hpt(target_hpt_shift);

	return 0;
}
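
/*
 * Illustrative example of the hysteresis above: with a current HPT shift
 * of 26, hot-adding memory so that the target shift becomes 27 triggers an
 * immediate grow, while hot-removing memory down to a target of 25 does
 * nothing; only a target of 24 or less (at least 2 below) shrinks the
 * table.
 */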

int hash__create_section_mapping(unsigned long start, unsigned long end,
				 int nid, pgprot_t prot)
{
	int rc;

	if (end >= H_VMALLOC_START) {
		pr_warn("Outside the supported range\n");
		return -1;
	}

	rc = htab_bolt_mapping(start, end, __pa(start),
			       pgprot_val(prot), mmu_linear_psize,
			       mmu_kernel_ssize);

	if (rc < 0) {
		int rc2 = htab_remove_mapping(start, end, mmu_linear_psize,
					      mmu_kernel_ssize);
		BUG_ON(rc2 && (rc2 != -ENOENT));
	}
	return rc;
}

int hash__remove_section_mapping(unsigned long start, unsigned long end)
{
	int rc = htab_remove_mapping(start, end, mmu_linear_psize,
				     mmu_kernel_ssize);
	WARN_ON(rc < 0);
	return rc;
}
#endif /* CONFIG_MEMORY_HOTPLUG */

static void __init hash_init_partition_table(phys_addr_t hash_table,
					     unsigned long htab_size)
{
	mmu_partition_table_init();

	/*
	 * PS field (VRMA page size) is not used for LPID 0, hence set to 0.
	 * For now, UPRT is 0 and we have no segment table.
	 */
	htab_size = __ilog2(htab_size) - 18;
	mmu_partition_table_set_entry(0, hash_table | htab_size, 0, false);
	pr_info("Partition table %p\n", partition_tb);
}

static void __init htab_initialize(void)
{
	unsigned long table;
	unsigned long pteg_count;
	unsigned long prot;
	unsigned long base = 0, size = 0;
	struct memblock_region *reg;

	DBG(" -> htab_initialize()\n");

	if (mmu_has_feature(MMU_FTR_1T_SEGMENT)) {
		mmu_kernel_ssize = MMU_SEGSIZE_1T;
		mmu_highuser_ssize = MMU_SEGSIZE_1T;
		printk(KERN_INFO "Using 1TB segments\n");
	}

	/*
	 * Calculate the required size of the htab.  We want the number of
	 * PTEGs to equal one half the number of real pages.
	 */
	htab_size_bytes = htab_get_table_size();
	pteg_count = htab_size_bytes >> 7;

	htab_hash_mask = pteg_count - 1;

	if (firmware_has_feature(FW_FEATURE_LPAR) ||
	    firmware_has_feature(FW_FEATURE_PS3_LV1)) {
		/* Using a hypervisor which owns the htab */
		htab_address = NULL;
		_SDR1 = 0;
#ifdef CONFIG_FA_DUMP
		/*
		 * If firmware assisted dump is active firmware preserves
		 * the contents of htab along with entire partition memory.
		 * Clear the htab if firmware assisted dump is active so
		 * that we don't end up using old mappings.
		 */
		if (is_fadump_active() && mmu_hash_ops.hpte_clear_all)
			mmu_hash_ops.hpte_clear_all();
#endif
	} else {
		unsigned long limit = MEMBLOCK_ALLOC_ANYWHERE;

#ifdef CONFIG_PPC_CELL
		/*
		 * Cell may require the hash table down low when using the
		 * Axon IOMMU in order to fit the dynamic region over it, see
		 * comments in cell/iommu.c
		 */
		if (fdt_subnode_offset(initial_boot_params, 0, "axon") > 0) {
			limit = 0x80000000;
			pr_info("Hash table forced below 2G for Axon IOMMU\n");
		}
#endif /* CONFIG_PPC_CELL */

		table = memblock_phys_alloc_range(htab_size_bytes,
						  htab_size_bytes,
						  0, limit);
		if (!table)
			panic("ERROR: Failed to allocate %pa bytes below %pa\n",
			      &htab_size_bytes, &limit);

		DBG("Hash table allocated at %lx, size: %lx\n", table,
		    htab_size_bytes);

		htab_address = __va(table);

		/* htab absolute addr + encoded htabsize */
		_SDR1 = table + __ilog2(htab_size_bytes) - 18;
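
		/*
		 * Illustrative example of the encoding above: a 2^26 byte
		 * (64MB) hash table yields an HTABSIZE field of 26 - 18 = 8.
		 * Because the table was allocated with its own size as the
		 * alignment, the low bits of the address are zero, so the
		 * addition behaves like an OR of the two SDR1 fields.
		 */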

		/* Initialize the HPT with no entries */
		memset((void *)table, 0, htab_size_bytes);

		if (!cpu_has_feature(CPU_FTR_ARCH_300))
			/* Set SDR1 */
			mtspr(SPRN_SDR1, _SDR1);
		else
			hash_init_partition_table(table, htab_size_bytes);
	}

	prot = pgprot_val(PAGE_KERNEL);

#ifdef CONFIG_DEBUG_PAGEALLOC
	if (debug_pagealloc_enabled()) {
		linear_map_hash_count = memblock_end_of_DRAM() >> PAGE_SHIFT;
		linear_map_hash_slots = memblock_alloc_try_nid(
				linear_map_hash_count, 1, MEMBLOCK_LOW_LIMIT,
				ppc64_rma_size, NUMA_NO_NODE);
		if (!linear_map_hash_slots)
			panic("%s: Failed to allocate %lu bytes max_addr=%pa\n",
			      __func__, linear_map_hash_count, &ppc64_rma_size);
	}
#endif /* CONFIG_DEBUG_PAGEALLOC */

	/* create the bolted linear mapping in the hash table */
	for_each_memblock(memory, reg) {
		base = (unsigned long)__va(reg->base);
		size = reg->size;

		DBG("creating mapping for region: %lx..%lx (prot: %lx)\n",
		    base, size, prot);

		if ((base + size) >= H_VMALLOC_START) {
			pr_warn("Outside the supported range\n");
			continue;
		}

		BUG_ON(htab_bolt_mapping(base, base + size, __pa(base),
					 prot, mmu_linear_psize,
					 mmu_kernel_ssize));
	}
	memblock_set_current_limit(MEMBLOCK_ALLOC_ANYWHERE);

	/*
	 * If we have a memory_limit and we've allocated TCEs then we need to
	 * explicitly map the TCE area at the top of RAM. We also cope with the
	 * case that the TCEs start below memory_limit.
	 * tce_alloc_start/end are 16MB aligned so the mapping should work
	 * for either 4K or 16MB pages.
	 */
	if (tce_alloc_start) {
		tce_alloc_start = (unsigned long)__va(tce_alloc_start);
		tce_alloc_end = (unsigned long)__va(tce_alloc_end);

		if (base + size >= tce_alloc_start)
			tce_alloc_start = base + size + 1;

		BUG_ON(htab_bolt_mapping(tce_alloc_start, tce_alloc_end,
					 __pa(tce_alloc_start), prot,
					 mmu_linear_psize, mmu_kernel_ssize));
	}


	DBG(" <- htab_initialize()\n");
}
#undef KB
#undef MB

void __init hash__early_init_devtree(void)
{
	/* Initialize segment sizes */
	of_scan_flat_dt(htab_dt_scan_seg_sizes, NULL);

	/* Initialize page sizes */
	htab_scan_page_sizes();
}

static struct hash_mm_context init_hash_mm_context;
void __init hash__early_init_mmu(void)
{
#ifndef CONFIG_PPC_64K_PAGES
	/*
	 * We have code in __hash_page_4K() and elsewhere, which assumes it can
	 * do the following:
	 *   new_pte |= (slot << H_PAGE_F_GIX_SHIFT) & (H_PAGE_F_SECOND | H_PAGE_F_GIX);
	 *
	 * Where the slot number is between 0-15, and values of 8-15 indicate
	 * the secondary bucket. For that code to work H_PAGE_F_SECOND and
	 * H_PAGE_F_GIX must occupy four contiguous bits in the PTE, and
	 * H_PAGE_F_SECOND must be placed above H_PAGE_F_GIX. Assert that here
	 * with a BUILD_BUG_ON().
	 */
	BUILD_BUG_ON(H_PAGE_F_SECOND != (1ul << (H_PAGE_F_GIX_SHIFT + 3)));
#endif /* CONFIG_PPC_64K_PAGES */

	htab_init_page_sizes();

	/*
	 * initialize page table size
	 */
	__pte_frag_nr = H_PTE_FRAG_NR;
	__pte_frag_size_shift = H_PTE_FRAG_SIZE_SHIFT;
	__pmd_frag_nr = H_PMD_FRAG_NR;
	__pmd_frag_size_shift = H_PMD_FRAG_SIZE_SHIFT;

	__pte_index_size = H_PTE_INDEX_SIZE;
	__pmd_index_size = H_PMD_INDEX_SIZE;
	__pud_index_size = H_PUD_INDEX_SIZE;
	__pgd_index_size = H_PGD_INDEX_SIZE;
	__pud_cache_index = H_PUD_CACHE_INDEX;
	__pte_table_size = H_PTE_TABLE_SIZE;
	__pmd_table_size = H_PMD_TABLE_SIZE;
	__pud_table_size = H_PUD_TABLE_SIZE;
	__pgd_table_size = H_PGD_TABLE_SIZE;
	/*
	 * 4k uses the hugepd format, so for hash set them to
	 * zero
	 */
	__pmd_val_bits = HASH_PMD_VAL_BITS;
	__pud_val_bits = HASH_PUD_VAL_BITS;
	__pgd_val_bits = HASH_PGD_VAL_BITS;

	__kernel_virt_start = H_KERN_VIRT_START;
	__vmalloc_start = H_VMALLOC_START;
	__vmalloc_end = H_VMALLOC_END;
	__kernel_io_start = H_KERN_IO_START;
	__kernel_io_end = H_KERN_IO_END;
	vmemmap = (struct page *)H_VMEMMAP_START;
	ioremap_bot = IOREMAP_BASE;

#ifdef CONFIG_PCI
	pci_io_base = ISA_IO_BASE;
#endif

	/* Select appropriate backend */
	if (firmware_has_feature(FW_FEATURE_PS3_LV1))
		ps3_early_mm_init();
	else if (firmware_has_feature(FW_FEATURE_LPAR))
		hpte_init_pseries();
	else if (IS_ENABLED(CONFIG_PPC_NATIVE))
		hpte_init_native();

	if (!mmu_hash_ops.hpte_insert)
		panic("hash__early_init_mmu: No MMU hash ops defined!\n");

	/*
	 * Initialize the MMU Hash table and create the linear mapping
	 * of memory. Has to be done before SLB initialization as this is
	 * currently where the page size encoding is obtained.
	 */
	htab_initialize();

	init_mm.context.hash_context = &init_hash_mm_context;
	mm_ctx_set_slb_addr_limit(&init_mm.context, SLB_ADDR_LIMIT_DEFAULT);

	pr_info("Initializing hash mmu with SLB\n");
	/* Initialize SLB management */
	slb_initialize();

	if (cpu_has_feature(CPU_FTR_ARCH_206)
			&& cpu_has_feature(CPU_FTR_HVMODE))
		tlbiel_all();
}

#ifdef CONFIG_SMP
void hash__early_init_mmu_secondary(void)
{
	/* Initialize hash table for that CPU */
	if (!firmware_has_feature(FW_FEATURE_LPAR)) {

		if (!cpu_has_feature(CPU_FTR_ARCH_300))
			mtspr(SPRN_SDR1, _SDR1);
		else
			set_ptcr_when_no_uv(__pa(partition_tb) |
					    (PATB_SIZE_SHIFT - 12));
	}
	/* Initialize SLB */
	slb_initialize();

	if (cpu_has_feature(CPU_FTR_ARCH_206)
			&& cpu_has_feature(CPU_FTR_HVMODE))
		tlbiel_all();
}
#endif /* CONFIG_SMP */

/*
 * Called by asm hashtable.S for doing lazy icache flush
 */
unsigned int hash_page_do_lazy_icache(unsigned int pp, pte_t pte, int trap)
{
	struct page *page;

	if (!pfn_valid(pte_pfn(pte)))
		return pp;

	page = pte_page(pte);

	/* page is dirty */
	if (!test_bit(PG_arch_1, &page->flags) && !PageReserved(page)) {
		if (trap == 0x400) {
			flush_dcache_icache_page(page);
			set_bit(PG_arch_1, &page->flags);
		} else
			pp |= HPTE_R_N;
	}
	return pp;
}

#ifdef CONFIG_PPC_MM_SLICES
static unsigned int get_paca_psize(unsigned long addr)
{
	unsigned char *psizes;
	unsigned long index, mask_index;

	if (addr < SLICE_LOW_TOP) {
		psizes = get_paca()->mm_ctx_low_slices_psize;
		index = GET_LOW_SLICE_INDEX(addr);
	} else {
		psizes = get_paca()->mm_ctx_high_slices_psize;
		index = GET_HIGH_SLICE_INDEX(addr);
	}
	mask_index = index & 0x1;
	return (psizes[index >> 1] >> (mask_index * 4)) & 0xF;
}

#else
unsigned int get_paca_psize(unsigned long addr)
{
	return get_paca()->mm_ctx_user_psize;
}
#endif
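
/*
 * Illustrative example of the packing used by get_paca_psize() above: each
 * slice's page size is a 4-bit value, two slices per byte, so slice index
 * 5 lives in psizes[2]; mask_index = 1 selects the high nibble, giving
 * (psizes[2] >> 4) & 0xF.
 */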

/*
 * Demote a segment to using 4k pages.
 * For now this makes the whole process use 4k pages.
 */
#ifdef CONFIG_PPC_64K_PAGES
void demote_segment_4k(struct mm_struct *mm, unsigned long addr)
{
	if (get_slice_psize(mm, addr) == MMU_PAGE_4K)
		return;
	slice_set_range_psize(mm, addr, 1, MMU_PAGE_4K);
	copro_flush_all_slbs(mm);
	if ((get_paca_psize(addr) != MMU_PAGE_4K) && (current->mm == mm)) {

		copy_mm_to_paca(mm);
		slb_flush_and_restore_bolted();
	}
}
#endif /* CONFIG_PPC_64K_PAGES */

#ifdef CONFIG_PPC_SUBPAGE_PROT
/*
 * This looks up a 2-bit protection code for a 4k subpage of a 64k page.
 * Userspace sets the subpage permissions using the subpage_prot system call.
 *
 * Result is 0: full permissions, _PAGE_RW: read-only,
 * _PAGE_RWX: no access.
 */
static int subpage_protection(struct mm_struct *mm, unsigned long ea)
{
	struct subpage_prot_table *spt = mm_ctx_subpage_prot(&mm->context);
	u32 spp = 0;
	u32 **sbpm, *sbpp;

	if (!spt)
		return 0;

	if (ea >= spt->maxaddr)
		return 0;
	if (ea < 0x100000000UL) {
		/* addresses below 4GB use spt->low_prot */
		sbpm = spt->low_prot;
	} else {
		sbpm = spt->protptrs[ea >> SBP_L3_SHIFT];
		if (!sbpm)
			return 0;
	}
	sbpp = sbpm[(ea >> SBP_L2_SHIFT) & (SBP_L2_COUNT - 1)];
	if (!sbpp)
		return 0;
	spp = sbpp[(ea >> PAGE_SHIFT) & (SBP_L1_COUNT - 1)];

	/* extract 2-bit bitfield for this 4k subpage */
	spp >>= 30 - 2 * ((ea >> 12) & 0xf);

	/*
	 * 0 -> full permission
	 * 1 -> Read only
	 * 2 -> no access.
	 * We return the flags that need to be cleared.
	 */
	spp = ((spp & 2) ? _PAGE_RWX : 0) | ((spp & 1) ? _PAGE_WRITE : 0);
	return spp;
}

#else /* CONFIG_PPC_SUBPAGE_PROT */
static inline int subpage_protection(struct mm_struct *mm, unsigned long ea)
{
	return 0;
}
#endif
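
/*
 * Illustrative example of the extraction above: the 16 subpage codes of a
 * 64k page are packed two bits each into one 32-bit word, most significant
 * pair first. For subpage index 3 (ea bits 12-15 equal to 3) the shift is
 * 30 - 6 = 24, so bits 25:24 of the word give the code for that 4k
 * subpage.
 */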

void hash_failure_debug(unsigned long ea, unsigned long access,
			unsigned long vsid, unsigned long trap,
			int ssize, int psize, int lpsize, unsigned long pte)
{
	if (!printk_ratelimit())
		return;
	pr_info("mm: Hashing failure ! EA=0x%lx access=0x%lx current=%s\n",
		ea, access, current->comm);
	pr_info("    trap=0x%lx vsid=0x%lx ssize=%d base psize=%d psize %d pte=0x%lx\n",
		trap, vsid, ssize, psize, lpsize, pte);
}

static void check_paca_psize(unsigned long ea, struct mm_struct *mm,
			     int psize, bool user_region)
{
	if (user_region) {
		if (psize != get_paca_psize(ea)) {
			copy_mm_to_paca(mm);
			slb_flush_and_restore_bolted();
		}
	} else if (get_paca()->vmalloc_sllp !=
		   mmu_psize_defs[mmu_vmalloc_psize].sllp) {
		get_paca()->vmalloc_sllp =
			mmu_psize_defs[mmu_vmalloc_psize].sllp;
		slb_vmalloc_update();
	}
}

/*
 * Result code is:
 *  0 - handled
 *  1 - normal page fault
 * -1 - critical hash insertion error
 * -2 - access not permitted by subpage protection mechanism
 */
int hash_page_mm(struct mm_struct *mm, unsigned long ea,
		 unsigned long access, unsigned long trap,
		 unsigned long flags)
{
	bool is_thp;
	enum ctx_state prev_state = exception_enter();
	pgd_t *pgdir;
	unsigned long vsid;
	pte_t *ptep;
	unsigned hugeshift;
	int rc, user_region = 0;
	int psize, ssize;

	DBG_LOW("hash_page(ea=%016lx, access=%lx, trap=%lx\n",
		ea, access, trap);
	trace_hash_fault(ea, access, trap);

	/* Get region & vsid */
	switch (get_region_id(ea)) {
	case USER_REGION_ID:
		user_region = 1;
		if (!mm) {
			DBG_LOW(" user region with no mm !\n");
			rc = 1;
			goto bail;
		}
		psize = get_slice_psize(mm, ea);
		ssize = user_segment_size(ea);
		vsid = get_user_vsid(&mm->context, ea, ssize);
		break;
	case VMALLOC_REGION_ID:
		vsid = get_kernel_vsid(ea, mmu_kernel_ssize);
		psize = mmu_vmalloc_psize;
		ssize = mmu_kernel_ssize;
		break;

	case IO_REGION_ID:
		vsid = get_kernel_vsid(ea, mmu_kernel_ssize);
		psize = mmu_io_psize;
		ssize = mmu_kernel_ssize;
		break;
	default:
		/*
		 * Not a valid range
		 * Send the problem up to do_page_fault()
		 */
		rc = 1;
		goto bail;
	}
	DBG_LOW(" mm=%p, mm->pgdir=%p, vsid=%016lx\n", mm, mm->pgd, vsid);

	/* Bad address. */
	if (!vsid) {
		DBG_LOW("Bad address!\n");
		rc = 1;
		goto bail;
	}
	/* Get pgdir */
	pgdir = mm->pgd;
	if (pgdir == NULL) {
		rc = 1;
		goto bail;
	}

	/* Check CPU locality */
	if (user_region && mm_is_thread_local(mm))
		flags |= HPTE_LOCAL_UPDATE;

#ifndef CONFIG_PPC_64K_PAGES
	/*
	 * If we use 4K pages and our psize is not 4K, then we might
	 * be hitting a special driver mapping, and need to align the
	 * address before we fetch the PTE.
	 *
	 * It could also be a hugepage mapping, in which case this is
	 * not necessary, but it's not harmful, either.
	 */
	if (psize != MMU_PAGE_4K)
		ea &= ~((1ul << mmu_psize_defs[psize].shift) - 1);
#endif /* CONFIG_PPC_64K_PAGES */

	/* Get PTE and page size from page tables */
	ptep = find_linux_pte(pgdir, ea, &is_thp, &hugeshift);
	if (ptep == NULL || !pte_present(*ptep)) {
		DBG_LOW(" no PTE !\n");
		rc = 1;
		goto bail;
	}

	/* Add _PAGE_PRESENT to the required access perm */
	access |= _PAGE_PRESENT;

	/*
	 * Pre-check access permissions (will be re-checked atomically
	 * in __hash_page_XX but this pre-check is a fast path)
	 */
	if (!check_pte_access(access, pte_val(*ptep))) {
		DBG_LOW(" no access !\n");
		rc = 1;
		goto bail;
	}

	if (hugeshift) {
		if (is_thp)
			rc = __hash_page_thp(ea, access, vsid, (pmd_t *)ptep,
					     trap, flags, ssize, psize);
#ifdef CONFIG_HUGETLB_PAGE
		else
			rc = __hash_page_huge(ea, access, vsid, ptep, trap,
					      flags, ssize, hugeshift, psize);
#else
		else {
			/*
			 * if we have hugeshift, and it is not transhuge, with
			 * hugetlb disabled, something is really wrong.
			 */
			rc = 1;
			WARN_ON(1);
		}
#endif
		if (current->mm == mm)
			check_paca_psize(ea, mm, psize, user_region);

		goto bail;
	}

#ifndef CONFIG_PPC_64K_PAGES
	DBG_LOW(" i-pte: %016lx\n", pte_val(*ptep));
#else
	DBG_LOW(" i-pte: %016lx %016lx\n", pte_val(*ptep),
		pte_val(*(ptep + PTRS_PER_PTE)));
#endif
	/* Do actual hashing */
#ifdef CONFIG_PPC_64K_PAGES
	/* If H_PAGE_4K_PFN is set, make sure this is a 4k segment */
	if ((pte_val(*ptep) & H_PAGE_4K_PFN) && psize == MMU_PAGE_64K) {
		demote_segment_4k(mm, ea);
		psize = MMU_PAGE_4K;
	}

	/*
	 * If this PTE is non-cacheable and we have restrictions on
	 * using non cacheable large pages, then we switch to 4k
	 */
	if (mmu_ci_restrictions && psize == MMU_PAGE_64K && pte_ci(*ptep)) {
		if (user_region) {
			demote_segment_4k(mm, ea);
			psize = MMU_PAGE_4K;
		} else if (ea < VMALLOC_END) {
			/*
			 * some driver did a non-cacheable mapping
			 * in vmalloc space, so switch vmalloc
			 * to 4k pages
			 */
			printk(KERN_ALERT "Reducing vmalloc segment "
			       "to 4kB pages because of "
			       "non-cacheable mapping\n");
			psize = mmu_vmalloc_psize = MMU_PAGE_4K;
			copro_flush_all_slbs(mm);
		}
	}

#endif /* CONFIG_PPC_64K_PAGES */

	if (current->mm == mm)
		check_paca_psize(ea, mm, psize, user_region);

#ifdef CONFIG_PPC_64K_PAGES
	if (psize == MMU_PAGE_64K)
		rc = __hash_page_64K(ea, access, vsid, ptep, trap,
				     flags, ssize);
	else
#endif /* CONFIG_PPC_64K_PAGES */
	{
		int spp = subpage_protection(mm, ea);
		if (access & spp)
			rc = -2;
		else
			rc = __hash_page_4K(ea, access, vsid, ptep, trap,
					    flags, ssize, spp);
	}

	/*
	 * Dump some info in case of hash insertion failure; such failures
	 * should never happen, so it is really useful to know if/when they do
	 */
	if (rc == -1)
		hash_failure_debug(ea, access, vsid, trap, ssize, psize,
				   psize, pte_val(*ptep));
#ifndef CONFIG_PPC_64K_PAGES
	DBG_LOW(" o-pte: %016lx\n", pte_val(*ptep));
#else
	DBG_LOW(" o-pte: %016lx %016lx\n", pte_val(*ptep),
		pte_val(*(ptep + PTRS_PER_PTE)));
#endif
	DBG_LOW(" -> rc=%d\n", rc);

bail:
	exception_exit(prev_state);
	return rc;
}
EXPORT_SYMBOL_GPL(hash_page_mm);

int hash_page(unsigned long ea, unsigned long access, unsigned long trap,
	      unsigned long dsisr)
{
	unsigned long flags = 0;
	struct mm_struct *mm = current->mm;

	if ((get_region_id(ea) == VMALLOC_REGION_ID) ||
	    (get_region_id(ea) == IO_REGION_ID))
		mm = &init_mm;

	if (dsisr & DSISR_NOHPTE)
		flags |= HPTE_NOHPTE_UPDATE;

	return hash_page_mm(mm, ea, access, trap, flags);
}
EXPORT_SYMBOL_GPL(hash_page);

int __hash_page(unsigned long trap, unsigned long ea, unsigned long dsisr,
		unsigned long msr)
{
	unsigned long access = _PAGE_PRESENT | _PAGE_READ;
	unsigned long flags = 0;
	struct mm_struct *mm = current->mm;
	unsigned int region_id = get_region_id(ea);

	if ((region_id == VMALLOC_REGION_ID) || (region_id == IO_REGION_ID))
		mm = &init_mm;

	if (dsisr & DSISR_NOHPTE)
		flags |= HPTE_NOHPTE_UPDATE;

	if (dsisr & DSISR_ISSTORE)
		access |= _PAGE_WRITE;
	/*
	 * We set _PAGE_PRIVILEGED only when
	 * kernel mode accesses kernel space.
	 *
	 * _PAGE_PRIVILEGED is NOT set
	 * 1) when kernel mode accesses user space
	 * 2) when user space accesses kernel space.
	 */
	access |= _PAGE_PRIVILEGED;
	if ((msr & MSR_PR) || (region_id == USER_REGION_ID))
		access &= ~_PAGE_PRIVILEGED;

	if (trap == 0x400)
		access |= _PAGE_EXEC;

	return hash_page_mm(mm, ea, access, trap, flags);
}
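
/*
 * Illustrative example of the access-flag construction above: a user-mode
 * store fault (MSR_PR set, DSISR_ISSTORE set) in the user region builds
 * access = _PAGE_PRESENT | _PAGE_READ | _PAGE_WRITE with _PAGE_PRIVILEGED
 * cleared; a kernel-mode instruction fault (trap 0x400) in a kernel region
 * keeps _PAGE_PRIVILEGED and adds _PAGE_EXEC.
 */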

#ifdef CONFIG_PPC_MM_SLICES
static bool should_hash_preload(struct mm_struct *mm, unsigned long ea)
{
	int psize = get_slice_psize(mm, ea);

	/* We only prefault standard pages for now */
	if (unlikely(psize != mm_ctx_user_psize(&mm->context)))
		return false;

	/*
	 * Don't prefault if subpage protection is enabled for the EA.
	 */
	if (unlikely((psize == MMU_PAGE_4K) && subpage_protection(mm, ea)))
		return false;

	return true;
}
#else
static bool should_hash_preload(struct mm_struct *mm, unsigned long ea)
{
	return true;
}
#endif

static void hash_preload(struct mm_struct *mm, unsigned long ea,
			 bool is_exec, unsigned long trap)
{
	int hugepage_shift;
	unsigned long vsid;
	pgd_t *pgdir;
	pte_t *ptep;
	unsigned long flags;
	int rc, ssize, update_flags = 0;
	unsigned long access = _PAGE_PRESENT | _PAGE_READ | (is_exec ? _PAGE_EXEC : 0);

	BUG_ON(get_region_id(ea) != USER_REGION_ID);

	if (!should_hash_preload(mm, ea))
		return;

	DBG_LOW("hash_preload(mm=%p, mm->pgdir=%p, ea=%016lx, access=%lx,"
		" trap=%lx\n", mm, mm->pgd, ea, access, trap);

	/* Get Linux PTE if available */
	pgdir = mm->pgd;
	if (pgdir == NULL)
		return;

	/* Get VSID */
	ssize = user_segment_size(ea);
	vsid = get_user_vsid(&mm->context, ea, ssize);
	if (!vsid)
		return;
	/*
	 * Hash doesn't like irqs. Walking linux page table with irq disabled
	 * saves us from holding multiple locks.
	 */
	local_irq_save(flags);

	/*
	 * THP pages use update_mmu_cache_pmd. We don't do
	 * hash preload there. Hence can ignore THP here
	 */
	ptep = find_current_mm_pte(pgdir, ea, NULL, &hugepage_shift);
	if (!ptep)
		goto out_exit;

	WARN_ON(hugepage_shift);
#ifdef CONFIG_PPC_64K_PAGES
	/* If either H_PAGE_4K_PFN or cache inhibited is set (and we are on
	 * a 64K kernel), then we don't preload, hash_page() will take
	 * care of it once we actually try to access the page.
	 * That way we don't have to duplicate all of the logic for segment
	 * page size demotion here
	 */
	if ((pte_val(*ptep) & H_PAGE_4K_PFN) || pte_ci(*ptep))
		goto out_exit;
#endif /* CONFIG_PPC_64K_PAGES */

	/* Is that local to this CPU? */
	if (mm_is_thread_local(mm))
		update_flags |= HPTE_LOCAL_UPDATE;

	/* Hash it in */
#ifdef CONFIG_PPC_64K_PAGES
	if (mm_ctx_user_psize(&mm->context) == MMU_PAGE_64K)
		rc = __hash_page_64K(ea, access, vsid, ptep, trap,
				     update_flags, ssize);
	else
#endif /* CONFIG_PPC_64K_PAGES */
		rc = __hash_page_4K(ea, access, vsid, ptep, trap, update_flags,
				    ssize, subpage_protection(mm, ea));

	/*
	 * Dump some info in case of hash insertion failure; such failures
	 * should never happen, so it is really useful to know if/when they do
	 */
	if (rc == -1)
		hash_failure_debug(ea, access, vsid, trap, ssize,
				   mm_ctx_user_psize(&mm->context),
				   mm_ctx_user_psize(&mm->context),
				   pte_val(*ptep));
out_exit:
	local_irq_restore(flags);
}

/*
 * This is called at the end of handling a user page fault, when the
 * fault has been handled by updating a PTE in the linux page tables.
 * We use it to preload an HPTE into the hash table corresponding to
 * the updated linux PTE.
 *
 * This must always be called with the pte lock held.
 */
void update_mmu_cache(struct vm_area_struct *vma, unsigned long address,
		      pte_t *ptep)
{
	/*
	 * We don't need to worry about _PAGE_PRESENT here because we are
	 * called with either mm->page_table_lock held or ptl lock held
	 */
	unsigned long trap;
	bool is_exec;

	if (radix_enabled()) {
		prefetch((void *)address);
		return;
	}

	/* We only want HPTEs for linux PTEs that have _PAGE_ACCESSED set */
	if (!pte_young(*ptep) || address >= TASK_SIZE)
		return;

	/*
	 * We try to figure out if we are coming from an instruction
	 * access fault and pass that down to __hash_page so we avoid
	 * double-faulting on execution of fresh text. We have to test
	 * for regs NULL since init will get here first thing at boot.
	 *
	 * We also avoid filling the hash if not coming from a fault.
	 */

	trap = current->thread.regs ? TRAP(current->thread.regs) : 0UL;
	switch (trap) {
	case 0x300:
		is_exec = false;
		break;
	case 0x400:
		is_exec = true;
		break;
	default:
		return;
	}

	hash_preload(vma->vm_mm, address, is_exec, trap);
}

#ifdef CONFIG_PPC_MEM_KEYS
/*
 * Return the protection key associated with the given address and the
 * mm_struct.
 */
u16 get_mm_addr_key(struct mm_struct *mm, unsigned long address)
{
	pte_t *ptep;
	u16 pkey = 0;
	unsigned long flags;

	if (!mm || !mm->pgd)
		return 0;

	local_irq_save(flags);
	ptep = find_linux_pte(mm->pgd, address, NULL, NULL);
	if (ptep)
		pkey = pte_to_pkey_bits(pte_val(READ_ONCE(*ptep)));
	local_irq_restore(flags);

	return pkey;
}
#endif /* CONFIG_PPC_MEM_KEYS */

#ifdef CONFIG_PPC_TRANSACTIONAL_MEM
static inline void tm_flush_hash_page(int local)
{
	/*
	 * Transactions are not aborted by tlbiel, only tlbie. Without an
	 * abort, syncing a page back to a block device w/PIO could pick up
	 * transactional data (bad!), so we force an abort here. Before the
	 * sync the page will be made read-only, which will flush_hash_page.
	 * BIG ISSUE here: if the kernel uses a page from userspace without
	 * unmapping it first, it may see the speculated version.
	 */
	if (local && cpu_has_feature(CPU_FTR_TM) && current->thread.regs &&
	    MSR_TM_ACTIVE(current->thread.regs->msr)) {
		tm_enable();
		tm_abort(TM_CAUSE_TLBI);
	}
}
#else
static inline void tm_flush_hash_page(int local)
{
}
#endif

/*
 * Return the global hash slot, corresponding to the given PTE, which contains
 * the HPTE.
 */
unsigned long pte_get_hash_gslot(unsigned long vpn, unsigned long shift,
		int ssize, real_pte_t rpte, unsigned int subpg_index)
{
	unsigned long hash, gslot, hidx;

	hash = hpt_hash(vpn, shift, ssize);
	hidx = __rpte_to_hidx(rpte, subpg_index);
	if (hidx & _PTEIDX_SECONDARY)
		hash = ~hash;
	gslot = (hash & htab_hash_mask) * HPTES_PER_GROUP;
	gslot += hidx & _PTEIDX_GROUP_IX;
	return gslot;
}
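
/*
 * Illustrative example of the slot lookup above: a hidx of 0b1010 means
 * the entry lives in the secondary bucket (the _PTEIDX_SECONDARY bit is
 * set), so the hash is inverted, and the low three bits (_PTEIDX_GROUP_IX)
 * select slot 2 within that group's HPTES_PER_GROUP entries.
 */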

/*
 * WARNING: This is called from hash_low_64.S, if you change this prototype,
 * do not forget to update the assembly call site !
 */
void flush_hash_page(unsigned long vpn, real_pte_t pte, int psize, int ssize,
		     unsigned long flags)
{
	unsigned long index, shift, gslot;
	int local = flags & HPTE_LOCAL_UPDATE;

	DBG_LOW("flush_hash_page(vpn=%016lx)\n", vpn);
	pte_iterate_hashed_subpages(pte, psize, vpn, index, shift) {
		gslot = pte_get_hash_gslot(vpn, shift, ssize, pte, index);
		DBG_LOW(" sub %ld: gslot=%lx\n", index, gslot);
		/*
		 * We use same base page size and actual psize, because we don't
		 * use these functions for hugepage
		 */
		mmu_hash_ops.hpte_invalidate(gslot, vpn, psize, psize,
					     ssize, local);
	} pte_iterate_hashed_end();

	tm_flush_hash_page(local);
}

#ifdef CONFIG_TRANSPARENT_HUGEPAGE
void flush_hash_hugepage(unsigned long vsid, unsigned long addr,
			 pmd_t *pmdp, unsigned int psize, int ssize,
			 unsigned long flags)
{
	int i, max_hpte_count, valid;
	unsigned long s_addr;
	unsigned char *hpte_slot_array;
	unsigned long hidx, shift, vpn, hash, slot;
	int local = flags & HPTE_LOCAL_UPDATE;

	s_addr = addr & HPAGE_PMD_MASK;
	hpte_slot_array = get_hpte_slot_array(pmdp);
	/*
	 * If we try to do a HUGE PTE update after a withdraw is done,
	 * we will find the below NULL. This happens when we do
	 * split_huge_pmd
	 */
	if (!hpte_slot_array)
		return;

	if (mmu_hash_ops.hugepage_invalidate) {
		mmu_hash_ops.hugepage_invalidate(vsid, s_addr, hpte_slot_array,
						 psize, ssize, local);
		goto tm_abort;
	}
	/*
	 * No bulk hpte removal support, invalidate each entry
	 */
	shift = mmu_psize_defs[psize].shift;
	max_hpte_count = HPAGE_PMD_SIZE >> shift;
	for (i = 0; i < max_hpte_count; i++) {
		/*
		 * 8 bits per each hpte entry
		 * 000| [ secondary group (one bit) | hidx (3 bits) | valid bit]
		 */
		valid = hpte_valid(hpte_slot_array, i);
		if (!valid)
			continue;
		hidx = hpte_hash_index(hpte_slot_array, i);

		/* get the vpn */
		addr = s_addr + (i * (1ul << shift));
		vpn = hpt_vpn(addr, vsid, ssize);
		hash = hpt_hash(vpn, shift, ssize);
		if (hidx & _PTEIDX_SECONDARY)
			hash = ~hash;

		slot = (hash & htab_hash_mask) * HPTES_PER_GROUP;
		slot += hidx & _PTEIDX_GROUP_IX;
		mmu_hash_ops.hpte_invalidate(slot, vpn, psize,
					     MMU_PAGE_16M, ssize, local);
	}
tm_abort:
	tm_flush_hash_page(local);
}
#endif /* CONFIG_TRANSPARENT_HUGEPAGE */

void flush_hash_range(unsigned long number, int local)
{
	if (mmu_hash_ops.flush_hash_range)
		mmu_hash_ops.flush_hash_range(number, local);
	else {
		int i;
		struct ppc64_tlb_batch *batch =
			this_cpu_ptr(&ppc64_tlb_batch);

		for (i = 0; i < number; i++)
			flush_hash_page(batch->vpn[i], batch->pte[i],
					batch->psize, batch->ssize, local);
	}
}

/*
 * low_hash_fault is called when the low level hash code failed to
 * insert a PTE due to a hypervisor error
 */
void low_hash_fault(struct pt_regs *regs, unsigned long address, int rc)
{
	enum ctx_state prev_state = exception_enter();

	if (user_mode(regs)) {
#ifdef CONFIG_PPC_SUBPAGE_PROT
		if (rc == -2)
			_exception(SIGSEGV, regs, SEGV_ACCERR, address);
		else
#endif
			_exception(SIGBUS, regs, BUS_ADRERR, address);
	} else
		bad_page_fault(regs, address, SIGBUS);

	exception_exit(prev_state);
}

long hpte_insert_repeating(unsigned long hash, unsigned long vpn,
			   unsigned long pa, unsigned long rflags,
			   unsigned long vflags, int psize, int ssize)
{
	unsigned long hpte_group;
	long slot;

repeat:
	hpte_group = (hash & htab_hash_mask) * HPTES_PER_GROUP;

	/* Insert into the hash table, primary slot */
	slot = mmu_hash_ops.hpte_insert(hpte_group, vpn, pa, rflags, vflags,
					psize, psize, ssize);

	/* Primary is full, try the secondary */
	if (unlikely(slot == -1)) {
		hpte_group = (~hash & htab_hash_mask) * HPTES_PER_GROUP;
		slot = mmu_hash_ops.hpte_insert(hpte_group, vpn, pa, rflags,
						vflags | HPTE_V_SECONDARY,
						psize, psize, ssize);
		if (slot == -1) {
			if (mftb() & 0x1)
				hpte_group = (hash & htab_hash_mask) *
						HPTES_PER_GROUP;

			mmu_hash_ops.hpte_remove(hpte_group);
			goto repeat;
		}
	}

	return slot;
}
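
/*
 * Illustrative note on the retry path above: when both the primary and
 * secondary groups are full, the low bit of the timebase is used as a
 * cheap coin flip to pick one of the two groups, a victim entry is
 * removed from it, and the insert is retried from the top.
 */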

#ifdef CONFIG_DEBUG_PAGEALLOC
static void kernel_map_linear_page(unsigned long vaddr, unsigned long lmi)
{
	unsigned long hash;
	unsigned long vsid = get_kernel_vsid(vaddr, mmu_kernel_ssize);
	unsigned long vpn = hpt_vpn(vaddr, vsid, mmu_kernel_ssize);
	unsigned long mode = htab_convert_pte_flags(pgprot_val(PAGE_KERNEL));
	long ret;

	hash = hpt_hash(vpn, PAGE_SHIFT, mmu_kernel_ssize);

	/* Don't create HPTE entries for bad address */
	if (!vsid)
		return;

	ret = hpte_insert_repeating(hash, vpn, __pa(vaddr), mode,
				    HPTE_V_BOLTED,
				    mmu_linear_psize, mmu_kernel_ssize);

	BUG_ON(ret < 0);
	spin_lock(&linear_map_hash_lock);
	BUG_ON(linear_map_hash_slots[lmi] & 0x80);
	linear_map_hash_slots[lmi] = ret | 0x80;
	spin_unlock(&linear_map_hash_lock);
}

static void kernel_unmap_linear_page(unsigned long vaddr, unsigned long lmi)
{
	unsigned long hash, hidx, slot;
	unsigned long vsid = get_kernel_vsid(vaddr, mmu_kernel_ssize);
	unsigned long vpn = hpt_vpn(vaddr, vsid, mmu_kernel_ssize);

	hash = hpt_hash(vpn, PAGE_SHIFT, mmu_kernel_ssize);
	spin_lock(&linear_map_hash_lock);
	BUG_ON(!(linear_map_hash_slots[lmi] & 0x80));
	hidx = linear_map_hash_slots[lmi] & 0x7f;
	linear_map_hash_slots[lmi] = 0;
	spin_unlock(&linear_map_hash_lock);
	if (hidx & _PTEIDX_SECONDARY)
		hash = ~hash;
	slot = (hash & htab_hash_mask) * HPTES_PER_GROUP;
	slot += hidx & _PTEIDX_GROUP_IX;
	mmu_hash_ops.hpte_invalidate(slot, vpn, mmu_linear_psize,
				     mmu_linear_psize,
				     mmu_kernel_ssize, 0);
}

void __kernel_map_pages(struct page *page, int numpages, int enable)
{
	unsigned long flags, vaddr, lmi;
	int i;

	local_irq_save(flags);
	for (i = 0; i < numpages; i++, page++) {
		vaddr = (unsigned long)page_address(page);
		lmi = __pa(vaddr) >> PAGE_SHIFT;
		if (lmi >= linear_map_hash_count)
			continue;
		if (enable)
			kernel_map_linear_page(vaddr, lmi);
		else
			kernel_unmap_linear_page(vaddr, lmi);
	}
	local_irq_restore(flags);
}
#endif /* CONFIG_DEBUG_PAGEALLOC */

void hash__setup_initial_memory_limit(phys_addr_t first_memblock_base,
				      phys_addr_t first_memblock_size)
{
	/*
	 * We don't currently support the first MEMBLOCK not mapping 0
	 * physical on those processors
	 */
	BUG_ON(first_memblock_base != 0);

	/*
	 * On virtualized systems the first entry is our RMA region aka VRMA,
	 * non-virtualized 64-bit hash MMU systems don't have a limitation
	 * on real mode access.
	 *
	 * For guests on platforms before POWER9, we clamp the limit to 1G
	 * to avoid some funky things such as RTAS bugs etc...
	 *
	 * On POWER9 we limit to 1TB in case the host erroneously told us that
	 * the RMA was >1TB. Effective address bits 0:23 are treated as zero
	 * (meaning the access is aliased to zero i.e. addr = addr % 1TB)
	 * for virtual real mode addressing and so it doesn't make sense to
	 * have an area larger than 1TB as it can't be addressed.
	 */
	if (!early_cpu_has_feature(CPU_FTR_HVMODE)) {
		ppc64_rma_size = first_memblock_size;
		if (!early_cpu_has_feature(CPU_FTR_ARCH_300))
			ppc64_rma_size = min_t(u64, ppc64_rma_size, 0x40000000);
		else
			ppc64_rma_size = min_t(u64, ppc64_rma_size,
					       1UL << SID_SHIFT_1T);

		/* Finally limit subsequent allocations */
		memblock_set_current_limit(ppc64_rma_size);
	} else {
		ppc64_rma_size = ULONG_MAX;
	}
}

#ifdef CONFIG_DEBUG_FS

static int hpt_order_get(void *data, u64 *val)
{
	*val = ppc64_pft_size;
	return 0;
}

static int hpt_order_set(void *data, u64 val)
{
	int ret;

	if (!mmu_hash_ops.resize_hpt)
		return -ENODEV;

	cpus_read_lock();
	ret = mmu_hash_ops.resize_hpt(val);
	cpus_read_unlock();

	return ret;
}

DEFINE_DEBUGFS_ATTRIBUTE(fops_hpt_order, hpt_order_get, hpt_order_set, "%llu\n");

static int __init hash64_debugfs(void)
{
	debugfs_create_file("hpt_order", 0600, powerpc_debugfs_root, NULL,
			    &fops_hpt_order);
	return 0;
}
machine_device_initcall(pseries, hash64_debugfs);
#endif /* CONFIG_DEBUG_FS */

void __init print_system_hash_info(void)
{
	pr_info("ppc64_pft_size    = 0x%llx\n", ppc64_pft_size);

	if (htab_hash_mask)
		pr_info("htab_hash_mask    = 0x%lx\n", htab_hash_mask);
}