/*
 * This file contains ioremap and related functions for 64-bit machines.
 *
 * Derived from arch/ppc64/mm/init.c
 *   Copyright (C) 1995-1996 Gary Thomas (gdt@linuxppc.org)
 *
 * Modifications by Paul Mackerras (PowerMac) (paulus@samba.org)
 * and Cort Dougan (PReP) (cort@cs.nmt.edu)
 *   Copyright (C) 1996 Paul Mackerras
 *
 * Derived from "arch/i386/mm/init.c"
 *   Copyright (C) 1991, 1992, 1993, 1994 Linus Torvalds
 *
 * Dave Engebretsen <engebret@us.ibm.com>
 *   Rework for PPC64 port.
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License
 * as published by the Free Software Foundation; either version
 * 2 of the License, or (at your option) any later version.
 *
 */

#include <linux/signal.h>
#include <linux/sched.h>
#include <linux/kernel.h>
#include <linux/errno.h>
#include <linux/string.h>
#include <linux/export.h>
#include <linux/types.h>
#include <linux/mman.h>
#include <linux/mm.h>
#include <linux/swap.h>
#include <linux/stddef.h>
#include <linux/vmalloc.h>
#include <linux/memblock.h>
#include <linux/slab.h>
#include <linux/hugetlb.h>

#include <asm/pgalloc.h>
#include <asm/page.h>
#include <asm/prom.h>
#include <asm/io.h>
#include <asm/mmu_context.h>
#include <asm/pgtable.h>
#include <asm/mmu.h>
#include <asm/smp.h>
#include <asm/machdep.h>
#include <asm/tlb.h>
#include <asm/processor.h>
#include <asm/cputable.h>
#include <asm/sections.h>
#include <asm/firmware.h>
#include <asm/dma.h>

#include "mmu_decl.h"

#define CREATE_TRACE_POINTS
#include <trace/events/thp.h>

/* Some sanity checking */
#if TASK_SIZE_USER64 > PGTABLE_RANGE
#error TASK_SIZE_USER64 exceeds pagetable range
#endif

#ifdef CONFIG_PPC_STD_MMU_64
#if TASK_SIZE_USER64 > (1UL << (ESID_BITS + SID_SHIFT))
#error TASK_SIZE_USER64 exceeds user VSID range
#endif
#endif

#ifdef CONFIG_PPC_BOOK3S_64
/*
 * Partition table and process table for ISA 3.0
 */
struct prtb_entry *process_tb;
struct patb_entry *partition_tb;
#endif
unsigned long ioremap_bot = IOREMAP_BASE;

/**
 * __ioremap_at - Low level function to establish the page tables
 *                for an IO mapping
 */
void __iomem * __ioremap_at(phys_addr_t pa, void *ea, unsigned long size,
			    unsigned long flags)
{
	unsigned long i;

	/* Make sure we have the base flags */
	if ((flags & _PAGE_PRESENT) == 0)
		flags |= pgprot_val(PAGE_KERNEL);

	/* We don't support the 4K PFN hack with ioremap */
	if (flags & H_PAGE_4K_PFN)
		return NULL;

	WARN_ON(pa & ~PAGE_MASK);
	WARN_ON(((unsigned long)ea) & ~PAGE_MASK);
	WARN_ON(size & ~PAGE_MASK);

	for (i = 0; i < size; i += PAGE_SIZE)
		if (map_kernel_page((unsigned long)ea + i, pa + i, flags))
			return NULL;

	return (void __iomem *)ea;
}

/**
 * __iounmap_at - Low level function to tear down the page tables
 *                for an IO mapping. This is used for mappings that
 *                are manipulated manually, like partial unmapping of
 *                PCI IOs or ISA space.
 */
void __iounmap_at(void *ea, unsigned long size)
{
	WARN_ON(((unsigned long)ea) & ~PAGE_MASK);
	WARN_ON(size & ~PAGE_MASK);

	unmap_kernel_range((unsigned long)ea, size);
}
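
/*
 * Illustrative sketch only (not part of this file): a typical driver pairs
 * ioremap() with iounmap() rather than using the __ioremap_at()/
 * __iounmap_at() primitives directly. The physical address, size and
 * register offset below are hypothetical:
 *
 *	void __iomem *regs = ioremap(dev_phys_addr, SZ_4K);
 *
 *	if (!regs)
 *		return -ENOMEM;
 *	writel(1, regs + HYPOTHETICAL_CTRL_OFFSET);
 *	iounmap(regs);
 */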

void __iomem * __ioremap_caller(phys_addr_t addr, unsigned long size,
				unsigned long flags, void *caller)
{
	phys_addr_t paligned;
	void __iomem *ret;

	/*
	 * Choose an address to map it to. Once the vmalloc system is
	 * running, we use it. Before that, we map using addresses going
	 * up from ioremap_bot. The vmalloc system will use addresses
	 * from ioremap_bot through IOREMAP_END.
	 */
	paligned = addr & PAGE_MASK;
	size = PAGE_ALIGN(addr + size) - paligned;

	if ((size == 0) || (paligned == 0))
		return NULL;

	if (slab_is_available()) {
		struct vm_struct *area;

		area = __get_vm_area_caller(size, VM_IOREMAP,
					    ioremap_bot, IOREMAP_END,
					    caller);
		if (area == NULL)
			return NULL;

		area->phys_addr = paligned;
		ret = __ioremap_at(paligned, area->addr, size, flags);
		if (!ret)
			vunmap(area->addr);
	} else {
		ret = __ioremap_at(paligned, (void *)ioremap_bot, size, flags);
		if (ret)
			ioremap_bot += size;
	}

	if (ret)
		ret += addr & ~PAGE_MASK;
	return ret;
}

void __iomem * __ioremap(phys_addr_t addr, unsigned long size,
			 unsigned long flags)
{
	return __ioremap_caller(addr, size, flags, __builtin_return_address(0));
}

void __iomem * ioremap(phys_addr_t addr, unsigned long size)
{
	unsigned long flags = pgprot_val(pgprot_noncached(__pgprot(0)));
	void *caller = __builtin_return_address(0);

	if (ppc_md.ioremap)
		return ppc_md.ioremap(addr, size, flags, caller);
	return __ioremap_caller(addr, size, flags, caller);
}

void __iomem * ioremap_wc(phys_addr_t addr, unsigned long size)
{
	unsigned long flags = pgprot_val(pgprot_noncached_wc(__pgprot(0)));
	void *caller = __builtin_return_address(0);

	if (ppc_md.ioremap)
		return ppc_md.ioremap(addr, size, flags, caller);
	return __ioremap_caller(addr, size, flags, caller);
}

void __iomem * ioremap_prot(phys_addr_t addr, unsigned long size,
			    unsigned long flags)
{
	void *caller = __builtin_return_address(0);

	/* writable implies dirty for kernel addresses */
	if (flags & _PAGE_WRITE)
		flags |= _PAGE_DIRTY;

	/* we don't want to let _PAGE_EXEC leak out */
	flags &= ~_PAGE_EXEC;
	/*
	 * Force kernel mapping.
	 */
#if defined(CONFIG_PPC_BOOK3S_64)
	flags |= _PAGE_PRIVILEGED;
#else
	flags &= ~_PAGE_USER;
#endif

#ifdef _PAGE_BAP_SR
	/*
	 * _PAGE_USER contains _PAGE_BAP_SR on BookE using the new PTE
	 * format, which means that we just cleared supervisor access...
	 * oops ;-) This restores it.
	 */
	flags |= _PAGE_BAP_SR;
#endif

	if (ppc_md.ioremap)
		return ppc_md.ioremap(addr, size, flags, caller);
	return __ioremap_caller(addr, size, flags, caller);
}
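
/*
 * Illustrative sketch only (hypothetical addresses and sizes): the variants
 * above differ only in the caching attributes they request. For example, a
 * prefetchable framebuffer may be mapped write-combined while device
 * registers stay strictly non-cached and guarded:
 *
 *	void __iomem *fb   = ioremap_wc(fb_phys, fb_len);
 *	void __iomem *regs = ioremap(regs_phys, regs_len);
 */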

/*
 * Unmap an IO region and remove it from the vmalloc'd list.
 * Access to IO memory should be serialized by the driver.
 */
void __iounmap(volatile void __iomem *token)
{
	void *addr;

	if (!slab_is_available())
		return;

	addr = (void *) ((unsigned long __force)
			 PCI_FIX_ADDR(token) & PAGE_MASK);
	if ((unsigned long)addr < ioremap_bot) {
		printk(KERN_WARNING "Attempt to iounmap early bolted mapping"
		       " at 0x%p\n", addr);
		return;
	}
	vunmap(addr);
}

void iounmap(volatile void __iomem *token)
{
	if (ppc_md.iounmap)
		ppc_md.iounmap(token);
	else
		__iounmap(token);
}

EXPORT_SYMBOL(ioremap);
EXPORT_SYMBOL(ioremap_wc);
EXPORT_SYMBOL(ioremap_prot);
EXPORT_SYMBOL(__ioremap);
EXPORT_SYMBOL(__ioremap_at);
EXPORT_SYMBOL(iounmap);
EXPORT_SYMBOL(__iounmap);
EXPORT_SYMBOL(__iounmap_at);

#ifndef __PAGETABLE_PUD_FOLDED
/* 4 level page table */
struct page *pgd_page(pgd_t pgd)
{
	if (pgd_huge(pgd))
		return pte_page(pgd_pte(pgd));
	return virt_to_page(pgd_page_vaddr(pgd));
}
#endif

struct page *pud_page(pud_t pud)
{
	if (pud_huge(pud))
		return pte_page(pud_pte(pud));
	return virt_to_page(pud_page_vaddr(pud));
}

/*
 * For a hugepage we have the pfn in the pmd and use the low PTE_RPN_SHIFT
 * bits for flags. For a PTE page, we have a PTE_FRAG_SIZE (4K) aligned
 * virtual address.
 */
struct page *pmd_page(pmd_t pmd)
{
	if (pmd_trans_huge(pmd) || pmd_huge(pmd))
		return pte_page(pmd_pte(pmd));
	return virt_to_page(pmd_page_vaddr(pmd));
}

#ifdef CONFIG_PPC_64K_PAGES
static pte_t *get_from_cache(struct mm_struct *mm)
{
	void *pte_frag, *ret;

	spin_lock(&mm->page_table_lock);
	ret = mm->context.pte_frag;
	if (ret) {
		pte_frag = ret + PTE_FRAG_SIZE;
		/*
		 * If we have taken up all the fragments, mark the PTE
		 * page pointer NULL.
		 */
		if (((unsigned long)pte_frag & ~PAGE_MASK) == 0)
			pte_frag = NULL;
		mm->context.pte_frag = pte_frag;
	}
	spin_unlock(&mm->page_table_lock);
	return (pte_t *)ret;
}
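
/*
 * Illustrative arithmetic (assuming 64K PAGE_SIZE and 4K PTE_FRAG_SIZE,
 * i.e. PTE_FRAG_NR == 16): a cached page is handed out 4K at a time; the
 * cursor above simply advances by PTE_FRAG_SIZE and is dropped to NULL
 * once it reaches the next page boundary:
 *
 *	page at ...0000 -> fragments at offsets 0x0000, 0x1000, ... 0xf000
 *	cursor after the last fragment is page aligned, so
 *	(cursor & ~PAGE_MASK) == 0 and the cache is marked empty
 */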

static pte_t *__alloc_for_cache(struct mm_struct *mm, int kernel)
{
	void *ret = NULL;
	struct page *page = alloc_page(GFP_KERNEL | __GFP_NOTRACK |
				       __GFP_REPEAT | __GFP_ZERO);
	if (!page)
		return NULL;
	if (!kernel && !pgtable_page_ctor(page)) {
		__free_page(page);
		return NULL;
	}

	ret = page_address(page);
	spin_lock(&mm->page_table_lock);
	/*
	 * If another thread repopulated pte_frag while we were allocating,
	 * return the whole freshly allocated page with a single fragment
	 * count instead of splitting it.
	 */
	if (likely(!mm->context.pte_frag)) {
		set_page_count(page, PTE_FRAG_NR);
		mm->context.pte_frag = ret + PTE_FRAG_SIZE;
	}
	spin_unlock(&mm->page_table_lock);

	return (pte_t *)ret;
}

pte_t *page_table_alloc(struct mm_struct *mm, unsigned long vmaddr, int kernel)
{
	pte_t *pte;

	pte = get_from_cache(mm);
	if (pte)
		return pte;

	return __alloc_for_cache(mm, kernel);
}

void page_table_free(struct mm_struct *mm, unsigned long *table, int kernel)
{
	struct page *page = virt_to_page(table);

	if (put_page_testzero(page)) {
		if (!kernel)
			pgtable_page_dtor(page);
		free_hot_cold_page(page, 0);
	}
}

#ifdef CONFIG_SMP
static void page_table_free_rcu(void *table)
{
	struct page *page = virt_to_page(table);

	if (put_page_testzero(page)) {
		pgtable_page_dtor(page);
		free_hot_cold_page(page, 0);
	}
}

void pgtable_free_tlb(struct mmu_gather *tlb, void *table, int shift)
{
	unsigned long pgf = (unsigned long)table;

	BUG_ON(shift > MAX_PGTABLE_INDEX_SIZE);
	pgf |= shift;
	tlb_remove_table(tlb, (void *)pgf);
}

void __tlb_remove_table(void *_table)
{
	void *table = (void *)((unsigned long)_table & ~MAX_PGTABLE_INDEX_SIZE);
	unsigned int shift = (unsigned long)_table & MAX_PGTABLE_INDEX_SIZE;

	if (!shift)
		/* PTE page needs special handling */
		page_table_free_rcu(table);
	else {
		BUG_ON(shift > MAX_PGTABLE_INDEX_SIZE);
		kmem_cache_free(PGT_CACHE(shift), table);
	}
}
#else
void pgtable_free_tlb(struct mmu_gather *tlb, void *table, int shift)
{
	if (!shift) {
		/* PTE page needs special handling */
		struct page *page = virt_to_page(table);

		if (put_page_testzero(page)) {
			pgtable_page_dtor(page);
			free_hot_cold_page(page, 0);
		}
	} else {
		BUG_ON(shift > MAX_PGTABLE_INDEX_SIZE);
		kmem_cache_free(PGT_CACHE(shift), table);
	}
}
#endif
#endif /* CONFIG_PPC_64K_PAGES */

#ifdef CONFIG_TRANSPARENT_HUGEPAGE

/*
 * This is called when relaxing access to a hugepage. It's also called in
 * the page fault path when we don't hit any of the major fault cases,
 * i.e., a minor update of _PAGE_ACCESSED, _PAGE_DIRTY, etc. The generic
 * code will have handled those two for us; we additionally deal with
 * missing execute permission here on some processors.
 */
int pmdp_set_access_flags(struct vm_area_struct *vma, unsigned long address,
			  pmd_t *pmdp, pmd_t entry, int dirty)
{
	int changed;
#ifdef CONFIG_DEBUG_VM
	WARN_ON(!pmd_trans_huge(*pmdp));
	assert_spin_locked(&vma->vm_mm->page_table_lock);
#endif
	changed = !pmd_same(*(pmdp), entry);
	if (changed) {
		__ptep_set_access_flags(pmdp_ptep(pmdp), pmd_pte(entry));
		/*
		 * Since we are not supporting SW TLB systems, we don't
		 * have anything similar to flush_tlb_page_nohash().
		 */
	}
	return changed;
}

unsigned long pmd_hugepage_update(struct mm_struct *mm, unsigned long addr,
				  pmd_t *pmdp, unsigned long clr,
				  unsigned long set)
{
	__be64 old_be, tmp;
	unsigned long old;

#ifdef CONFIG_DEBUG_VM
	WARN_ON(!pmd_trans_huge(*pmdp));
	assert_spin_locked(&mm->page_table_lock);
#endif

	__asm__ __volatile__(
	"1:	ldarx	%0,0,%3\n\
		and.	%1,%0,%6\n\
		bne-	1b \n\
		andc	%1,%0,%4 \n\
		or	%1,%1,%7\n\
		stdcx.	%1,0,%3 \n\
		bne-	1b"
	: "=&r" (old_be), "=&r" (tmp), "=m" (*pmdp)
	: "r" (pmdp), "r" (cpu_to_be64(clr)), "m" (*pmdp),
	  "r" (cpu_to_be64(H_PAGE_BUSY)), "r" (cpu_to_be64(set))
	: "cc" );

	old = be64_to_cpu(old_be);

	trace_hugepage_update(addr, old, clr, set);
	if (old & H_PAGE_HASHPTE)
		hpte_do_hugepage_flush(mm, addr, pmdp, old);
	return old;
}

pmd_t pmdp_collapse_flush(struct vm_area_struct *vma, unsigned long address,
			  pmd_t *pmdp)
{
	pmd_t pmd;

	VM_BUG_ON(address & ~HPAGE_PMD_MASK);
	VM_BUG_ON(pmd_trans_huge(*pmdp));

	pmd = *pmdp;
	pmd_clear(pmdp);
	/*
	 * Wait for all pending hash_page to finish. This is needed
	 * in case of subpage collapse. When we collapse normal pages
	 * to a hugepage, we first clear the pmd, then invalidate all
	 * the PTE entries. The assumption here is that any low level
	 * page fault will see a none pmd and take the slow path that
	 * will wait on mmap_sem. But we could very well be in a
	 * hash_page with a local ptep pointer value. Such a hash_page
	 * can result in adding new HPTE entries for normal subpages.
	 * That means we could be modifying the page content as we
	 * copy them to a huge page. So wait for parallel hash_page
	 * to finish before invalidating HPTE entries. We can do this
	 * by sending an IPI to all the cpus and executing a dummy
	 * function there.
	 */
	kick_all_cpus_sync();
	/*
	 * Now invalidate the hpte entries in the range
	 * covered by pmd. This makes sure we take a
	 * fault and will find the pmd as none, which will
	 * result in a major fault which takes mmap_sem and
	 * hence waits for collapse to complete. Without this
	 * the __collapse_huge_page_copy can result in copying
	 * the old content.
	 */
	flush_tlb_pmd_range(vma->vm_mm, &pmd, address);
	return pmd;
}

/*
 * We currently remove entries from the hashtable regardless of whether
 * the entry was young or dirty.
 *
 * We should be more intelligent about this but for the moment we override
 * these functions and force a tlb flush unconditionally.
 */
int pmdp_test_and_clear_young(struct vm_area_struct *vma,
			      unsigned long address, pmd_t *pmdp)
{
	return __pmdp_test_and_clear_young(vma->vm_mm, address, pmdp);
}

/*
 * We want to put the pgtable in the pmd and use the pgtable for tracking
 * the base page size hptes.
 */
void pgtable_trans_huge_deposit(struct mm_struct *mm, pmd_t *pmdp,
				pgtable_t pgtable)
{
	pgtable_t *pgtable_slot;

	assert_spin_locked(&mm->page_table_lock);
	/*
	 * We store the pgtable in the second half of the PMD.
	 */
	pgtable_slot = (pgtable_t *)pmdp + PTRS_PER_PMD;
	*pgtable_slot = pgtable;
	/*
	 * Expose the deposited pgtable to other cpus before we set the
	 * hugepage PTE at the pmd level: the hash fault code looks at
	 * the deposited pgtable to store hash index values.
	 */
	smp_wmb();
}
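
/*
 * Illustrative layout (a sketch of what the pointer arithmetic above
 * assumes): the pmd table is allocated double-sized, so for a pmdp at
 * index i the deposited pgtable lives PTRS_PER_PMD slots further on:
 *
 *	[ pmd entries 0 .. PTRS_PER_PMD-1 | deposited pgtable_t slots ]
 *	pgtable_slot = (pgtable_t *)pmdp + PTRS_PER_PMD
 */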

pgtable_t pgtable_trans_huge_withdraw(struct mm_struct *mm, pmd_t *pmdp)
{
	pgtable_t pgtable;
	pgtable_t *pgtable_slot;

	assert_spin_locked(&mm->page_table_lock);
	pgtable_slot = (pgtable_t *)pmdp + PTRS_PER_PMD;
	pgtable = *pgtable_slot;
	/*
	 * Once we withdraw, mark the entry NULL.
	 */
	*pgtable_slot = NULL;
	/*
	 * We store HPTE information in the deposited PTE fragment.
	 * Zero out the content on withdraw.
	 */
	memset(pgtable, 0, PTE_FRAG_SIZE);
	return pgtable;
}

void pmdp_huge_split_prepare(struct vm_area_struct *vma,
			     unsigned long address, pmd_t *pmdp)
{
	VM_BUG_ON(address & ~HPAGE_PMD_MASK);
	VM_BUG_ON(REGION_ID(address) != USER_REGION_ID);

	/*
	 * We can't mark the pmd none here, because that will cause a race
	 * against exit_mmap. We need to keep the pmd marked TRANS HUGE
	 * while we split, but at the same time we want the rest of the
	 * ppc64 code not to insert a hash pte for it, because we will be
	 * modifying the deposited pgtable in the caller of this function.
	 * Hence clear the user access bit so that fault handling moves to
	 * a higher level function, which serializes against the ptl.
	 * We need to flush the existing hash pte entries here even though
	 * the translation is still valid, because we will withdraw the
	 * pgtable_t after this.
	 */
	pmd_hugepage_update(vma->vm_mm, address, pmdp, 0, _PAGE_PRIVILEGED);
}

/*
 * Set a new huge pmd. We should not be called for updating
 * an existing pmd entry. That should go via pmd_hugepage_update.
 */
void set_pmd_at(struct mm_struct *mm, unsigned long addr,
		pmd_t *pmdp, pmd_t pmd)
{
#ifdef CONFIG_DEBUG_VM
	WARN_ON(pte_present(pmd_pte(*pmdp)) && !pte_protnone(pmd_pte(*pmdp)));
	assert_spin_locked(&mm->page_table_lock);
	WARN_ON(!pmd_trans_huge(pmd));
#endif
	trace_hugepage_set_pmd(addr, pmd_val(pmd));
	return set_pte_at(mm, addr, pmdp_ptep(pmdp), pmd_pte(pmd));
}

/*
 * We use this to invalidate a pmdp entry before switching from a
 * hugepte to a regular pmd entry.
 */
void pmdp_invalidate(struct vm_area_struct *vma, unsigned long address,
		     pmd_t *pmdp)
{
	pmd_hugepage_update(vma->vm_mm, address, pmdp, _PAGE_PRESENT, 0);

	/*
	 * This ensures that generic code that relies on IRQ disabling
	 * to prevent a parallel THP split works as expected.
	 */
	kick_all_cpus_sync();
}
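
/*
 * Illustrative call sequence (a simplified sketch of how the generic THP
 * code in mm/huge_memory.c drives the helpers in this file; locking and
 * error handling omitted):
 *
 *	pgtable_trans_huge_deposit(mm, pmdp, pgtable);
 *	set_pmd_at(mm, haddr, pmdp, entry);
 *	...
 *	pmdp_invalidate(vma, haddr, pmdp);
 *	pgtable = pgtable_trans_huge_withdraw(mm, pmdp);
 */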

/*
 * A linux hugepage PMD was changed and the corresponding hash table
 * entries need to be flushed.
 */
void hpte_do_hugepage_flush(struct mm_struct *mm, unsigned long addr,
			    pmd_t *pmdp, unsigned long old_pmd)
{
	int ssize;
	unsigned int psize;
	unsigned long vsid;
	unsigned long flags = 0;
	const struct cpumask *tmp;

	/* Get the base page size, vsid and segment size */
#ifdef CONFIG_DEBUG_VM
	psize = get_slice_psize(mm, addr);
	BUG_ON(psize == MMU_PAGE_16M);
#endif
	if (old_pmd & H_PAGE_COMBO)
		psize = MMU_PAGE_4K;
	else
		psize = MMU_PAGE_64K;

	if (!is_kernel_addr(addr)) {
		ssize = user_segment_size(addr);
		vsid = get_vsid(mm->context.id, addr, ssize);
		WARN_ON(vsid == 0);
	} else {
		vsid = get_kernel_vsid(addr, mmu_kernel_ssize);
		ssize = mmu_kernel_ssize;
	}

	tmp = cpumask_of(smp_processor_id());
	if (cpumask_equal(mm_cpumask(mm), tmp))
		flags |= HPTE_LOCAL_UPDATE;

	return flush_hash_hugepage(vsid, addr, pmdp, psize, ssize, flags);
}

static pmd_t pmd_set_protbits(pmd_t pmd, pgprot_t pgprot)
{
	return __pmd(pmd_val(pmd) | pgprot_val(pgprot));
}

pmd_t pfn_pmd(unsigned long pfn, pgprot_t pgprot)
{
	unsigned long pmdv;

	pmdv = (pfn << PAGE_SHIFT) & PTE_RPN_MASK;
	return pmd_set_protbits(__pmd(pmdv), pgprot);
}

pmd_t mk_pmd(struct page *page, pgprot_t pgprot)
{
	return pfn_pmd(page_to_pfn(page), pgprot);
}

pmd_t pmd_modify(pmd_t pmd, pgprot_t newprot)
{
	unsigned long pmdv;

	pmdv = pmd_val(pmd);
	pmdv &= _HPAGE_CHG_MASK;
	return pmd_set_protbits(__pmd(pmdv), newprot);
}

/*
 * This is called at the end of handling a user page fault, when the
 * fault has been handled by updating a HUGE PMD entry in the linux page
 * tables. We use it to preload an HPTE into the hash table corresponding
 * to the updated linux HUGE PMD entry.
 */
void update_mmu_cache_pmd(struct vm_area_struct *vma, unsigned long addr,
			  pmd_t *pmd)
{
	return;
}
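
/*
 * Illustrative only (hypothetical 'page', 'vma' and 'haddr'; the deposit
 * and locking steps are elided): a huge pmd is built from a compound page
 * by shifting the pfn into the RPN field and adding protection bits:
 *
 *	pmd_t entry = pmd_mkhuge(mk_pmd(page, vma->vm_page_prot));
 *	set_pmd_at(vma->vm_mm, haddr, pmdp, entry);
 */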

pmd_t pmdp_huge_get_and_clear(struct mm_struct *mm,
			      unsigned long addr, pmd_t *pmdp)
{
	pmd_t old_pmd;
	pgtable_t pgtable;
	unsigned long old;
	pgtable_t *pgtable_slot;

	old = pmd_hugepage_update(mm, addr, pmdp, ~0UL, 0);
	old_pmd = __pmd(old);
	/*
	 * We have pmd == none and we are holding page_table_lock.
	 * So we can safely go and clear the pgtable hash
	 * index info.
	 */
	pgtable_slot = (pgtable_t *)pmdp + PTRS_PER_PMD;
	pgtable = *pgtable_slot;
	/*
	 * Zero out the old valid and hash index details so that the
	 * hash fault code doesn't look at stale values.
	 */
	memset(pgtable, 0, PTE_FRAG_SIZE);
	/*
	 * Serialize against find_linux_pte_or_hugepte which does a lock-less
	 * lookup in page tables with local interrupts disabled. For huge pages
	 * it casts pmd_t to pte_t. Since the format of pte_t is different from
	 * pmd_t we want to prevent transit from pmd pointing to page table
	 * to pmd pointing to huge page (and back) while interrupts are disabled.
	 * We clear pmd to possibly replace it with a page table pointer in
	 * different code paths. So make sure we wait for the parallel
	 * find_linux_pte_or_hugepte to finish.
	 */
	kick_all_cpus_sync();
	return old_pmd;
}

int has_transparent_hugepage(void)
{
	BUILD_BUG_ON_MSG((PMD_SHIFT - PAGE_SHIFT) >= MAX_ORDER,
			 "hugepages can't be allocated by the buddy allocator");

	BUILD_BUG_ON_MSG((PMD_SHIFT - PAGE_SHIFT) < 2,
			 "We need more than 2 pages to do deferred thp split");

	if (!mmu_has_feature(MMU_FTR_16M_PAGE))
		return 0;
	/*
	 * We support THP only if PMD_SIZE is 16MB.
	 */
	if (mmu_psize_defs[MMU_PAGE_16M].shift != PMD_SHIFT)
		return 0;
	/*
	 * We need to make sure that we support a 16MB hugepage in a segment
	 * with base page size 64K or 4K. We only enable THP with a PAGE_SIZE
	 * of 64K.
	 */
	/*
	 * If we have 64K HPTEs, we will be using those by default.
	 */
	if (mmu_psize_defs[MMU_PAGE_64K].shift &&
	    (mmu_psize_defs[MMU_PAGE_64K].penc[MMU_PAGE_16M] == -1))
		return 0;
	/*
	 * Ok, we only have 4K HPTEs.
	 */
	if (mmu_psize_defs[MMU_PAGE_4K].penc[MMU_PAGE_16M] == -1)
		return 0;

	return 1;
}
#endif /* CONFIG_TRANSPARENT_HUGEPAGE */