// SPDX-License-Identifier: GPL-2.0
/*
 *  mm/pgtable-generic.c
 *
 *  Generic pgtable methods declared in linux/pgtable.h
 *
 *  Copyright (C) 2010  Linus Torvalds
 */

#include <linux/pagemap.h>
#include <linux/hugetlb.h>
#include <linux/pgtable.h>
#include <linux/swap.h>
#include <linux/swapops.h>
#include <linux/mm_inline.h>
#include <asm/pgalloc.h>
#include <asm/tlb.h>

/*
 * If a p?d_bad entry is found while walking page tables, report
 * the error, before resetting entry to p?d_none.  Usually (but
 * very seldom) called out from the p?d_none_or_clear_bad macros.
 */

void pgd_clear_bad(pgd_t *pgd)
{
	pgd_ERROR(*pgd);
	pgd_clear(pgd);
}

#ifndef __PAGETABLE_P4D_FOLDED
void p4d_clear_bad(p4d_t *p4d)
{
	p4d_ERROR(*p4d);
	p4d_clear(p4d);
}
#endif

#ifndef __PAGETABLE_PUD_FOLDED
void pud_clear_bad(pud_t *pud)
{
	pud_ERROR(*pud);
	pud_clear(pud);
}
#endif

/*
 * Note that the pmd variant below can't be stubbed out just as for p4d/pud
 * above: pmd folding is special, and typically the pmd_* macros refer to the
 * upper level even when folded.
 */
void pmd_clear_bad(pmd_t *pmd)
{
	pmd_ERROR(*pmd);
	pmd_clear(pmd);
}

#ifndef __HAVE_ARCH_PTEP_SET_ACCESS_FLAGS
/*
 * Only sets the access flags (dirty, accessed), as well as write
 * permission.  Furthermore, we know it always gets set to a "more
 * permissive" setting, which allows most architectures to optimize
 * this.  We return whether the PTE actually changed, which in turn
 * instructs the caller to do things like update_mmu_cache().  This
 * used to be done in the caller, but sparc needs minor faults to
 * force that call on sun4c, so we changed this macro slightly.
 */
int ptep_set_access_flags(struct vm_area_struct *vma,
			  unsigned long address, pte_t *ptep,
			  pte_t entry, int dirty)
{
	int changed = !pte_same(ptep_get(ptep), entry);
	if (changed) {
		set_pte_at(vma->vm_mm, address, ptep, entry);
		flush_tlb_fix_spurious_fault(vma, address, ptep);
	}
	return changed;
}
#endif
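
/*
 * Illustrative sketch (not part of the original file): how a generic
 * write-fault path might use ptep_set_access_flags().  The helper
 * example_fault_mkdirty() is hypothetical; it assumes the caller holds
 * the page table lock for ptep, and that the new entry is only ever
 * "more permissive", as the comment above requires.
 */
#if 0	/* example only, never built */
static void example_fault_mkdirty(struct vm_area_struct *vma,
				  unsigned long address, pte_t *ptep)
{
	pte_t entry = pte_mkyoung(pte_mkdirty(ptep_get(ptep)));

	/* update_mmu_cache() is only needed if the PTE actually changed */
	if (ptep_set_access_flags(vma, address, ptep, entry, 1))
		update_mmu_cache(vma, address, ptep);
}
#endif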

#ifndef __HAVE_ARCH_PTEP_CLEAR_YOUNG_FLUSH
int ptep_clear_flush_young(struct vm_area_struct *vma,
			   unsigned long address, pte_t *ptep)
{
	int young;
	young = ptep_test_and_clear_young(vma, address, ptep);
	if (young)
		flush_tlb_page(vma, address);
	return young;
}
#endif

#ifndef __HAVE_ARCH_PTEP_CLEAR_FLUSH
pte_t ptep_clear_flush(struct vm_area_struct *vma, unsigned long address,
		       pte_t *ptep)
{
	struct mm_struct *mm = vma->vm_mm;
	pte_t pte;
	pte = ptep_get_and_clear(mm, address, ptep);
	if (pte_accessible(mm, pte))
		flush_tlb_page(vma, address);
	return pte;
}
#endif
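
/*
 * Illustrative sketch (not part of the original file): a typical use of
 * ptep_clear_flush() when tearing down a mapping.  example_unmap_one()
 * and its folio argument are hypothetical; it assumes the page table
 * lock for ptep is held, and shows why the old PTE value is returned.
 */
#if 0	/* example only, never built */
static void example_unmap_one(struct vm_area_struct *vma,
			      unsigned long address, pte_t *ptep,
			      struct folio *folio)
{
	pte_t pteval = ptep_clear_flush(vma, address, ptep);

	/* propagate hardware dirty state before the mapping is gone */
	if (pte_dirty(pteval))
		folio_mark_dirty(folio);
}
#endif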

#ifdef CONFIG_TRANSPARENT_HUGEPAGE

#ifndef __HAVE_ARCH_PMDP_SET_ACCESS_FLAGS
int pmdp_set_access_flags(struct vm_area_struct *vma,
			  unsigned long address, pmd_t *pmdp,
			  pmd_t entry, int dirty)
{
	int changed = !pmd_same(*pmdp, entry);
	VM_BUG_ON(address & ~HPAGE_PMD_MASK);
	if (changed) {
		set_pmd_at(vma->vm_mm, address, pmdp, entry);
		flush_pmd_tlb_range(vma, address, address + HPAGE_PMD_SIZE);
	}
	return changed;
}
#endif

#ifndef __HAVE_ARCH_PMDP_CLEAR_YOUNG_FLUSH
int pmdp_clear_flush_young(struct vm_area_struct *vma,
			   unsigned long address, pmd_t *pmdp)
{
	int young;
	VM_BUG_ON(address & ~HPAGE_PMD_MASK);
	young = pmdp_test_and_clear_young(vma, address, pmdp);
	if (young)
		flush_pmd_tlb_range(vma, address, address + HPAGE_PMD_SIZE);
	return young;
}
#endif

#ifndef __HAVE_ARCH_PMDP_HUGE_CLEAR_FLUSH
pmd_t pmdp_huge_clear_flush(struct vm_area_struct *vma, unsigned long address,
			    pmd_t *pmdp)
{
	pmd_t pmd;
	VM_BUG_ON(address & ~HPAGE_PMD_MASK);
	VM_BUG_ON(pmd_present(*pmdp) && !pmd_trans_huge(*pmdp) &&
		  !pmd_devmap(*pmdp));
	pmd = pmdp_huge_get_and_clear(vma->vm_mm, address, pmdp);
	flush_pmd_tlb_range(vma, address, address + HPAGE_PMD_SIZE);
	return pmd;
}

#ifdef CONFIG_HAVE_ARCH_TRANSPARENT_HUGEPAGE_PUD
pud_t pudp_huge_clear_flush(struct vm_area_struct *vma, unsigned long address,
			    pud_t *pudp)
{
	pud_t pud;

	VM_BUG_ON(address & ~HPAGE_PUD_MASK);
	VM_BUG_ON(!pud_trans_huge(*pudp) && !pud_devmap(*pudp));
	pud = pudp_huge_get_and_clear(vma->vm_mm, address, pudp);
	flush_pud_tlb_range(vma, address, address + HPAGE_PUD_SIZE);
	return pud;
}
#endif
#endif

#ifndef __HAVE_ARCH_PGTABLE_DEPOSIT
void pgtable_trans_huge_deposit(struct mm_struct *mm, pmd_t *pmdp,
				pgtable_t pgtable)
{
	assert_spin_locked(pmd_lockptr(mm, pmdp));

	/* FIFO */
	if (!pmd_huge_pte(mm, pmdp))
		INIT_LIST_HEAD(&pgtable->lru);
	else
		list_add(&pgtable->lru, &pmd_huge_pte(mm, pmdp)->lru);
	pmd_huge_pte(mm, pmdp) = pgtable;
}
#endif

#ifndef __HAVE_ARCH_PGTABLE_WITHDRAW
/* no "address" argument, so this destroys the page coloring of some arches */
pgtable_t pgtable_trans_huge_withdraw(struct mm_struct *mm, pmd_t *pmdp)
{
	pgtable_t pgtable;

	assert_spin_locked(pmd_lockptr(mm, pmdp));

	/* FIFO */
	pgtable = pmd_huge_pte(mm, pmdp);
	pmd_huge_pte(mm, pmdp) = list_first_entry_or_null(&pgtable->lru,
							  struct page, lru);
	if (pmd_huge_pte(mm, pmdp))
		list_del(&pgtable->lru);
	return pgtable;
}
#endif
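
/*
 * Illustrative sketch (not part of the original file): the deposit/
 * withdraw pair as THP collapse and a later split might use it.  The
 * helpers example_collapse() and example_split() are hypothetical;
 * real callers (khugepaged, __split_huge_pmd()) do far more work.
 * Depositing at collapse time guarantees that a later split cannot
 * fail on page table allocation; FIFO order hands tables back in the
 * order they were deposited.
 */
#if 0	/* example only, never built */
static void example_collapse(struct mm_struct *mm, pmd_t *pmdp,
			     pgtable_t pgtable)
{
	spinlock_t *ptl = pmd_lock(mm, pmdp);

	/* stash the now-unused pte table under the huge pmd */
	pgtable_trans_huge_deposit(mm, pmdp, pgtable);
	spin_unlock(ptl);
}

static void example_split(struct mm_struct *mm, pmd_t *pmdp)
{
	spinlock_t *ptl = pmd_lock(mm, pmdp);
	pgtable_t pgtable = pgtable_trans_huge_withdraw(mm, pmdp);

	/* caller would now refill pgtable's ptes and reinstall it */
	spin_unlock(ptl);
}
#endif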

#ifndef __HAVE_ARCH_PMDP_INVALIDATE
pmd_t pmdp_invalidate(struct vm_area_struct *vma, unsigned long address,
		      pmd_t *pmdp)
{
	VM_WARN_ON_ONCE(!pmd_present(*pmdp));
	pmd_t old = pmdp_establish(vma, address, pmdp, pmd_mkinvalid(*pmdp));
	flush_pmd_tlb_range(vma, address, address + HPAGE_PMD_SIZE);
	return old;
}
#endif

#ifndef __HAVE_ARCH_PMDP_INVALIDATE_AD
pmd_t pmdp_invalidate_ad(struct vm_area_struct *vma, unsigned long address,
			 pmd_t *pmdp)
{
	VM_WARN_ON_ONCE(!pmd_present(*pmdp));
	return pmdp_invalidate(vma, address, pmdp);
}
#endif

#ifndef pmdp_collapse_flush
pmd_t pmdp_collapse_flush(struct vm_area_struct *vma, unsigned long address,
			  pmd_t *pmdp)
{
	/*
	 * The pmd and hugepage pte formats are the same, so we can
	 * use the same function.
	 */
	pmd_t pmd;

	VM_BUG_ON(address & ~HPAGE_PMD_MASK);
	VM_BUG_ON(pmd_trans_huge(*pmdp));
	pmd = pmdp_huge_get_and_clear(vma->vm_mm, address, pmdp);

	/* collapse entails shooting down ptes, not the pmd */
	flush_tlb_range(vma, address, address + HPAGE_PMD_SIZE);
	return pmd;
}
#endif

/* an arch defines pte_free_defer() in asm/pgalloc.h for its own implementation */
#ifndef pte_free_defer
static void pte_free_now(struct rcu_head *head)
{
	struct page *page;

	page = container_of(head, struct page, rcu_head);
	pte_free(NULL /* mm not passed and not used */, (pgtable_t)page);
}

void pte_free_defer(struct mm_struct *mm, pgtable_t pgtable)
{
	struct page *page;

	page = pgtable;
	call_rcu(&page->rcu_head, pte_free_now);
}
#endif /* pte_free_defer */
#endif /* CONFIG_TRANSPARENT_HUGEPAGE */
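
/*
 * Illustrative sketch (not part of the original file): why the free is
 * deferred through RCU.  Lockless walkers hold rcu_read_lock() while
 * inspecting a page table (see __pte_offset_map() below), so the
 * table's memory must not be reused until their read sections end.
 * example_retract_table() is a hypothetical caller in the style of
 * khugepaged's retraction path.
 */
#if 0	/* example only, never built */
static void example_retract_table(struct mm_struct *mm, pgtable_t pgtable)
{
	/*
	 * The table has already been disconnected from its pmd and the
	 * TLB flushed: pte_free_now() will run only after all current
	 * rcu_read_lock() sections have completed.
	 */
	pte_free_defer(mm, pgtable);
}
#endif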

#if defined(CONFIG_GUP_GET_PXX_LOW_HIGH) && \
	(defined(CONFIG_SMP) || defined(CONFIG_PREEMPT_RCU))
/*
 * See the comment above ptep_get_lockless() in include/linux/pgtable.h:
 * the barriers in pmdp_get_lockless() cannot guarantee that the value in
 * pmd_high actually belongs with the value in pmd_low; but holding interrupts
 * off blocks the TLB flush between present updates, which guarantees that a
 * successful __pte_offset_map() points to a page table from matched halves.
 */
static unsigned long pmdp_get_lockless_start(void)
{
	unsigned long irqflags;

	local_irq_save(irqflags);
	return irqflags;
}
static void pmdp_get_lockless_end(unsigned long irqflags)
{
	local_irq_restore(irqflags);
}
#else
static unsigned long pmdp_get_lockless_start(void) { return 0; }
static void pmdp_get_lockless_end(unsigned long irqflags) { }
#endif

pte_t *__pte_offset_map(pmd_t *pmd, unsigned long addr, pmd_t *pmdvalp)
{
	unsigned long irqflags;
	pmd_t pmdval;

	rcu_read_lock();
	irqflags = pmdp_get_lockless_start();
	pmdval = pmdp_get_lockless(pmd);
	pmdp_get_lockless_end(irqflags);

	if (pmdvalp)
		*pmdvalp = pmdval;
	if (unlikely(pmd_none(pmdval) || is_pmd_migration_entry(pmdval)))
		goto nomap;
	if (unlikely(pmd_trans_huge(pmdval) || pmd_devmap(pmdval)))
		goto nomap;
	if (unlikely(pmd_bad(pmdval))) {
		pmd_clear_bad(pmd);
		goto nomap;
	}
	return __pte_map(&pmdval, addr);
nomap:
	rcu_read_unlock();
	return NULL;
}
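
/*
 * Illustrative sketch (not part of the original file): a lockless,
 * read-only walk using pte_offset_map()/pte_unmap(), the wrappers
 * around __pte_offset_map() above.  example_pte_young() is a
 * hypothetical helper; without the lock the entry may change at any
 * time, so only a single ptep_get() snapshot is examined.
 */
#if 0	/* example only, never built */
static bool example_pte_young(pmd_t *pmd, unsigned long addr)
{
	pte_t *pte = pte_offset_map(pmd, addr);
	bool young = false;

	if (pte) {
		young = pte_young(ptep_get(pte));
		pte_unmap(pte);	/* kunmap if HIGHPTE, rcu_read_unlock() */
	}
	return young;
}
#endif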

pte_t *pte_offset_map_nolock(struct mm_struct *mm, pmd_t *pmd,
			     unsigned long addr, spinlock_t **ptlp)
{
	pmd_t pmdval;
	pte_t *pte;

	pte = __pte_offset_map(pmd, addr, &pmdval);
	if (likely(pte))
		*ptlp = pte_lockptr(mm, &pmdval);
	return pte;
}
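
/*
 * Illustrative sketch (not part of the original file): the map-now,
 * lock-later pattern that pte_offset_map_nolock() supports.  The
 * helper example_scan_then_lock() is hypothetical; as the comment
 * below explains, callers revalidate with pte_same() after taking
 * the lock instead of rechecking *pmd.
 */
#if 0	/* example only, never built */
static bool example_scan_then_lock(struct mm_struct *mm, pmd_t *pmd,
				   unsigned long addr)
{
	spinlock_t *ptl;
	pte_t *pte = pte_offset_map_nolock(mm, pmd, addr, &ptl);
	pte_t entry;
	bool stable;

	if (!pte)
		return false;
	entry = ptep_get(pte);		/* unlocked snapshot */
	spin_lock(ptl);
	stable = pte_same(entry, ptep_get(pte));
	spin_unlock(ptl);
	pte_unmap(pte);
	return stable;
}
#endif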

/*
 * pte_offset_map_lock(mm, pmd, addr, ptlp), and its internal implementation
 * __pte_offset_map_lock() below, is usually called with the pmd pointer for
 * addr, reached by walking down the mm's pgd, p4d, pud for addr: either while
 * holding mmap_lock or vma lock for read or for write; or in truncate or rmap
 * context, while holding file's i_mmap_lock or anon_vma lock for read (or for
 * write).  In a few cases, it may be used with pmd pointing to a pmd_t already
 * copied to or constructed on the stack.
 *
 * When successful, it returns the pte pointer for addr, with its page table
 * kmapped if necessary (when CONFIG_HIGHPTE), and locked against concurrent
 * modification by software, with a pointer to that spinlock in ptlp (in some
 * configs mm->page_table_lock, in SPLIT_PTLOCK configs a spinlock in table's
 * struct page).  Use pte_unmap_unlock(pte, ptl) to unlock and unmap afterwards.
 *
 * But it is unsuccessful, returning NULL with *ptlp unchanged, if there is no
 * page table at *pmd: if, for example, the page table has just been removed,
 * or replaced by the huge pmd of a THP.  (When successful, *pmd is rechecked
 * after acquiring the ptlock, and retried internally if it changed: so that a
 * page table can be safely removed or replaced by THP while holding its lock.)
 *
 * pte_offset_map(pmd, addr), and its internal helper __pte_offset_map() above,
 * just returns the pte pointer for addr, its page table kmapped if necessary;
 * or NULL if there is no page table at *pmd.  It does not attempt to lock the
 * page table, so cannot normally be used when the page table is to be updated,
 * or when entries read must be stable.  But it does take rcu_read_lock(): so
 * that even when the page table is racily removed, it remains a valid though
 * empty and disconnected table, until pte_unmap(pte) unmaps it and
 * rcu_read_unlock() is called afterwards.
 *
 * pte_offset_map_nolock(mm, pmd, addr, ptlp), above, is like pte_offset_map();
 * but when successful, it also outputs a pointer to the spinlock in ptlp - as
 * pte_offset_map_lock() does, but in this case without locking it.  This helps
 * the caller to avoid a later pte_lockptr(mm, *pmd), which might by that time
 * act on a changed *pmd: pte_offset_map_nolock() provides the correct spinlock
 * pointer for the page table that it returns.  In principle, the caller should
 * recheck *pmd once the lock is taken; in practice, no callsite needs that -
 * either the mmap_lock for write, or a pte_same() check on contents, is enough.
 *
 * Note that free_pgtables(), used after unmapping detached vmas, or when
 * exiting the whole mm, does not take page table lock before freeing a page
 * table, and may not use RCU at all: "outsiders" like khugepaged should avoid
 * pte_offset_map() and co once the vma is detached from mm or mm_users is zero.
 */
pte_t *__pte_offset_map_lock(struct mm_struct *mm, pmd_t *pmd,
			     unsigned long addr, spinlock_t **ptlp)
{
	spinlock_t *ptl;
	pmd_t pmdval;
	pte_t *pte;
again:
	pte = __pte_offset_map(pmd, addr, &pmdval);
	if (unlikely(!pte))
		return pte;
	ptl = pte_lockptr(mm, &pmdval);
	spin_lock(ptl);
	if (likely(pmd_same(pmdval, pmdp_get_lockless(pmd)))) {
		*ptlp = ptl;
		return pte;
	}
	pte_unmap_unlock(pte, ptl);
	goto again;
}
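
/*
 * Illustrative sketch (not part of the original file): the canonical
 * locked-update pattern built on __pte_offset_map_lock(), through the
 * pte_offset_map_lock() wrapper.  example_mkold() is a hypothetical
 * helper; with the ptl held, the entry is stable and may be modified.
 */
#if 0	/* example only, never built */
static void example_mkold(struct mm_struct *mm, struct vm_area_struct *vma,
			  pmd_t *pmd, unsigned long addr)
{
	spinlock_t *ptl;
	pte_t *pte = pte_offset_map_lock(mm, pmd, addr, &ptl);

	if (!pte)
		return;		/* no page table at *pmd */
	if (pte_present(ptep_get(pte)))
		ptep_test_and_clear_young(vma, addr, pte);
	pte_unmap_unlock(pte, ptl);
}
#endif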