// SPDX-License-Identifier: GPL-2.0
/*
 *  mm/pgtable-generic.c
 *
 *  Generic pgtable methods declared in asm-generic/pgtable.h
 *
 *  Copyright (C) 2010  Linus Torvalds
 */

#include <linux/pagemap.h>
#include <asm/tlb.h>
#include <asm-generic/pgtable.h>

/*
 * If a p?d_bad entry is found while walking page tables, report
 * the error, before resetting entry to p?d_none.  Usually (but
 * very seldom) called out from the p?d_none_or_clear_bad macros.
 */

void pgd_clear_bad(pgd_t *pgd)
{
	pgd_ERROR(*pgd);
	pgd_clear(pgd);
}

void p4d_clear_bad(p4d_t *p4d)
{
	p4d_ERROR(*p4d);
	p4d_clear(p4d);
}

void pud_clear_bad(pud_t *pud)
{
	pud_ERROR(*pud);
	pud_clear(pud);
}

void pmd_clear_bad(pmd_t *pmd)
{
	pmd_ERROR(*pmd);
	pmd_clear(pmd);
}
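/*
 * For reference: the p?d_none_or_clear_bad() macros mentioned above are
 * defined in asm-generic/pgtable.h. The pgd variant looks roughly like
 * this (a sketch for illustration, not an authoritative copy):
 *
 *	static inline int pgd_none_or_clear_bad(pgd_t *pgd)
 *	{
 *		if (pgd_none(*pgd))
 *			return 1;
 *		if (unlikely(pgd_bad(*pgd))) {
 *			pgd_clear_bad(pgd);
 *			return 1;
 *		}
 *		return 0;
 *	}
 */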
#ifndef __HAVE_ARCH_PTEP_SET_ACCESS_FLAGS
/*
 * Only sets the access flags (dirty, accessed), as well as write
 * permission. Furthermore, we know it always gets set to a "more
 * permissive" setting, which allows most architectures to optimize
 * this. We return whether the PTE actually changed, which in turn
 * instructs the caller to do things like update_mmu_cache.  This
 * used to be done in the caller, but sparc needs minor faults to
 * force that call on sun4c so we changed this macro slightly
 */
int ptep_set_access_flags(struct vm_area_struct *vma,
			  unsigned long address, pte_t *ptep,
			  pte_t entry, int dirty)
{
	int changed = !pte_same(*ptep, entry);
	if (changed) {
		set_pte_at(vma->vm_mm, address, ptep, entry);
		flush_tlb_fix_spurious_fault(vma, address);
	}
	return changed;
}
#endif

#ifndef __HAVE_ARCH_PTEP_CLEAR_YOUNG_FLUSH
int ptep_clear_flush_young(struct vm_area_struct *vma,
			   unsigned long address, pte_t *ptep)
{
	int young;
	young = ptep_test_and_clear_young(vma, address, ptep);
	if (young)
		flush_tlb_page(vma, address);
	return young;
}
#endif

#ifndef __HAVE_ARCH_PTEP_CLEAR_FLUSH
pte_t ptep_clear_flush(struct vm_area_struct *vma, unsigned long address,
		       pte_t *ptep)
{
	struct mm_struct *mm = vma->vm_mm;
	pte_t pte;
	pte = ptep_get_and_clear(mm, address, ptep);
	if (pte_accessible(mm, pte))
		flush_tlb_page(vma, address);
	return pte;
}
#endif

#ifdef CONFIG_TRANSPARENT_HUGEPAGE

#ifndef __HAVE_ARCH_PMDP_SET_ACCESS_FLAGS
int pmdp_set_access_flags(struct vm_area_struct *vma,
			  unsigned long address, pmd_t *pmdp,
			  pmd_t entry, int dirty)
{
	int changed = !pmd_same(*pmdp, entry);
	VM_BUG_ON(address & ~HPAGE_PMD_MASK);
	if (changed) {
		set_pmd_at(vma->vm_mm, address, pmdp, entry);
		flush_pmd_tlb_range(vma, address, address + HPAGE_PMD_SIZE);
	}
	return changed;
}
#endif

#ifndef __HAVE_ARCH_PMDP_CLEAR_YOUNG_FLUSH
int pmdp_clear_flush_young(struct vm_area_struct *vma,
			   unsigned long address, pmd_t *pmdp)
{
	int young;
	VM_BUG_ON(address & ~HPAGE_PMD_MASK);
	young = pmdp_test_and_clear_young(vma, address, pmdp);
	if (young)
		flush_pmd_tlb_range(vma, address, address + HPAGE_PMD_SIZE);
	return young;
}
#endif
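/*
 * The pmd variants above flush with flush_pmd_tlb_range(). For
 * architectures that do not define __HAVE_ARCH_FLUSH_PMD_TLB_RANGE,
 * asm-generic/pgtable.h provides a fallback that is roughly (a sketch,
 * not an authoritative copy):
 *
 *	#define flush_pmd_tlb_range(vma, addr, end)	flush_tlb_range(vma, addr, end)
 *	#define flush_pud_tlb_range(vma, addr, end)	flush_tlb_range(vma, addr, end)
 *
 * so the whole HPAGE_PMD_SIZE range is shot down in a single call.
 */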
#ifndef __HAVE_ARCH_PMDP_HUGE_CLEAR_FLUSH
pmd_t pmdp_huge_clear_flush(struct vm_area_struct *vma, unsigned long address,
			    pmd_t *pmdp)
{
	pmd_t pmd;
	VM_BUG_ON(address & ~HPAGE_PMD_MASK);
	VM_BUG_ON((pmd_present(*pmdp) && !pmd_trans_huge(*pmdp) &&
		   !pmd_devmap(*pmdp)) || !pmd_present(*pmdp));
	pmd = pmdp_huge_get_and_clear(vma->vm_mm, address, pmdp);
	flush_pmd_tlb_range(vma, address, address + HPAGE_PMD_SIZE);
	return pmd;
}

#ifdef CONFIG_HAVE_ARCH_TRANSPARENT_HUGEPAGE_PUD
pud_t pudp_huge_clear_flush(struct vm_area_struct *vma, unsigned long address,
			    pud_t *pudp)
{
	pud_t pud;

	VM_BUG_ON(address & ~HPAGE_PUD_MASK);
	VM_BUG_ON(!pud_trans_huge(*pudp) && !pud_devmap(*pudp));
	pud = pudp_huge_get_and_clear(vma->vm_mm, address, pudp);
	flush_pud_tlb_range(vma, address, address + HPAGE_PUD_SIZE);
	return pud;
}
#endif
#endif

#ifndef __HAVE_ARCH_PGTABLE_DEPOSIT
void pgtable_trans_huge_deposit(struct mm_struct *mm, pmd_t *pmdp,
				pgtable_t pgtable)
{
	assert_spin_locked(pmd_lockptr(mm, pmdp));

	/* FIFO */
	if (!pmd_huge_pte(mm, pmdp))
		INIT_LIST_HEAD(&pgtable->lru);
	else
		list_add(&pgtable->lru, &pmd_huge_pte(mm, pmdp)->lru);
	pmd_huge_pte(mm, pmdp) = pgtable;
}
#endif
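/*
 * Typical caller pairing for the deposit/withdraw helpers: a page table
 * is deposited under the pmd lock when a huge pmd is installed, and
 * withdrawn under the same lock when the huge pmd is split or zapped.
 * Roughly (a sketch of the pattern used by mm/huge_memory.c, with
 * details elided):
 *
 *	ptl = pmd_lock(mm, pmd);
 *	pgtable_trans_huge_deposit(mm, pmd, pgtable);
 *	set_pmd_at(mm, haddr, pmd, entry);
 *	spin_unlock(ptl);
 */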
#ifndef __HAVE_ARCH_PGTABLE_WITHDRAW
/* no "address" argument, so this destroys the page coloring of some archs */
pgtable_t pgtable_trans_huge_withdraw(struct mm_struct *mm, pmd_t *pmdp)
{
	pgtable_t pgtable;

	assert_spin_locked(pmd_lockptr(mm, pmdp));

	/* FIFO */
	pgtable = pmd_huge_pte(mm, pmdp);
	pmd_huge_pte(mm, pmdp) = list_first_entry_or_null(&pgtable->lru,
							  struct page, lru);
	if (pmd_huge_pte(mm, pmdp))
		list_del(&pgtable->lru);
	return pgtable;
}
#endif

#ifndef __HAVE_ARCH_PMDP_INVALIDATE
void pmdp_invalidate(struct vm_area_struct *vma, unsigned long address,
		     pmd_t *pmdp)
{
	pmd_t entry = *pmdp;
	set_pmd_at(vma->vm_mm, address, pmdp, pmd_mknotpresent(entry));
	flush_pmd_tlb_range(vma, address, address + HPAGE_PMD_SIZE);
}
#endif

#ifndef pmdp_collapse_flush
pmd_t pmdp_collapse_flush(struct vm_area_struct *vma, unsigned long address,
			  pmd_t *pmdp)
{
	/*
	 * The pmd and hugepage pte formats are the same, so we can
	 * use the same function.
	 */
	pmd_t pmd;

	VM_BUG_ON(address & ~HPAGE_PMD_MASK);
	VM_BUG_ON(pmd_trans_huge(*pmdp));
	pmd = pmdp_huge_get_and_clear(vma->vm_mm, address, pmdp);

	/* collapse entails shooting down ptes not pmd */
	flush_tlb_range(vma, address, address + HPAGE_PMD_SIZE);
	return pmd;
}
#endif
#endif /* CONFIG_TRANSPARENT_HUGEPAGE */
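/*
 * Usage note for pmdp_collapse_flush(): khugepaged uses it when
 * collapsing a pte-mapped range into a huge page, clearing the pmd under
 * its lock before the old page tables are torn down. Roughly (a sketch
 * of the caller in mm/khugepaged.c, mmu notifier calls and error
 * handling elided):
 *
 *	pmd_ptl = pmd_lock(mm, pmd);
 *	_pmd = pmdp_collapse_flush(vma, address, pmd);
 *	spin_unlock(pmd_ptl);
 */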