// SPDX-License-Identifier: GPL-2.0
/*
 * mm/pgtable-generic.c
 *
 * Generic pgtable methods declared in asm-generic/pgtable.h
 *
 * Copyright (C) 2010 Linus Torvalds
 */

#include <linux/pagemap.h>
#include <asm/tlb.h>
#include <asm-generic/pgtable.h>

/*
 * If a p?d_bad entry is found while walking page tables, report the
 * error before resetting the entry to p?d_none.  These are usually
 * called from the p?d_none_or_clear_bad macros, and only very seldom
 * at that, since bad entries should not normally appear.
 */

void pgd_clear_bad(pgd_t *pgd)
{
	pgd_ERROR(*pgd);
	pgd_clear(pgd);
}

void p4d_clear_bad(p4d_t *p4d)
{
	p4d_ERROR(*p4d);
	p4d_clear(p4d);
}

void pud_clear_bad(pud_t *pud)
{
	pud_ERROR(*pud);
	pud_clear(pud);
}

void pmd_clear_bad(pmd_t *pmd)
{
	pmd_ERROR(*pmd);
	pmd_clear(pmd);
}

#ifndef __HAVE_ARCH_PTEP_SET_ACCESS_FLAGS
/*
 * Only sets the access flags (dirty, accessed), as well as write
 * permission. Furthermore, we know it always gets set to a "more
 * permissive" setting, which allows most architectures to optimize
 * this. We return whether the PTE actually changed, which in turn
 * instructs the caller to do things like update_mmu_cache().  This
 * used to be done in the caller, but sparc needs minor faults to
 * force that call on sun4c, so we changed this macro slightly.
 */
int ptep_set_access_flags(struct vm_area_struct *vma,
			  unsigned long address, pte_t *ptep,
			  pte_t entry, int dirty)
{
	int changed = !pte_same(*ptep, entry);
	if (changed) {
		set_pte_at(vma->vm_mm, address, ptep, entry);
		flush_tlb_fix_spurious_fault(vma, address);
	}
	return changed;
}
#endif

#ifndef __HAVE_ARCH_PTEP_CLEAR_YOUNG_FLUSH
int ptep_clear_flush_young(struct vm_area_struct *vma,
			   unsigned long address, pte_t *ptep)
{
	int young;
	young = ptep_test_and_clear_young(vma, address, ptep);
	if (young)
		flush_tlb_page(vma, address);
	return young;
}
#endif

#ifndef __HAVE_ARCH_PTEP_CLEAR_FLUSH
pte_t ptep_clear_flush(struct vm_area_struct *vma, unsigned long address,
		       pte_t *ptep)
{
	struct mm_struct *mm = vma->vm_mm;
	pte_t pte;
	pte = ptep_get_and_clear(mm, address, ptep);
	if (pte_accessible(mm, pte))
		flush_tlb_page(vma, address);
	return pte;
}
#endif

#ifdef CONFIG_TRANSPARENT_HUGEPAGE

#ifndef __HAVE_ARCH_PMDP_SET_ACCESS_FLAGS
int pmdp_set_access_flags(struct vm_area_struct *vma,
			  unsigned long address, pmd_t *pmdp,
			  pmd_t entry, int dirty)
{
	int changed = !pmd_same(*pmdp, entry);
	VM_BUG_ON(address & ~HPAGE_PMD_MASK);
	if (changed) {
		set_pmd_at(vma->vm_mm, address, pmdp, entry);
		flush_pmd_tlb_range(vma, address, address + HPAGE_PMD_SIZE);
	}
	return changed;
}
#endif

#ifndef __HAVE_ARCH_PMDP_CLEAR_YOUNG_FLUSH
int pmdp_clear_flush_young(struct vm_area_struct *vma,
			   unsigned long address, pmd_t *pmdp)
{
	int young;
	VM_BUG_ON(address & ~HPAGE_PMD_MASK);
	young = pmdp_test_and_clear_young(vma, address, pmdp);
	if (young)
		flush_pmd_tlb_range(vma, address, address + HPAGE_PMD_SIZE);
	return young;
}
#endif

#ifndef __HAVE_ARCH_PMDP_HUGE_CLEAR_FLUSH
pmd_t pmdp_huge_clear_flush(struct vm_area_struct *vma, unsigned long address,
			    pmd_t *pmdp)
{
	pmd_t pmd;
	VM_BUG_ON(address & ~HPAGE_PMD_MASK);
	VM_BUG_ON((pmd_present(*pmdp) && !pmd_trans_huge(*pmdp) &&
		   !pmd_devmap(*pmdp)) || !pmd_present(*pmdp));
	pmd = pmdp_huge_get_and_clear(vma->vm_mm, address, pmdp);
	flush_pmd_tlb_range(vma, address, address + HPAGE_PMD_SIZE);
	return pmd;
}

#ifdef CONFIG_HAVE_ARCH_TRANSPARENT_HUGEPAGE_PUD
pud_t pudp_huge_clear_flush(struct vm_area_struct *vma, unsigned long address,
			    pud_t *pudp)
{
	pud_t pud;

	VM_BUG_ON(address & ~HPAGE_PUD_MASK);
	VM_BUG_ON(!pud_trans_huge(*pudp) && !pud_devmap(*pudp));
	pud = pudp_huge_get_and_clear(vma->vm_mm, address, pudp);
	flush_pud_tlb_range(vma, address, address + HPAGE_PUD_SIZE);
	return pud;
}
#endif
#endif

#ifndef __HAVE_ARCH_PGTABLE_DEPOSIT
void pgtable_trans_huge_deposit(struct mm_struct *mm, pmd_t *pmdp,
				pgtable_t pgtable)
{
	assert_spin_locked(pmd_lockptr(mm, pmdp));

	/* FIFO */
	if (!pmd_huge_pte(mm, pmdp))
		INIT_LIST_HEAD(&pgtable->lru);
	else
		list_add(&pgtable->lru, &pmd_huge_pte(mm, pmdp)->lru);
	pmd_huge_pte(mm, pmdp) = pgtable;
}
#endif

#ifndef __HAVE_ARCH_PGTABLE_WITHDRAW
/* no "address" argument, so this destroys the page coloring of some archs */
pgtable_t pgtable_trans_huge_withdraw(struct mm_struct *mm, pmd_t *pmdp)
{
	pgtable_t pgtable;

	assert_spin_locked(pmd_lockptr(mm, pmdp));

	/* FIFO */
	pgtable = pmd_huge_pte(mm, pmdp);
	pmd_huge_pte(mm, pmdp) = list_first_entry_or_null(&pgtable->lru,
							  struct page, lru);
	if (pmd_huge_pte(mm, pmdp))
		list_del(&pgtable->lru);
	return pgtable;
}
#endif

#ifndef __HAVE_ARCH_PMDP_INVALIDATE
/*
 * Mark a huge pmd as not present while leaving the rest of the entry
 * intact; the previous pmd value is returned to the caller.
 */
pmd_t pmdp_invalidate(struct vm_area_struct *vma, unsigned long address,
		      pmd_t *pmdp)
{
	pmd_t old = pmdp_establish(vma, address, pmdp, pmd_mknotpresent(*pmdp));
	flush_pmd_tlb_range(vma, address, address + HPAGE_PMD_SIZE);
	return old;
}
#endif

#ifndef pmdp_collapse_flush
pmd_t pmdp_collapse_flush(struct vm_area_struct *vma, unsigned long address,
			  pmd_t *pmdp)
{
	/*
	 * The pmd and hugepage pte formats are the same, so we can
	 * use the same function here.
	 */
	pmd_t pmd;

	VM_BUG_ON(address & ~HPAGE_PMD_MASK);
	VM_BUG_ON(pmd_trans_huge(*pmdp));
	pmd = pmdp_huge_get_and_clear(vma->vm_mm, address, pmdp);

	/* collapse entails shooting down the ptes, not the pmd */
	flush_tlb_range(vma, address, address + HPAGE_PMD_SIZE);
	return pmd;
}
#endif
#endif /* CONFIG_TRANSPARENT_HUGEPAGE */
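
/*
 * Example for illustration only (kept under #if 0, never compiled): an
 * architecture that needs different semantics for one of the helpers
 * above opts out of the generic version by defining the matching
 * __HAVE_ARCH_* symbol in its <asm/pgtable.h> and providing its own
 * implementation, so the corresponding #ifndef in this file drops the
 * generic one.  A minimal sketch of such an override of pmdp_invalidate
 * follows; the arch name "myarch", the file paths and the helper
 * my_arch_flush_soft_tlb() are hypothetical.
 */
#if 0
/* arch/myarch/include/asm/pgtable.h */
#define __HAVE_ARCH_PMDP_INVALIDATE
extern pmd_t pmdp_invalidate(struct vm_area_struct *vma, unsigned long address,
			     pmd_t *pmdp);

/* arch/myarch/mm/pgtable.c */
pmd_t pmdp_invalidate(struct vm_area_struct *vma, unsigned long address,
		      pmd_t *pmdp)
{
	/* same core steps as the generic version above */
	pmd_t old = pmdp_establish(vma, address, pmdp, pmd_mknotpresent(*pmdp));

	flush_pmd_tlb_range(vma, address, address + HPAGE_PMD_SIZE);

	/* hypothetical arch-specific extra work */
	my_arch_flush_soft_tlb(vma->vm_mm, address);
	return old;
}
#endif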