/*
 * mm/pgtable-generic.c
 *
 * Generic pgtable methods declared in asm-generic/pgtable.h
 *
 * Copyright (C) 2010 Linus Torvalds
 */

#include <linux/pagemap.h>
#include <asm/tlb.h>
#include <asm-generic/pgtable.h>

/*
 * If a p?d_bad entry is found while walking page tables, report
 * the error, before resetting entry to p?d_none. Usually (but
 * very seldom) called out from the p?d_none_or_clear_bad macros.
 */

void pgd_clear_bad(pgd_t *pgd)
{
        pgd_ERROR(*pgd);
        pgd_clear(pgd);
}

void p4d_clear_bad(p4d_t *p4d)
{
        p4d_ERROR(*p4d);
        p4d_clear(p4d);
}

void pud_clear_bad(pud_t *pud)
{
        pud_ERROR(*pud);
        pud_clear(pud);
}

void pmd_clear_bad(pmd_t *pmd)
{
        pmd_ERROR(*pmd);
        pmd_clear(pmd);
}

#ifndef __HAVE_ARCH_PTEP_SET_ACCESS_FLAGS
/*
 * Only sets the access flags (dirty, accessed), as well as write
 * permission. Furthermore, we know it always gets set to a "more
 * permissive" setting, which allows most architectures to optimize
 * this. We return whether the PTE actually changed, which in turn
 * instructs the caller to do things like update_mmu_cache. This
 * used to be done in the caller, but sparc needs minor faults to
 * force that call on sun4c, so we changed this macro slightly.
 */
int ptep_set_access_flags(struct vm_area_struct *vma,
                          unsigned long address, pte_t *ptep,
                          pte_t entry, int dirty)
{
        int changed = !pte_same(*ptep, entry);
        if (changed) {
                set_pte_at(vma->vm_mm, address, ptep, entry);
                flush_tlb_fix_spurious_fault(vma, address);
        }
        return changed;
}
#endif

#ifndef __HAVE_ARCH_PTEP_CLEAR_YOUNG_FLUSH
int ptep_clear_flush_young(struct vm_area_struct *vma,
                           unsigned long address, pte_t *ptep)
{
        int young;
        young = ptep_test_and_clear_young(vma, address, ptep);
        if (young)
                flush_tlb_page(vma, address);
        return young;
}
#endif

#ifndef __HAVE_ARCH_PTEP_CLEAR_FLUSH
pte_t ptep_clear_flush(struct vm_area_struct *vma, unsigned long address,
                       pte_t *ptep)
{
        struct mm_struct *mm = vma->vm_mm;
        pte_t pte;
        pte = ptep_get_and_clear(mm, address, ptep);
        if (pte_accessible(mm, pte))
                flush_tlb_page(vma, address);
        return pte;
}
#endif

#ifdef CONFIG_TRANSPARENT_HUGEPAGE

#ifndef __HAVE_ARCH_PMDP_SET_ACCESS_FLAGS
int pmdp_set_access_flags(struct vm_area_struct *vma,
                          unsigned long address, pmd_t *pmdp,
                          pmd_t entry, int dirty)
{
        int changed = !pmd_same(*pmdp, entry);
        VM_BUG_ON(address & ~HPAGE_PMD_MASK);
        if (changed) {
                set_pmd_at(vma->vm_mm, address, pmdp, entry);
                flush_pmd_tlb_range(vma, address, address + HPAGE_PMD_SIZE);
        }
        return changed;
}
#endif

#ifndef __HAVE_ARCH_PMDP_CLEAR_YOUNG_FLUSH
int pmdp_clear_flush_young(struct vm_area_struct *vma,
                           unsigned long address, pmd_t *pmdp)
{
        int young;
        VM_BUG_ON(address & ~HPAGE_PMD_MASK);
        young = pmdp_test_and_clear_young(vma, address, pmdp);
        if (young)
                flush_pmd_tlb_range(vma, address, address + HPAGE_PMD_SIZE);
        return young;
}
#endif

#ifndef __HAVE_ARCH_PMDP_HUGE_CLEAR_FLUSH
pmd_t pmdp_huge_clear_flush(struct vm_area_struct *vma, unsigned long address,
                            pmd_t *pmdp)
{
        pmd_t pmd;
        VM_BUG_ON(address & ~HPAGE_PMD_MASK);
        VM_BUG_ON((pmd_present(*pmdp) && !pmd_trans_huge(*pmdp) &&
                   !pmd_devmap(*pmdp)) || !pmd_present(*pmdp));
        pmd = pmdp_huge_get_and_clear(vma->vm_mm, address, pmdp);
        flush_pmd_tlb_range(vma, address, address + HPAGE_PMD_SIZE);
        return pmd;
}

#ifdef CONFIG_HAVE_ARCH_TRANSPARENT_HUGEPAGE_PUD
pud_t pudp_huge_clear_flush(struct vm_area_struct *vma, unsigned long address,
                            pud_t *pudp)
{
        pud_t pud;

        VM_BUG_ON(address & ~HPAGE_PUD_MASK);
        VM_BUG_ON(!pud_trans_huge(*pudp) && !pud_devmap(*pudp));
        pud = pudp_huge_get_and_clear(vma->vm_mm, address, pudp);
        flush_pud_tlb_range(vma, address, address + HPAGE_PUD_SIZE);
        return pud;
}
#endif
#endif
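/*
 * Usage sketch (illustrative only, not a call site in this file;
 * addresses and pmd pointers below are placeholder names): a caller
 * relocating a huge pmd clears and flushes the old entry first, then
 * installs the returned value at the destination, so no stale TLB
 * translation can outlive the move:
 *
 *        pmd_t pmd = pmdp_huge_clear_flush(vma, old_addr, old_pmdp);
 *        set_pmd_at(vma->vm_mm, new_addr, new_pmdp, pmd);
 */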
#ifndef __HAVE_ARCH_PGTABLE_DEPOSIT
void pgtable_trans_huge_deposit(struct mm_struct *mm, pmd_t *pmdp,
                                pgtable_t pgtable)
{
        assert_spin_locked(pmd_lockptr(mm, pmdp));

        /* FIFO */
        if (!pmd_huge_pte(mm, pmdp))
                INIT_LIST_HEAD(&pgtable->lru);
        else
                list_add(&pgtable->lru, &pmd_huge_pte(mm, pmdp)->lru);
        pmd_huge_pte(mm, pmdp) = pgtable;
}
#endif

#ifndef __HAVE_ARCH_PGTABLE_WITHDRAW
/* no "address" argument so destroys page coloring of some arch */
pgtable_t pgtable_trans_huge_withdraw(struct mm_struct *mm, pmd_t *pmdp)
{
        pgtable_t pgtable;

        assert_spin_locked(pmd_lockptr(mm, pmdp));

        /* FIFO */
        pgtable = pmd_huge_pte(mm, pmdp);
        pmd_huge_pte(mm, pmdp) = list_first_entry_or_null(&pgtable->lru,
                                                          struct page, lru);
        if (pmd_huge_pte(mm, pmdp))
                list_del(&pgtable->lru);
        return pgtable;
}
#endif

#ifndef __HAVE_ARCH_PMDP_INVALIDATE
void pmdp_invalidate(struct vm_area_struct *vma, unsigned long address,
                     pmd_t *pmdp)
{
        pmd_t entry = *pmdp;
        set_pmd_at(vma->vm_mm, address, pmdp, pmd_mknotpresent(entry));
        flush_pmd_tlb_range(vma, address, address + HPAGE_PMD_SIZE);
}
#endif

#ifndef pmdp_collapse_flush
pmd_t pmdp_collapse_flush(struct vm_area_struct *vma, unsigned long address,
                          pmd_t *pmdp)
{
        /*
         * pmd and hugepage pte format are the same, so we could
         * use the same function.
         */
        pmd_t pmd;

        VM_BUG_ON(address & ~HPAGE_PMD_MASK);
        VM_BUG_ON(pmd_trans_huge(*pmdp));
        pmd = pmdp_huge_get_and_clear(vma->vm_mm, address, pmdp);

        /* collapse entails shooting down ptes, not the pmd */
        flush_tlb_range(vma, address, address + HPAGE_PMD_SIZE);
        return pmd;
}
#endif
#endif /* CONFIG_TRANSPARENT_HUGEPAGE */
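/*
 * Usage sketch (illustrative only, not a call site in this file;
 * variable names are placeholders): the deposit/withdraw helpers
 * above cache a preallocated pte page table behind a huge pmd so
 * that a later split cannot fail for lack of memory.  Both run
 * under the pmd lock:
 *
 *        ptl = pmd_lock(mm, pmdp);
 *        pgtable_trans_huge_deposit(mm, pmdp, pgtable);
 *        spin_unlock(ptl);
 *
 * and later, when the huge pmd is split or zapped:
 *
 *        ptl = pmd_lock(mm, pmdp);
 *        pgtable = pgtable_trans_huge_withdraw(mm, pmdp);
 *        spin_unlock(ptl);
 */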