/*
 *  mm/pgtable-generic.c
 *
 *  Generic pgtable methods declared in asm-generic/pgtable.h
 *
 *  Copyright (C) 2010  Linus Torvalds
 */

#include <linux/pagemap.h>
#include <asm/tlb.h>
#include <asm-generic/pgtable.h>

/*
 * If a p?d_bad entry is found while walking page tables, report
 * the error before resetting the entry to p?d_none.  When these are
 * called, it is usually from the p?d_none_or_clear_bad() macros,
 * though that happens very seldom.
 */

void pgd_clear_bad(pgd_t *pgd)
{
	pgd_ERROR(*pgd);
	pgd_clear(pgd);
}

void pud_clear_bad(pud_t *pud)
{
	pud_ERROR(*pud);
	pud_clear(pud);
}

void pmd_clear_bad(pmd_t *pmd)
{
	pmd_ERROR(*pmd);
	pmd_clear(pmd);
}

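/*
 * Illustrative sketch (not part of this file): the usual way the
 * functions above are reached is via the p?d_none_or_clear_bad()
 * helpers from asm-generic/pgtable.h, which skip empty entries and
 * reset corrupt ones while a walker iterates a range, e.g.:
 *
 *	pmd = pmd_offset(pud, addr);
 *	do {
 *		next = pmd_addr_end(addr, end);
 *		if (pmd_none_or_clear_bad(pmd))
 *			continue;
 *		(walk the pte level here)
 *	} while (pmd++, addr = next, addr != end);
 */
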
#ifndef __HAVE_ARCH_PTEP_SET_ACCESS_FLAGS
/*
 * Only sets the access flags (dirty, accessed), as well as write
 * permission. Furthermore, we know it always gets set to a "more
 * permissive" setting, which allows most architectures to optimize
 * this. We return whether the PTE actually changed, which in turn
 * instructs the caller to do things like update_mmu_cache().  This
 * used to be done in the caller, but sparc needs minor faults to
 * force that call on sun4c, so we changed this macro slightly.
 */
int ptep_set_access_flags(struct vm_area_struct *vma,
			  unsigned long address, pte_t *ptep,
			  pte_t entry, int dirty)
{
	int changed = !pte_same(*ptep, entry);
	if (changed) {
		set_pte_at(vma->vm_mm, address, ptep, entry);
		flush_tlb_fix_spurious_fault(vma, address);
	}
	return changed;
}
#endif

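/*
 * Hypothetical caller sketch, modeled on the generic fault path (the
 * variable names are illustrative): the return value tells the caller
 * whether it still needs to update the MMU cache for the address.
 *
 *	entry = pte_mkyoung(entry);
 *	if (write_fault)
 *		entry = pte_mkdirty(entry);
 *	if (ptep_set_access_flags(vma, address, ptep, entry, write_fault))
 *		update_mmu_cache(vma, address, ptep);
 */
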
#ifndef __HAVE_ARCH_PMDP_SET_ACCESS_FLAGS
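/*
 * pmd counterpart of ptep_set_access_flags() above, used for
 * transparent huge pages: set the updated entry and flush the whole
 * huge-page range.  Compiles to BUG() when CONFIG_TRANSPARENT_HUGEPAGE
 * is off, since no caller should reach it then.
 */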
int pmdp_set_access_flags(struct vm_area_struct *vma,
			  unsigned long address, pmd_t *pmdp,
			  pmd_t entry, int dirty)
{
#ifdef CONFIG_TRANSPARENT_HUGEPAGE
	int changed = !pmd_same(*pmdp, entry);
	VM_BUG_ON(address & ~HPAGE_PMD_MASK);
	if (changed) {
		set_pmd_at(vma->vm_mm, address, pmdp, entry);
		flush_tlb_range(vma, address, address + HPAGE_PMD_SIZE);
	}
	return changed;
#else /* CONFIG_TRANSPARENT_HUGEPAGE */
	BUG();
	return 0;
#endif /* CONFIG_TRANSPARENT_HUGEPAGE */
}
#endif

#ifndef __HAVE_ARCH_PTEP_CLEAR_YOUNG_FLUSH
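/*
 * Clear the "accessed" bit and flush the stale TLB entry if the bit
 * was set, so hardware can mark the page young again on the next
 * access.  Used, e.g., by reclaim's reference tracking.
 */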
int ptep_clear_flush_young(struct vm_area_struct *vma,
			   unsigned long address, pte_t *ptep)
{
	int young;
	young = ptep_test_and_clear_young(vma, address, ptep);
	if (young)
		flush_tlb_page(vma, address);
	return young;
}
#endif

#ifndef __HAVE_ARCH_PMDP_CLEAR_YOUNG_FLUSH
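/*
 * pmd counterpart of ptep_clear_flush_young() above.  Only meaningful
 * with transparent huge pages; without CONFIG_TRANSPARENT_HUGEPAGE it
 * BUG()s, because no caller should exist there.
 */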
int pmdp_clear_flush_young(struct vm_area_struct *vma,
			   unsigned long address, pmd_t *pmdp)
{
	int young;
#ifdef CONFIG_TRANSPARENT_HUGEPAGE
	VM_BUG_ON(address & ~HPAGE_PMD_MASK);
#else
	BUG();
#endif /* CONFIG_TRANSPARENT_HUGEPAGE */
	young = pmdp_test_and_clear_young(vma, address, pmdp);
	if (young)
		flush_tlb_range(vma, address, address + HPAGE_PMD_SIZE);
	return young;
}
#endif

#ifndef __HAVE_ARCH_PTEP_CLEAR_FLUSH
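/*
 * Clear the pte and flush the stale TLB entry, returning the old pte.
 * The pte_accessible() check lets architectures skip the flush when
 * the pte could never have been cached in the TLB.
 */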
pte_t ptep_clear_flush(struct vm_area_struct *vma, unsigned long address,
		       pte_t *ptep)
{
	struct mm_struct *mm = vma->vm_mm;
	pte_t pte;
	pte = ptep_get_and_clear(mm, address, ptep);
	if (pte_accessible(mm, pte))
		flush_tlb_page(vma, address);
	return pte;
}
#endif

#ifndef __HAVE_ARCH_PMDP_CLEAR_FLUSH
#ifdef CONFIG_TRANSPARENT_HUGEPAGE
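/*
 * Clear a huge pmd and flush the TLB for the whole huge-page range,
 * returning the old entry.  Unlike ptep_clear_flush() above, the
 * flush here is unconditional.
 */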
pmd_t pmdp_clear_flush(struct vm_area_struct *vma, unsigned long address,
		       pmd_t *pmdp)
{
	pmd_t pmd;
	VM_BUG_ON(address & ~HPAGE_PMD_MASK);
	pmd = pmdp_get_and_clear(vma->vm_mm, address, pmdp);
	flush_tlb_range(vma, address, address + HPAGE_PMD_SIZE);
	return pmd;
}
#endif /* CONFIG_TRANSPARENT_HUGEPAGE */
#endif

#ifndef __HAVE_ARCH_PMDP_SPLITTING_FLUSH
#ifdef CONFIG_TRANSPARENT_HUGEPAGE
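/*
 * Mark the huge pmd as under splitting and flush.  The flush is what
 * serializes against gup-fast: with this generic version,
 * flush_tlb_range() interrupts the other CPUs, and gup-fast runs with
 * interrupts disabled, so it cannot race past the splitting bit.
 */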
void pmdp_splitting_flush(struct vm_area_struct *vma, unsigned long address,
			  pmd_t *pmdp)
{
	pmd_t pmd = pmd_mksplitting(*pmdp);
	VM_BUG_ON(address & ~HPAGE_PMD_MASK);
	set_pmd_at(vma->vm_mm, address, pmdp, pmd);
	/* tlb flush only to serialize against gup-fast */
	flush_tlb_range(vma, address, address + HPAGE_PMD_SIZE);
}
#endif /* CONFIG_TRANSPARENT_HUGEPAGE */
#endif

#ifndef __HAVE_ARCH_PGTABLE_DEPOSIT
#ifdef CONFIG_TRANSPARENT_HUGEPAGE
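/*
 * Queue a preallocated pte page table behind a huge pmd, so it can be
 * reused without allocation when the huge pmd is split.  Hypothetical
 * sketch of the deposit/withdraw pairing, modeled on the THP code
 * (not part of this file):
 *
 *	pgtable = pte_alloc_one(mm, haddr);
 *	...
 *	pgtable_trans_huge_deposit(mm, pmd, pgtable);	(at map time)
 *	...
 *	pgtable = pgtable_trans_huge_withdraw(mm, pmd);	(at split time)
 *	pmd_populate(mm, pmd, pgtable);
 */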
void pgtable_trans_huge_deposit(struct mm_struct *mm, pmd_t *pmdp,
				pgtable_t pgtable)
{
	assert_spin_locked(pmd_lockptr(mm, pmdp));

	/* FIFO */
	if (!pmd_huge_pte(mm, pmdp))
		INIT_LIST_HEAD(&pgtable->lru);
	else
		list_add(&pgtable->lru, &pmd_huge_pte(mm, pmdp)->lru);
	pmd_huge_pte(mm, pmdp) = pgtable;
}
#endif /* CONFIG_TRANSPARENT_HUGEPAGE */
#endif

#ifndef __HAVE_ARCH_PGTABLE_WITHDRAW
#ifdef CONFIG_TRANSPARENT_HUGEPAGE
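/*
 * Withdraw a previously deposited pte page table for reuse when a
 * huge pmd is split.  Called with the pmd lock held, mirroring
 * pgtable_trans_huge_deposit() above.
 */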
/* no "address" argument, so this destroys the page coloring of some arches */
pgtable_t pgtable_trans_huge_withdraw(struct mm_struct *mm, pmd_t *pmdp)
{
	pgtable_t pgtable;

	assert_spin_locked(pmd_lockptr(mm, pmdp));

	/* FIFO */
	pgtable = pmd_huge_pte(mm, pmdp);
	if (list_empty(&pgtable->lru))
		pmd_huge_pte(mm, pmdp) = NULL;
	else {
		pmd_huge_pte(mm, pmdp) = list_entry(pgtable->lru.next,
						    struct page, lru);
		list_del(&pgtable->lru);
	}
	return pgtable;
}
#endif /* CONFIG_TRANSPARENT_HUGEPAGE */
#endif

#ifndef __HAVE_ARCH_PMDP_INVALIDATE
#ifdef CONFIG_TRANSPARENT_HUGEPAGE
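/*
 * Transiently mark the pmd not-present and flush, so that concurrent
 * hardware walks cannot use a stale huge mapping while the caller
 * rewrites it.  Any NUMA hinting bit is cleared first, so the
 * temporary not-present entry cannot be mistaken for a NUMA hinting
 * fault.
 */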
void pmdp_invalidate(struct vm_area_struct *vma, unsigned long address,
		     pmd_t *pmdp)
{
	pmd_t entry = *pmdp;
	if (pmd_numa(entry))
		entry = pmd_mknonnuma(entry);
	set_pmd_at(vma->vm_mm, address, pmdp, pmd_mknotpresent(entry));
	flush_tlb_range(vma, address, address + HPAGE_PMD_SIZE);
}
#endif /* CONFIG_TRANSPARENT_HUGEPAGE */
#endif