/*
 *  mm/pgtable-generic.c
 *
 *  Generic pgtable methods declared in asm-generic/pgtable.h
 *
 *  Copyright (C) 2010  Linus Torvalds
 */

#include <linux/pagemap.h>
#include <asm/tlb.h>
#include <asm-generic/pgtable.h>

/*
 * If a p?d_bad entry is found while walking page tables, report
 * the error before resetting the entry to p?d_none.  These helpers
 * are normally called, though very seldom, from the
 * p?d_none_or_clear_bad macros.
 */

void pgd_clear_bad(pgd_t *pgd)
{
	pgd_ERROR(*pgd);
	pgd_clear(pgd);
}

void pud_clear_bad(pud_t *pud)
{
	pud_ERROR(*pud);
	pud_clear(pud);
}

void pmd_clear_bad(pmd_t *pmd)
{
	pmd_ERROR(*pmd);
	pmd_clear(pmd);
}
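
/*
 * Illustrative sketch (not part of the original file): these clear_bad
 * helpers are reached through the p?d_none_or_clear_bad macros, which a
 * typical page-table walker uses like this:
 *
 *	pmd = pmd_offset(pud, addr);
 *	do {
 *		next = pmd_addr_end(addr, end);
 *		if (pmd_none_or_clear_bad(pmd))
 *			continue;
 *		... walk the pte level ...
 *	} while (pmd++, addr = next, addr != end);
 */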

#ifndef __HAVE_ARCH_PTEP_SET_ACCESS_FLAGS
/*
 * Only sets the access flags (dirty, accessed), as well as write
 * permission. Furthermore, we know it always gets set to a "more
 * permissive" setting, which allows most architectures to optimize
 * this. We return whether the PTE actually changed, which in turn
 * instructs the caller to do things like update_mmu_cache.  This
 * used to be done in the caller, but sparc needs minor faults to
 * force that call on sun4c, so we changed this macro slightly.
 */
int ptep_set_access_flags(struct vm_area_struct *vma,
			  unsigned long address, pte_t *ptep,
			  pte_t entry, int dirty)
{
	int changed = !pte_same(*ptep, entry);
	if (changed) {
		set_pte_at(vma->vm_mm, address, ptep, entry);
		flush_tlb_fix_spurious_fault(vma, address);
	}
	return changed;
}
#endif
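
/*
 * Usage sketch (illustrative, not from this file): a fault handler
 * typically acts on the return value to refresh the MMU cache, e.g.:
 *
 *	if (ptep_set_access_flags(vma, address, ptep, entry, dirty))
 *		update_mmu_cache(vma, address, ptep);
 */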

#ifndef __HAVE_ARCH_PTEP_CLEAR_YOUNG_FLUSH
int ptep_clear_flush_young(struct vm_area_struct *vma,
			   unsigned long address, pte_t *ptep)
{
	int young;
	young = ptep_test_and_clear_young(vma, address, ptep);
	if (young)
		flush_tlb_page(vma, address);
	return young;
}
#endif
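
/*
 * Usage sketch (illustrative): page-aging code uses this to test and
 * clear the accessed bit and shoot down any stale TLB entry in one go,
 * e.g. something like:
 *
 *	if (ptep_clear_flush_young(vma, address, pte))
 *		referenced++;
 */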

#ifndef __HAVE_ARCH_PTEP_CLEAR_FLUSH
pte_t ptep_clear_flush(struct vm_area_struct *vma, unsigned long address,
		       pte_t *ptep)
{
	struct mm_struct *mm = vma->vm_mm;
	pte_t pte;
	pte = ptep_get_and_clear(mm, address, ptep);
	if (pte_accessible(mm, pte))
		flush_tlb_page(vma, address);
	return pte;
}
#endif
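
/*
 * Usage sketch (illustrative): unmap paths clear the pte and flush in
 * one step so no CPU keeps writing through a stale TLB entry, e.g.:
 *
 *	pteval = ptep_clear_flush(vma, address, pte);
 *	... propagate pte_dirty(pteval) / pte_young(pteval) to the page ...
 */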

#ifdef CONFIG_TRANSPARENT_HUGEPAGE

#ifndef __HAVE_ARCH_FLUSH_PMD_TLB_RANGE

/*
 * ARCHes with special requirements for evicting THP backing TLB entries can
 * implement this. It can also help optimize the normal TLB flush in the THP
 * regime: the stock flush_tlb_range() typically has an optimization to nuke
 * the entire TLB if the flush span exceeds a threshold, which will likely be
 * true for a single huge page. A single THP flush would thus invalidate the
 * entire TLB, which is not desirable.
 * e.g. see arch/arc: flush_pmd_tlb_range
 */
#define flush_pmd_tlb_range(vma, addr, end)	flush_tlb_range(vma, addr, end)
#endif
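
/*
 * Hypothetical sketch of how an architecture opts out of the generic
 * definition above (see arch/arc for a real implementation): declare,
 * in the arch's pgtable/hugepage header,
 *
 *	#define __HAVE_ARCH_FLUSH_PMD_TLB_RANGE
 *	void flush_pmd_tlb_range(struct vm_area_struct *vma,
 *				 unsigned long start, unsigned long end);
 */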

#ifndef __HAVE_ARCH_PMDP_SET_ACCESS_FLAGS
int pmdp_set_access_flags(struct vm_area_struct *vma,
			  unsigned long address, pmd_t *pmdp,
			  pmd_t entry, int dirty)
{
	int changed = !pmd_same(*pmdp, entry);
	VM_BUG_ON(address & ~HPAGE_PMD_MASK);
	if (changed) {
		set_pmd_at(vma->vm_mm, address, pmdp, entry);
		flush_pmd_tlb_range(vma, address, address + HPAGE_PMD_SIZE);
	}
	return changed;
}
#endif
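
/*
 * Usage sketch (illustrative): the THP write-fault path acts on the
 * return value much like the pte variant above, e.g.:
 *
 *	if (pmdp_set_access_flags(vma, haddr, pmd, entry, 1))
 *		update_mmu_cache_pmd(vma, address, pmd);
 */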

#ifndef __HAVE_ARCH_PMDP_CLEAR_YOUNG_FLUSH
int pmdp_clear_flush_young(struct vm_area_struct *vma,
			   unsigned long address, pmd_t *pmdp)
{
	int young;
	VM_BUG_ON(address & ~HPAGE_PMD_MASK);
	young = pmdp_test_and_clear_young(vma, address, pmdp);
	if (young)
		flush_pmd_tlb_range(vma, address, address + HPAGE_PMD_SIZE);
	return young;
}
#endif
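
/*
 * Usage sketch (illustrative): the huge-page counterpart of the pte
 * aging helper, used the same way by reference tracking, e.g.:
 *
 *	if (pmdp_clear_flush_young(vma, address, pmd))
 *		referenced++;
 */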

#ifndef __HAVE_ARCH_PMDP_HUGE_CLEAR_FLUSH
pmd_t pmdp_huge_clear_flush(struct vm_area_struct *vma, unsigned long address,
			    pmd_t *pmdp)
{
	pmd_t pmd;
	VM_BUG_ON(address & ~HPAGE_PMD_MASK);
	VM_BUG_ON(!pmd_trans_huge(*pmdp));
	pmd = pmdp_huge_get_and_clear(vma->vm_mm, address, pmdp);
	flush_pmd_tlb_range(vma, address, address + HPAGE_PMD_SIZE);
	return pmd;
}
#endif
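
/*
 * Usage sketch (illustrative): a caller that must atomically take a
 * huge pmd out of service does so under the pmd lock, roughly:
 *
 *	ptl = pmd_lock(mm, pmd);
 *	orig_pmd = pmdp_huge_clear_flush(vma, haddr, pmd);
 *	... repopulate or free based on orig_pmd ...
 *	spin_unlock(ptl);
 */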

#ifndef __HAVE_ARCH_PMDP_SPLITTING_FLUSH
void pmdp_splitting_flush(struct vm_area_struct *vma, unsigned long address,
			  pmd_t *pmdp)
{
	pmd_t pmd = pmd_mksplitting(*pmdp);
	VM_BUG_ON(address & ~HPAGE_PMD_MASK);
	set_pmd_at(vma->vm_mm, address, pmdp, pmd);
	/* tlb flush only to serialize against gup-fast */
	flush_pmd_tlb_range(vma, address, address + HPAGE_PMD_SIZE);
}
#endif
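
/*
 * Usage sketch (illustrative): THP splitting marks the pmd as splitting
 * before rewriting the mapping as ptes; the flush is what forces a
 * concurrent gup-fast walker to observe the splitting bit, e.g.:
 *
 *	pmdp_splitting_flush(vma, haddr, pmd);
 *	... now build the pte-level mapping for the subpages ...
 */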

#ifndef __HAVE_ARCH_PGTABLE_DEPOSIT
void pgtable_trans_huge_deposit(struct mm_struct *mm, pmd_t *pmdp,
				pgtable_t pgtable)
{
	assert_spin_locked(pmd_lockptr(mm, pmdp));

	/* FIFO */
	if (!pmd_huge_pte(mm, pmdp))
		INIT_LIST_HEAD(&pgtable->lru);
	else
		list_add(&pgtable->lru, &pmd_huge_pte(mm, pmdp)->lru);
	pmd_huge_pte(mm, pmdp) = pgtable;
}
#endif

#ifndef __HAVE_ARCH_PGTABLE_WITHDRAW
/* no "address" argument, so this destroys the page colouring of some archs */
pgtable_t pgtable_trans_huge_withdraw(struct mm_struct *mm, pmd_t *pmdp)
{
	pgtable_t pgtable;

	assert_spin_locked(pmd_lockptr(mm, pmdp));

	/* FIFO */
	pgtable = pmd_huge_pte(mm, pmdp);
	if (list_empty(&pgtable->lru))
		pmd_huge_pte(mm, pmdp) = NULL;
	else {
		pmd_huge_pte(mm, pmdp) = list_entry(pgtable->lru.next,
						    struct page, lru);
		list_del(&pgtable->lru);
	}
	return pgtable;
}
#endif
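
/*
 * Usage sketch (illustrative): deposit and withdraw pair up over the
 * lifetime of a huge pmd. Roughly, the THP fault path deposits a
 * preallocated pte page and the zap/split paths withdraw it:
 *
 *	pgtable_trans_huge_deposit(mm, pmd, pgtable);	(at THP fault)
 *	...
 *	pgtable = pgtable_trans_huge_withdraw(mm, pmd);	(at zap or split)
 *	pte_free(mm, pgtable);		(or reuse it via pmd_populate())
 */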

#ifndef __HAVE_ARCH_PMDP_INVALIDATE
void pmdp_invalidate(struct vm_area_struct *vma, unsigned long address,
		     pmd_t *pmdp)
{
	pmd_t entry = *pmdp;
	set_pmd_at(vma->vm_mm, address, pmdp, pmd_mknotpresent(entry));
	flush_pmd_tlb_range(vma, address, address + HPAGE_PMD_SIZE);
}
#endif
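
/*
 * Usage sketch (illustrative): split code uses this to keep the pmd
 * non-present while it is being rewritten, closing the race with
 * hardware page-table walkers, e.g.:
 *
 *	pmdp_invalidate(vma, haddr, pmd);
 *	pmd_populate(mm, pmd, pgtable);
 */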

#ifndef pmdp_collapse_flush
pmd_t pmdp_collapse_flush(struct vm_area_struct *vma, unsigned long address,
			  pmd_t *pmdp)
{
	/*
	 * The pmd and huge-page pte formats are the same,
	 * so we can use the same function here.
	 */
	pmd_t pmd;

	VM_BUG_ON(address & ~HPAGE_PMD_MASK);
	VM_BUG_ON(pmd_trans_huge(*pmdp));
	pmd = pmdp_huge_get_and_clear(vma->vm_mm, address, pmdp);
	flush_pmd_tlb_range(vma, address, address + HPAGE_PMD_SIZE);
	return pmd;
}
#endif
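
/*
 * Usage sketch (illustrative): khugepaged clears the pmd covering the
 * range being collapsed before installing the huge mapping, e.g.:
 *
 *	_pmd = pmdp_collapse_flush(vma, address, pmd);
 *	... copy the small pages into the huge page, then install it ...
 */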

#endif /* CONFIG_TRANSPARENT_HUGEPAGE */