xref: /openbmc/linux/mm/mapping_dirty_helpers.c (revision e9b7b8b3)
// SPDX-License-Identifier: GPL-2.0
#include <linux/pagewalk.h>
#include <linux/hugetlb.h>
#include <linux/bitops.h>
#include <linux/mmu_notifier.h>
#include <linux/mm_inline.h>
#include <asm/cacheflush.h>
#include <asm/tlbflush.h>

/**
 * struct wp_walk - Private struct for pagetable walk callbacks
 * @range: Range for mmu notifiers
 * @tlbflush_start: Address of first modified pte
 * @tlbflush_end: Address of last modified pte + 1
 * @total: Total number of modified ptes
 */
struct wp_walk {
	struct mmu_notifier_range range;
	unsigned long tlbflush_start;
	unsigned long tlbflush_end;
	unsigned long total;
};

/**
 * wp_pte - Write-protect a pte
 * @pte: Pointer to the pte
 * @addr: The start of the virtual address range to protect
 * @end: The end of the virtual address range to protect
 * @walk: pagetable walk callback argument
 *
 * The function write-protects a pte and records the range in
 * virtual address space of touched ptes for efficient range TLB flushes.
 */
static int wp_pte(pte_t *pte, unsigned long addr, unsigned long end,
		  struct mm_walk *walk)
{
	struct wp_walk *wpwalk = walk->private;
	pte_t ptent = ptep_get(pte);

	if (pte_write(ptent)) {
		pte_t old_pte = ptep_modify_prot_start(walk->vma, addr, pte);

		ptent = pte_wrprotect(old_pte);
		ptep_modify_prot_commit(walk->vma, addr, pte, old_pte, ptent);
		wpwalk->total++;
		wpwalk->tlbflush_start = min(wpwalk->tlbflush_start, addr);
		wpwalk->tlbflush_end = max(wpwalk->tlbflush_end,
					   addr + PAGE_SIZE);
	}

	return 0;
}
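
/*
 * Worked example (editorial addition, assuming 4 KiB pages): if a walk
 * write-protects ptes at addresses 0x1000 and 0x5000, the min()/max()
 * updates above shrink-wrap the flush window to [0x1000, 0x6000), so
 * wp_clean_post_vma() flushes five pages instead of the whole VMA.
 */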

/**
 * struct clean_walk - Private struct for the clean_record_pte function.
 * @base: struct wp_walk we derive from
 * @bitmap_pgoff: Address_space page offset of the first bit in @bitmap
 * @bitmap: Bitmap with one bit for each page offset in the address_space range
 * covered.
 * @start: Address_space page offset of the first modified pte relative
 * to @bitmap_pgoff
 * @end: Address_space page offset one past the last modified pte relative
 * to @bitmap_pgoff
 */
struct clean_walk {
	struct wp_walk base;
	pgoff_t bitmap_pgoff;
	unsigned long *bitmap;
	pgoff_t start;
	pgoff_t end;
};

#define to_clean_walk(_wpwalk) container_of(_wpwalk, struct clean_walk, base)

/**
 * clean_record_pte - Clean a pte and record its address space offset in a
 * bitmap
 * @pte: Pointer to the pte
 * @addr: The start of the virtual address range to be cleaned
 * @end: The end of the virtual address range to be cleaned
 * @walk: pagetable walk callback argument
 *
 * The function cleans a pte and records the range in
 * virtual address space of touched ptes for efficient TLB flushes.
 * It also records dirty ptes in a bitmap representing page offsets
 * in the address_space, as well as the first and last of the bits
 * touched.
 */
static int clean_record_pte(pte_t *pte, unsigned long addr,
			    unsigned long end, struct mm_walk *walk)
{
	struct wp_walk *wpwalk = walk->private;
	struct clean_walk *cwalk = to_clean_walk(wpwalk);
	pte_t ptent = ptep_get(pte);

	if (pte_dirty(ptent)) {
		pgoff_t pgoff = ((addr - walk->vma->vm_start) >> PAGE_SHIFT) +
			walk->vma->vm_pgoff - cwalk->bitmap_pgoff;
		pte_t old_pte = ptep_modify_prot_start(walk->vma, addr, pte);

		ptent = pte_mkclean(old_pte);
		ptep_modify_prot_commit(walk->vma, addr, pte, old_pte, ptent);

		wpwalk->total++;
		wpwalk->tlbflush_start = min(wpwalk->tlbflush_start, addr);
		wpwalk->tlbflush_end = max(wpwalk->tlbflush_end,
					   addr + PAGE_SIZE);

		__set_bit(pgoff, cwalk->bitmap);
		cwalk->start = min(cwalk->start, pgoff);
		cwalk->end = max(cwalk->end, pgoff + 1);
	}

	return 0;
}
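
/*
 * Worked example (editorial addition, all values hypothetical): for a VMA
 * with vm_start == 0x7f0000000000 and vm_pgoff == 16, a dirty pte at
 * addr == 0x7f0000002000 yields pgoff == (0x2000 >> PAGE_SHIFT) + 16 == 18
 * with 4 KiB pages; with bitmap_pgoff == 16 it is recorded as bit 2 of the
 * caller's bitmap.
 */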

/*
 * wp_clean_pmd_entry - The pagewalk pmd callback.
 *
 * Dirty-tracking should take place on the PTE level, so
 * WARN() if encountering a dirty huge pmd.
 * Furthermore, never split huge pmds, since that currently
 * causes dirty info loss. The pagefault handler should do
 * that if needed.
 */
static int wp_clean_pmd_entry(pmd_t *pmd, unsigned long addr, unsigned long end,
			      struct mm_walk *walk)
{
	pmd_t pmdval = pmdp_get_lockless(pmd);

	/* Do not split a huge pmd, present or migrated */
	if (pmd_trans_huge(pmdval) || pmd_devmap(pmdval)) {
		WARN_ON(pmd_write(pmdval) || pmd_dirty(pmdval));
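		/*
		 * Editorial note: ACTION_CONTINUE makes the walker move on
		 * to the next entry without splitting or descending into
		 * this pmd.
		 */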
		walk->action = ACTION_CONTINUE;
	}
	return 0;
}

/*
 * wp_clean_pud_entry - The pagewalk pud callback.
 *
 * Dirty-tracking should take place on the PTE level, so
 * WARN() if encountering a dirty huge pud.
 * Furthermore, never split huge puds, since that currently
 * causes dirty info loss. The pagefault handler should do
 * that if needed.
 */
static int wp_clean_pud_entry(pud_t *pud, unsigned long addr, unsigned long end,
			      struct mm_walk *walk)
{
#ifdef CONFIG_HAVE_ARCH_TRANSPARENT_HUGEPAGE_PUD
	pud_t pudval = READ_ONCE(*pud);

	/* Do not split a huge pud */
	if (pud_trans_huge(pudval) || pud_devmap(pudval)) {
		WARN_ON(pud_write(pudval) || pud_dirty(pudval));
		walk->action = ACTION_CONTINUE;
	}
#endif
	return 0;
}

/*
 * wp_clean_pre_vma - The pagewalk pre_vma callback.
 *
 * The pre_vma callback performs the cache flush, stages the tlb flush
 * and calls the necessary mmu notifiers.
 */
static int wp_clean_pre_vma(unsigned long start, unsigned long end,
			    struct mm_walk *walk)
{
	struct wp_walk *wpwalk = walk->private;

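	/*
	 * Editorial note: the range starts out inverted (empty); the
	 * min()/max() updates in the pte callbacks shrink-wrap it around
	 * the ptes actually touched, and wp_clean_post_vma() skips the
	 * flush entirely if the range stays empty.
	 */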
	wpwalk->tlbflush_start = end;
	wpwalk->tlbflush_end = start;

	mmu_notifier_range_init(&wpwalk->range, MMU_NOTIFY_PROTECTION_PAGE, 0,
				walk->mm, start, end);
	mmu_notifier_invalidate_range_start(&wpwalk->range);
	flush_cache_range(walk->vma, start, end);

	/*
	 * We're not using tlb_gather_mmu() since typically
	 * only a small subrange of PTEs are affected, whereas
	 * tlb_gather_mmu() records the full range.
	 */
	inc_tlb_flush_pending(walk->mm);

	return 0;
}

/*
 * wp_clean_post_vma - The pagewalk post_vma callback.
 *
 * The post_vma callback performs the tlb flush and calls necessary mmu
 * notifiers.
 */
static void wp_clean_post_vma(struct mm_walk *walk)
{
	struct wp_walk *wpwalk = walk->private;

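	/*
	 * Editorial note: with another TLB flush pending on this mm, flush
	 * the whole notifier range conservatively rather than only the
	 * shrink-wrapped subrange.
	 */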
	if (mm_tlb_flush_nested(walk->mm))
		flush_tlb_range(walk->vma, wpwalk->range.start,
				wpwalk->range.end);
	else if (wpwalk->tlbflush_end > wpwalk->tlbflush_start)
		flush_tlb_range(walk->vma, wpwalk->tlbflush_start,
				wpwalk->tlbflush_end);

	mmu_notifier_invalidate_range_end(&wpwalk->range);
	dec_tlb_flush_pending(walk->mm);
}

/*
 * wp_clean_test_walk - The pagewalk test_walk callback.
 *
 * Won't perform dirty-tracking on COW, read-only or HUGETLB vmas.
 */
static int wp_clean_test_walk(unsigned long start, unsigned long end,
			      struct mm_walk *walk)
{
	unsigned long vm_flags = READ_ONCE(walk->vma->vm_flags);

	/* Skip non-applicable VMAs */
	if ((vm_flags & (VM_SHARED | VM_MAYWRITE | VM_HUGETLB)) !=
	    (VM_SHARED | VM_MAYWRITE))
		return 1;

	return 0;
}
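
/*
 * Illustration (editorial addition): the flag test above admits only shared,
 * writable-capable, non-hugetlb mappings. A mapping created with, e.g.,
 *
 *	mmap(NULL, len, PROT_READ | PROT_WRITE, MAP_SHARED, fd, 0);
 *
 * is walked, whereas MAP_PRIVATE (COW) mappings, mappings without
 * VM_MAYWRITE and hugetlbfs mappings are skipped.
 */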

static const struct mm_walk_ops clean_walk_ops = {
	.pte_entry = clean_record_pte,
	.pmd_entry = wp_clean_pmd_entry,
	.pud_entry = wp_clean_pud_entry,
	.test_walk = wp_clean_test_walk,
	.pre_vma = wp_clean_pre_vma,
	.post_vma = wp_clean_post_vma
};

static const struct mm_walk_ops wp_walk_ops = {
	.pte_entry = wp_pte,
	.pmd_entry = wp_clean_pmd_entry,
	.pud_entry = wp_clean_pud_entry,
	.test_walk = wp_clean_test_walk,
	.pre_vma = wp_clean_pre_vma,
	.post_vma = wp_clean_post_vma
};

/**
 * wp_shared_mapping_range - Write-protect all ptes in an address space range
 * @mapping: The address_space we want to write protect
 * @first_index: The first page offset in the range
 * @nr: Number of consecutive page offsets to cover, starting at @first_index
 *
 * Note: This function currently skips transhuge page-table entries, since
 * it's intended for dirty-tracking on the PTE level. It will warn on
 * encountering transhuge write-enabled entries, though, and can easily be
 * extended to handle them as well.
 *
 * Return: The number of ptes actually write-protected. Note that
 * already write-protected ptes are not counted.
 */
unsigned long wp_shared_mapping_range(struct address_space *mapping,
				      pgoff_t first_index, pgoff_t nr)
{
	struct wp_walk wpwalk = { .total = 0 };

	i_mmap_lock_read(mapping);
	WARN_ON(walk_page_mapping(mapping, first_index, nr, &wp_walk_ops,
				  &wpwalk));
	i_mmap_unlock_read(mapping);

	return wpwalk.total;
}
EXPORT_SYMBOL_GPL(wp_shared_mapping_range);
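
/*
 * Usage sketch (editorial addition; "mapping" is a hypothetical
 * struct address_space *): arm write-notification for pages [0, 16):
 *
 *	unsigned long armed = wp_shared_mapping_range(mapping, 0, 16);
 *
 * Subsequent CPU writes to those pages fault and reach the owner's
 * page_mkwrite() or pfn_mkwrite() handler before write access is restored.
 */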

/**
 * clean_record_shared_mapping_range - Clean and record all ptes in an
 * address space range
 * @mapping: The address_space we want to clean
 * @first_index: The first page offset in the range
 * @nr: Number of consecutive page offsets to cover, starting at @first_index
 * @bitmap_pgoff: The page offset of the first bit in @bitmap
 * @bitmap: Pointer to a bitmap of at least @nr bits. The bitmap needs to
 * cover the whole range @first_index..@first_index + @nr.
 * @start: Pointer to the number of the first set bit in @bitmap.
 * The value is modified as new bits are set by the function.
 * @end: Pointer to one past the number of the last set bit in @bitmap.
 * Initialize so that *start >= *end if no bits are set on entry. The value
 * is modified as new bits are set by the function.
 *
 * When this function returns there is no guarantee that a CPU has
 * not already dirtied new ptes. However, it will not clean any ptes not
 * reported in the bitmap. The guarantees are as follows:
 *
 * * All ptes dirty when the function starts executing will end up recorded
 *   in the bitmap.
 * * All ptes dirtied after that will either remain dirty, be recorded in the
 *   bitmap or both.
 *
 * If a caller needs to make sure all dirty ptes are picked up and no
 * additional ones are added, it first needs to write-protect the
 * address-space range and make sure new writers are blocked in
 * page_mkwrite() or pfn_mkwrite(). Then, after a TLB flush following the
 * write-protection, it can pick up all dirty bits.
 *
 * This function currently skips transhuge page-table entries, since
 * it's intended for dirty-tracking on the PTE level. It will warn on
 * encountering transhuge dirty entries, though, and can easily be extended
 * to handle them as well.
 *
 * Return: The number of dirty ptes actually cleaned.
 */
unsigned long clean_record_shared_mapping_range(struct address_space *mapping,
						pgoff_t first_index, pgoff_t nr,
						pgoff_t bitmap_pgoff,
						unsigned long *bitmap,
						pgoff_t *start,
						pgoff_t *end)
{
	bool none_set = (*start >= *end);
	struct clean_walk cwalk = {
		.base = { .total = 0 },
		.bitmap_pgoff = bitmap_pgoff,
		.bitmap = bitmap,
		.start = none_set ? nr : *start,
		.end = none_set ? 0 : *end,
	};

	i_mmap_lock_read(mapping);
	WARN_ON(walk_page_mapping(mapping, first_index, nr, &clean_walk_ops,
				  &cwalk.base));
	i_mmap_unlock_read(mapping);

	*start = cwalk.start;
	*end = cwalk.end;

	return cwalk.base.total;
}
EXPORT_SYMBOL_GPL(clean_record_shared_mapping_range);
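
/*
 * Usage sketch (editorial addition; all names are hypothetical): one
 * dirty-tracking pass over the first nr pages of a mapping, with
 * bitmap_pgoff chosen equal to first_index so bit i corresponds to page
 * offset first_index + i in the address_space:
 *
 *	unsigned long *bits = bitmap_zalloc(nr, GFP_KERNEL);
 *	pgoff_t first_index = 0, start = 0, end = 0;	// start >= end: none set
 *	unsigned long cleaned, i;
 *
 *	cleaned = clean_record_shared_mapping_range(mapping, first_index, nr,
 *						    first_index, bits,
 *						    &start, &end);
 *	for_each_set_bit(i, bits, nr)
 *		handle_dirty_page(mapping, first_index + i);
 *	bitmap_free(bits);
 */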