1 /*
2  * mm/rmap.c - physical to virtual reverse mappings
3  *
4  * Copyright 2001, Rik van Riel <riel@conectiva.com.br>
5  * Released under the General Public License (GPL).
6  *
7  * Simple, low overhead reverse mapping scheme.
8  * Please try to keep this thing as modular as possible.
9  *
10  * Provides methods for unmapping each kind of mapped page:
11  * the anon methods track anonymous pages, and
12  * the file methods track pages belonging to an inode.
13  *
14  * Original design by Rik van Riel <riel@conectiva.com.br> 2001
15  * File methods by Dave McCracken <dmccr@us.ibm.com> 2003, 2004
16  * Anonymous methods by Andrea Arcangeli <andrea@suse.de> 2004
17  * Contributions by Hugh Dickins 2003, 2004
18  */
19 
20 /*
21  * Lock ordering in mm:
22  *
23  * inode->i_rwsem	(while writing or truncating, not reading or faulting)
24  *   mm->mmap_lock
25  *     mapping->invalidate_lock (in filemap_fault)
26  *       page->flags PG_locked (lock_page)
27  *         hugetlbfs_i_mmap_rwsem_key (in huge_pmd_share, see hugetlbfs below)
28  *           vma_start_write
29  *             mapping->i_mmap_rwsem
30  *               anon_vma->rwsem
31  *                 mm->page_table_lock or pte_lock
32  *                   swap_lock (in swap_duplicate, swap_info_get)
33  *                     mmlist_lock (in mmput, drain_mmlist and others)
34  *                     mapping->private_lock (in block_dirty_folio)
35  *                       folio_lock_memcg move_lock (in block_dirty_folio)
36  *                         i_pages lock (widely used)
37  *                           lruvec->lru_lock (in folio_lruvec_lock_irq)
38  *                     inode->i_lock (in set_page_dirty's __mark_inode_dirty)
39  *                     bdi.wb->list_lock (in set_page_dirty's __mark_inode_dirty)
40  *                       sb_lock (within inode_lock in fs/fs-writeback.c)
41  *                       i_pages lock (widely used, in set_page_dirty,
42  *                                 in arch-dependent flush_dcache_mmap_lock,
43  *                                 within bdi.wb->list_lock in __sync_single_inode)
44  *
45  * anon_vma->rwsem,mapping->i_mmap_rwsem   (memory_failure, collect_procs_anon)
46  *   ->tasklist_lock
47  *     pte map lock
48  *
49  * hugetlbfs PageHuge() pages take locks in this order:
50  *   hugetlb_fault_mutex (hugetlbfs specific page fault mutex)
51  *     vma_lock (hugetlb specific lock for pmd_sharing)
52  *       mapping->i_mmap_rwsem (also used for hugetlb pmd sharing)
53  *         page->flags PG_locked (lock_page)
54  */
55 
56 #include <linux/mm.h>
57 #include <linux/sched/mm.h>
58 #include <linux/sched/task.h>
59 #include <linux/pagemap.h>
60 #include <linux/swap.h>
61 #include <linux/swapops.h>
62 #include <linux/slab.h>
63 #include <linux/init.h>
64 #include <linux/ksm.h>
65 #include <linux/rmap.h>
66 #include <linux/rcupdate.h>
67 #include <linux/export.h>
68 #include <linux/memcontrol.h>
69 #include <linux/mmu_notifier.h>
70 #include <linux/migrate.h>
71 #include <linux/hugetlb.h>
72 #include <linux/huge_mm.h>
73 #include <linux/backing-dev.h>
74 #include <linux/page_idle.h>
75 #include <linux/memremap.h>
76 #include <linux/userfaultfd_k.h>
77 #include <linux/mm_inline.h>
78 
79 #include <asm/tlbflush.h>
80 
81 #define CREATE_TRACE_POINTS
82 #include <trace/events/tlb.h>
83 #include <trace/events/migrate.h>
84 
85 #include "internal.h"
86 
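/*
 * Editor's illustrative sketch (not part of rmap.c, hypothetical function
 * name): a minimal example of respecting the lock ordering documented at
 * the top of this file for an rmap-side lookup -- mmap_lock before the
 * folio lock, and the folio lock before the anon_vma rwsem.  The actual
 * work and all error handling are elided.
 */
static void example_lock_order(struct mm_struct *mm, struct folio *folio,
			       struct anon_vma *anon_vma)
{
	mmap_read_lock(mm);		/* mm->mmap_lock */
	folio_lock(folio);		/* page->flags PG_locked */
	anon_vma_lock_read(anon_vma);	/* anon_vma->rwsem */

	/* ... inspect the reverse mapping here ... */

	anon_vma_unlock_read(anon_vma);
	folio_unlock(folio);
	mmap_read_unlock(mm);
}
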
87 static struct kmem_cache *anon_vma_cachep;
88 static struct kmem_cache *anon_vma_chain_cachep;
89 
90 static inline struct anon_vma *anon_vma_alloc(void)
91 {
92 	struct anon_vma *anon_vma;
93 
94 	anon_vma = kmem_cache_alloc(anon_vma_cachep, GFP_KERNEL);
95 	if (anon_vma) {
96 		atomic_set(&anon_vma->refcount, 1);
97 		anon_vma->num_children = 0;
98 		anon_vma->num_active_vmas = 0;
99 		anon_vma->parent = anon_vma;
100 		/*
101 		 * Initialise the anon_vma root to point to itself. If called
102 		 * from fork, the root will be reset to the parent's anon_vma.
103 		 */
104 		anon_vma->root = anon_vma;
105 	}
106 
107 	return anon_vma;
108 }
109 
110 static inline void anon_vma_free(struct anon_vma *anon_vma)
111 {
112 	VM_BUG_ON(atomic_read(&anon_vma->refcount));
113 
114 	/*
115 	 * Synchronize against folio_lock_anon_vma_read() such that
116 	 * we can safely hold the lock without the anon_vma getting
117 	 * freed.
118 	 *
119 	 * Relies on the full mb implied by the atomic_dec_and_test() from
120 	 * put_anon_vma() against the acquire barrier implied by
121 	 * down_read_trylock() from folio_lock_anon_vma_read(). This orders:
122 	 *
123 	 * folio_lock_anon_vma_read()	VS	put_anon_vma()
124 	 *   down_read_trylock()		  atomic_dec_and_test()
125 	 *   LOCK				  MB
126 	 *   atomic_read()			  rwsem_is_locked()
127 	 *
128 	 * LOCK should suffice since the actual taking of the lock must
129 	 * happen _before_ what follows.
130 	 */
131 	might_sleep();
132 	if (rwsem_is_locked(&anon_vma->root->rwsem)) {
133 		anon_vma_lock_write(anon_vma);
134 		anon_vma_unlock_write(anon_vma);
135 	}
136 
137 	kmem_cache_free(anon_vma_cachep, anon_vma);
138 }
139 
140 static inline struct anon_vma_chain *anon_vma_chain_alloc(gfp_t gfp)
141 {
142 	return kmem_cache_alloc(anon_vma_chain_cachep, gfp);
143 }
144 
145 static void anon_vma_chain_free(struct anon_vma_chain *anon_vma_chain)
146 {
147 	kmem_cache_free(anon_vma_chain_cachep, anon_vma_chain);
148 }
149 
150 static void anon_vma_chain_link(struct vm_area_struct *vma,
151 				struct anon_vma_chain *avc,
152 				struct anon_vma *anon_vma)
153 {
154 	avc->vma = vma;
155 	avc->anon_vma = anon_vma;
156 	list_add(&avc->same_vma, &vma->anon_vma_chain);
157 	anon_vma_interval_tree_insert(avc, &anon_vma->rb_root);
158 }
159 
160 /**
161  * __anon_vma_prepare - attach an anon_vma to a memory region
162  * @vma: the memory region in question
163  *
164  * This makes sure the memory mapping described by 'vma' has
165  * an 'anon_vma' attached to it, so that we can associate the
166  * anonymous pages mapped into it with that anon_vma.
167  *
168  * The common case will be that we already have one, which
169  * is handled inline by anon_vma_prepare(). But if
170  * not we either need to find an adjacent mapping that we
171  * can re-use the anon_vma from (very common when the only
172  * reason for splitting a vma has been mprotect()), or we
173  * allocate a new one.
174  *
175  * Anon-vma allocations are very subtle, because we may have
176  * optimistically looked up an anon_vma in folio_lock_anon_vma_read()
177  * and that may actually touch the rwsem even in the newly
178  * allocated vma (it depends on RCU to make sure that the
179  * anon_vma isn't actually destroyed).
180  *
181  * As a result, we need to do proper anon_vma locking even
182  * for the new allocation. At the same time, we do not want
183  * to do any locking for the common case of already having
184  * an anon_vma.
185  *
186  * This must be called with the mmap_lock held for reading.
187  */
188 int __anon_vma_prepare(struct vm_area_struct *vma)
189 {
190 	struct mm_struct *mm = vma->vm_mm;
191 	struct anon_vma *anon_vma, *allocated;
192 	struct anon_vma_chain *avc;
193 
194 	might_sleep();
195 
196 	avc = anon_vma_chain_alloc(GFP_KERNEL);
197 	if (!avc)
198 		goto out_enomem;
199 
200 	anon_vma = find_mergeable_anon_vma(vma);
201 	allocated = NULL;
202 	if (!anon_vma) {
203 		anon_vma = anon_vma_alloc();
204 		if (unlikely(!anon_vma))
205 			goto out_enomem_free_avc;
206 		anon_vma->num_children++; /* self-parent link for new root */
207 		allocated = anon_vma;
208 	}
209 
210 	anon_vma_lock_write(anon_vma);
211 	/* page_table_lock to protect against threads */
212 	spin_lock(&mm->page_table_lock);
213 	if (likely(!vma->anon_vma)) {
214 		vma->anon_vma = anon_vma;
215 		anon_vma_chain_link(vma, avc, anon_vma);
216 		anon_vma->num_active_vmas++;
217 		allocated = NULL;
218 		avc = NULL;
219 	}
220 	spin_unlock(&mm->page_table_lock);
221 	anon_vma_unlock_write(anon_vma);
222 
223 	if (unlikely(allocated))
224 		put_anon_vma(allocated);
225 	if (unlikely(avc))
226 		anon_vma_chain_free(avc);
227 
228 	return 0;
229 
230  out_enomem_free_avc:
231 	anon_vma_chain_free(avc);
232  out_enomem:
233 	return -ENOMEM;
234 }
235 
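/*
 * Editor's sketch: the "common case handled inline" mentioned above is the
 * anon_vma_prepare() wrapper in include/linux/rmap.h; its fast path is
 * roughly equivalent to the hypothetical helper below (a from-memory
 * sketch, not a verbatim copy) -- no lock is taken when vma->anon_vma is
 * already set.
 */
static inline int example_anon_vma_prepare(struct vm_area_struct *vma)
{
	if (likely(vma->anon_vma))
		return 0;
	return __anon_vma_prepare(vma);
}
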
236 /*
237  * This is a useful helper function for locking the anon_vma root as
238  * we traverse the vma->anon_vma_chain, looping over anon_vmas that
239  * have the same vma.
240  *
241  * Such anon_vmas should have the same root, so you'd expect to see
242  * just a single lock acquisition for the whole traversal.
243  */
244 static inline struct anon_vma *lock_anon_vma_root(struct anon_vma *root, struct anon_vma *anon_vma)
245 {
246 	struct anon_vma *new_root = anon_vma->root;
247 	if (new_root != root) {
248 		if (WARN_ON_ONCE(root))
249 			up_write(&root->rwsem);
250 		root = new_root;
251 		down_write(&root->rwsem);
252 	}
253 	return root;
254 }
255 
256 static inline void unlock_anon_vma_root(struct anon_vma *root)
257 {
258 	if (root)
259 		up_write(&root->rwsem);
260 }
261 
262 /*
263  * Attach the anon_vmas from src to dst.
264  * Returns 0 on success, -ENOMEM on failure.
265  *
266  * anon_vma_clone() is called by vma_expand(), vma_merge(), __split_vma(),
267  * copy_vma() and anon_vma_fork(). The first four want an exact copy of src,
268  * while the last one, anon_vma_fork(), may try to reuse an existing anon_vma to
269  * prevent endless growth of anon_vma. Since dst->anon_vma is set to NULL before
270  * the call, we can identify this case by checking (!dst->anon_vma &&
271  * src->anon_vma).
272  *
273  * If (!dst->anon_vma && src->anon_vma) is true, this function tries to find
274  * and reuse existing anon_vma which has no vmas and only one child anon_vma.
275  * This prevents the anon_vma hierarchy from degrading into an endless linear
276  * chain in the case of a constantly forking task. On the other hand, an
277  * anon_vma with more than one child isn't reused even if there is no alive
278  * vma, so the rmap walker has a good chance of avoiding scanning the whole
279  * hierarchy when it searches for where a page is mapped.
280  */
281 int anon_vma_clone(struct vm_area_struct *dst, struct vm_area_struct *src)
282 {
283 	struct anon_vma_chain *avc, *pavc;
284 	struct anon_vma *root = NULL;
285 
286 	list_for_each_entry_reverse(pavc, &src->anon_vma_chain, same_vma) {
287 		struct anon_vma *anon_vma;
288 
289 		avc = anon_vma_chain_alloc(GFP_NOWAIT | __GFP_NOWARN);
290 		if (unlikely(!avc)) {
291 			unlock_anon_vma_root(root);
292 			root = NULL;
293 			avc = anon_vma_chain_alloc(GFP_KERNEL);
294 			if (!avc)
295 				goto enomem_failure;
296 		}
297 		anon_vma = pavc->anon_vma;
298 		root = lock_anon_vma_root(root, anon_vma);
299 		anon_vma_chain_link(dst, avc, anon_vma);
300 
301 		/*
302 		 * Reuse existing anon_vma if it has no vma and only one
303 		 * anon_vma child.
304 		 *
305 		 * Root anon_vma is never reused:
306 		 * it has self-parent reference and at least one child.
307 		 */
308 		if (!dst->anon_vma && src->anon_vma &&
309 		    anon_vma->num_children < 2 &&
310 		    anon_vma->num_active_vmas == 0)
311 			dst->anon_vma = anon_vma;
312 	}
313 	if (dst->anon_vma)
314 		dst->anon_vma->num_active_vmas++;
315 	unlock_anon_vma_root(root);
316 	return 0;
317 
318  enomem_failure:
319 	/*
320 	 * dst->anon_vma is dropped here otherwise its num_active_vmas can
321 	 * be incorrectly decremented in unlink_anon_vmas().
322 	 * We can safely do this because callers of anon_vma_clone() don't care
323 	 * about dst->anon_vma if anon_vma_clone() failed.
324 	 */
325 	dst->anon_vma = NULL;
326 	unlink_anon_vmas(dst);
327 	return -ENOMEM;
328 }
329 
330 /*
331  * Attach vma to its own anon_vma, as well as to the anon_vmas that
332  * the corresponding VMA in the parent process is attached to.
333  * Returns 0 on success, non-zero on failure.
334  */
335 int anon_vma_fork(struct vm_area_struct *vma, struct vm_area_struct *pvma)
336 {
337 	struct anon_vma_chain *avc;
338 	struct anon_vma *anon_vma;
339 	int error;
340 
341 	/* Don't bother if the parent process has no anon_vma here. */
342 	if (!pvma->anon_vma)
343 		return 0;
344 
345 	/* Drop inherited anon_vma, we'll reuse existing or allocate new. */
346 	vma->anon_vma = NULL;
347 
348 	/*
349 	 * First, attach the new VMA to the parent VMA's anon_vmas,
350 	 * so rmap can find non-COWed pages in child processes.
351 	 */
352 	error = anon_vma_clone(vma, pvma);
353 	if (error)
354 		return error;
355 
356 	/* An existing anon_vma has been reused, all done then. */
357 	if (vma->anon_vma)
358 		return 0;
359 
360 	/* Then add our own anon_vma. */
361 	anon_vma = anon_vma_alloc();
362 	if (!anon_vma)
363 		goto out_error;
364 	anon_vma->num_active_vmas++;
365 	avc = anon_vma_chain_alloc(GFP_KERNEL);
366 	if (!avc)
367 		goto out_error_free_anon_vma;
368 
369 	/*
370 	 * The root anon_vma's rwsem is the lock actually used when we
371 	 * lock any of the anon_vmas in this anon_vma tree.
372 	 */
373 	anon_vma->root = pvma->anon_vma->root;
374 	anon_vma->parent = pvma->anon_vma;
375 	/*
376 	 * With refcounts, an anon_vma can stay around longer than the
377 	 * process it belongs to. The root anon_vma needs to be pinned until
378 	 * this anon_vma is freed, because the lock lives in the root.
379 	 */
380 	get_anon_vma(anon_vma->root);
381 	/* Mark this anon_vma as the one where our new (COWed) pages go. */
382 	vma->anon_vma = anon_vma;
383 	anon_vma_lock_write(anon_vma);
384 	anon_vma_chain_link(vma, avc, anon_vma);
385 	anon_vma->parent->num_children++;
386 	anon_vma_unlock_write(anon_vma);
387 
388 	return 0;
389 
390  out_error_free_anon_vma:
391 	put_anon_vma(anon_vma);
392  out_error:
393 	unlink_anon_vmas(vma);
394 	return -ENOMEM;
395 }
396 
397 void unlink_anon_vmas(struct vm_area_struct *vma)
398 {
399 	struct anon_vma_chain *avc, *next;
400 	struct anon_vma *root = NULL;
401 
402 	/*
403 	 * Unlink each anon_vma chained to the VMA.  This list is ordered
404 	 * from newest to oldest, ensuring the root anon_vma gets freed last.
405 	 */
406 	list_for_each_entry_safe(avc, next, &vma->anon_vma_chain, same_vma) {
407 		struct anon_vma *anon_vma = avc->anon_vma;
408 
409 		root = lock_anon_vma_root(root, anon_vma);
410 		anon_vma_interval_tree_remove(avc, &anon_vma->rb_root);
411 
412 		/*
413 		 * Leave empty anon_vmas on the list - we'll need
414 		 * to free them outside the lock.
415 		 */
416 		if (RB_EMPTY_ROOT(&anon_vma->rb_root.rb_root)) {
417 			anon_vma->parent->num_children--;
418 			continue;
419 		}
420 
421 		list_del(&avc->same_vma);
422 		anon_vma_chain_free(avc);
423 	}
424 	if (vma->anon_vma) {
425 		vma->anon_vma->num_active_vmas--;
426 
427 		/*
428 		 * The vma may still be needed after unlink, and a new anon_vma
429 		 * will be prepared when a fault is handled.
430 		 */
431 		vma->anon_vma = NULL;
432 	}
433 	unlock_anon_vma_root(root);
434 
435 	/*
436 	 * Iterate the list once more; it now contains only empty and unlinked
437 	 * anon_vmas, so destroy them. This could not be done earlier because
438 	 * __put_anon_vma() needs to write-acquire the anon_vma->root->rwsem.
439 	 */
440 	list_for_each_entry_safe(avc, next, &vma->anon_vma_chain, same_vma) {
441 		struct anon_vma *anon_vma = avc->anon_vma;
442 
443 		VM_WARN_ON(anon_vma->num_children);
444 		VM_WARN_ON(anon_vma->num_active_vmas);
445 		put_anon_vma(anon_vma);
446 
447 		list_del(&avc->same_vma);
448 		anon_vma_chain_free(avc);
449 	}
450 }
451 
452 static void anon_vma_ctor(void *data)
453 {
454 	struct anon_vma *anon_vma = data;
455 
456 	init_rwsem(&anon_vma->rwsem);
457 	atomic_set(&anon_vma->refcount, 0);
458 	anon_vma->rb_root = RB_ROOT_CACHED;
459 }
460 
461 void __init anon_vma_init(void)
462 {
463 	anon_vma_cachep = kmem_cache_create("anon_vma", sizeof(struct anon_vma),
464 			0, SLAB_TYPESAFE_BY_RCU|SLAB_PANIC|SLAB_ACCOUNT,
465 			anon_vma_ctor);
466 	anon_vma_chain_cachep = KMEM_CACHE(anon_vma_chain,
467 			SLAB_PANIC|SLAB_ACCOUNT);
468 }
469 
470 /*
471  * Getting a lock on a stable anon_vma from a page off the LRU is tricky!
472  *
473  * Since there is no serialization whatsoever against page_remove_rmap(),
474  * the best this function can do is return a refcount-increased anon_vma
475  * that might have been relevant to this page.
476  *
477  * The page might have been remapped to a different anon_vma or the anon_vma
478  * returned may already be freed (and even reused).
479  *
480  * In case it was remapped to a different anon_vma, the new anon_vma will be a
481  * child of the old anon_vma, and the anon_vma lifetime rules will therefore
482  * ensure that any anon_vma obtained from the page will still be valid for as
483  * long as we observe page_mapped() [ hence all those page_mapped() tests ].
484  *
485  * All users of this function must be very careful when walking the anon_vma
486  * chain and verify that the page in question is indeed mapped in it
487  * [ something equivalent to page_mapped_in_vma() ].
488  *
489  * Since anon_vma's slab is SLAB_TYPESAFE_BY_RCU and we know from
490  * page_remove_rmap() that the anon_vma pointer from page->mapping is valid
491  * if there is a mapcount, we can dereference the anon_vma after observing
492  * those.
493  */
494 struct anon_vma *folio_get_anon_vma(struct folio *folio)
495 {
496 	struct anon_vma *anon_vma = NULL;
497 	unsigned long anon_mapping;
498 
499 	rcu_read_lock();
500 	anon_mapping = (unsigned long)READ_ONCE(folio->mapping);
501 	if ((anon_mapping & PAGE_MAPPING_FLAGS) != PAGE_MAPPING_ANON)
502 		goto out;
503 	if (!folio_mapped(folio))
504 		goto out;
505 
506 	anon_vma = (struct anon_vma *) (anon_mapping - PAGE_MAPPING_ANON);
507 	if (!atomic_inc_not_zero(&anon_vma->refcount)) {
508 		anon_vma = NULL;
509 		goto out;
510 	}
511 
512 	/*
513 	 * If this folio is still mapped, then its anon_vma cannot have been
514 	 * freed.  But if it has been unmapped, we have no security against the
515 	 * anon_vma structure being freed and reused (for another anon_vma:
516 	 * SLAB_TYPESAFE_BY_RCU guarantees that - so the atomic_inc_not_zero()
517 	 * above cannot corrupt).
518 	 */
519 	if (!folio_mapped(folio)) {
520 		rcu_read_unlock();
521 		put_anon_vma(anon_vma);
522 		return NULL;
523 	}
524 out:
525 	rcu_read_unlock();
526 
527 	return anon_vma;
528 }
529 
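/*
 * Editor's usage sketch (hypothetical function, not part of rmap.c): the
 * expected calling convention for folio_get_anon_vma() -- treat the result
 * as a pinned hint only, re-check folio_mapped() before trusting it, and
 * drop the reference with put_anon_vma() when done.
 */
static void example_peek_anon_vma(struct folio *folio)
{
	struct anon_vma *anon_vma = folio_get_anon_vma(folio);

	if (!anon_vma)
		return;

	if (folio_mapped(folio)) {
		/* ... walk anon_vma->rb_root under the anon_vma lock ... */
	}

	put_anon_vma(anon_vma);
}
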
530 /*
531  * Similar to folio_get_anon_vma() except it locks the anon_vma.
532  *
533  * It's a little more complex as it tries to keep the fast path to a single
534  * atomic op -- the trylock. If we fail the trylock, we fall back to getting a
535  * reference like with folio_get_anon_vma() and then block on the rwsem in
536  * the !rwc->try_lock case.
537  */
538 struct anon_vma *folio_lock_anon_vma_read(struct folio *folio,
539 					  struct rmap_walk_control *rwc)
540 {
541 	struct anon_vma *anon_vma = NULL;
542 	struct anon_vma *root_anon_vma;
543 	unsigned long anon_mapping;
544 
545 	rcu_read_lock();
546 	anon_mapping = (unsigned long)READ_ONCE(folio->mapping);
547 	if ((anon_mapping & PAGE_MAPPING_FLAGS) != PAGE_MAPPING_ANON)
548 		goto out;
549 	if (!folio_mapped(folio))
550 		goto out;
551 
552 	anon_vma = (struct anon_vma *) (anon_mapping - PAGE_MAPPING_ANON);
553 	root_anon_vma = READ_ONCE(anon_vma->root);
554 	if (down_read_trylock(&root_anon_vma->rwsem)) {
555 		/*
556 		 * If the folio is still mapped, then this anon_vma is still
557 		 * its anon_vma, and holding the mutex ensures that it will
558 		 * not go away, see anon_vma_free().
559 		 */
560 		if (!folio_mapped(folio)) {
561 			up_read(&root_anon_vma->rwsem);
562 			anon_vma = NULL;
563 		}
564 		goto out;
565 	}
566 
567 	if (rwc && rwc->try_lock) {
568 		anon_vma = NULL;
569 		rwc->contended = true;
570 		goto out;
571 	}
572 
573 	/* trylock failed, we got to sleep */
574 	if (!atomic_inc_not_zero(&anon_vma->refcount)) {
575 		anon_vma = NULL;
576 		goto out;
577 	}
578 
579 	if (!folio_mapped(folio)) {
580 		rcu_read_unlock();
581 		put_anon_vma(anon_vma);
582 		return NULL;
583 	}
584 
585 	/* we pinned the anon_vma, it's safe to sleep */
586 	rcu_read_unlock();
587 	anon_vma_lock_read(anon_vma);
588 
589 	if (atomic_dec_and_test(&anon_vma->refcount)) {
590 		/*
591 		 * Oops, we held the last refcount, release the lock
592 		 * and bail -- can't simply use put_anon_vma() because
593 		 * we'll deadlock on the anon_vma_lock_write() recursion.
594 		 */
595 		anon_vma_unlock_read(anon_vma);
596 		__put_anon_vma(anon_vma);
597 		anon_vma = NULL;
598 	}
599 
600 	return anon_vma;
601 
602 out:
603 	rcu_read_unlock();
604 	return anon_vma;
605 }
606 
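/*
 * Editor's usage sketch (hypothetical, not part of rmap.c): how an rmap
 * walker is expected to pair folio_lock_anon_vma_read() with
 * anon_vma_unlock_read(), including the rwc->try_lock / rwc->contended
 * convention when the caller would rather bail out than sleep.
 */
static void example_walk_anon_locked(struct folio *folio,
				     struct rmap_walk_control *rwc)
{
	struct anon_vma *anon_vma = folio_lock_anon_vma_read(folio, rwc);

	if (!anon_vma) {
		if (rwc && rwc->contended)
			return;	/* trylock failed; caller asked not to sleep */
		return;		/* folio is no longer anon-mapped */
	}

	/* ... iterate the interval tree while the anon_vma is read-locked ... */

	anon_vma_unlock_read(anon_vma);
}
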
607 #ifdef CONFIG_ARCH_WANT_BATCHED_UNMAP_TLB_FLUSH
608 /*
609  * Flush TLB entries for recently unmapped pages from remote CPUs. If a PTE
610  * was dirty when it was unmapped, it is important that it is flushed before
611  * any IO is initiated on the page, to prevent lost writes. Similarly,
612  * it must be flushed before freeing to prevent data leakage.
613  */
614 void try_to_unmap_flush(void)
615 {
616 	struct tlbflush_unmap_batch *tlb_ubc = &current->tlb_ubc;
617 
618 	if (!tlb_ubc->flush_required)
619 		return;
620 
621 	arch_tlbbatch_flush(&tlb_ubc->arch);
622 	tlb_ubc->flush_required = false;
623 	tlb_ubc->writable = false;
624 }
625 
626 /* Flush iff there are potentially writable TLB entries that can race with IO */
627 void try_to_unmap_flush_dirty(void)
628 {
629 	struct tlbflush_unmap_batch *tlb_ubc = &current->tlb_ubc;
630 
631 	if (tlb_ubc->writable)
632 		try_to_unmap_flush();
633 }
634 
635 /*
636  * Bits 0-14 of mm->tlb_flush_batched record pending generations.
637  * Bits 16-30 of mm->tlb_flush_batched record flushed generations.
638  */
639 #define TLB_FLUSH_BATCH_FLUSHED_SHIFT	16
640 #define TLB_FLUSH_BATCH_PENDING_MASK			\
641 	((1 << (TLB_FLUSH_BATCH_FLUSHED_SHIFT - 1)) - 1)
642 #define TLB_FLUSH_BATCH_PENDING_LARGE			\
643 	(TLB_FLUSH_BATCH_PENDING_MASK / 2)
644 
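/*
 * Editor's illustrative helper (hypothetical, not used by the kernel):
 * decode the two generation counters packed into mm->tlb_flush_batched,
 * mirroring how flush_tlb_batched_pending() below reads them.
 */
static inline void example_tlb_batch_decode(int batch, int *pending, int *flushed)
{
	*pending = batch & TLB_FLUSH_BATCH_PENDING_MASK;
	*flushed = batch >> TLB_FLUSH_BATCH_FLUSHED_SHIFT;
}
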
645 static void set_tlb_ubc_flush_pending(struct mm_struct *mm, pte_t pteval,
646 				      unsigned long uaddr)
647 {
648 	struct tlbflush_unmap_batch *tlb_ubc = &current->tlb_ubc;
649 	int batch;
650 	bool writable = pte_dirty(pteval);
651 
652 	if (!pte_accessible(mm, pteval))
653 		return;
654 
655 	arch_tlbbatch_add_pending(&tlb_ubc->arch, mm, uaddr);
656 	tlb_ubc->flush_required = true;
657 
658 	/*
659 	 * Ensure compiler does not re-order the setting of tlb_flush_batched
660 	 * before the PTE is cleared.
661 	 */
662 	barrier();
663 	batch = atomic_read(&mm->tlb_flush_batched);
664 retry:
665 	if ((batch & TLB_FLUSH_BATCH_PENDING_MASK) > TLB_FLUSH_BATCH_PENDING_LARGE) {
666 		/*
667 		 * Prevent `pending' from catching up with `flushed' because of
668 		 * overflow.  Reset `pending' and `flushed' to be 1 and 0 if
669 		 * `pending' becomes large.
670 		 */
671 		if (!atomic_try_cmpxchg(&mm->tlb_flush_batched, &batch, 1))
672 			goto retry;
673 	} else {
674 		atomic_inc(&mm->tlb_flush_batched);
675 	}
676 
677 	/*
678 	 * If the PTE was dirty then it's best to assume it's writable. The
679 	 * caller must use try_to_unmap_flush_dirty() or try_to_unmap_flush()
680 	 * before the page is queued for IO.
681 	 */
682 	if (writable)
683 		tlb_ubc->writable = true;
684 }
685 
686 /*
687  * Returns true if the TLB flush should be deferred to the end of a batch of
688  * unmap operations to reduce IPIs.
689  */
690 static bool should_defer_flush(struct mm_struct *mm, enum ttu_flags flags)
691 {
692 	if (!(flags & TTU_BATCH_FLUSH))
693 		return false;
694 
695 	return arch_tlbbatch_should_defer(mm);
696 }
697 
698 /*
699  * Reclaim unmaps pages under the PTL but does not flush the TLB prior to
700  * releasing the PTL if TLB flushes are batched. It's possible for a parallel
701  * operation such as mprotect or munmap to race between reclaim unmapping
702  * the page and flushing the page. If this race occurs, it potentially allows
703  * access to data via a stale TLB entry. Tracking all mm's that have TLB
704  * batching in flight would be expensive during reclaim so instead track
705  * whether TLB batching occurred in the past and if so then do a flush here
706  * if required. This will cost one additional flush per reclaim cycle paid
707  * by the first operation at risk such as mprotect and munmap.
708  *
709  * This must be called under the PTL so that an access to tlb_flush_batched
710  * that is potentially a "reclaim vs mprotect/munmap/etc" race will synchronise
711  * via the PTL.
712  */
713 void flush_tlb_batched_pending(struct mm_struct *mm)
714 {
715 	int batch = atomic_read(&mm->tlb_flush_batched);
716 	int pending = batch & TLB_FLUSH_BATCH_PENDING_MASK;
717 	int flushed = batch >> TLB_FLUSH_BATCH_FLUSHED_SHIFT;
718 
719 	if (pending != flushed) {
720 		arch_flush_tlb_batched_pending(mm);
721 		/*
722 		 * If a new TLB flush became pending while we were flushing, leave
723 		 * mm->tlb_flush_batched as is, to avoid losing that flush.
724 		 */
725 		atomic_cmpxchg(&mm->tlb_flush_batched, batch,
726 			       pending | (pending << TLB_FLUSH_BATCH_FLUSHED_SHIFT));
727 	}
728 }
729 #else
730 static void set_tlb_ubc_flush_pending(struct mm_struct *mm, pte_t pteval,
731 				      unsigned long uaddr)
732 {
733 }
734 
735 static bool should_defer_flush(struct mm_struct *mm, enum ttu_flags flags)
736 {
737 	return false;
738 }
739 #endif /* CONFIG_ARCH_WANT_BATCHED_UNMAP_TLB_FLUSH */
740 
741 /*
742  * At what user virtual address is page expected in vma?
743  * Caller should check the page is actually part of the vma.
744  */
745 unsigned long page_address_in_vma(struct page *page, struct vm_area_struct *vma)
746 {
747 	struct folio *folio = page_folio(page);
748 	if (folio_test_anon(folio)) {
749 		struct anon_vma *page__anon_vma = folio_anon_vma(folio);
750 		/*
751 		 * Note: swapoff's unuse_vma() is more efficient with this
752 		 * check, and needs it to match anon_vma when KSM is active.
753 		 */
754 		if (!vma->anon_vma || !page__anon_vma ||
755 		    vma->anon_vma->root != page__anon_vma->root)
756 			return -EFAULT;
757 	} else if (!vma->vm_file) {
758 		return -EFAULT;
759 	} else if (vma->vm_file->f_mapping != folio->mapping) {
760 		return -EFAULT;
761 	}
762 
763 	return vma_address(page, vma);
764 }
765 
766 /*
767  * Returns the actual pmd_t* where we expect 'address' to be mapped from, or
768  * NULL if it doesn't exist.  No guarantees / checks on what the pmd_t*
769  * represents.
770  */
771 pmd_t *mm_find_pmd(struct mm_struct *mm, unsigned long address)
772 {
773 	pgd_t *pgd;
774 	p4d_t *p4d;
775 	pud_t *pud;
776 	pmd_t *pmd = NULL;
777 
778 	pgd = pgd_offset(mm, address);
779 	if (!pgd_present(*pgd))
780 		goto out;
781 
782 	p4d = p4d_offset(pgd, address);
783 	if (!p4d_present(*p4d))
784 		goto out;
785 
786 	pud = pud_offset(p4d, address);
787 	if (!pud_present(*pud))
788 		goto out;
789 
790 	pmd = pmd_offset(pud, address);
791 out:
792 	return pmd;
793 }
794 
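/*
 * Editor's usage sketch (hypothetical, not part of rmap.c): one way a
 * caller might combine mm_find_pmd() with pte_offset_map_lock() to reach a
 * PTE.  As noted above, validating the pmd contents is the caller's job;
 * pte_offset_map_lock() can also return NULL if the page table went away.
 */
static pte_t *example_find_pte_locked(struct mm_struct *mm,
				      unsigned long address, spinlock_t **ptlp)
{
	pmd_t *pmd = mm_find_pmd(mm, address);

	if (!pmd)
		return NULL;

	return pte_offset_map_lock(mm, pmd, address, ptlp);
}
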
795 struct folio_referenced_arg {
796 	int mapcount;
797 	int referenced;
798 	unsigned long vm_flags;
799 	struct mem_cgroup *memcg;
800 };
801 /*
802  * arg: folio_referenced_arg will be passed
803  */
804 static bool folio_referenced_one(struct folio *folio,
805 		struct vm_area_struct *vma, unsigned long address, void *arg)
806 {
807 	struct folio_referenced_arg *pra = arg;
808 	DEFINE_FOLIO_VMA_WALK(pvmw, folio, vma, address, 0);
809 	int referenced = 0;
810 
811 	while (page_vma_mapped_walk(&pvmw)) {
812 		address = pvmw.address;
813 
814 		if ((vma->vm_flags & VM_LOCKED) &&
815 		    (!folio_test_large(folio) || !pvmw.pte)) {
816 			/* Restore the mlock which got missed */
817 			mlock_vma_folio(folio, vma, !pvmw.pte);
818 			page_vma_mapped_walk_done(&pvmw);
819 			pra->vm_flags |= VM_LOCKED;
820 			return false; /* To break the loop */
821 		}
822 
823 		if (pvmw.pte) {
824 			if (lru_gen_enabled() &&
825 			    pte_young(ptep_get(pvmw.pte))) {
826 				lru_gen_look_around(&pvmw);
827 				referenced++;
828 			}
829 
830 			if (ptep_clear_flush_young_notify(vma, address,
831 						pvmw.pte))
832 				referenced++;
833 		} else if (IS_ENABLED(CONFIG_TRANSPARENT_HUGEPAGE)) {
834 			if (pmdp_clear_flush_young_notify(vma, address,
835 						pvmw.pmd))
836 				referenced++;
837 		} else {
838 			/* unexpected pmd-mapped folio? */
839 			WARN_ON_ONCE(1);
840 		}
841 
842 		pra->mapcount--;
843 	}
844 
845 	if (referenced)
846 		folio_clear_idle(folio);
847 	if (folio_test_clear_young(folio))
848 		referenced++;
849 
850 	if (referenced) {
851 		pra->referenced++;
852 		pra->vm_flags |= vma->vm_flags & ~VM_LOCKED;
853 	}
854 
855 	if (!pra->mapcount)
856 		return false; /* To break the loop */
857 
858 	return true;
859 }
860 
861 static bool invalid_folio_referenced_vma(struct vm_area_struct *vma, void *arg)
862 {
863 	struct folio_referenced_arg *pra = arg;
864 	struct mem_cgroup *memcg = pra->memcg;
865 
866 	/*
867 	 * Ignore references from this mapping if it has no recency. If the
868 	 * folio has been used in another mapping, we will catch it; if this
869 	 * other mapping is already gone, the unmap path will have set the
870 	 * referenced flag or activated the folio in zap_pte_range().
871 	 */
872 	if (!vma_has_recency(vma))
873 		return true;
874 
875 	/*
876 	 * If we are reclaiming on behalf of a cgroup, skip counting on behalf
877 	 * of references from different cgroups.
878 	 */
879 	if (memcg && !mm_match_cgroup(vma->vm_mm, memcg))
880 		return true;
881 
882 	return false;
883 }
884 
885 /**
886  * folio_referenced() - Test if the folio was referenced.
887  * @folio: The folio to test.
888  * @is_locked: Caller holds lock on the folio.
889  * @memcg: target memory cgroup
890  * @vm_flags: A combination of all the vma->vm_flags which referenced the folio.
891  *
892  * Quick test_and_clear_referenced for all mappings of a folio,
893  *
894  * Return: The number of mappings which referenced the folio. Return -1 if
895  * the function bailed out due to rmap lock contention.
896  */
897 int folio_referenced(struct folio *folio, int is_locked,
898 		     struct mem_cgroup *memcg, unsigned long *vm_flags)
899 {
900 	int we_locked = 0;
901 	struct folio_referenced_arg pra = {
902 		.mapcount = folio_mapcount(folio),
903 		.memcg = memcg,
904 	};
905 	struct rmap_walk_control rwc = {
906 		.rmap_one = folio_referenced_one,
907 		.arg = (void *)&pra,
908 		.anon_lock = folio_lock_anon_vma_read,
909 		.try_lock = true,
910 		.invalid_vma = invalid_folio_referenced_vma,
911 	};
912 
913 	*vm_flags = 0;
914 	if (!pra.mapcount)
915 		return 0;
916 
917 	if (!folio_raw_mapping(folio))
918 		return 0;
919 
920 	if (!is_locked && (!folio_test_anon(folio) || folio_test_ksm(folio))) {
921 		we_locked = folio_trylock(folio);
922 		if (!we_locked)
923 			return 1;
924 	}
925 
926 	rmap_walk(folio, &rwc);
927 	*vm_flags = pra.vm_flags;
928 
929 	if (we_locked)
930 		folio_unlock(folio);
931 
932 	return rwc.contended ? -1 : pra.referenced;
933 }
934 
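/*
 * Editor's usage sketch (hypothetical, loosely modelled on vmscan's
 * folio_check_references(), not a verbatim copy): how a reclaim-style
 * caller interprets the return value and vm_flags from folio_referenced().
 */
static bool example_folio_was_referenced(struct folio *folio,
					 struct mem_cgroup *memcg)
{
	unsigned long vm_flags;
	int referenced = folio_referenced(folio, 0, memcg, &vm_flags);

	if (vm_flags & VM_LOCKED)
		return true;	/* mlocked somewhere: keep it */
	if (referenced == -1)
		return true;	/* rmap lock contention: play it safe */

	return referenced > 0;
}
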
935 static int page_vma_mkclean_one(struct page_vma_mapped_walk *pvmw)
936 {
937 	int cleaned = 0;
938 	struct vm_area_struct *vma = pvmw->vma;
939 	struct mmu_notifier_range range;
940 	unsigned long address = pvmw->address;
941 
942 	/*
943 	 * We have to assume the worst case, i.e. pmd, for invalidation. Note that
944 	 * the folio cannot be freed from this function.
945 	 */
946 	mmu_notifier_range_init(&range, MMU_NOTIFY_PROTECTION_PAGE, 0,
947 				vma->vm_mm, address, vma_address_end(pvmw));
948 	mmu_notifier_invalidate_range_start(&range);
949 
950 	while (page_vma_mapped_walk(pvmw)) {
951 		int ret = 0;
952 
953 		address = pvmw->address;
954 		if (pvmw->pte) {
955 			pte_t *pte = pvmw->pte;
956 			pte_t entry = ptep_get(pte);
957 
958 			if (!pte_dirty(entry) && !pte_write(entry))
959 				continue;
960 
961 			flush_cache_page(vma, address, pte_pfn(entry));
962 			entry = ptep_clear_flush(vma, address, pte);
963 			entry = pte_wrprotect(entry);
964 			entry = pte_mkclean(entry);
965 			set_pte_at(vma->vm_mm, address, pte, entry);
966 			ret = 1;
967 		} else {
968 #ifdef CONFIG_TRANSPARENT_HUGEPAGE
969 			pmd_t *pmd = pvmw->pmd;
970 			pmd_t entry;
971 
972 			if (!pmd_dirty(*pmd) && !pmd_write(*pmd))
973 				continue;
974 
975 			flush_cache_range(vma, address,
976 					  address + HPAGE_PMD_SIZE);
977 			entry = pmdp_invalidate(vma, address, pmd);
978 			entry = pmd_wrprotect(entry);
979 			entry = pmd_mkclean(entry);
980 			set_pmd_at(vma->vm_mm, address, pmd, entry);
981 			ret = 1;
982 #else
983 			/* unexpected pmd-mapped folio? */
984 			WARN_ON_ONCE(1);
985 #endif
986 		}
987 
988 		if (ret)
989 			cleaned++;
990 	}
991 
992 	mmu_notifier_invalidate_range_end(&range);
993 
994 	return cleaned;
995 }
996 
997 static bool page_mkclean_one(struct folio *folio, struct vm_area_struct *vma,
998 			     unsigned long address, void *arg)
999 {
1000 	DEFINE_FOLIO_VMA_WALK(pvmw, folio, vma, address, PVMW_SYNC);
1001 	int *cleaned = arg;
1002 
1003 	*cleaned += page_vma_mkclean_one(&pvmw);
1004 
1005 	return true;
1006 }
1007 
1008 static bool invalid_mkclean_vma(struct vm_area_struct *vma, void *arg)
1009 {
1010 	if (vma->vm_flags & VM_SHARED)
1011 		return false;
1012 
1013 	return true;
1014 }
1015 
1016 int folio_mkclean(struct folio *folio)
1017 {
1018 	int cleaned = 0;
1019 	struct address_space *mapping;
1020 	struct rmap_walk_control rwc = {
1021 		.arg = (void *)&cleaned,
1022 		.rmap_one = page_mkclean_one,
1023 		.invalid_vma = invalid_mkclean_vma,
1024 	};
1025 
1026 	BUG_ON(!folio_test_locked(folio));
1027 
1028 	if (!folio_mapped(folio))
1029 		return 0;
1030 
1031 	mapping = folio_mapping(folio);
1032 	if (!mapping)
1033 		return 0;
1034 
1035 	rmap_walk(folio, &rwc);
1036 
1037 	return cleaned;
1038 }
1039 EXPORT_SYMBOL_GPL(folio_mkclean);
1040 
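/*
 * Editor's usage sketch (hypothetical helper, not part of rmap.c): the
 * calling convention for folio_mkclean() -- the folio must be locked, and
 * the return value is the number of PTEs/PMDs that were write-protected
 * and cleaned, so any later write through them re-dirties the folio.
 */
static int example_writeprotect_for_writeback(struct folio *folio)
{
	VM_BUG_ON_FOLIO(!folio_test_locked(folio), folio);

	return folio_mkclean(folio);
}
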
1041 /**
1042  * pfn_mkclean_range - Cleans the PTEs (including PMDs) mapped within the
1043  *                     range [@pfn, @pfn + @nr_pages) at the specified offset
1044  *                     (@pgoff) within the @vma of shared mappings. Since clean
1045  *                     PTEs should also be read-only, this write-protects them too.
1046  * @pfn: start pfn.
1047  * @nr_pages: number of physically contiguous pages starting with @pfn.
1048  * @pgoff: page offset that @pfn is mapped at.
1049  * @vma: vma that @pfn is mapped within.
1050  *
1051  * Returns the number of cleaned PTEs (including PMDs).
1052  */
1053 int pfn_mkclean_range(unsigned long pfn, unsigned long nr_pages, pgoff_t pgoff,
1054 		      struct vm_area_struct *vma)
1055 {
1056 	struct page_vma_mapped_walk pvmw = {
1057 		.pfn		= pfn,
1058 		.nr_pages	= nr_pages,
1059 		.pgoff		= pgoff,
1060 		.vma		= vma,
1061 		.flags		= PVMW_SYNC,
1062 	};
1063 
1064 	if (invalid_mkclean_vma(vma, NULL))
1065 		return 0;
1066 
1067 	pvmw.address = vma_pgoff_address(pgoff, nr_pages, vma);
1068 	VM_BUG_ON_VMA(pvmw.address == -EFAULT, vma);
1069 
1070 	return page_vma_mkclean_one(&pvmw);
1071 }
1072 
1073 int folio_total_mapcount(struct folio *folio)
1074 {
1075 	int mapcount = folio_entire_mapcount(folio);
1076 	int nr_pages;
1077 	int i;
1078 
1079 	/* In the common case, avoid the loop when no pages mapped by PTE */
1080 	if (folio_nr_pages_mapped(folio) == 0)
1081 		return mapcount;
1082 	/*
1083 	 * Add all the PTE mappings of those pages mapped by PTE.
1084 	 * Limit the loop to folio_nr_pages_mapped()?
1085 	 * Perhaps: given all the raciness, that may be a good or a bad idea.
1086 	 */
1087 	nr_pages = folio_nr_pages(folio);
1088 	for (i = 0; i < nr_pages; i++)
1089 		mapcount += atomic_read(&folio_page(folio, i)->_mapcount);
1090 
1091 	/* But each of those _mapcounts was based on -1 */
1092 	mapcount += nr_pages;
1093 	return mapcount;
1094 }
1095 
1096 /**
1097  * page_move_anon_rmap - move a page to our anon_vma
1098  * @page:	the page to move to our anon_vma
1099  * @vma:	the vma the page belongs to
1100  *
1101  * When a page belongs exclusively to one process after a COW event,
1102  * that page can be moved into the anon_vma that belongs to just that
1103  * process, so the rmap code will not search the parent or sibling
1104  * processes.
1105  */
1106 void page_move_anon_rmap(struct page *page, struct vm_area_struct *vma)
1107 {
1108 	void *anon_vma = vma->anon_vma;
1109 	struct folio *folio = page_folio(page);
1110 
1111 	VM_BUG_ON_FOLIO(!folio_test_locked(folio), folio);
1112 	VM_BUG_ON_VMA(!anon_vma, vma);
1113 
1114 	anon_vma += PAGE_MAPPING_ANON;
1115 	/*
1116 	 * Ensure that anon_vma and the PAGE_MAPPING_ANON bit are written
1117 	 * simultaneously, so a concurrent reader (eg folio_referenced()'s
1118 	 * folio_test_anon()) will not see one without the other.
1119 	 */
1120 	WRITE_ONCE(folio->mapping, anon_vma);
1121 	SetPageAnonExclusive(page);
1122 }
1123 
1124 /**
1125  * __page_set_anon_rmap - set up new anonymous rmap
1126  * @folio:	Folio which contains page.
1127  * @page:	Page to add to rmap.
1128  * @vma:	VM area to add page to.
1129  * @address:	User virtual address of the mapping
1130  * @exclusive:	the page is exclusively owned by the current process
1131  */
1132 static void __page_set_anon_rmap(struct folio *folio, struct page *page,
1133 	struct vm_area_struct *vma, unsigned long address, int exclusive)
1134 {
1135 	struct anon_vma *anon_vma = vma->anon_vma;
1136 
1137 	BUG_ON(!anon_vma);
1138 
1139 	if (folio_test_anon(folio))
1140 		goto out;
1141 
1142 	/*
1143 	 * If the page isn't exclusively mapped into this vma,
1144 	 * we must use the _oldest_ possible anon_vma for the
1145 	 * page mapping!
1146 	 */
1147 	if (!exclusive)
1148 		anon_vma = anon_vma->root;
1149 
1150 	/*
1151 	 * page_idle does a lockless/optimistic rmap scan on folio->mapping.
1152 	 * Make sure the compiler doesn't split the stores of anon_vma and
1153 	 * the PAGE_MAPPING_ANON type identifier, otherwise the rmap code
1154 	 * could mistake the mapping for a struct address_space and crash.
1155 	 */
1156 	anon_vma = (void *) anon_vma + PAGE_MAPPING_ANON;
1157 	WRITE_ONCE(folio->mapping, (struct address_space *) anon_vma);
1158 	folio->index = linear_page_index(vma, address);
1159 out:
1160 	if (exclusive)
1161 		SetPageAnonExclusive(page);
1162 }
1163 
1164 /**
1165  * __page_check_anon_rmap - sanity check anonymous rmap addition
1166  * @folio:	The folio containing @page.
1167  * @page:	the page to check the mapping of
1168  * @vma:	the vm area in which the mapping is added
1169  * @address:	the user virtual address mapped
1170  */
1171 static void __page_check_anon_rmap(struct folio *folio, struct page *page,
1172 	struct vm_area_struct *vma, unsigned long address)
1173 {
1174 	/*
1175 	 * The page's anon-rmap details (mapping and index) are guaranteed to
1176 	 * be set up correctly at this point.
1177 	 *
1178 	 * We have exclusion against page_add_anon_rmap because the caller
1179 	 * always holds the page locked.
1180 	 *
1181 	 * We have exclusion against page_add_new_anon_rmap because those pages
1182 	 * are initially only visible via the pagetables, and the pte is locked
1183 	 * over the call to page_add_new_anon_rmap.
1184 	 */
1185 	VM_BUG_ON_FOLIO(folio_anon_vma(folio)->root != vma->anon_vma->root,
1186 			folio);
1187 	VM_BUG_ON_PAGE(page_to_pgoff(page) != linear_page_index(vma, address),
1188 		       page);
1189 }
1190 
1191 /**
1192  * page_add_anon_rmap - add pte mapping to an anonymous page
1193  * @page:	the page to add the mapping to
1194  * @vma:	the vm area in which the mapping is added
1195  * @address:	the user virtual address mapped
1196  * @flags:	the rmap flags
1197  *
1198  * The caller needs to hold the pte lock, and the page must be locked in
1199  * the anon_vma case: to serialize mapping,index checking after setting,
1200  * and to ensure that PageAnon is not being upgraded racily to PageKsm
1201  * (but PageKsm is never downgraded to PageAnon).
1202  */
1203 void page_add_anon_rmap(struct page *page, struct vm_area_struct *vma,
1204 		unsigned long address, rmap_t flags)
1205 {
1206 	struct folio *folio = page_folio(page);
1207 	atomic_t *mapped = &folio->_nr_pages_mapped;
1208 	int nr = 0, nr_pmdmapped = 0;
1209 	bool compound = flags & RMAP_COMPOUND;
1210 	bool first = true;
1211 
1212 	/* Is page being mapped by PTE? Is this its first map to be added? */
1213 	if (likely(!compound)) {
1214 		first = atomic_inc_and_test(&page->_mapcount);
1215 		nr = first;
1216 		if (first && folio_test_large(folio)) {
1217 			nr = atomic_inc_return_relaxed(mapped);
1218 			nr = (nr < COMPOUND_MAPPED);
1219 		}
1220 	} else if (folio_test_pmd_mappable(folio)) {
1221 		/* That test is redundant: it's for safety or to optimize out */
1222 
1223 		first = atomic_inc_and_test(&folio->_entire_mapcount);
1224 		if (first) {
1225 			nr = atomic_add_return_relaxed(COMPOUND_MAPPED, mapped);
1226 			if (likely(nr < COMPOUND_MAPPED + COMPOUND_MAPPED)) {
1227 				nr_pmdmapped = folio_nr_pages(folio);
1228 				nr = nr_pmdmapped - (nr & FOLIO_PAGES_MAPPED);
1229 				/* Raced ahead of a remove and another add? */
1230 				if (unlikely(nr < 0))
1231 					nr = 0;
1232 			} else {
1233 				/* Raced ahead of a remove of COMPOUND_MAPPED */
1234 				nr = 0;
1235 			}
1236 		}
1237 	}
1238 
1239 	VM_BUG_ON_PAGE(!first && (flags & RMAP_EXCLUSIVE), page);
1240 	VM_BUG_ON_PAGE(!first && PageAnonExclusive(page), page);
1241 
1242 	if (nr_pmdmapped)
1243 		__lruvec_stat_mod_folio(folio, NR_ANON_THPS, nr_pmdmapped);
1244 	if (nr)
1245 		__lruvec_stat_mod_folio(folio, NR_ANON_MAPPED, nr);
1246 
1247 	if (likely(!folio_test_ksm(folio))) {
1248 		/* address might be in next vma when migration races vma_merge */
1249 		if (first)
1250 			__page_set_anon_rmap(folio, page, vma, address,
1251 					     !!(flags & RMAP_EXCLUSIVE));
1252 		else
1253 			__page_check_anon_rmap(folio, page, vma, address);
1254 	}
1255 
1256 	mlock_vma_folio(folio, vma, compound);
1257 }
1258 
1259 /**
1260  * folio_add_new_anon_rmap - Add mapping to a new anonymous folio.
1261  * @folio:	The folio to add the mapping to.
1262  * @vma:	the vm area in which the mapping is added
1263  * @address:	the user virtual address mapped
1264  *
1265  * Like page_add_anon_rmap() but must only be called on *new* folios.
1266  * This means the inc-and-test can be bypassed.
1267  * The folio does not have to be locked.
1268  *
1269  * If the folio is large, it is accounted as a THP.  As the folio
1270  * is new, it's assumed to be mapped exclusively by a single process.
1271  */
1272 void folio_add_new_anon_rmap(struct folio *folio, struct vm_area_struct *vma,
1273 		unsigned long address)
1274 {
1275 	int nr;
1276 
1277 	VM_BUG_ON_VMA(address < vma->vm_start || address >= vma->vm_end, vma);
1278 	__folio_set_swapbacked(folio);
1279 
1280 	if (likely(!folio_test_pmd_mappable(folio))) {
1281 		/* increment count (starts at -1) */
1282 		atomic_set(&folio->_mapcount, 0);
1283 		nr = 1;
1284 	} else {
1285 		/* increment count (starts at -1) */
1286 		atomic_set(&folio->_entire_mapcount, 0);
1287 		atomic_set(&folio->_nr_pages_mapped, COMPOUND_MAPPED);
1288 		nr = folio_nr_pages(folio);
1289 		__lruvec_stat_mod_folio(folio, NR_ANON_THPS, nr);
1290 	}
1291 
1292 	__lruvec_stat_mod_folio(folio, NR_ANON_MAPPED, nr);
1293 	__page_set_anon_rmap(folio, &folio->page, vma, address, 1);
1294 }
1295 
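/*
 * Editor's sketch (hypothetical, loosely modelled on do_anonymous_page();
 * memcg charging, PTE locking and error unwinding are elided, and the
 * allocation helper used here is assumed to be available to the caller):
 * the expected order of calls when mapping a brand-new anonymous folio.
 */
static vm_fault_t example_map_new_anon_folio(struct vm_area_struct *vma,
					     unsigned long addr, pte_t *pte)
{
	struct folio *folio;

	if (unlikely(anon_vma_prepare(vma)))
		return VM_FAULT_OOM;

	folio = vma_alloc_zeroed_movable_folio(vma, addr);
	if (!folio)
		return VM_FAULT_OOM;

	/* New folio: no folio lock needed, and no inc-and-test. */
	folio_add_new_anon_rmap(folio, vma, addr);
	folio_add_lru_vma(folio, vma);
	set_pte_at(vma->vm_mm, addr, pte,
		   mk_pte(&folio->page, vma->vm_page_prot));

	return 0;
}
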
1296 /**
1297  * folio_add_file_rmap_range - add pte mapping to page range of a folio
1298  * @folio:	The folio to add the mapping to
1299  * @page:	The first page to add
1300  * @nr_pages:	The number of pages which will be mapped
1301  * @vma:	the vm area in which the mapping is added
1302  * @compound:	charge the page as compound or small page
1303  *
1304  * The page range of folio is defined by [first_page, first_page + nr_pages)
1305  *
1306  * The caller needs to hold the pte lock.
1307  */
1308 void folio_add_file_rmap_range(struct folio *folio, struct page *page,
1309 			unsigned int nr_pages, struct vm_area_struct *vma,
1310 			bool compound)
1311 {
1312 	atomic_t *mapped = &folio->_nr_pages_mapped;
1313 	unsigned int nr_pmdmapped = 0, first;
1314 	int nr = 0;
1315 
1316 	VM_WARN_ON_FOLIO(compound && !folio_test_pmd_mappable(folio), folio);
1317 
1318 	/* Is page being mapped by PTE? Is this its first map to be added? */
1319 	if (likely(!compound)) {
1320 		do {
1321 			first = atomic_inc_and_test(&page->_mapcount);
1322 			if (first && folio_test_large(folio)) {
1323 				first = atomic_inc_return_relaxed(mapped);
1324 				first = (first < COMPOUND_MAPPED);
1325 			}
1326 
1327 			if (first)
1328 				nr++;
1329 		} while (page++, --nr_pages > 0);
1330 	} else if (folio_test_pmd_mappable(folio)) {
1331 		/* That test is redundant: it's for safety or to optimize out */
1332 
1333 		first = atomic_inc_and_test(&folio->_entire_mapcount);
1334 		if (first) {
1335 			nr = atomic_add_return_relaxed(COMPOUND_MAPPED, mapped);
1336 			if (likely(nr < COMPOUND_MAPPED + COMPOUND_MAPPED)) {
1337 				nr_pmdmapped = folio_nr_pages(folio);
1338 				nr = nr_pmdmapped - (nr & FOLIO_PAGES_MAPPED);
1339 				/* Raced ahead of a remove and another add? */
1340 				if (unlikely(nr < 0))
1341 					nr = 0;
1342 			} else {
1343 				/* Raced ahead of a remove of COMPOUND_MAPPED */
1344 				nr = 0;
1345 			}
1346 		}
1347 	}
1348 
1349 	if (nr_pmdmapped)
1350 		__lruvec_stat_mod_folio(folio, folio_test_swapbacked(folio) ?
1351 			NR_SHMEM_PMDMAPPED : NR_FILE_PMDMAPPED, nr_pmdmapped);
1352 	if (nr)
1353 		__lruvec_stat_mod_folio(folio, NR_FILE_MAPPED, nr);
1354 
1355 	mlock_vma_folio(folio, vma, compound);
1356 }
1357 
1358 /**
1359  * page_add_file_rmap - add pte mapping to a file page
1360  * @page:	the page to add the mapping to
1361  * @vma:	the vm area in which the mapping is added
1362  * @compound:	charge the page as compound or small page
1363  *
1364  * The caller needs to hold the pte lock.
1365  */
1366 void page_add_file_rmap(struct page *page, struct vm_area_struct *vma,
1367 		bool compound)
1368 {
1369 	struct folio *folio = page_folio(page);
1370 	unsigned int nr_pages;
1371 
1372 	VM_WARN_ON_ONCE_PAGE(compound && !PageTransHuge(page), page);
1373 
1374 	if (likely(!compound))
1375 		nr_pages = 1;
1376 	else
1377 		nr_pages = folio_nr_pages(folio);
1378 
1379 	folio_add_file_rmap_range(folio, page, nr_pages, vma, compound);
1380 }
1381 
1382 /**
1383  * page_remove_rmap - take down pte mapping from a page
1384  * @page:	page to remove mapping from
1385  * @vma:	the vm area from which the mapping is removed
1386  * @compound:	uncharge the page as compound or small page
1387  *
1388  * The caller needs to hold the pte lock.
1389  */
1390 void page_remove_rmap(struct page *page, struct vm_area_struct *vma,
1391 		bool compound)
1392 {
1393 	struct folio *folio = page_folio(page);
1394 	atomic_t *mapped = &folio->_nr_pages_mapped;
1395 	int nr = 0, nr_pmdmapped = 0;
1396 	bool last;
1397 	enum node_stat_item idx;
1398 
1399 	VM_BUG_ON_PAGE(compound && !PageHead(page), page);
1400 
1401 	/* Hugetlb pages are not counted in NR_*MAPPED */
1402 	if (unlikely(folio_test_hugetlb(folio))) {
1403 		/* hugetlb pages are always mapped with pmds */
1404 		atomic_dec(&folio->_entire_mapcount);
1405 		return;
1406 	}
1407 
1408 	/* Is page being unmapped by PTE? Is this its last map to be removed? */
1409 	if (likely(!compound)) {
1410 		last = atomic_add_negative(-1, &page->_mapcount);
1411 		nr = last;
1412 		if (last && folio_test_large(folio)) {
1413 			nr = atomic_dec_return_relaxed(mapped);
1414 			nr = (nr < COMPOUND_MAPPED);
1415 		}
1416 	} else if (folio_test_pmd_mappable(folio)) {
1417 		/* That test is redundant: it's for safety or to optimize out */
1418 
1419 		last = atomic_add_negative(-1, &folio->_entire_mapcount);
1420 		if (last) {
1421 			nr = atomic_sub_return_relaxed(COMPOUND_MAPPED, mapped);
1422 			if (likely(nr < COMPOUND_MAPPED)) {
1423 				nr_pmdmapped = folio_nr_pages(folio);
1424 				nr = nr_pmdmapped - (nr & FOLIO_PAGES_MAPPED);
1425 				/* Raced ahead of another remove and an add? */
1426 				if (unlikely(nr < 0))
1427 					nr = 0;
1428 			} else {
1429 				/* An add of COMPOUND_MAPPED raced ahead */
1430 				nr = 0;
1431 			}
1432 		}
1433 	}
1434 
1435 	if (nr_pmdmapped) {
1436 		if (folio_test_anon(folio))
1437 			idx = NR_ANON_THPS;
1438 		else if (folio_test_swapbacked(folio))
1439 			idx = NR_SHMEM_PMDMAPPED;
1440 		else
1441 			idx = NR_FILE_PMDMAPPED;
1442 		__lruvec_stat_mod_folio(folio, idx, -nr_pmdmapped);
1443 	}
1444 	if (nr) {
1445 		idx = folio_test_anon(folio) ? NR_ANON_MAPPED : NR_FILE_MAPPED;
1446 		__lruvec_stat_mod_folio(folio, idx, -nr);
1447 
1448 		/*
1449 		 * Queue anon THP for deferred split if at least one
1450 		 * page of the folio is unmapped and at least one page
1451 		 * is still mapped.
1452 		 */
1453 		if (folio_test_pmd_mappable(folio) && folio_test_anon(folio))
1454 			if (!compound || nr < nr_pmdmapped)
1455 				deferred_split_folio(folio);
1456 	}
1457 
1458 	/*
1459 	 * It would be tidy to reset folio_test_anon mapping when fully
1460 	 * unmapped, but that might overwrite a racing page_add_anon_rmap
1461 	 * which increments mapcount after us but sets mapping before us:
1462 	 * so leave the reset to free_pages_prepare, and remember that
1463 	 * it's only reliable while mapped.
1464 	 */
1465 
1466 	munlock_vma_folio(folio, vma, compound);
1467 }
1468 
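/*
 * Editor's sketch (hypothetical, loosely modelled on the zap path; TLB
 * flushing and non-present/swap PTEs are ignored): the expected order for
 * tearing down one PTE mapping -- clear the PTE under the PTE lock, then
 * drop the rmap, then drop the page reference.
 */
static void example_zap_one_pte(struct vm_area_struct *vma, unsigned long addr,
				pte_t *pte, struct page *page)
{
	pte_t pteval = ptep_get_and_clear(vma->vm_mm, addr, pte);

	if (pte_dirty(pteval))
		folio_mark_dirty(page_folio(page));

	page_remove_rmap(page, vma, false);
	folio_put(page_folio(page));
}
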
1469 /*
1470  * @arg: enum ttu_flags will be passed to this argument
1471  */
1472 static bool try_to_unmap_one(struct folio *folio, struct vm_area_struct *vma,
1473 		     unsigned long address, void *arg)
1474 {
1475 	struct mm_struct *mm = vma->vm_mm;
1476 	DEFINE_FOLIO_VMA_WALK(pvmw, folio, vma, address, 0);
1477 	pte_t pteval;
1478 	struct page *subpage;
1479 	bool anon_exclusive, ret = true;
1480 	struct mmu_notifier_range range;
1481 	enum ttu_flags flags = (enum ttu_flags)(long)arg;
1482 	unsigned long pfn;
1483 	unsigned long hsz = 0;
1484 
1485 	/*
1486 	 * When racing against e.g. zap_pte_range() on another cpu,
1487 	 * in between its ptep_get_and_clear_full() and page_remove_rmap(),
1488 	 * try_to_unmap() may return before page_mapped() has become false,
1489 	 * if page table locking is skipped: use TTU_SYNC to wait for that.
1490 	 */
1491 	if (flags & TTU_SYNC)
1492 		pvmw.flags = PVMW_SYNC;
1493 
1494 	if (flags & TTU_SPLIT_HUGE_PMD)
1495 		split_huge_pmd_address(vma, address, false, folio);
1496 
1497 	/*
1498 	 * For THP, we have to assume the worst case, i.e. pmd, for invalidation.
1499 	 * For hugetlb, it could be much worse if we need to do pud
1500 	 * invalidation in the case of pmd sharing.
1501 	 *
1502 	 * Note that the folio cannot be freed in this function, as the caller of
1503 	 * try_to_unmap() must hold a reference on the folio.
1504 	 */
1505 	range.end = vma_address_end(&pvmw);
1506 	mmu_notifier_range_init(&range, MMU_NOTIFY_CLEAR, 0, vma->vm_mm,
1507 				address, range.end);
1508 	if (folio_test_hugetlb(folio)) {
1509 		/*
1510 		 * If sharing is possible, start and end will be adjusted
1511 		 * accordingly.
1512 		 */
1513 		adjust_range_if_pmd_sharing_possible(vma, &range.start,
1514 						     &range.end);
1515 
1516 		/* We need the huge page size for set_huge_pte_at() */
1517 		hsz = huge_page_size(hstate_vma(vma));
1518 	}
1519 	mmu_notifier_invalidate_range_start(&range);
1520 
1521 	while (page_vma_mapped_walk(&pvmw)) {
1522 		/* Unexpected PMD-mapped THP? */
1523 		VM_BUG_ON_FOLIO(!pvmw.pte, folio);
1524 
1525 		/*
1526 		 * If the folio is in an mlock()d vma, we must not swap it out.
1527 		 */
1528 		if (!(flags & TTU_IGNORE_MLOCK) &&
1529 		    (vma->vm_flags & VM_LOCKED)) {
1530 			/* Restore the mlock which got missed */
1531 			mlock_vma_folio(folio, vma, false);
1532 			page_vma_mapped_walk_done(&pvmw);
1533 			ret = false;
1534 			break;
1535 		}
1536 
1537 		pfn = pte_pfn(ptep_get(pvmw.pte));
1538 		subpage = folio_page(folio, pfn - folio_pfn(folio));
1539 		address = pvmw.address;
1540 		anon_exclusive = folio_test_anon(folio) &&
1541 				 PageAnonExclusive(subpage);
1542 
1543 		if (folio_test_hugetlb(folio)) {
1544 			bool anon = folio_test_anon(folio);
1545 
1546 			/*
1547 			 * try_to_unmap() is only passed a hugetlb page when
1548 			 * the hugetlb page is poisoned.
1549 			 */
1550 			VM_BUG_ON_PAGE(!PageHWPoison(subpage), subpage);
1551 			/*
1552 			 * huge_pmd_unshare may unmap an entire PMD page.
1553 			 * There is no way of knowing exactly which PMDs may
1554 			 * be cached for this mm, so we must flush them all.
1555 			 * start/end were already adjusted above to cover this
1556 			 * range.
1557 			 */
1558 			flush_cache_range(vma, range.start, range.end);
1559 
1560 			/*
1561 			 * To call huge_pmd_unshare, i_mmap_rwsem must be
1562 			 * held in write mode.  Caller needs to explicitly
1563 			 * do this outside rmap routines.
1564 			 *
1565 			 * We also must hold hugetlb vma_lock in write mode.
1566 			 * Lock order dictates acquiring vma_lock BEFORE
1567 			 * i_mmap_rwsem.  We can only try lock here and fail
1568 			 * if unsuccessful.
1569 			 */
1570 			if (!anon) {
1571 				VM_BUG_ON(!(flags & TTU_RMAP_LOCKED));
1572 				if (!hugetlb_vma_trylock_write(vma)) {
1573 					page_vma_mapped_walk_done(&pvmw);
1574 					ret = false;
1575 					break;
1576 				}
1577 				if (huge_pmd_unshare(mm, vma, address, pvmw.pte)) {
1578 					hugetlb_vma_unlock_write(vma);
1579 					flush_tlb_range(vma,
1580 						range.start, range.end);
1581 					/*
1582 					 * The ref count of the PMD page was
1583 					 * dropped, which is part of the way map
1584 					 * counting is done for shared PMDs.
1585 					 * Return 'true' here.  When there is
1586 					 * no other sharing, huge_pmd_unshare
1587 					 * returns false and we will unmap the
1588 					 * actual page and drop map count
1589 					 * to zero.
1590 					 */
1591 					page_vma_mapped_walk_done(&pvmw);
1592 					break;
1593 				}
1594 				hugetlb_vma_unlock_write(vma);
1595 			}
1596 			pteval = huge_ptep_clear_flush(vma, address, pvmw.pte);
1597 		} else {
1598 			flush_cache_page(vma, address, pfn);
1599 			/* Nuke the page table entry. */
1600 			if (should_defer_flush(mm, flags)) {
1601 				/*
1602 				 * We clear the PTE but do not flush so potentially
1603 				 * a remote CPU could still be writing to the folio.
1604 				 * If the entry was previously clean then the
1605 				 * architecture must guarantee that a clear->dirty
1606 				 * transition on a cached TLB entry is written through
1607 				 * and traps if the PTE is unmapped.
1608 				 */
1609 				pteval = ptep_get_and_clear(mm, address, pvmw.pte);
1610 
1611 				set_tlb_ubc_flush_pending(mm, pteval, address);
1612 			} else {
1613 				pteval = ptep_clear_flush(vma, address, pvmw.pte);
1614 			}
1615 		}
1616 
1617 		/*
1618 		 * Now the pte is cleared. If this pte was uffd-wp armed,
1619 		 * we may want to replace a none pte with a marker pte if
1620 		 * it's file-backed, so we don't lose the tracking info.
1621 		 */
1622 		pte_install_uffd_wp_if_needed(vma, address, pvmw.pte, pteval);
1623 
1624 		/* Set the dirty flag on the folio now the pte is gone. */
1625 		if (pte_dirty(pteval))
1626 			folio_mark_dirty(folio);
1627 
1628 		/* Update high watermark before we lower rss */
1629 		update_hiwater_rss(mm);
1630 
1631 		if (PageHWPoison(subpage) && (flags & TTU_HWPOISON)) {
1632 			pteval = swp_entry_to_pte(make_hwpoison_entry(subpage));
1633 			if (folio_test_hugetlb(folio)) {
1634 				hugetlb_count_sub(folio_nr_pages(folio), mm);
1635 				set_huge_pte_at(mm, address, pvmw.pte, pteval,
1636 						hsz);
1637 			} else {
1638 				dec_mm_counter(mm, mm_counter(&folio->page));
1639 				set_pte_at(mm, address, pvmw.pte, pteval);
1640 			}
1641 
1642 		} else if (pte_unused(pteval) && !userfaultfd_armed(vma)) {
1643 			/*
1644 			 * The guest indicated that the page content is of no
1645 			 * interest anymore. Simply discard the pte, vmscan
1646 			 * will take care of the rest.
1647 			 * A future reference will then fault in a new zero
1648 			 * page. When userfaultfd is active, we must not drop
1649 			 * this page though, as its main user (postcopy
1650 			 * migration) will not expect userfaults on already
1651 			 * copied pages.
1652 			 */
1653 			dec_mm_counter(mm, mm_counter(&folio->page));
1654 		} else if (folio_test_anon(folio)) {
1655 			swp_entry_t entry = page_swap_entry(subpage);
1656 			pte_t swp_pte;
1657 			/*
1658 			 * Store the swap location in the pte.
1659 			 * See handle_pte_fault() ...
1660 			 */
1661 			if (unlikely(folio_test_swapbacked(folio) !=
1662 					folio_test_swapcache(folio))) {
1663 				WARN_ON_ONCE(1);
1664 				ret = false;
1665 				page_vma_mapped_walk_done(&pvmw);
1666 				break;
1667 			}
1668 
1669 			/* MADV_FREE page check */
1670 			if (!folio_test_swapbacked(folio)) {
1671 				int ref_count, map_count;
1672 
1673 				/*
1674 				 * Synchronize with gup_pte_range():
1675 				 * - clear PTE; barrier; read refcount
1676 				 * - inc refcount; barrier; read PTE
1677 				 */
1678 				smp_mb();
1679 
1680 				ref_count = folio_ref_count(folio);
1681 				map_count = folio_mapcount(folio);
1682 
1683 				/*
1684 				 * Order reads for page refcount and dirty flag
1685 				 * (see comments in __remove_mapping()).
1686 				 */
1687 				smp_rmb();
1688 
1689 				/*
1690 				 * The only page refs must be one from isolation
1691 				 * plus the rmap(s) (dropped by discard:).
1692 				 */
1693 				if (ref_count == 1 + map_count &&
1694 				    !folio_test_dirty(folio)) {
1695 					dec_mm_counter(mm, MM_ANONPAGES);
1696 					goto discard;
1697 				}
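				/*
				 * For example: an MADV_FREE anon folio that
				 * reclaim has isolated holds one reference for
				 * the isolation plus one per remaining rmap.
				 * Mapped by two processes, map_count == 2, so
				 * only ref_count == 3 with a clean folio allows
				 * the lazy-free discard; any extra reference
				 * (e.g. a GUP pin) or a redirty takes the remap
				 * path below instead.
				 */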
1698 
1699 				/*
1700 				 * If the folio was redirtied, it cannot be
1701 				 * discarded. Remap the page into the page table.
1702 				 */
1703 				set_pte_at(mm, address, pvmw.pte, pteval);
1704 				folio_set_swapbacked(folio);
1705 				ret = false;
1706 				page_vma_mapped_walk_done(&pvmw);
1707 				break;
1708 			}
1709 
1710 			if (swap_duplicate(entry) < 0) {
1711 				set_pte_at(mm, address, pvmw.pte, pteval);
1712 				ret = false;
1713 				page_vma_mapped_walk_done(&pvmw);
1714 				break;
1715 			}
1716 			if (arch_unmap_one(mm, vma, address, pteval) < 0) {
1717 				swap_free(entry);
1718 				set_pte_at(mm, address, pvmw.pte, pteval);
1719 				ret = false;
1720 				page_vma_mapped_walk_done(&pvmw);
1721 				break;
1722 			}
1723 
1724 			/* See page_try_share_anon_rmap(): clear PTE first. */
1725 			if (anon_exclusive &&
1726 			    page_try_share_anon_rmap(subpage)) {
1727 				swap_free(entry);
1728 				set_pte_at(mm, address, pvmw.pte, pteval);
1729 				ret = false;
1730 				page_vma_mapped_walk_done(&pvmw);
1731 				break;
1732 			}
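			/*
			 * The check below is the usual check-lock-recheck
			 * pattern: list_empty() is tested locklessly first so
			 * that mms already on mmlist avoid taking mmlist_lock,
			 * then re-tested under the lock before linking into
			 * init_mm.mmlist to avoid a double add.
			 */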
1733 			if (list_empty(&mm->mmlist)) {
1734 				spin_lock(&mmlist_lock);
1735 				if (list_empty(&mm->mmlist))
1736 					list_add(&mm->mmlist, &init_mm.mmlist);
1737 				spin_unlock(&mmlist_lock);
1738 			}
1739 			dec_mm_counter(mm, MM_ANONPAGES);
1740 			inc_mm_counter(mm, MM_SWAPENTS);
1741 			swp_pte = swp_entry_to_pte(entry);
1742 			if (anon_exclusive)
1743 				swp_pte = pte_swp_mkexclusive(swp_pte);
1744 			if (pte_soft_dirty(pteval))
1745 				swp_pte = pte_swp_mksoft_dirty(swp_pte);
1746 			if (pte_uffd_wp(pteval))
1747 				swp_pte = pte_swp_mkuffd_wp(swp_pte);
1748 			set_pte_at(mm, address, pvmw.pte, swp_pte);
1749 		} else {
1750 			/*
1751 			 * This is a locked file-backed folio,
1752 			 * so it cannot be removed from the page
1753 			 * cache and replaced by a new folio before
1754 			 * mmu_notifier_invalidate_range_end, so no
1755 			 * concurrent thread can update its page table
1756 			 * to point at a new folio while a device is
1757 			 * still using this folio.
1758 			 *
1759 			 * See Documentation/mm/mmu_notifier.rst
1760 			 */
1761 			dec_mm_counter(mm, mm_counter_file(&folio->page));
1762 		}
1763 discard:
1764 		page_remove_rmap(subpage, vma, folio_test_hugetlb(folio));
1765 		if (vma->vm_flags & VM_LOCKED)
1766 			mlock_drain_local();
1767 		folio_put(folio);
1768 	}
1769 
1770 	mmu_notifier_invalidate_range_end(&range);
1771 
1772 	return ret;
1773 }
1774 
1775 static bool invalid_migration_vma(struct vm_area_struct *vma, void *arg)
1776 {
1777 	return vma_is_temporary_stack(vma);
1778 }
1779 
1780 static int folio_not_mapped(struct folio *folio)
1781 {
1782 	return !folio_mapped(folio);
1783 }
1784 
1785 /**
1786  * try_to_unmap - Try to remove all page table mappings to a folio.
1787  * @folio: The folio to unmap.
1788  * @flags: action and flags
1789  *
1790  * Tries to remove all the page table entries which are mapping this
1791  * folio.  It is the caller's responsibility to check if the folio is
1792  * still mapped if needed (use TTU_SYNC to prevent accounting races).
1793  *
1794  * Context: Caller must hold the folio lock.
1795  */
1796 void try_to_unmap(struct folio *folio, enum ttu_flags flags)
1797 {
1798 	struct rmap_walk_control rwc = {
1799 		.rmap_one = try_to_unmap_one,
1800 		.arg = (void *)flags,
1801 		.done = folio_not_mapped,
1802 		.anon_lock = folio_lock_anon_vma_read,
1803 	};
1804 
1805 	if (flags & TTU_RMAP_LOCKED)
1806 		rmap_walk_locked(folio, &rwc);
1807 	else
1808 		rmap_walk(folio, &rwc);
1809 }
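/*
 * A minimal sketch (not part of rmap.c) of how a reclaim-style caller might
 * drive try_to_unmap().  unmap_folio_for_reclaim() is a hypothetical name
 * used only for illustration; real callers such as shrink_folio_list() also
 * batch TLB flushes and handle swap setup.  Because try_to_unmap() returns
 * void, success is judged by re-checking folio_mapped(); TTU_SYNC closes the
 * accounting race mentioned in the kernel-doc above.
 */
static inline bool unmap_folio_for_reclaim(struct folio *folio)
{
	VM_BUG_ON_FOLIO(!folio_test_locked(folio), folio);

	try_to_unmap(folio, TTU_BATCH_FLUSH | TTU_SYNC);

	/* The folio is only reclaimable once every mapping is gone. */
	return !folio_mapped(folio);
}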
1810 
1811 /*
1812  * @arg: enum ttu_flags will be passed to this argument.
1813  *
1814  * If TTU_SPLIT_HUGE_PMD is specified, any PMD mappings will be split into PTEs
1815  * containing migration entries.
1816  */
1817 static bool try_to_migrate_one(struct folio *folio, struct vm_area_struct *vma,
1818 		     unsigned long address, void *arg)
1819 {
1820 	struct mm_struct *mm = vma->vm_mm;
1821 	DEFINE_FOLIO_VMA_WALK(pvmw, folio, vma, address, 0);
1822 	pte_t pteval;
1823 	struct page *subpage;
1824 	bool anon_exclusive, ret = true;
1825 	struct mmu_notifier_range range;
1826 	enum ttu_flags flags = (enum ttu_flags)(long)arg;
1827 	unsigned long pfn;
1828 	unsigned long hsz = 0;
1829 
1830 	/*
1831 	 * When racing against e.g. zap_pte_range() on another cpu,
1832 	 * in between its ptep_get_and_clear_full() and page_remove_rmap(),
1833 	 * try_to_migrate() may return before page_mapped() has become false,
1834 	 * if page table locking is skipped: use TTU_SYNC to wait for that.
1835 	 */
1836 	if (flags & TTU_SYNC)
1837 		pvmw.flags = PVMW_SYNC;
1838 
1839 	/*
1840 	 * unmap_page() in mm/huge_memory.c is the only user of migration with
1841 	 * TTU_SPLIT_HUGE_PMD and it wants to freeze.
1842 	 */
1843 	if (flags & TTU_SPLIT_HUGE_PMD)
1844 		split_huge_pmd_address(vma, address, true, folio);
1845 
1846 	/*
1847 	 * For THP, we have to assume the worst case, i.e. pmd, for invalidation.
1848 	 * For hugetlb, it could be much worse if we need to do pud
1849 	 * invalidation in the case of pmd sharing.
1850 	 *
1851 	 * Note that the page cannot be freed in this function because the
1852 	 * caller of try_to_migrate() must hold a reference on the page.
1853 	 */
1854 	range.end = vma_address_end(&pvmw);
1855 	mmu_notifier_range_init(&range, MMU_NOTIFY_CLEAR, 0, vma->vm_mm,
1856 				address, range.end);
1857 	if (folio_test_hugetlb(folio)) {
1858 		/*
1859 		 * If sharing is possible, start and end will be adjusted
1860 		 * accordingly.
1861 		 */
1862 		adjust_range_if_pmd_sharing_possible(vma, &range.start,
1863 						     &range.end);
1864 
1865 		/* We need the huge page size for set_huge_pte_at() */
1866 		hsz = huge_page_size(hstate_vma(vma));
1867 	}
1868 	mmu_notifier_invalidate_range_start(&range);
1869 
1870 	while (page_vma_mapped_walk(&pvmw)) {
1871 #ifdef CONFIG_ARCH_ENABLE_THP_MIGRATION
1872 		/* PMD-mapped THP migration entry */
1873 		if (!pvmw.pte) {
1874 			subpage = folio_page(folio,
1875 				pmd_pfn(*pvmw.pmd) - folio_pfn(folio));
1876 			VM_BUG_ON_FOLIO(folio_test_hugetlb(folio) ||
1877 					!folio_test_pmd_mappable(folio), folio);
1878 
1879 			if (set_pmd_migration_entry(&pvmw, subpage)) {
1880 				ret = false;
1881 				page_vma_mapped_walk_done(&pvmw);
1882 				break;
1883 			}
1884 			continue;
1885 		}
1886 #endif
1887 
1888 		/* Unexpected PMD-mapped THP? */
1889 		VM_BUG_ON_FOLIO(!pvmw.pte, folio);
1890 
1891 		pfn = pte_pfn(ptep_get(pvmw.pte));
1892 
1893 		if (folio_is_zone_device(folio)) {
1894 			/*
1895 			 * Our PTE is a non-present device private entry and
1896 			 * calculating the subpage as for the common case would
1897 			 * result in an invalid pointer.
1898 			 *
1899 			 * Since only PAGE_SIZE pages can currently be
1900 			 * migrated, just set it to page. This will need to be
1901 			 * changed when hugepage migrations to device private
1902 			 * memory are supported.
1903 			 */
1904 			VM_BUG_ON_FOLIO(folio_nr_pages(folio) > 1, folio);
1905 			subpage = &folio->page;
1906 		} else {
1907 			subpage = folio_page(folio, pfn - folio_pfn(folio));
1908 		}
1909 		address = pvmw.address;
1910 		anon_exclusive = folio_test_anon(folio) &&
1911 				 PageAnonExclusive(subpage);
1912 
1913 		if (folio_test_hugetlb(folio)) {
1914 			bool anon = folio_test_anon(folio);
1915 
1916 			/*
1917 			 * huge_pmd_unshare may unmap an entire PMD page.
1918 			 * There is no way of knowing exactly which PMDs may
1919 			 * be cached for this mm, so we must flush them all.
1920 			 * start/end were already adjusted above to cover this
1921 			 * range.
1922 			 */
1923 			flush_cache_range(vma, range.start, range.end);
1924 
1925 			/*
1926 			 * To call huge_pmd_unshare, i_mmap_rwsem must be
1927 			 * held in write mode.  Caller needs to explicitly
1928 			 * do this outside rmap routines.
1929 			 *
1930 			 * We also must hold hugetlb vma_lock in write mode.
1931 			 * Lock order dictates acquiring vma_lock BEFORE
1932 			 * i_mmap_rwsem.  We can only try lock here and
1933 			 * fail if unsuccessful.
1934 			 */
1935 			if (!anon) {
1936 				VM_BUG_ON(!(flags & TTU_RMAP_LOCKED));
1937 				if (!hugetlb_vma_trylock_write(vma)) {
1938 					page_vma_mapped_walk_done(&pvmw);
1939 					ret = false;
1940 					break;
1941 				}
1942 				if (huge_pmd_unshare(mm, vma, address, pvmw.pte)) {
1943 					hugetlb_vma_unlock_write(vma);
1944 					flush_tlb_range(vma,
1945 						range.start, range.end);
1946 
1947 					/*
1948 					 * The ref count of the PMD page was
1949 					 * dropped which is part of the way map
1950 					 * counting is done for shared PMDs.
1951 					 * Return 'true' here.  When there is
1952 					 * no other sharing, huge_pmd_unshare
1953 					 * returns false and we will unmap the
1954 					 * actual page and drop map count
1955 					 * to zero.
1956 					 */
1957 					page_vma_mapped_walk_done(&pvmw);
1958 					break;
1959 				}
1960 				hugetlb_vma_unlock_write(vma);
1961 			}
1962 			/* Nuke the hugetlb page table entry */
1963 			pteval = huge_ptep_clear_flush(vma, address, pvmw.pte);
1964 		} else {
1965 			flush_cache_page(vma, address, pfn);
1966 			/* Nuke the page table entry. */
1967 			if (should_defer_flush(mm, flags)) {
1968 				/*
1969 				 * We clear the PTE but do not flush so potentially
1970 				 * a remote CPU could still be writing to the folio.
1971 				 * If the entry was previously clean then the
1972 				 * architecture must guarantee that a clear->dirty
1973 				 * transition on a cached TLB entry is written through
1974 				 * and traps if the PTE is unmapped.
1975 				 */
1976 				pteval = ptep_get_and_clear(mm, address, pvmw.pte);
1977 
1978 				set_tlb_ubc_flush_pending(mm, pteval, address);
1979 			} else {
1980 				pteval = ptep_clear_flush(vma, address, pvmw.pte);
1981 			}
1982 		}
1983 
1984 		/* Set the dirty flag on the folio now the pte is gone. */
1985 		if (pte_dirty(pteval))
1986 			folio_mark_dirty(folio);
1987 
1988 		/* Update high watermark before we lower rss */
1989 		update_hiwater_rss(mm);
1990 
1991 		if (folio_is_device_private(folio)) {
1992 			unsigned long pfn = folio_pfn(folio);
1993 			swp_entry_t entry;
1994 			pte_t swp_pte;
1995 
1996 			if (anon_exclusive)
1997 				BUG_ON(page_try_share_anon_rmap(subpage));
1998 
1999 			/*
2000 			 * Store the pfn of the page in a special migration
2001 			 * pte. do_swap_page() will wait until the migration
2002 			 * pte is removed and then restart fault handling.
2003 			 */
2004 			entry = pte_to_swp_entry(pteval);
2005 			if (is_writable_device_private_entry(entry))
2006 				entry = make_writable_migration_entry(pfn);
2007 			else if (anon_exclusive)
2008 				entry = make_readable_exclusive_migration_entry(pfn);
2009 			else
2010 				entry = make_readable_migration_entry(pfn);
2011 			swp_pte = swp_entry_to_pte(entry);
2012 
2013 			/*
2014 			 * pteval maps a zone device page and is therefore
2015 			 * a swap pte.
2016 			 */
2017 			if (pte_swp_soft_dirty(pteval))
2018 				swp_pte = pte_swp_mksoft_dirty(swp_pte);
2019 			if (pte_swp_uffd_wp(pteval))
2020 				swp_pte = pte_swp_mkuffd_wp(swp_pte);
2021 			set_pte_at(mm, pvmw.address, pvmw.pte, swp_pte);
2022 			trace_set_migration_pte(pvmw.address, pte_val(swp_pte),
2023 						compound_order(&folio->page));
2024 			/*
2025 			 * No need to invalidate here; it will synchronize
2026 			 * against the special swap migration pte.
2027 			 */
2028 		} else if (PageHWPoison(subpage)) {
2029 			pteval = swp_entry_to_pte(make_hwpoison_entry(subpage));
2030 			if (folio_test_hugetlb(folio)) {
2031 				hugetlb_count_sub(folio_nr_pages(folio), mm);
2032 				set_huge_pte_at(mm, address, pvmw.pte, pteval,
2033 						hsz);
2034 			} else {
2035 				dec_mm_counter(mm, mm_counter(&folio->page));
2036 				set_pte_at(mm, address, pvmw.pte, pteval);
2037 			}
2038 
2039 		} else if (pte_unused(pteval) && !userfaultfd_armed(vma)) {
2040 			/*
2041 			 * The guest indicated that the page content is of no
2042 			 * interest anymore. Simply discard the pte, vmscan
2043 			 * will take care of the rest.
2044 			 * A future reference will then fault in a new zero
2045 			 * page. When userfaultfd is active, we must not drop
2046 			 * this page though, as its main user (postcopy
2047 			 * migration) will not expect userfaults on already
2048 			 * copied pages.
2049 			 */
2050 			dec_mm_counter(mm, mm_counter(&folio->page));
2051 		} else {
2052 			swp_entry_t entry;
2053 			pte_t swp_pte;
2054 
2055 			if (arch_unmap_one(mm, vma, address, pteval) < 0) {
2056 				if (folio_test_hugetlb(folio))
2057 					set_huge_pte_at(mm, address, pvmw.pte,
2058 							pteval, hsz);
2059 				else
2060 					set_pte_at(mm, address, pvmw.pte, pteval);
2061 				ret = false;
2062 				page_vma_mapped_walk_done(&pvmw);
2063 				break;
2064 			}
2065 			VM_BUG_ON_PAGE(pte_write(pteval) && folio_test_anon(folio) &&
2066 				       !anon_exclusive, subpage);
2067 
2068 			/* See page_try_share_anon_rmap(): clear PTE first. */
2069 			if (anon_exclusive &&
2070 			    page_try_share_anon_rmap(subpage)) {
2071 				if (folio_test_hugetlb(folio))
2072 					set_huge_pte_at(mm, address, pvmw.pte,
2073 							pteval, hsz);
2074 				else
2075 					set_pte_at(mm, address, pvmw.pte, pteval);
2076 				ret = false;
2077 				page_vma_mapped_walk_done(&pvmw);
2078 				break;
2079 			}
2080 
2081 			/*
2082 			 * Store the pfn of the page in a special migration
2083 			 * pte. do_swap_page() will wait until the migration
2084 			 * pte is removed and then restart fault handling.
2085 			 */
2086 			if (pte_write(pteval))
2087 				entry = make_writable_migration_entry(
2088 							page_to_pfn(subpage));
2089 			else if (anon_exclusive)
2090 				entry = make_readable_exclusive_migration_entry(
2091 							page_to_pfn(subpage));
2092 			else
2093 				entry = make_readable_migration_entry(
2094 							page_to_pfn(subpage));
2095 			if (pte_young(pteval))
2096 				entry = make_migration_entry_young(entry);
2097 			if (pte_dirty(pteval))
2098 				entry = make_migration_entry_dirty(entry);
2099 			swp_pte = swp_entry_to_pte(entry);
2100 			if (pte_soft_dirty(pteval))
2101 				swp_pte = pte_swp_mksoft_dirty(swp_pte);
2102 			if (pte_uffd_wp(pteval))
2103 				swp_pte = pte_swp_mkuffd_wp(swp_pte);
2104 			if (folio_test_hugetlb(folio))
2105 				set_huge_pte_at(mm, address, pvmw.pte, swp_pte,
2106 						hsz);
2107 			else
2108 				set_pte_at(mm, address, pvmw.pte, swp_pte);
2109 			trace_set_migration_pte(address, pte_val(swp_pte),
2110 						compound_order(&folio->page));
2111 			/*
2112 			 * No need to invalidate here; it will synchronize
2113 			 * against the special swap migration pte.
2114 			 */
2115 		}
2116 
2117 		page_remove_rmap(subpage, vma, folio_test_hugetlb(folio));
2118 		if (vma->vm_flags & VM_LOCKED)
2119 			mlock_drain_local();
2120 		folio_put(folio);
2121 	}
2122 
2123 	mmu_notifier_invalidate_range_end(&range);
2124 
2125 	return ret;
2126 }
2127 
2128 /**
2129  * try_to_migrate - try to replace all page table mappings with swap entries
2130  * @folio: the folio to replace page table entries for
2131  * @flags: action and flags
2132  *
2133  * Tries to remove all the page table entries which are mapping this folio and
2134  * replace them with special swap entries. Caller must hold the folio lock.
2135  */
2136 void try_to_migrate(struct folio *folio, enum ttu_flags flags)
2137 {
2138 	struct rmap_walk_control rwc = {
2139 		.rmap_one = try_to_migrate_one,
2140 		.arg = (void *)flags,
2141 		.done = folio_not_mapped,
2142 		.anon_lock = folio_lock_anon_vma_read,
2143 	};
2144 
2145 	/*
2146 	 * Migration always ignores mlock and only supports the TTU_RMAP_LOCKED,
2147 	 * TTU_SPLIT_HUGE_PMD, TTU_SYNC, and TTU_BATCH_FLUSH flags.
2148 	 */
2149 	if (WARN_ON_ONCE(flags & ~(TTU_RMAP_LOCKED | TTU_SPLIT_HUGE_PMD |
2150 					TTU_SYNC | TTU_BATCH_FLUSH)))
2151 		return;
2152 
2153 	if (folio_is_zone_device(folio) &&
2154 	    (!folio_is_device_private(folio) && !folio_is_device_coherent(folio)))
2155 		return;
2156 
2157 	/*
2158 	 * During exec, a temporary VMA is set up and later moved.
2159 	 * The VMA is moved under the anon_vma lock but not the
2160 	 * page tables leading to a race where migration cannot
2161 	 * find the migration ptes. Rather than increasing the
2162 	 * locking requirements of exec(), migration skips
2163 	 * temporary VMAs until after exec() completes.
2164 	 */
2165 	if (!folio_test_ksm(folio) && folio_test_anon(folio))
2166 		rwc.invalid_vma = invalid_migration_vma;
2167 
2168 	if (flags & TTU_RMAP_LOCKED)
2169 		rmap_walk_locked(folio, &rwc);
2170 	else
2171 		rmap_walk(folio, &rwc);
2172 }
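/*
 * A minimal sketch (not part of rmap.c) of the unmap half of migration.
 * freeze_folio_for_migration() is a hypothetical name; the real callers in
 * mm/migrate.c additionally handle writeback, reference freezing and the
 * copy/restore steps.
 */
static inline bool freeze_folio_for_migration(struct folio *src)
{
	if (!folio_trylock(src))
		return false;

	/* Replace every mapping of @src with a migration swap entry. */
	try_to_migrate(src, 0);

	if (folio_mapped(src)) {
		/* At least one mapping could not be converted; give up. */
		folio_unlock(src);
		return false;
	}

	/* Mappings are frozen; the caller would now copy and remap. */
	return true;
}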
2173 
2174 #ifdef CONFIG_DEVICE_PRIVATE
2175 struct make_exclusive_args {
2176 	struct mm_struct *mm;
2177 	unsigned long address;
2178 	void *owner;
2179 	bool valid;
2180 };
2181 
2182 static bool page_make_device_exclusive_one(struct folio *folio,
2183 		struct vm_area_struct *vma, unsigned long address, void *priv)
2184 {
2185 	struct mm_struct *mm = vma->vm_mm;
2186 	DEFINE_FOLIO_VMA_WALK(pvmw, folio, vma, address, 0);
2187 	struct make_exclusive_args *args = priv;
2188 	pte_t pteval;
2189 	struct page *subpage;
2190 	bool ret = true;
2191 	struct mmu_notifier_range range;
2192 	swp_entry_t entry;
2193 	pte_t swp_pte;
2194 	pte_t ptent;
2195 
2196 	mmu_notifier_range_init_owner(&range, MMU_NOTIFY_EXCLUSIVE, 0,
2197 				      vma->vm_mm, address, min(vma->vm_end,
2198 				      address + folio_size(folio)),
2199 				      args->owner);
2200 	mmu_notifier_invalidate_range_start(&range);
2201 
2202 	while (page_vma_mapped_walk(&pvmw)) {
2203 		/* Unexpected PMD-mapped THP? */
2204 		VM_BUG_ON_FOLIO(!pvmw.pte, folio);
2205 
2206 		ptent = ptep_get(pvmw.pte);
2207 		if (!pte_present(ptent)) {
2208 			ret = false;
2209 			page_vma_mapped_walk_done(&pvmw);
2210 			break;
2211 		}
2212 
2213 		subpage = folio_page(folio,
2214 				pte_pfn(ptent) - folio_pfn(folio));
2215 		address = pvmw.address;
2216 
2217 		/* Nuke the page table entry. */
2218 		flush_cache_page(vma, address, pte_pfn(ptent));
2219 		pteval = ptep_clear_flush(vma, address, pvmw.pte);
2220 
2221 		/* Set the dirty flag on the folio now the pte is gone. */
2222 		if (pte_dirty(pteval))
2223 			folio_mark_dirty(folio);
2224 
2225 		/*
2226 		 * Check that our target page is still mapped at the expected
2227 		 * address.
2228 		 */
2229 		if (args->mm == mm && args->address == address &&
2230 		    pte_write(pteval))
2231 			args->valid = true;
2232 
2233 		/*
2234 		 * Store the pfn of the page in a special device-exclusive
2235 		 * swap pte. On the next CPU access, do_swap_page() restores
2236 		 * the original mapping after notifying the device.
2237 		 */
2238 		if (pte_write(pteval))
2239 			entry = make_writable_device_exclusive_entry(
2240 							page_to_pfn(subpage));
2241 		else
2242 			entry = make_readable_device_exclusive_entry(
2243 							page_to_pfn(subpage));
2244 		swp_pte = swp_entry_to_pte(entry);
2245 		if (pte_soft_dirty(pteval))
2246 			swp_pte = pte_swp_mksoft_dirty(swp_pte);
2247 		if (pte_uffd_wp(pteval))
2248 			swp_pte = pte_swp_mkuffd_wp(swp_pte);
2249 
2250 		set_pte_at(mm, address, pvmw.pte, swp_pte);
2251 
2252 		/*
2253 		 * The reference held for the pte that has just been removed is
2254 		 * carried by the device-exclusive swap entry, so don't take another.
2255 		 */
2256 		page_remove_rmap(subpage, vma, false);
2257 	}
2258 
2259 	mmu_notifier_invalidate_range_end(&range);
2260 
2261 	return ret;
2262 }
2263 
2264 /**
2265  * folio_make_device_exclusive - Mark the folio exclusively owned by a device.
2266  * @folio: The folio to replace page table entries for.
2267  * @mm: The mm_struct where the folio is expected to be mapped.
2268  * @address: Address where the folio is expected to be mapped.
2269  * @owner: passed to MMU_NOTIFY_EXCLUSIVE range notifier callbacks
2270  *
2271  * Tries to remove all the page table entries which are mapping this
2272  * folio and replace them with special device exclusive swap entries to
2273  * grant a device exclusive access to the folio.
2274  *
2275  * Context: Caller must hold the folio lock.
2276  * Return: false if the page is still mapped, or if it could not be unmapped
2277  * from the expected address. Otherwise returns true (success).
2278  */
2279 static bool folio_make_device_exclusive(struct folio *folio,
2280 		struct mm_struct *mm, unsigned long address, void *owner)
2281 {
2282 	struct make_exclusive_args args = {
2283 		.mm = mm,
2284 		.address = address,
2285 		.owner = owner,
2286 		.valid = false,
2287 	};
2288 	struct rmap_walk_control rwc = {
2289 		.rmap_one = page_make_device_exclusive_one,
2290 		.done = folio_not_mapped,
2291 		.anon_lock = folio_lock_anon_vma_read,
2292 		.arg = &args,
2293 	};
2294 
2295 	/*
2296 	 * Restrict to anonymous folios for now to avoid potential writeback
2297 	 * issues.
2298 	 */
2299 	if (!folio_test_anon(folio))
2300 		return false;
2301 
2302 	rmap_walk(folio, &rwc);
2303 
2304 	return args.valid && !folio_mapcount(folio);
2305 }
2306 
2307 /**
2308  * make_device_exclusive_range() - Mark a range for exclusive use by a device
2309  * @mm: mm_struct of associated target process
2310  * @start: start of the region to mark for exclusive device access
2311  * @end: end address of region
2312  * @pages: returns the pages which were successfully marked for exclusive access
2313  * @owner: passed to MMU_NOTIFY_EXCLUSIVE range notifier to allow filtering
2314  *
2315  * Returns: number of pages found in the range by GUP. A page is marked for
2316  * exclusive access only if its entry in @pages is non-NULL.
2317  *
2318  * This function finds ptes mapping page(s) to the given address range, locks
2319  * them and replaces mappings with special swap entries preventing userspace CPU
2320  * access. On fault these entries are replaced with the original mapping after
2321  * calling MMU notifiers.
2322  *
2323  * A driver using this to program access from a device must use an mmu notifier
2324  * critical section to hold a device-specific lock during programming. Once
2325  * programming is complete it should drop the page lock and reference, after
2326  * which point CPU access to the page will revoke the exclusive access.
2327  */
2328 int make_device_exclusive_range(struct mm_struct *mm, unsigned long start,
2329 				unsigned long end, struct page **pages,
2330 				void *owner)
2331 {
2332 	long npages = (end - start) >> PAGE_SHIFT;
2333 	long i;
2334 
2335 	npages = get_user_pages_remote(mm, start, npages,
2336 				       FOLL_GET | FOLL_WRITE | FOLL_SPLIT_PMD,
2337 				       pages, NULL);
2338 	if (npages < 0)
2339 		return npages;
2340 
2341 	for (i = 0; i < npages; i++, start += PAGE_SIZE) {
2342 		struct folio *folio = page_folio(pages[i]);
2343 		if (PageTail(pages[i]) || !folio_trylock(folio)) {
2344 			folio_put(folio);
2345 			pages[i] = NULL;
2346 			continue;
2347 		}
2348 
2349 		if (!folio_make_device_exclusive(folio, mm, start, owner)) {
2350 			folio_unlock(folio);
2351 			folio_put(folio);
2352 			pages[i] = NULL;
2353 		}
2354 	}
2355 
2356 	return npages;
2357 }
2358 EXPORT_SYMBOL_GPL(make_device_exclusive_range);
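/*
 * A minimal sketch (not part of rmap.c) of a driver claiming one page for
 * exclusive device access, loosely in the spirit of lib/test_hmm.c.
 * grab_page_for_device() and the @owner handling are hypothetical; real
 * drivers program their device page tables inside an MMU-notifier protected
 * critical section before dropping the lock and reference.
 */
static inline bool grab_page_for_device(struct mm_struct *mm,
					unsigned long addr, void *owner)
{
	struct page *page = NULL;
	int ret;

	ret = make_device_exclusive_range(mm, addr, addr + PAGE_SIZE,
					  &page, owner);
	if (ret < 0 || !page)
		return false;

	/*
	 * Device page tables would be programmed here, under the driver's
	 * MMU-notifier-protected lock.  Then drop the folio lock and the GUP
	 * reference; a later CPU fault revokes the exclusive access.
	 */
	unlock_page(page);
	put_page(page);
	return true;
}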
2359 #endif
2360 
2361 void __put_anon_vma(struct anon_vma *anon_vma)
2362 {
2363 	struct anon_vma *root = anon_vma->root;
2364 
2365 	anon_vma_free(anon_vma);
2366 	if (root != anon_vma && atomic_dec_and_test(&root->refcount))
2367 		anon_vma_free(root);
2368 }
2369 
2370 static struct anon_vma *rmap_walk_anon_lock(struct folio *folio,
2371 					    struct rmap_walk_control *rwc)
2372 {
2373 	struct anon_vma *anon_vma;
2374 
2375 	if (rwc->anon_lock)
2376 		return rwc->anon_lock(folio, rwc);
2377 
2378 	/*
2379 	 * Note: remove_migration_ptes() cannot use folio_lock_anon_vma_read()
2380 	 * because that depends on page_mapped(); but not all its usages
2381 	 * are holding mmap_lock. Users without mmap_lock are required to
2382 	 * take a reference count to prevent the anon_vma from disappearing.
2383 	 */
2384 	anon_vma = folio_anon_vma(folio);
2385 	if (!anon_vma)
2386 		return NULL;
2387 
2388 	if (anon_vma_trylock_read(anon_vma))
2389 		goto out;
2390 
2391 	if (rwc->try_lock) {
2392 		anon_vma = NULL;
2393 		rwc->contended = true;
2394 		goto out;
2395 	}
2396 
2397 	anon_vma_lock_read(anon_vma);
2398 out:
2399 	return anon_vma;
2400 }
2401 
2402 /*
2403  * rmap_walk_anon - apply rwc->rmap_one to each VMA mapping an anonymous
2404  * folio, using the object-based rmap method
2405  * @folio: the folio to be handled
2406  * @rwc: rmap walk control describing what to do for each mapping
2407  * @locked: caller holds relevant rmap lock
2408  *
2409  * Find all the mappings of a folio using the mapping pointer and the vma
2410  * chains contained in the anon_vma struct it points to.
2411  */
2412 static void rmap_walk_anon(struct folio *folio,
2413 		struct rmap_walk_control *rwc, bool locked)
2414 {
2415 	struct anon_vma *anon_vma;
2416 	pgoff_t pgoff_start, pgoff_end;
2417 	struct anon_vma_chain *avc;
2418 
2419 	if (locked) {
2420 		anon_vma = folio_anon_vma(folio);
2421 		/* anon_vma disappear under us? */
2422 		VM_BUG_ON_FOLIO(!anon_vma, folio);
2423 	} else {
2424 		anon_vma = rmap_walk_anon_lock(folio, rwc);
2425 	}
2426 	if (!anon_vma)
2427 		return;
2428 
2429 	pgoff_start = folio_pgoff(folio);
2430 	pgoff_end = pgoff_start + folio_nr_pages(folio) - 1;
2431 	anon_vma_interval_tree_foreach(avc, &anon_vma->rb_root,
2432 			pgoff_start, pgoff_end) {
2433 		struct vm_area_struct *vma = avc->vma;
2434 		unsigned long address = vma_address(&folio->page, vma);
2435 
2436 		VM_BUG_ON_VMA(address == -EFAULT, vma);
2437 		cond_resched();
2438 
2439 		if (rwc->invalid_vma && rwc->invalid_vma(vma, rwc->arg))
2440 			continue;
2441 
2442 		if (!rwc->rmap_one(folio, vma, address, rwc->arg))
2443 			break;
2444 		if (rwc->done && rwc->done(folio))
2445 			break;
2446 	}
2447 
2448 	if (!locked)
2449 		anon_vma_unlock_read(anon_vma);
2450 }
2451 
2452 /*
2453  * rmap_walk_file - apply rwc->rmap_one to each VMA mapping a file folio
2454  * @folio: the folio to be handled
2455  * @rwc: rmap walk control describing what to do for each mapping
2456  * @locked: caller holds relevant rmap lock
2457  *
2458  * Find all the mappings of a folio using the mapping pointer and the vma chains
2459  * contained in the address_space struct it points to.
2460  */
2461 static void rmap_walk_file(struct folio *folio,
2462 		struct rmap_walk_control *rwc, bool locked)
2463 {
2464 	struct address_space *mapping = folio_mapping(folio);
2465 	pgoff_t pgoff_start, pgoff_end;
2466 	struct vm_area_struct *vma;
2467 
2468 	/*
2469 	 * The page lock not only makes sure that page->mapping cannot
2470 	 * suddenly be NULLified by truncation, it makes sure that the
2471 	 * structure at mapping cannot be freed and reused yet,
2472 	 * so we can safely take mapping->i_mmap_rwsem.
2473 	 */
2474 	VM_BUG_ON_FOLIO(!folio_test_locked(folio), folio);
2475 
2476 	if (!mapping)
2477 		return;
2478 
2479 	pgoff_start = folio_pgoff(folio);
2480 	pgoff_end = pgoff_start + folio_nr_pages(folio) - 1;
2481 	if (!locked) {
2482 		if (i_mmap_trylock_read(mapping))
2483 			goto lookup;
2484 
2485 		if (rwc->try_lock) {
2486 			rwc->contended = true;
2487 			return;
2488 		}
2489 
2490 		i_mmap_lock_read(mapping);
2491 	}
2492 lookup:
2493 	vma_interval_tree_foreach(vma, &mapping->i_mmap,
2494 			pgoff_start, pgoff_end) {
2495 		unsigned long address = vma_address(&folio->page, vma);
2496 
2497 		VM_BUG_ON_VMA(address == -EFAULT, vma);
2498 		cond_resched();
2499 
2500 		if (rwc->invalid_vma && rwc->invalid_vma(vma, rwc->arg))
2501 			continue;
2502 
2503 		if (!rwc->rmap_one(folio, vma, address, rwc->arg))
2504 			goto done;
2505 		if (rwc->done && rwc->done(folio))
2506 			goto done;
2507 	}
2508 
2509 done:
2510 	if (!locked)
2511 		i_mmap_unlock_read(mapping);
2512 }
2513 
2514 void rmap_walk(struct folio *folio, struct rmap_walk_control *rwc)
2515 {
2516 	if (unlikely(folio_test_ksm(folio)))
2517 		rmap_walk_ksm(folio, rwc);
2518 	else if (folio_test_anon(folio))
2519 		rmap_walk_anon(folio, rwc, false);
2520 	else
2521 		rmap_walk_file(folio, rwc, false);
2522 }
2523 
2524 /* Like rmap_walk, but caller holds relevant rmap lock */
2525 void rmap_walk_locked(struct folio *folio, struct rmap_walk_control *rwc)
2526 {
2527 	/* no ksm support for now */
2528 	VM_BUG_ON_FOLIO(folio_test_ksm(folio), folio);
2529 	if (folio_test_anon(folio))
2530 		rmap_walk_anon(folio, rwc, true);
2531 	else
2532 		rmap_walk_file(folio, rwc, true);
2533 }
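/*
 * A minimal sketch (not part of rmap.c) of a custom rmap walk in the style
 * of folio_referenced().  count_mappings_one() and count_folio_mappings()
 * are hypothetical names, and the caller is assumed to hold the folio lock;
 * a real walker would use page_vma_mapped_walk() inside its rmap_one
 * callback to inspect the actual ptes.
 */
static bool count_mappings_one(struct folio *folio, struct vm_area_struct *vma,
			       unsigned long address, void *arg)
{
	int *count = arg;

	(*count)++;
	return true;	/* true means: keep walking the remaining VMAs */
}

static int count_folio_mappings(struct folio *folio)
{
	int count = 0;
	struct rmap_walk_control rwc = {
		.rmap_one = count_mappings_one,
		.arg = &count,
		.anon_lock = folio_lock_anon_vma_read,
	};

	rmap_walk(folio, &rwc);
	return count;
}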
2534 
2535 #ifdef CONFIG_HUGETLB_PAGE
2536 /*
2537  * The following two functions are for anonymous (private mapped) hugepages.
2538  * Unlike common anonymous pages, anonymous hugepages have no accounting code
2539  * and no lru code, because we handle hugepages differently from common pages.
2540  *
2541  * RMAP_COMPOUND is ignored.
2542  */
2543 void hugepage_add_anon_rmap(struct page *page, struct vm_area_struct *vma,
2544 			    unsigned long address, rmap_t flags)
2545 {
2546 	struct folio *folio = page_folio(page);
2547 	struct anon_vma *anon_vma = vma->anon_vma;
2548 	int first;
2549 
2550 	BUG_ON(!folio_test_locked(folio));
2551 	BUG_ON(!anon_vma);
2552 	/* address might be in next vma when migration races vma_merge */
2553 	first = atomic_inc_and_test(&folio->_entire_mapcount);
2554 	VM_BUG_ON_PAGE(!first && (flags & RMAP_EXCLUSIVE), page);
2555 	VM_BUG_ON_PAGE(!first && PageAnonExclusive(page), page);
2556 	if (first)
2557 		__page_set_anon_rmap(folio, page, vma, address,
2558 				     !!(flags & RMAP_EXCLUSIVE));
2559 }
2560 
2561 void hugepage_add_new_anon_rmap(struct folio *folio,
2562 			struct vm_area_struct *vma, unsigned long address)
2563 {
2564 	BUG_ON(address < vma->vm_start || address >= vma->vm_end);
2565 	/* increment count (starts at -1) */
2566 	atomic_set(&folio->_entire_mapcount, 0);
2567 	folio_clear_hugetlb_restore_reserve(folio);
2568 	__page_set_anon_rmap(folio, &folio->page, vma, address, 1);
2569 }
2570 #endif /* CONFIG_HUGETLB_PAGE */
2571