xref: /openbmc/linux/mm/rmap.c (revision 54a611b6)
1 /*
2  * mm/rmap.c - physical to virtual reverse mappings
3  *
4  * Copyright 2001, Rik van Riel <riel@conectiva.com.br>
5  * Released under the General Public License (GPL).
6  *
7  * Simple, low overhead reverse mapping scheme.
8  * Please try to keep this thing as modular as possible.
9  *
10  * Provides methods for unmapping each kind of mapped page:
11  * the anon methods track anonymous pages, and
12  * the file methods track pages belonging to an inode.
13  *
14  * Original design by Rik van Riel <riel@conectiva.com.br> 2001
15  * File methods by Dave McCracken <dmccr@us.ibm.com> 2003, 2004
16  * Anonymous methods by Andrea Arcangeli <andrea@suse.de> 2004
17  * Contributions by Hugh Dickins 2003, 2004
18  */
19 
20 /*
21  * Lock ordering in mm:
22  *
23  * inode->i_rwsem	(while writing or truncating, not reading or faulting)
24  *   mm->mmap_lock
25  *     mapping->invalidate_lock (in filemap_fault)
26  *       page->flags PG_locked (lock_page)   * (see hugetlbfs below)
27  *         hugetlbfs_i_mmap_rwsem_key (in huge_pmd_share)
28  *           mapping->i_mmap_rwsem
29  *             hugetlb_fault_mutex (hugetlbfs specific page fault mutex)
30  *             anon_vma->rwsem
31  *               mm->page_table_lock or pte_lock
32  *                 swap_lock (in swap_duplicate, swap_info_get)
33  *                   mmlist_lock (in mmput, drain_mmlist and others)
34  *                   mapping->private_lock (in block_dirty_folio)
35  *                     folio_lock_memcg move_lock (in block_dirty_folio)
36  *                       i_pages lock (widely used)
37  *                         lruvec->lru_lock (in folio_lruvec_lock_irq)
38  *                   inode->i_lock (in set_page_dirty's __mark_inode_dirty)
39  *                   bdi.wb->list_lock (in set_page_dirty's __mark_inode_dirty)
40  *                     sb_lock (within inode_lock in fs/fs-writeback.c)
41  *                     i_pages lock (widely used, in set_page_dirty,
42  *                               in arch-dependent flush_dcache_mmap_lock,
43  *                               within bdi.wb->list_lock in __sync_single_inode)
44  *
45  * anon_vma->rwsem,mapping->i_mmap_rwsem   (memory_failure, collect_procs_anon)
46  *   ->tasklist_lock
47  *     pte map lock
48  *
49  * * hugetlbfs PageHuge() pages take locks in this order:
50  *         mapping->i_mmap_rwsem
51  *           hugetlb_fault_mutex (hugetlbfs specific page fault mutex)
52  *             page->flags PG_locked (lock_page)
53  */
54 
55 #include <linux/mm.h>
56 #include <linux/sched/mm.h>
57 #include <linux/sched/task.h>
58 #include <linux/pagemap.h>
59 #include <linux/swap.h>
60 #include <linux/swapops.h>
61 #include <linux/slab.h>
62 #include <linux/init.h>
63 #include <linux/ksm.h>
64 #include <linux/rmap.h>
65 #include <linux/rcupdate.h>
66 #include <linux/export.h>
67 #include <linux/memcontrol.h>
68 #include <linux/mmu_notifier.h>
69 #include <linux/migrate.h>
70 #include <linux/hugetlb.h>
71 #include <linux/huge_mm.h>
72 #include <linux/backing-dev.h>
73 #include <linux/page_idle.h>
74 #include <linux/memremap.h>
75 #include <linux/userfaultfd_k.h>
76 #include <linux/mm_inline.h>
77 
78 #include <asm/tlbflush.h>
79 
80 #define CREATE_TRACE_POINTS
81 #include <trace/events/tlb.h>
82 #include <trace/events/migrate.h>
83 
84 #include "internal.h"
85 
86 static struct kmem_cache *anon_vma_cachep;
87 static struct kmem_cache *anon_vma_chain_cachep;
88 
89 static inline struct anon_vma *anon_vma_alloc(void)
90 {
91 	struct anon_vma *anon_vma;
92 
93 	anon_vma = kmem_cache_alloc(anon_vma_cachep, GFP_KERNEL);
94 	if (anon_vma) {
95 		atomic_set(&anon_vma->refcount, 1);
96 		anon_vma->degree = 1;	/* Reference for first vma */
97 		anon_vma->parent = anon_vma;
98 		/*
99 		 * Initialise the anon_vma root to point to itself. If called
100 		 * from fork, the root will be reset to the parent's anon_vma.
101 		 */
102 		anon_vma->root = anon_vma;
103 	}
104 
105 	return anon_vma;
106 }
107 
108 static inline void anon_vma_free(struct anon_vma *anon_vma)
109 {
110 	VM_BUG_ON(atomic_read(&anon_vma->refcount));
111 
112 	/*
113 	 * Synchronize against folio_lock_anon_vma_read() such that
114 	 * we can safely hold the lock without the anon_vma getting
115 	 * freed.
116 	 *
117 	 * Relies on the full mb implied by the atomic_dec_and_test() from
118 	 * put_anon_vma() against the acquire barrier implied by
119 	 * down_read_trylock() from folio_lock_anon_vma_read(). This orders:
120 	 *
121 	 * folio_lock_anon_vma_read()	VS	put_anon_vma()
122 	 *   down_read_trylock()		  atomic_dec_and_test()
123 	 *   LOCK				  MB
124 	 *   atomic_read()			  rwsem_is_locked()
125 	 *
126 	 * LOCK should suffice since the actual taking of the lock must
127 	 * happen _before_ what follows.
128 	 */
129 	might_sleep();
130 	if (rwsem_is_locked(&anon_vma->root->rwsem)) {
131 		anon_vma_lock_write(anon_vma);
132 		anon_vma_unlock_write(anon_vma);
133 	}
134 
135 	kmem_cache_free(anon_vma_cachep, anon_vma);
136 }
137 
138 static inline struct anon_vma_chain *anon_vma_chain_alloc(gfp_t gfp)
139 {
140 	return kmem_cache_alloc(anon_vma_chain_cachep, gfp);
141 }
142 
143 static void anon_vma_chain_free(struct anon_vma_chain *anon_vma_chain)
144 {
145 	kmem_cache_free(anon_vma_chain_cachep, anon_vma_chain);
146 }
147 
148 static void anon_vma_chain_link(struct vm_area_struct *vma,
149 				struct anon_vma_chain *avc,
150 				struct anon_vma *anon_vma)
151 {
152 	avc->vma = vma;
153 	avc->anon_vma = anon_vma;
154 	list_add(&avc->same_vma, &vma->anon_vma_chain);
155 	anon_vma_interval_tree_insert(avc, &anon_vma->rb_root);
156 }
157 
158 /**
159  * __anon_vma_prepare - attach an anon_vma to a memory region
160  * @vma: the memory region in question
161  *
162  * This makes sure the memory mapping described by 'vma' has
163  * an 'anon_vma' attached to it, so that we can associate the
164  * anonymous pages mapped into it with that anon_vma.
165  *
166  * The common case will be that we already have one, which
167  * is handled inline by anon_vma_prepare(). But if
168  * not, we either need to find an adjacent mapping that we
169  * can re-use the anon_vma from (very common when the only
170  * reason for splitting a vma has been mprotect()), or we
171  * allocate a new one.
172  *
173  * Anon-vma allocations are very subtle, because we may have
174  * optimistically looked up an anon_vma in folio_lock_anon_vma_read()
175  * and that may actually touch the rwsem even in the newly
176  * allocated vma (it depends on RCU to make sure that the
177  * anon_vma isn't actually destroyed).
178  *
179  * As a result, we need to do proper anon_vma locking even
180  * for the new allocation. At the same time, we do not want
181  * to do any locking for the common case of already having
182  * an anon_vma.
183  *
184  * This must be called with the mmap_lock held for reading.
185  */
186 int __anon_vma_prepare(struct vm_area_struct *vma)
187 {
188 	struct mm_struct *mm = vma->vm_mm;
189 	struct anon_vma *anon_vma, *allocated;
190 	struct anon_vma_chain *avc;
191 
192 	might_sleep();
193 
194 	avc = anon_vma_chain_alloc(GFP_KERNEL);
195 	if (!avc)
196 		goto out_enomem;
197 
198 	anon_vma = find_mergeable_anon_vma(vma);
199 	allocated = NULL;
200 	if (!anon_vma) {
201 		anon_vma = anon_vma_alloc();
202 		if (unlikely(!anon_vma))
203 			goto out_enomem_free_avc;
204 		allocated = anon_vma;
205 	}
206 
207 	anon_vma_lock_write(anon_vma);
208 	/* page_table_lock to protect against threads */
209 	spin_lock(&mm->page_table_lock);
210 	if (likely(!vma->anon_vma)) {
211 		vma->anon_vma = anon_vma;
212 		anon_vma_chain_link(vma, avc, anon_vma);
213 		/* vma reference or self-parent link for new root */
214 		anon_vma->degree++;
215 		allocated = NULL;
216 		avc = NULL;
217 	}
218 	spin_unlock(&mm->page_table_lock);
219 	anon_vma_unlock_write(anon_vma);
220 
221 	if (unlikely(allocated))
222 		put_anon_vma(allocated);
223 	if (unlikely(avc))
224 		anon_vma_chain_free(avc);
225 
226 	return 0;
227 
228  out_enomem_free_avc:
229 	anon_vma_chain_free(avc);
230  out_enomem:
231 	return -ENOMEM;
232 }
233 
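/*
 * For reference, the "handled inline" fast path mentioned above lives in
 * include/linux/rmap.h rather than in this file.  As a rough sketch (it may
 * differ in detail from this revision), the wrapper looks like:
 *
 *	static inline int anon_vma_prepare(struct vm_area_struct *vma)
 *	{
 *		if (likely(vma->anon_vma))
 *			return 0;
 *
 *		return __anon_vma_prepare(vma);
 *	}
 *
 * so __anon_vma_prepare() only runs for a vma that does not yet have an
 * anon_vma attached.
 */
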
234 /*
235  * This is a useful helper function for locking the anon_vma root as
236  * we traverse the vma->anon_vma_chain, looping over anon_vma's that
237  * have the same vma.
238  *
239  * Such anon_vma's should have the same root, so you'd expect to see
240  * just a single mutex_lock for the whole traversal.
241  * just a single down_write for the whole traversal.
242 static inline struct anon_vma *lock_anon_vma_root(struct anon_vma *root, struct anon_vma *anon_vma)
243 {
244 	struct anon_vma *new_root = anon_vma->root;
245 	if (new_root != root) {
246 		if (WARN_ON_ONCE(root))
247 			up_write(&root->rwsem);
248 		root = new_root;
249 		down_write(&root->rwsem);
250 	}
251 	return root;
252 }
253 
254 static inline void unlock_anon_vma_root(struct anon_vma *root)
255 {
256 	if (root)
257 		up_write(&root->rwsem);
258 }
259 
260 /*
261  * Attach the anon_vmas from src to dst.
262  * Returns 0 on success, -ENOMEM on failure.
263  *
264  * anon_vma_clone() is called by __vma_adjust(), __split_vma(), copy_vma() and
265  * anon_vma_fork(). The first three want an exact copy of src, while the last
266  * one, anon_vma_fork(), may try to reuse an existing anon_vma to prevent
267  * endless growth of anon_vma. Since dst->anon_vma is set to NULL before the call,
268  * we can identify this case by checking (!dst->anon_vma && src->anon_vma).
269  *
270  * If (!dst->anon_vma && src->anon_vma) is true, this function tries to find
271  * and reuse existing anon_vma which has no vmas and only one child anon_vma.
272  * This prevents the anon_vma hierarchy from degrading into an endless linear
273  * chain in the case of a constantly forking task. On the other hand, an anon_vma
274  * with more than one child isn't reused even if there is no vma left alive, so
275  * the rmap walker has a good chance of avoiding a scan of the whole hierarchy
276  * when it searches for where a page is mapped.
277  */
278 int anon_vma_clone(struct vm_area_struct *dst, struct vm_area_struct *src)
279 {
280 	struct anon_vma_chain *avc, *pavc;
281 	struct anon_vma *root = NULL;
282 
283 	list_for_each_entry_reverse(pavc, &src->anon_vma_chain, same_vma) {
284 		struct anon_vma *anon_vma;
285 
286 		avc = anon_vma_chain_alloc(GFP_NOWAIT | __GFP_NOWARN);
287 		if (unlikely(!avc)) {
288 			unlock_anon_vma_root(root);
289 			root = NULL;
290 			avc = anon_vma_chain_alloc(GFP_KERNEL);
291 			if (!avc)
292 				goto enomem_failure;
293 		}
294 		anon_vma = pavc->anon_vma;
295 		root = lock_anon_vma_root(root, anon_vma);
296 		anon_vma_chain_link(dst, avc, anon_vma);
297 
298 		/*
299 		 * Reuse the existing anon_vma if its degree is lower than two,
300 		 * which means it has no vma and only one anon_vma child.
301 		 *
302 		 * Do not choose the parent anon_vma, otherwise the first child
303 		 * will always reuse it. The root anon_vma is never reused:
304 		 * it has a self-parent reference and at least one child.
305 		 */
306 		if (!dst->anon_vma && src->anon_vma &&
307 		    anon_vma != src->anon_vma && anon_vma->degree < 2)
308 			dst->anon_vma = anon_vma;
309 	}
310 	if (dst->anon_vma)
311 		dst->anon_vma->degree++;
312 	unlock_anon_vma_root(root);
313 	return 0;
314 
315  enomem_failure:
316 	/*
317 	 * dst->anon_vma is dropped here otherwise its degree can be incorrectly
318 	 * decremented in unlink_anon_vmas().
319 	 * We can safely do this because callers of anon_vma_clone() don't care
320 	 * about dst->anon_vma if anon_vma_clone() failed.
321 	 */
322 	dst->anon_vma = NULL;
323 	unlink_anon_vmas(dst);
324 	return -ENOMEM;
325 }
326 
327 /*
328  * Attach vma to its own anon_vma, as well as to the anon_vmas that
329  * the corresponding VMA in the parent process is attached to.
330  * Returns 0 on success, non-zero on failure.
331  */
332 int anon_vma_fork(struct vm_area_struct *vma, struct vm_area_struct *pvma)
333 {
334 	struct anon_vma_chain *avc;
335 	struct anon_vma *anon_vma;
336 	int error;
337 
338 	/* Don't bother if the parent process has no anon_vma here. */
339 	if (!pvma->anon_vma)
340 		return 0;
341 
342 	/* Drop inherited anon_vma, we'll reuse existing or allocate new. */
343 	vma->anon_vma = NULL;
344 
345 	/*
346 	 * First, attach the new VMA to the parent VMA's anon_vmas,
347 	 * so rmap can find non-COWed pages in child processes.
348 	 */
349 	error = anon_vma_clone(vma, pvma);
350 	if (error)
351 		return error;
352 
353 	/* An existing anon_vma has been reused, all done then. */
354 	if (vma->anon_vma)
355 		return 0;
356 
357 	/* Then add our own anon_vma. */
358 	anon_vma = anon_vma_alloc();
359 	if (!anon_vma)
360 		goto out_error;
361 	avc = anon_vma_chain_alloc(GFP_KERNEL);
362 	if (!avc)
363 		goto out_error_free_anon_vma;
364 
365 	/*
366 	 * The root anon_vma's rwsem is the lock actually used when we
367 	 * lock any of the anon_vmas in this anon_vma tree.
368 	 */
369 	anon_vma->root = pvma->anon_vma->root;
370 	anon_vma->parent = pvma->anon_vma;
371 	/*
372 	 * With refcounts, an anon_vma can stay around longer than the
373 	 * process it belongs to. The root anon_vma needs to be pinned until
374 	 * this anon_vma is freed, because the lock lives in the root.
375 	 */
376 	get_anon_vma(anon_vma->root);
377 	/* Mark this anon_vma as the one where our new (COWed) pages go. */
378 	vma->anon_vma = anon_vma;
379 	anon_vma_lock_write(anon_vma);
380 	anon_vma_chain_link(vma, avc, anon_vma);
381 	anon_vma->parent->degree++;
382 	anon_vma_unlock_write(anon_vma);
383 
384 	return 0;
385 
386  out_error_free_anon_vma:
387 	put_anon_vma(anon_vma);
388  out_error:
389 	unlink_anon_vmas(vma);
390 	return -ENOMEM;
391 }
392 
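/*
 * Sketch of the fork-time call site: dup_mmap() in kernel/fork.c walks the
 * parent's vmas and, for each copy, does roughly the following (simplified,
 * and possibly differing in detail from this revision):
 *
 *	tmp = vm_area_dup(mpnt);	(child's copy of the parent vma)
 *	...
 *	if (anon_vma_fork(tmp, mpnt))
 *		goto fail_nomem_anon_vma_fork;
 *
 * On failure the partially copied mm is torn down; on success the child vma
 * is chained to all of the parent's anon_vmas plus one of its own.
 */
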
393 void unlink_anon_vmas(struct vm_area_struct *vma)
394 {
395 	struct anon_vma_chain *avc, *next;
396 	struct anon_vma *root = NULL;
397 
398 	/*
399 	 * Unlink each anon_vma chained to the VMA.  This list is ordered
400 	 * from newest to oldest, ensuring the root anon_vma gets freed last.
401 	 */
402 	list_for_each_entry_safe(avc, next, &vma->anon_vma_chain, same_vma) {
403 		struct anon_vma *anon_vma = avc->anon_vma;
404 
405 		root = lock_anon_vma_root(root, anon_vma);
406 		anon_vma_interval_tree_remove(avc, &anon_vma->rb_root);
407 
408 		/*
409 		 * Leave empty anon_vmas on the list - we'll need
410 		 * to free them outside the lock.
411 		 */
412 		if (RB_EMPTY_ROOT(&anon_vma->rb_root.rb_root)) {
413 			anon_vma->parent->degree--;
414 			continue;
415 		}
416 
417 		list_del(&avc->same_vma);
418 		anon_vma_chain_free(avc);
419 	}
420 	if (vma->anon_vma) {
421 		vma->anon_vma->degree--;
422 
423 		/*
424 		 * The vma is still needed after unlink, and a new anon_vma will be
425 		 * prepared when a fault is handled.
426 		 */
427 		vma->anon_vma = NULL;
428 	}
429 	unlock_anon_vma_root(root);
430 
431 	/*
432 	 * Iterate the list once more; it now contains only empty and unlinked
433 	 * anon_vmas, so destroy them. This could not be done earlier because
434 	 * __put_anon_vma() may need to write-acquire the anon_vma->root->rwsem.
435 	 */
436 	list_for_each_entry_safe(avc, next, &vma->anon_vma_chain, same_vma) {
437 		struct anon_vma *anon_vma = avc->anon_vma;
438 
439 		VM_WARN_ON(anon_vma->degree);
440 		put_anon_vma(anon_vma);
441 
442 		list_del(&avc->same_vma);
443 		anon_vma_chain_free(avc);
444 	}
445 }
446 
447 static void anon_vma_ctor(void *data)
448 {
449 	struct anon_vma *anon_vma = data;
450 
451 	init_rwsem(&anon_vma->rwsem);
452 	atomic_set(&anon_vma->refcount, 0);
453 	anon_vma->rb_root = RB_ROOT_CACHED;
454 }
455 
456 void __init anon_vma_init(void)
457 {
458 	anon_vma_cachep = kmem_cache_create("anon_vma", sizeof(struct anon_vma),
459 			0, SLAB_TYPESAFE_BY_RCU|SLAB_PANIC|SLAB_ACCOUNT,
460 			anon_vma_ctor);
461 	anon_vma_chain_cachep = KMEM_CACHE(anon_vma_chain,
462 			SLAB_PANIC|SLAB_ACCOUNT);
463 }
464 
465 /*
466  * Getting a lock on a stable anon_vma from a page off the LRU is tricky!
467  *
468  * Since there is no serialization whatsoever against page_remove_rmap(),
469  * the best this function can do is return a refcount-increased anon_vma
470  * that might have been relevant to this page.
471  *
472  * The page might have been remapped to a different anon_vma or the anon_vma
473  * returned may already be freed (and even reused).
474  *
475  * In case it was remapped to a different anon_vma, the new anon_vma will be a
476  * child of the old anon_vma, and the anon_vma lifetime rules will therefore
477  * ensure that any anon_vma obtained from the page will still be valid for as
478  * long as we observe page_mapped() [ hence all those page_mapped() tests ].
479  *
480  * All users of this function must be very careful when walking the anon_vma
481  * chain and verify that the page in question is indeed mapped in it
482  * [ something equivalent to page_mapped_in_vma() ].
483  *
484  * Since anon_vma's slab is SLAB_TYPESAFE_BY_RCU and we know from
485  * page_remove_rmap() that the anon_vma pointer from page->mapping is valid
486  * if there is a mapcount, we can dereference the anon_vma after observing
487  * those.
488  */
489 struct anon_vma *page_get_anon_vma(struct page *page)
490 {
491 	struct anon_vma *anon_vma = NULL;
492 	unsigned long anon_mapping;
493 
494 	rcu_read_lock();
495 	anon_mapping = (unsigned long)READ_ONCE(page->mapping);
496 	if ((anon_mapping & PAGE_MAPPING_FLAGS) != PAGE_MAPPING_ANON)
497 		goto out;
498 	if (!page_mapped(page))
499 		goto out;
500 
501 	anon_vma = (struct anon_vma *) (anon_mapping - PAGE_MAPPING_ANON);
502 	if (!atomic_inc_not_zero(&anon_vma->refcount)) {
503 		anon_vma = NULL;
504 		goto out;
505 	}
506 
507 	/*
508 	 * If this page is still mapped, then its anon_vma cannot have been
509 	 * freed.  But if it has been unmapped, we have no protection against the
510 	 * anon_vma structure being freed and reused (for another anon_vma:
511 	 * SLAB_TYPESAFE_BY_RCU guarantees that - so the atomic_inc_not_zero()
512 	 * above cannot corrupt).
513 	 */
514 	if (!page_mapped(page)) {
515 		rcu_read_unlock();
516 		put_anon_vma(anon_vma);
517 		return NULL;
518 	}
519 out:
520 	rcu_read_unlock();
521 
522 	return anon_vma;
523 }
524 
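/*
 * Sketch of the typical caller pattern (page migration, for example, does
 * something along these lines; exact call sites may differ from this
 * revision):
 *
 *	struct anon_vma *anon_vma = page_get_anon_vma(page);
 *
 *	if (anon_vma) {
 *		anon_vma_lock_read(anon_vma);
 *		... walk anon_vma->rb_root, re-checking page_mapped() ...
 *		anon_vma_unlock_read(anon_vma);
 *		put_anon_vma(anon_vma);
 *	}
 *
 * The reference taken here is what keeps the anon_vma (and hence the root
 * rwsem) alive across the walk, per the rules described above.
 */
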
525 /*
526  * Similar to page_get_anon_vma() except it locks the anon_vma.
527  *
528  * It's a little more complex as it tries to keep the fast path to a single
529  * atomic op -- the trylock. If the trylock fails, we fall back to getting a
530  * reference as with page_get_anon_vma() and then block on the rwsem
531  * in the !rwc->try_lock case.
532  */
533 struct anon_vma *folio_lock_anon_vma_read(struct folio *folio,
534 					  struct rmap_walk_control *rwc)
535 {
536 	struct anon_vma *anon_vma = NULL;
537 	struct anon_vma *root_anon_vma;
538 	unsigned long anon_mapping;
539 
540 	rcu_read_lock();
541 	anon_mapping = (unsigned long)READ_ONCE(folio->mapping);
542 	if ((anon_mapping & PAGE_MAPPING_FLAGS) != PAGE_MAPPING_ANON)
543 		goto out;
544 	if (!folio_mapped(folio))
545 		goto out;
546 
547 	anon_vma = (struct anon_vma *) (anon_mapping - PAGE_MAPPING_ANON);
548 	root_anon_vma = READ_ONCE(anon_vma->root);
549 	if (down_read_trylock(&root_anon_vma->rwsem)) {
550 		/*
551 		 * If the folio is still mapped, then this anon_vma is still
552 		 * its anon_vma, and holding the mutex ensures that it will
553 		 * not go away, see anon_vma_free().
554 		 */
555 		if (!folio_mapped(folio)) {
556 			up_read(&root_anon_vma->rwsem);
557 			anon_vma = NULL;
558 		}
559 		goto out;
560 	}
561 
562 	if (rwc && rwc->try_lock) {
563 		anon_vma = NULL;
564 		rwc->contended = true;
565 		goto out;
566 	}
567 
568 	/* trylock failed, we have to sleep */
569 	if (!atomic_inc_not_zero(&anon_vma->refcount)) {
570 		anon_vma = NULL;
571 		goto out;
572 	}
573 
574 	if (!folio_mapped(folio)) {
575 		rcu_read_unlock();
576 		put_anon_vma(anon_vma);
577 		return NULL;
578 	}
579 
580 	/* we pinned the anon_vma, it's safe to sleep */
581 	rcu_read_unlock();
582 	anon_vma_lock_read(anon_vma);
583 
584 	if (atomic_dec_and_test(&anon_vma->refcount)) {
585 		/*
586 		 * Oops, we held the last refcount, release the lock
587 		 * and bail -- can't simply use put_anon_vma() because
588 		 * we'll deadlock on the anon_vma_lock_write() recursion.
589 		 */
590 		anon_vma_unlock_read(anon_vma);
591 		__put_anon_vma(anon_vma);
592 		anon_vma = NULL;
593 	}
594 
595 	return anon_vma;
596 
597 out:
598 	rcu_read_unlock();
599 	return anon_vma;
600 }
601 
602 void page_unlock_anon_vma_read(struct anon_vma *anon_vma)
603 {
604 	anon_vma_unlock_read(anon_vma);
605 }
606 
607 #ifdef CONFIG_ARCH_WANT_BATCHED_UNMAP_TLB_FLUSH
608 /*
609  * Flush TLB entries for recently unmapped pages from remote CPUs. It is
610  * important if a PTE was dirty when it was unmapped that it's flushed
611  * before any IO is initiated on the page to prevent lost writes. Similarly,
612  * it must be flushed before freeing to prevent data leakage.
613  */
614 void try_to_unmap_flush(void)
615 {
616 	struct tlbflush_unmap_batch *tlb_ubc = &current->tlb_ubc;
617 
618 	if (!tlb_ubc->flush_required)
619 		return;
620 
621 	arch_tlbbatch_flush(&tlb_ubc->arch);
622 	tlb_ubc->flush_required = false;
623 	tlb_ubc->writable = false;
624 }
625 
626 /* Flush iff there are potentially writable TLB entries that can race with IO */
627 void try_to_unmap_flush_dirty(void)
628 {
629 	struct tlbflush_unmap_batch *tlb_ubc = &current->tlb_ubc;
630 
631 	if (tlb_ubc->writable)
632 		try_to_unmap_flush();
633 }
634 
635 /*
636  * Bits 0-14 of mm->tlb_flush_batched record pending generations.
637  * Bits 16-30 of mm->tlb_flush_batched record flushed generations.
638  */
639 #define TLB_FLUSH_BATCH_FLUSHED_SHIFT	16
640 #define TLB_FLUSH_BATCH_PENDING_MASK			\
641 	((1 << (TLB_FLUSH_BATCH_FLUSHED_SHIFT - 1)) - 1)
642 #define TLB_FLUSH_BATCH_PENDING_LARGE			\
643 	(TLB_FLUSH_BATCH_PENDING_MASK / 2)
644 
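/*
 * The two generation counters are packed into a single atomic_t, so decoding
 * is just a mask and a shift, exactly as flush_tlb_batched_pending() below
 * does:
 *
 *	int batch   = atomic_read(&mm->tlb_flush_batched);
 *	int pending = batch & TLB_FLUSH_BATCH_PENDING_MASK;
 *	int flushed = batch >> TLB_FLUSH_BATCH_FLUSHED_SHIFT;
 *
 * Bit 15 is used by neither field.
 */
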
645 static void set_tlb_ubc_flush_pending(struct mm_struct *mm, bool writable)
646 {
647 	struct tlbflush_unmap_batch *tlb_ubc = &current->tlb_ubc;
648 	int batch, nbatch;
649 
650 	arch_tlbbatch_add_mm(&tlb_ubc->arch, mm);
651 	tlb_ubc->flush_required = true;
652 
653 	/*
654 	 * Ensure compiler does not re-order the setting of tlb_flush_batched
655 	 * before the PTE is cleared.
656 	 */
657 	barrier();
658 	batch = atomic_read(&mm->tlb_flush_batched);
659 retry:
660 	if ((batch & TLB_FLUSH_BATCH_PENDING_MASK) > TLB_FLUSH_BATCH_PENDING_LARGE) {
661 		/*
662 		 * Prevent `pending' from catching up with `flushed' because of
663 		 * overflow.  Reset `pending' and `flushed' to be 1 and 0 if
664 		 * `pending' becomes large.
665 		 */
666 		nbatch = atomic_cmpxchg(&mm->tlb_flush_batched, batch, 1);
667 		if (nbatch != batch) {
668 			batch = nbatch;
669 			goto retry;
670 		}
671 	} else {
672 		atomic_inc(&mm->tlb_flush_batched);
673 	}
674 
675 	/*
676 	 * If the PTE was dirty then it's best to assume it's writable. The
677 	 * caller must use try_to_unmap_flush_dirty() or try_to_unmap_flush()
678 	 * before the page is queued for IO.
679 	 */
680 	if (writable)
681 		tlb_ubc->writable = true;
682 }
683 
684 /*
685  * Returns true if the TLB flush should be deferred to the end of a batch of
686  * unmap operations to reduce IPIs.
687  */
688 static bool should_defer_flush(struct mm_struct *mm, enum ttu_flags flags)
689 {
690 	bool should_defer = false;
691 
692 	if (!(flags & TTU_BATCH_FLUSH))
693 		return false;
694 
695 	/* If remote CPUs need to be flushed then defer and batch the flush */
696 	if (cpumask_any_but(mm_cpumask(mm), get_cpu()) < nr_cpu_ids)
697 		should_defer = true;
698 	put_cpu();
699 
700 	return should_defer;
701 }
702 
703 /*
704  * Reclaim unmaps pages under the PTL but does not flush the TLB prior to
705  * releasing the PTL if TLB flushes are batched. It's possible for a parallel
706  * operation such as mprotect or munmap to race between reclaim unmapping
707  * the page and flushing the page. If this race occurs, it potentially allows
708  * access to data via a stale TLB entry. Tracking all mm's that have TLB
709  * batching in flight would be expensive during reclaim so instead track
710  * whether TLB batching occurred in the past and if so then do a flush here
711  * if required. This will cost one additional flush per reclaim cycle paid
712  * by the first operation at risk such as mprotect and munmap.
713  *
714  * This must be called under the PTL so that an access to tlb_flush_batched
715  * that is potentially a "reclaim vs mprotect/munmap/etc" race will synchronise
716  * via the PTL.
717  */
718 void flush_tlb_batched_pending(struct mm_struct *mm)
719 {
720 	int batch = atomic_read(&mm->tlb_flush_batched);
721 	int pending = batch & TLB_FLUSH_BATCH_PENDING_MASK;
722 	int flushed = batch >> TLB_FLUSH_BATCH_FLUSHED_SHIFT;
723 
724 	if (pending != flushed) {
725 		flush_tlb_mm(mm);
726 		/*
727 		 * If new TLB flushes became pending while we were flushing, leave
728 		 * mm->tlb_flush_batched as is, so that those flushes are not lost.
729 		 */
730 		atomic_cmpxchg(&mm->tlb_flush_batched, batch,
731 			       pending | (pending << TLB_FLUSH_BATCH_FLUSHED_SHIFT));
732 	}
733 }
734 #else
735 static void set_tlb_ubc_flush_pending(struct mm_struct *mm, bool writable)
736 {
737 }
738 
739 static bool should_defer_flush(struct mm_struct *mm, enum ttu_flags flags)
740 {
741 	return false;
742 }
743 #endif /* CONFIG_ARCH_WANT_BATCHED_UNMAP_TLB_FLUSH */
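
/*
 * Sketch of the caller side (mprotect/zap style paths do roughly this;
 * details may differ from this revision): the check must happen after the
 * PTL is taken and before any PTE in the range is modified.
 *
 *	pte = pte_offset_map_lock(mm, pmd, addr, &ptl);
 *	flush_tlb_batched_pending(mm);
 *	do {
 *		... read and modify *pte ...
 *	} while (pte++, addr += PAGE_SIZE, addr != end);
 *	pte_unmap_unlock(pte - 1, ptl);
 *
 * Holding the PTL is what orders this check against a reclaimer that batched
 * a flush for the same mm, as the comment above explains.
 */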
744 
745 /*
746  * At what user virtual address is page expected in vma?
747  * Caller should check the page is actually part of the vma.
748  */
749 unsigned long page_address_in_vma(struct page *page, struct vm_area_struct *vma)
750 {
751 	struct folio *folio = page_folio(page);
752 	if (folio_test_anon(folio)) {
753 		struct anon_vma *page__anon_vma = folio_anon_vma(folio);
754 		/*
755 		 * Note: swapoff's unuse_vma() is more efficient with this
756 		 * check, and needs it to match anon_vma when KSM is active.
757 		 */
758 		if (!vma->anon_vma || !page__anon_vma ||
759 		    vma->anon_vma->root != page__anon_vma->root)
760 			return -EFAULT;
761 	} else if (!vma->vm_file) {
762 		return -EFAULT;
763 	} else if (vma->vm_file->f_mapping != folio->mapping) {
764 		return -EFAULT;
765 	}
766 
767 	return vma_address(page, vma);
768 }
769 
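/*
 * The address computation done by vma_address() is, at its core, simple
 * linear-offset arithmetic (sketch only; the real helper also range-checks
 * against the vma and yields -EFAULT when the page falls outside it):
 *
 *	pgoff_t pgoff = page_to_pgoff(page);
 *	unsigned long address = vma->vm_start +
 *			((pgoff - vma->vm_pgoff) << PAGE_SHIFT);
 *
 * i.e. the page's offset in the mapping, relative to where the vma begins
 * mapping that object, scaled back to a user virtual address.
 */
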
770 /*
771  * Returns the actual pmd_t* where we expect 'address' to be mapped from, or
772  * NULL if it doesn't exist.  No guarantees / checks on what the pmd_t*
773  * represents.
774  */
775 pmd_t *mm_find_pmd(struct mm_struct *mm, unsigned long address)
776 {
777 	pgd_t *pgd;
778 	p4d_t *p4d;
779 	pud_t *pud;
780 	pmd_t *pmd = NULL;
781 
782 	pgd = pgd_offset(mm, address);
783 	if (!pgd_present(*pgd))
784 		goto out;
785 
786 	p4d = p4d_offset(pgd, address);
787 	if (!p4d_present(*p4d))
788 		goto out;
789 
790 	pud = pud_offset(p4d, address);
791 	if (!pud_present(*pud))
792 		goto out;
793 
794 	pmd = pmd_offset(pud, address);
795 out:
796 	return pmd;
797 }
798 
799 struct folio_referenced_arg {
800 	int mapcount;
801 	int referenced;
802 	unsigned long vm_flags;
803 	struct mem_cgroup *memcg;
804 };
805 /*
806  * arg: folio_referenced_arg will be passed
807  */
808 static bool folio_referenced_one(struct folio *folio,
809 		struct vm_area_struct *vma, unsigned long address, void *arg)
810 {
811 	struct folio_referenced_arg *pra = arg;
812 	DEFINE_FOLIO_VMA_WALK(pvmw, folio, vma, address, 0);
813 	int referenced = 0;
814 
815 	while (page_vma_mapped_walk(&pvmw)) {
816 		address = pvmw.address;
817 
818 		if ((vma->vm_flags & VM_LOCKED) &&
819 		    (!folio_test_large(folio) || !pvmw.pte)) {
820 			/* Restore the mlock which got missed */
821 			mlock_vma_folio(folio, vma, !pvmw.pte);
822 			page_vma_mapped_walk_done(&pvmw);
823 			pra->vm_flags |= VM_LOCKED;
824 			return false; /* To break the loop */
825 		}
826 
827 		if (pvmw.pte) {
828 			if (lru_gen_enabled() && pte_young(*pvmw.pte) &&
829 			    !(vma->vm_flags & (VM_SEQ_READ | VM_RAND_READ))) {
830 				lru_gen_look_around(&pvmw);
831 				referenced++;
832 			}
833 
834 			if (ptep_clear_flush_young_notify(vma, address,
835 						pvmw.pte)) {
836 				/*
837 				 * Don't treat a reference through
838 				 * a sequentially read mapping as such.
839 				 * If the folio has been used in another mapping,
840 				 * we will catch it; if this other mapping is
841 				 * already gone, the unmap path will have set
842 				 * the referenced flag or activated the folio.
843 				 */
844 				if (likely(!(vma->vm_flags & VM_SEQ_READ)))
845 					referenced++;
846 			}
847 		} else if (IS_ENABLED(CONFIG_TRANSPARENT_HUGEPAGE)) {
848 			if (pmdp_clear_flush_young_notify(vma, address,
849 						pvmw.pmd))
850 				referenced++;
851 		} else {
852 			/* unexpected pmd-mapped folio? */
853 			WARN_ON_ONCE(1);
854 		}
855 
856 		pra->mapcount--;
857 	}
858 
859 	if (referenced)
860 		folio_clear_idle(folio);
861 	if (folio_test_clear_young(folio))
862 		referenced++;
863 
864 	if (referenced) {
865 		pra->referenced++;
866 		pra->vm_flags |= vma->vm_flags & ~VM_LOCKED;
867 	}
868 
869 	if (!pra->mapcount)
870 		return false; /* To break the loop */
871 
872 	return true;
873 }
874 
875 static bool invalid_folio_referenced_vma(struct vm_area_struct *vma, void *arg)
876 {
877 	struct folio_referenced_arg *pra = arg;
878 	struct mem_cgroup *memcg = pra->memcg;
879 
880 	if (!mm_match_cgroup(vma->vm_mm, memcg))
881 		return true;
882 
883 	return false;
884 }
885 
886 /**
887  * folio_referenced() - Test if the folio was referenced.
888  * @folio: The folio to test.
889  * @is_locked: Caller holds lock on the folio.
890  * @memcg: target memory cgroup
891  * @vm_flags: A combination of all the vma->vm_flags which referenced the folio.
892  *
893  * Quick test_and_clear_referenced for all mappings of a folio.
894  *
895  * Return: The number of mappings which referenced the folio. Return -1 if
896  * the function bailed out due to rmap lock contention.
897  */
898 int folio_referenced(struct folio *folio, int is_locked,
899 		     struct mem_cgroup *memcg, unsigned long *vm_flags)
900 {
901 	int we_locked = 0;
902 	struct folio_referenced_arg pra = {
903 		.mapcount = folio_mapcount(folio),
904 		.memcg = memcg,
905 	};
906 	struct rmap_walk_control rwc = {
907 		.rmap_one = folio_referenced_one,
908 		.arg = (void *)&pra,
909 		.anon_lock = folio_lock_anon_vma_read,
910 		.try_lock = true,
911 	};
912 
913 	*vm_flags = 0;
914 	if (!pra.mapcount)
915 		return 0;
916 
917 	if (!folio_raw_mapping(folio))
918 		return 0;
919 
920 	if (!is_locked && (!folio_test_anon(folio) || folio_test_ksm(folio))) {
921 		we_locked = folio_trylock(folio);
922 		if (!we_locked)
923 			return 1;
924 	}
925 
926 	/*
927 	 * If we are reclaiming on behalf of a cgroup, skip
928 	 * counting on behalf of references from different
929 	 * cgroups
930 	 */
931 	if (memcg) {
932 		rwc.invalid_vma = invalid_folio_referenced_vma;
933 	}
934 
935 	rmap_walk(folio, &rwc);
936 	*vm_flags = pra.vm_flags;
937 
938 	if (we_locked)
939 		folio_unlock(folio);
940 
941 	return rwc.contended ? -1 : pra.referenced;
942 }
943 
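/*
 * Sketch of how a reclaim-style caller consumes the result (modelled on
 * folio_check_references() in mm/vmscan.c; names and details may differ
 * from this revision):
 *
 *	unsigned long vm_flags;
 *	int referenced_ptes;
 *
 *	referenced_ptes = folio_referenced(folio, 1, sc->target_mem_cgroup,
 *					   &vm_flags);
 *	if (vm_flags & VM_LOCKED)
 *		return FOLIOREF_ACTIVATE;	(keep mlocked folios around)
 *	if (referenced_ptes == -1)
 *		return FOLIOREF_KEEP;		(rmap lock was contended)
 *
 * i.e. -1 is not an error but a "don't know, retry later" signal produced by
 * the try_lock mode configured above.
 */
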
944 static int page_vma_mkclean_one(struct page_vma_mapped_walk *pvmw)
945 {
946 	int cleaned = 0;
947 	struct vm_area_struct *vma = pvmw->vma;
948 	struct mmu_notifier_range range;
949 	unsigned long address = pvmw->address;
950 
951 	/*
952 	 * We have to assume the worst case, i.e. pmd, for invalidation. Note that
953 	 * the folio cannot be freed from this function.
954 	 */
955 	mmu_notifier_range_init(&range, MMU_NOTIFY_PROTECTION_PAGE,
956 				0, vma, vma->vm_mm, address,
957 				vma_address_end(pvmw));
958 	mmu_notifier_invalidate_range_start(&range);
959 
960 	while (page_vma_mapped_walk(pvmw)) {
961 		int ret = 0;
962 
963 		address = pvmw->address;
964 		if (pvmw->pte) {
965 			pte_t entry;
966 			pte_t *pte = pvmw->pte;
967 
968 			if (!pte_dirty(*pte) && !pte_write(*pte))
969 				continue;
970 
971 			flush_cache_page(vma, address, pte_pfn(*pte));
972 			entry = ptep_clear_flush(vma, address, pte);
973 			entry = pte_wrprotect(entry);
974 			entry = pte_mkclean(entry);
975 			set_pte_at(vma->vm_mm, address, pte, entry);
976 			ret = 1;
977 		} else {
978 #ifdef CONFIG_TRANSPARENT_HUGEPAGE
979 			pmd_t *pmd = pvmw->pmd;
980 			pmd_t entry;
981 
982 			if (!pmd_dirty(*pmd) && !pmd_write(*pmd))
983 				continue;
984 
985 			flush_cache_range(vma, address,
986 					  address + HPAGE_PMD_SIZE);
987 			entry = pmdp_invalidate(vma, address, pmd);
988 			entry = pmd_wrprotect(entry);
989 			entry = pmd_mkclean(entry);
990 			set_pmd_at(vma->vm_mm, address, pmd, entry);
991 			ret = 1;
992 #else
993 			/* unexpected pmd-mapped folio? */
994 			WARN_ON_ONCE(1);
995 #endif
996 		}
997 
998 		/*
999 		 * No need to call mmu_notifier_invalidate_range() as we are
1000 		 * downgrading page table protection, not changing it to point
1001 		 * to a new page.
1002 		 *
1003 		 * See Documentation/mm/mmu_notifier.rst
1004 		 */
1005 		if (ret)
1006 			cleaned++;
1007 	}
1008 
1009 	mmu_notifier_invalidate_range_end(&range);
1010 
1011 	return cleaned;
1012 }
1013 
1014 static bool page_mkclean_one(struct folio *folio, struct vm_area_struct *vma,
1015 			     unsigned long address, void *arg)
1016 {
1017 	DEFINE_FOLIO_VMA_WALK(pvmw, folio, vma, address, PVMW_SYNC);
1018 	int *cleaned = arg;
1019 
1020 	*cleaned += page_vma_mkclean_one(&pvmw);
1021 
1022 	return true;
1023 }
1024 
1025 static bool invalid_mkclean_vma(struct vm_area_struct *vma, void *arg)
1026 {
1027 	if (vma->vm_flags & VM_SHARED)
1028 		return false;
1029 
1030 	return true;
1031 }
1032 
1033 int folio_mkclean(struct folio *folio)
1034 {
1035 	int cleaned = 0;
1036 	struct address_space *mapping;
1037 	struct rmap_walk_control rwc = {
1038 		.arg = (void *)&cleaned,
1039 		.rmap_one = page_mkclean_one,
1040 		.invalid_vma = invalid_mkclean_vma,
1041 	};
1042 
1043 	BUG_ON(!folio_test_locked(folio));
1044 
1045 	if (!folio_mapped(folio))
1046 		return 0;
1047 
1048 	mapping = folio_mapping(folio);
1049 	if (!mapping)
1050 		return 0;
1051 
1052 	rmap_walk(folio, &rwc);
1053 
1054 	return cleaned;
1055 }
1056 EXPORT_SYMBOL_GPL(folio_mkclean);
1057 
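/*
 * Sketch of the classic user of this helper: the writeback path first
 * write-protects every mapping and only then tests the dirty flag, roughly
 * as folio_clear_dirty_for_io() does (details may differ from this
 * revision):
 *
 *	if (folio_mkclean(folio))
 *		folio_mark_dirty(folio);
 *	if (folio_test_clear_dirty(folio)) {
 *		... account the folio and start writeback ...
 *	}
 *
 * Write-protecting first guarantees that any store landing after this point
 * must go through the fault path and re-dirty the folio.
 */
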
1058 /**
1059  * pfn_mkclean_range - Cleans the PTEs (including PMDs) mapped within the range
1060  *                     [@pfn, @pfn + @nr_pages) at the specific offset (@pgoff)
1061  *                     of the shared mapping in @vma. Since clean PTEs should
1062  *                     also be read-only, they are write-protected as well.
1063  * @pfn: start pfn.
1064  * @nr_pages: number of physically contiguous pages starting with @pfn.
1065  * @pgoff: page offset that @pfn is mapped at.
1066  * @vma: vma within which @pfn is mapped.
1067  *
1068  * Returns the number of cleaned PTEs (including PMDs).
1069  */
1070 int pfn_mkclean_range(unsigned long pfn, unsigned long nr_pages, pgoff_t pgoff,
1071 		      struct vm_area_struct *vma)
1072 {
1073 	struct page_vma_mapped_walk pvmw = {
1074 		.pfn		= pfn,
1075 		.nr_pages	= nr_pages,
1076 		.pgoff		= pgoff,
1077 		.vma		= vma,
1078 		.flags		= PVMW_SYNC,
1079 	};
1080 
1081 	if (invalid_mkclean_vma(vma, NULL))
1082 		return 0;
1083 
1084 	pvmw.address = vma_pgoff_address(pgoff, nr_pages, vma);
1085 	VM_BUG_ON_VMA(pvmw.address == -EFAULT, vma);
1086 
1087 	return page_vma_mkclean_one(&pvmw);
1088 }
1089 
1090 /**
1091  * page_move_anon_rmap - move a page to our anon_vma
1092  * @page:	the page to move to our anon_vma
1093  * @vma:	the vma the page belongs to
1094  *
1095  * When a page belongs exclusively to one process after a COW event,
1096  * that page can be moved into the anon_vma that belongs to just that
1097  * process, so the rmap code will not search the parent or sibling
1098  * processes.
1099  */
1100 void page_move_anon_rmap(struct page *page, struct vm_area_struct *vma)
1101 {
1102 	struct anon_vma *anon_vma = vma->anon_vma;
1103 	struct page *subpage = page;
1104 
1105 	page = compound_head(page);
1106 
1107 	VM_BUG_ON_PAGE(!PageLocked(page), page);
1108 	VM_BUG_ON_VMA(!anon_vma, vma);
1109 
1110 	anon_vma = (void *) anon_vma + PAGE_MAPPING_ANON;
1111 	/*
1112 	 * Ensure that anon_vma and the PAGE_MAPPING_ANON bit are written
1113 	 * simultaneously, so a concurrent reader (eg folio_referenced()'s
1114 	 * folio_test_anon()) will not see one without the other.
1115 	 */
1116 	WRITE_ONCE(page->mapping, (struct address_space *) anon_vma);
1117 	SetPageAnonExclusive(subpage);
1118 }
1119 
1120 /**
1121  * __page_set_anon_rmap - set up new anonymous rmap
1122  * @page:	Page or Hugepage to add to rmap
1123  * @vma:	VM area to add page to.
1124  * @address:	User virtual address of the mapping
1125  * @exclusive:	the page is exclusively owned by the current process
1126  */
1127 static void __page_set_anon_rmap(struct page *page,
1128 	struct vm_area_struct *vma, unsigned long address, int exclusive)
1129 {
1130 	struct anon_vma *anon_vma = vma->anon_vma;
1131 
1132 	BUG_ON(!anon_vma);
1133 
1134 	if (PageAnon(page))
1135 		goto out;
1136 
1137 	/*
1138 	 * If the page isn't exclusively mapped into this vma,
1139 	 * we must use the _oldest_ possible anon_vma for the
1140 	 * page mapping!
1141 	 */
1142 	if (!exclusive)
1143 		anon_vma = anon_vma->root;
1144 
1145 	/*
1146 	 * page_idle does a lockless/optimistic rmap scan on page->mapping.
1147 	 * Make sure the compiler doesn't split the stores of anon_vma and
1148 	 * the PAGE_MAPPING_ANON type identifier, otherwise the rmap code
1149 	 * could mistake the mapping for a struct address_space and crash.
1150 	 */
1151 	anon_vma = (void *) anon_vma + PAGE_MAPPING_ANON;
1152 	WRITE_ONCE(page->mapping, (struct address_space *) anon_vma);
1153 	page->index = linear_page_index(vma, address);
1154 out:
1155 	if (exclusive)
1156 		SetPageAnonExclusive(page);
1157 }
1158 
1159 /**
1160  * __page_check_anon_rmap - sanity check anonymous rmap addition
1161  * @page:	the page to add the mapping to
1162  * @vma:	the vm area in which the mapping is added
1163  * @address:	the user virtual address mapped
1164  */
1165 static void __page_check_anon_rmap(struct page *page,
1166 	struct vm_area_struct *vma, unsigned long address)
1167 {
1168 	struct folio *folio = page_folio(page);
1169 	/*
1170 	 * The page's anon-rmap details (mapping and index) are guaranteed to
1171 	 * be set up correctly at this point.
1172 	 *
1173 	 * We have exclusion against page_add_anon_rmap because the caller
1174 	 * always holds the page locked.
1175 	 *
1176 	 * We have exclusion against page_add_new_anon_rmap because those pages
1177 	 * are initially only visible via the pagetables, and the pte is locked
1178 	 * over the call to page_add_new_anon_rmap.
1179 	 */
1180 	VM_BUG_ON_FOLIO(folio_anon_vma(folio)->root != vma->anon_vma->root,
1181 			folio);
1182 	VM_BUG_ON_PAGE(page_to_pgoff(page) != linear_page_index(vma, address),
1183 		       page);
1184 }
1185 
1186 /**
1187  * page_add_anon_rmap - add pte mapping to an anonymous page
1188  * @page:	the page to add the mapping to
1189  * @vma:	the vm area in which the mapping is added
1190  * @address:	the user virtual address mapped
1191  * @flags:	the rmap flags
1192  *
1193  * The caller needs to hold the pte lock, and the page must be locked in
1194  * the anon_vma case: to serialize mapping,index checking after setting,
1195  * and to ensure that PageAnon is not being upgraded racily to PageKsm
1196  * (but PageKsm is never downgraded to PageAnon).
1197  */
1198 void page_add_anon_rmap(struct page *page,
1199 	struct vm_area_struct *vma, unsigned long address, rmap_t flags)
1200 {
1201 	bool compound = flags & RMAP_COMPOUND;
1202 	bool first;
1203 
1204 	if (unlikely(PageKsm(page)))
1205 		lock_page_memcg(page);
1206 	else
1207 		VM_BUG_ON_PAGE(!PageLocked(page), page);
1208 
1209 	if (compound) {
1210 		atomic_t *mapcount;
1211 		VM_BUG_ON_PAGE(!PageLocked(page), page);
1212 		VM_BUG_ON_PAGE(!PageTransHuge(page), page);
1213 		mapcount = compound_mapcount_ptr(page);
1214 		first = atomic_inc_and_test(mapcount);
1215 	} else {
1216 		first = atomic_inc_and_test(&page->_mapcount);
1217 	}
1218 	VM_BUG_ON_PAGE(!first && (flags & RMAP_EXCLUSIVE), page);
1219 	VM_BUG_ON_PAGE(!first && PageAnonExclusive(page), page);
1220 
1221 	if (first) {
1222 		int nr = compound ? thp_nr_pages(page) : 1;
1223 		/*
1224 		 * We use the irq-unsafe __{inc|mod}_zone_page_stat because
1225 		 * these counters are not modified in interrupt context, and
1226 		 * the pte lock (a spinlock) is held, which implies preemption
1227 		 * is disabled.
1228 		 */
1229 		if (compound)
1230 			__mod_lruvec_page_state(page, NR_ANON_THPS, nr);
1231 		__mod_lruvec_page_state(page, NR_ANON_MAPPED, nr);
1232 	}
1233 
1234 	if (unlikely(PageKsm(page)))
1235 		unlock_page_memcg(page);
1236 
1237 	/* address might be in next vma when migration races vma_adjust */
1238 	else if (first)
1239 		__page_set_anon_rmap(page, vma, address,
1240 				     !!(flags & RMAP_EXCLUSIVE));
1241 	else
1242 		__page_check_anon_rmap(page, vma, address);
1243 
1244 	mlock_vma_page(page, vma, compound);
1245 }
1246 
1247 /**
1248  * page_add_new_anon_rmap - add mapping to a new anonymous page
1249  * @page:	the page to add the mapping to
1250  * @vma:	the vm area in which the mapping is added
1251  * @address:	the user virtual address mapped
1252  *
1253  * If it's a compound page, it is accounted as a compound page. As the page
1254  * is new, it's assumed to be mapped exclusively by a single process.
1255  *
1256  * Same as page_add_anon_rmap but must only be called on *new* pages.
1257  * This means the inc-and-test can be bypassed.
1258  * Page does not have to be locked.
1259  */
1260 void page_add_new_anon_rmap(struct page *page,
1261 	struct vm_area_struct *vma, unsigned long address)
1262 {
1263 	const bool compound = PageCompound(page);
1264 	int nr = compound ? thp_nr_pages(page) : 1;
1265 
1266 	VM_BUG_ON_VMA(address < vma->vm_start || address >= vma->vm_end, vma);
1267 	__SetPageSwapBacked(page);
1268 	if (compound) {
1269 		VM_BUG_ON_PAGE(!PageTransHuge(page), page);
1270 		/* increment count (starts at -1) */
1271 		atomic_set(compound_mapcount_ptr(page), 0);
1272 		atomic_set(compound_pincount_ptr(page), 0);
1273 
1274 		__mod_lruvec_page_state(page, NR_ANON_THPS, nr);
1275 	} else {
1276 		/* increment count (starts at -1) */
1277 		atomic_set(&page->_mapcount, 0);
1278 	}
1279 	__mod_lruvec_page_state(page, NR_ANON_MAPPED, nr);
1280 	__page_set_anon_rmap(page, vma, address, 1);
1281 }
1282 
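/*
 * Sketch of the typical call site: an anonymous fault handler allocates a
 * fresh page, adds the rmap, puts it on the LRU, and only then installs the
 * PTE, roughly as do_anonymous_page() does (simplified; details may differ
 * from this revision):
 *
 *	page = alloc_zeroed_user_highpage_movable(vma, vmf->address);
 *	...
 *	page_add_new_anon_rmap(page, vma, vmf->address);
 *	lru_cache_add_inactive_or_unevictable(page, vma);
 *	set_pte_at(vma->vm_mm, vmf->address, vmf->pte, entry);
 *
 * Because nobody else can see the page yet, the plain atomic_set() of the
 * mapcount above is safe and no page lock is required.
 */
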
1283 /**
1284  * page_add_file_rmap - add pte mapping to a file page
1285  * @page:	the page to add the mapping to
1286  * @vma:	the vm area in which the mapping is added
1287  * @compound:	charge the page as compound or small page
1288  *
1289  * The caller needs to hold the pte lock.
1290  */
1291 void page_add_file_rmap(struct page *page,
1292 	struct vm_area_struct *vma, bool compound)
1293 {
1294 	int i, nr = 0;
1295 
1296 	VM_BUG_ON_PAGE(compound && !PageTransHuge(page), page);
1297 	lock_page_memcg(page);
1298 	if (compound && PageTransHuge(page)) {
1299 		int nr_pages = thp_nr_pages(page);
1300 
1301 		for (i = 0; i < nr_pages; i++) {
1302 			if (atomic_inc_and_test(&page[i]._mapcount))
1303 				nr++;
1304 		}
1305 		if (!atomic_inc_and_test(compound_mapcount_ptr(page)))
1306 			goto out;
1307 
1308 		/*
1309 		 * It is racy to ClearPageDoubleMap in page_remove_file_rmap();
1310 		 * but page lock is held by all page_add_file_rmap() compound
1311 		 * callers, and SetPageDoubleMap below warns if !PageLocked:
1312 		 * so here is a place that DoubleMap can be safely cleared.
1313 		 */
1314 		VM_WARN_ON_ONCE(!PageLocked(page));
1315 		if (nr == nr_pages && PageDoubleMap(page))
1316 			ClearPageDoubleMap(page);
1317 
1318 		if (PageSwapBacked(page))
1319 			__mod_lruvec_page_state(page, NR_SHMEM_PMDMAPPED,
1320 						nr_pages);
1321 		else
1322 			__mod_lruvec_page_state(page, NR_FILE_PMDMAPPED,
1323 						nr_pages);
1324 	} else {
1325 		if (PageTransCompound(page) && page_mapping(page)) {
1326 			VM_WARN_ON_ONCE(!PageLocked(page));
1327 			SetPageDoubleMap(compound_head(page));
1328 		}
1329 		if (atomic_inc_and_test(&page->_mapcount))
1330 			nr++;
1331 	}
1332 out:
1333 	if (nr)
1334 		__mod_lruvec_page_state(page, NR_FILE_MAPPED, nr);
1335 	unlock_page_memcg(page);
1336 
1337 	mlock_vma_page(page, vma, compound);
1338 }
1339 
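/*
 * Sketch of a typical call site: when a file page is faulted in, do_set_pte()
 * in mm/memory.c charges the rmap before installing the PTE, roughly as
 * follows (simplified; details may differ from this revision):
 *
 *	inc_mm_counter(vma->vm_mm, mm_counter_file(page));
 *	page_add_file_rmap(page, vma, false);
 *	set_pte_at(vma->vm_mm, addr, vmf->pte, entry);
 *
 * all under the pte lock taken by the fault path, as required above.
 */
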
1340 static void page_remove_file_rmap(struct page *page, bool compound)
1341 {
1342 	int i, nr = 0;
1343 
1344 	VM_BUG_ON_PAGE(compound && !PageHead(page), page);
1345 
1346 	/* Hugepages are not counted in NR_FILE_MAPPED for now. */
1347 	if (unlikely(PageHuge(page))) {
1348 		/* hugetlb pages are always mapped with pmds */
1349 		atomic_dec(compound_mapcount_ptr(page));
1350 		return;
1351 	}
1352 
1353 	/* page still mapped by someone else? */
1354 	if (compound && PageTransHuge(page)) {
1355 		int nr_pages = thp_nr_pages(page);
1356 
1357 		for (i = 0; i < nr_pages; i++) {
1358 			if (atomic_add_negative(-1, &page[i]._mapcount))
1359 				nr++;
1360 		}
1361 		if (!atomic_add_negative(-1, compound_mapcount_ptr(page)))
1362 			goto out;
1363 		if (PageSwapBacked(page))
1364 			__mod_lruvec_page_state(page, NR_SHMEM_PMDMAPPED,
1365 						-nr_pages);
1366 		else
1367 			__mod_lruvec_page_state(page, NR_FILE_PMDMAPPED,
1368 						-nr_pages);
1369 	} else {
1370 		if (atomic_add_negative(-1, &page->_mapcount))
1371 			nr++;
1372 	}
1373 out:
1374 	if (nr)
1375 		__mod_lruvec_page_state(page, NR_FILE_MAPPED, -nr);
1376 }
1377 
1378 static void page_remove_anon_compound_rmap(struct page *page)
1379 {
1380 	int i, nr;
1381 
1382 	if (!atomic_add_negative(-1, compound_mapcount_ptr(page)))
1383 		return;
1384 
1385 	/* Hugepages are not counted in NR_ANON_PAGES for now. */
1386 	if (unlikely(PageHuge(page)))
1387 		return;
1388 
1389 	if (!IS_ENABLED(CONFIG_TRANSPARENT_HUGEPAGE))
1390 		return;
1391 
1392 	__mod_lruvec_page_state(page, NR_ANON_THPS, -thp_nr_pages(page));
1393 
1394 	if (TestClearPageDoubleMap(page)) {
1395 		/*
1396 		 * Subpages can be mapped with PTEs too. Check how many of
1397 		 * them are still mapped.
1398 		 */
1399 		for (i = 0, nr = 0; i < thp_nr_pages(page); i++) {
1400 			if (atomic_add_negative(-1, &page[i]._mapcount))
1401 				nr++;
1402 		}
1403 
1404 		/*
1405 		 * Queue the page for deferred split if at least one small
1406 		 * page of the compound page is unmapped, but at least one
1407 		 * small page is still mapped.
1408 		 */
1409 		if (nr && nr < thp_nr_pages(page))
1410 			deferred_split_huge_page(page);
1411 	} else {
1412 		nr = thp_nr_pages(page);
1413 	}
1414 
1415 	if (nr)
1416 		__mod_lruvec_page_state(page, NR_ANON_MAPPED, -nr);
1417 }
1418 
1419 /**
1420  * page_remove_rmap - take down pte mapping from a page
1421  * @page:	page to remove mapping from
1422  * @vma:	the vm area from which the mapping is removed
1423  * @compound:	uncharge the page as compound or small page
1424  *
1425  * The caller needs to hold the pte lock.
1426  */
1427 void page_remove_rmap(struct page *page,
1428 	struct vm_area_struct *vma, bool compound)
1429 {
1430 	lock_page_memcg(page);
1431 
1432 	if (!PageAnon(page)) {
1433 		page_remove_file_rmap(page, compound);
1434 		goto out;
1435 	}
1436 
1437 	if (compound) {
1438 		page_remove_anon_compound_rmap(page);
1439 		goto out;
1440 	}
1441 
1442 	/* page still mapped by someone else? */
1443 	if (!atomic_add_negative(-1, &page->_mapcount))
1444 		goto out;
1445 
1446 	/*
1447 	 * We use the irq-unsafe __{inc|mod}_zone_page_stat because
1448 	 * these counters are not modified in interrupt context, and
1449 	 * the pte lock (a spinlock) is held, which implies preemption is disabled.
1450 	 */
1451 	__dec_lruvec_page_state(page, NR_ANON_MAPPED);
1452 
1453 	if (PageTransCompound(page))
1454 		deferred_split_huge_page(compound_head(page));
1455 
1456 	/*
1457 	 * It would be tidy to reset the PageAnon mapping here,
1458 	 * but that might overwrite a racing page_add_anon_rmap
1459 	 * which increments mapcount after us but sets mapping
1460 	 * before us: so leave the reset to free_unref_page,
1461 	 * and remember that it's only reliable while mapped.
1462 	 * Leaving it set also helps swapoff to reinstate ptes
1463 	 * faster for those pages still in swapcache.
1464 	 */
1465 out:
1466 	unlock_page_memcg(page);
1467 
1468 	munlock_vma_page(page, vma, compound);
1469 }
1470 
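/*
 * Sketch of the usual teardown sequence in a caller such as zap_pte_range()
 * (simplified; details may differ from this revision): the PTE is cleared
 * under the PTL before the reverse mapping is dropped,
 *
 *	ptent = ptep_get_and_clear_full(mm, addr, pte, tlb->fullmm);
 *	tlb_remove_tlb_entry(tlb, pte, addr);
 *	...
 *	page_remove_rmap(page, vma, false);
 *
 * which is exactly the window that the TTU_SYNC comment in
 * try_to_unmap_one() below is concerned with: between the clear and the
 * rmap removal the page still counts as mapped, but is no longer reachable
 * through that PTE.
 */
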
1471 /*
1472  * @arg: enum ttu_flags will be passed to this argument
1473  */
1474 static bool try_to_unmap_one(struct folio *folio, struct vm_area_struct *vma,
1475 		     unsigned long address, void *arg)
1476 {
1477 	struct mm_struct *mm = vma->vm_mm;
1478 	DEFINE_FOLIO_VMA_WALK(pvmw, folio, vma, address, 0);
1479 	pte_t pteval;
1480 	struct page *subpage;
1481 	bool anon_exclusive, ret = true;
1482 	struct mmu_notifier_range range;
1483 	enum ttu_flags flags = (enum ttu_flags)(long)arg;
1484 
1485 	/*
1486 	 * When racing against e.g. zap_pte_range() on another cpu,
1487 	 * in between its ptep_get_and_clear_full() and page_remove_rmap(),
1488 	 * try_to_unmap() may return before page_mapped() has become false,
1489 	 * if page table locking is skipped: use TTU_SYNC to wait for that.
1490 	 */
1491 	if (flags & TTU_SYNC)
1492 		pvmw.flags = PVMW_SYNC;
1493 
1494 	if (flags & TTU_SPLIT_HUGE_PMD)
1495 		split_huge_pmd_address(vma, address, false, folio);
1496 
1497 	/*
1498 	 * For THP, we have to assume the worst case, i.e. pmd, for invalidation.
1499 	 * For hugetlb, it could be much worse if we need to do pud
1500 	 * invalidation in the case of pmd sharing.
1501 	 *
1502 	 * Note that the folio cannot be freed in this function, as the caller of
1503 	 * try_to_unmap() must hold a reference on the folio.
1504 	 */
1505 	range.end = vma_address_end(&pvmw);
1506 	mmu_notifier_range_init(&range, MMU_NOTIFY_CLEAR, 0, vma, vma->vm_mm,
1507 				address, range.end);
1508 	if (folio_test_hugetlb(folio)) {
1509 		/*
1510 		 * If sharing is possible, start and end will be adjusted
1511 		 * accordingly.
1512 		 */
1513 		adjust_range_if_pmd_sharing_possible(vma, &range.start,
1514 						     &range.end);
1515 	}
1516 	mmu_notifier_invalidate_range_start(&range);
1517 
1518 	while (page_vma_mapped_walk(&pvmw)) {
1519 		/* Unexpected PMD-mapped THP? */
1520 		VM_BUG_ON_FOLIO(!pvmw.pte, folio);
1521 
1522 		/*
1523 		 * If the folio is in an mlock()d vma, we must not swap it out.
1524 		 */
1525 		if (!(flags & TTU_IGNORE_MLOCK) &&
1526 		    (vma->vm_flags & VM_LOCKED)) {
1527 			/* Restore the mlock which got missed */
1528 			mlock_vma_folio(folio, vma, false);
1529 			page_vma_mapped_walk_done(&pvmw);
1530 			ret = false;
1531 			break;
1532 		}
1533 
1534 		subpage = folio_page(folio,
1535 					pte_pfn(*pvmw.pte) - folio_pfn(folio));
1536 		address = pvmw.address;
1537 		anon_exclusive = folio_test_anon(folio) &&
1538 				 PageAnonExclusive(subpage);
1539 
1540 		if (folio_test_hugetlb(folio)) {
1541 			bool anon = folio_test_anon(folio);
1542 
1543 			/*
1544 			 * The try_to_unmap() is only passed a hugetlb page
1545 			 * try_to_unmap() is only passed a hugetlb page
1546 			 * in the case where that page is poisoned.
1547 			VM_BUG_ON_PAGE(!PageHWPoison(subpage), subpage);
1548 			/*
1549 			 * huge_pmd_unshare may unmap an entire PMD page.
1550 			 * There is no way of knowing exactly which PMDs may
1551 			 * be cached for this mm, so we must flush them all.
1552 			 * start/end were already adjusted above to cover this
1553 			 * range.
1554 			 */
1555 			flush_cache_range(vma, range.start, range.end);
1556 
1557 			/*
1558 			 * To call huge_pmd_unshare, i_mmap_rwsem must be
1559 			 * held in write mode.  Caller needs to explicitly
1560 			 * do this outside rmap routines.
1561 			 */
1562 			VM_BUG_ON(!anon && !(flags & TTU_RMAP_LOCKED));
1563 			if (!anon && huge_pmd_unshare(mm, vma, address, pvmw.pte)) {
1564 				flush_tlb_range(vma, range.start, range.end);
1565 				mmu_notifier_invalidate_range(mm, range.start,
1566 							      range.end);
1567 
1568 				/*
1569 				 * The ref count of the PMD page was dropped
1570 				 * which is part of the way map counting
1571 				 * is done for shared PMDs.  Return 'true'
1572 				 * here.  When there is no other sharing,
1573 				 * huge_pmd_unshare returns false and we will
1574 				 * unmap the actual page and drop map count
1575 				 * to zero.
1576 				 */
1577 				page_vma_mapped_walk_done(&pvmw);
1578 				break;
1579 			}
1580 			pteval = huge_ptep_clear_flush(vma, address, pvmw.pte);
1581 		} else {
1582 			flush_cache_page(vma, address, pte_pfn(*pvmw.pte));
1583 			/* Nuke the page table entry. */
1584 			if (should_defer_flush(mm, flags)) {
1585 				/*
1586 				 * We clear the PTE but do not flush so potentially
1587 				 * a remote CPU could still be writing to the folio.
1588 				 * If the entry was previously clean then the
1589 				 * architecture must guarantee that a clear->dirty
1590 				 * transition on a cached TLB entry is written through
1591 				 * and traps if the PTE is unmapped.
1592 				 */
1593 				pteval = ptep_get_and_clear(mm, address, pvmw.pte);
1594 
1595 				set_tlb_ubc_flush_pending(mm, pte_dirty(pteval));
1596 			} else {
1597 				pteval = ptep_clear_flush(vma, address, pvmw.pte);
1598 			}
1599 		}
1600 
1601 		/*
1602 		 * Now the pte is cleared. If this pte was uffd-wp armed,
1603 		 * we may want to replace a none pte with a marker pte if
1604 		 * it's file-backed, so we don't lose the tracking info.
1605 		 */
1606 		pte_install_uffd_wp_if_needed(vma, address, pvmw.pte, pteval);
1607 
1608 		/* Set the dirty flag on the folio now the pte is gone. */
1609 		if (pte_dirty(pteval))
1610 			folio_mark_dirty(folio);
1611 
1612 		/* Update high watermark before we lower rss */
1613 		update_hiwater_rss(mm);
1614 
1615 		if (PageHWPoison(subpage) && !(flags & TTU_IGNORE_HWPOISON)) {
1616 			pteval = swp_entry_to_pte(make_hwpoison_entry(subpage));
1617 			if (folio_test_hugetlb(folio)) {
1618 				hugetlb_count_sub(folio_nr_pages(folio), mm);
1619 				set_huge_pte_at(mm, address, pvmw.pte, pteval);
1620 			} else {
1621 				dec_mm_counter(mm, mm_counter(&folio->page));
1622 				set_pte_at(mm, address, pvmw.pte, pteval);
1623 			}
1624 
1625 		} else if (pte_unused(pteval) && !userfaultfd_armed(vma)) {
1626 			/*
1627 			 * The guest indicated that the page content is of no
1628 			 * interest anymore. Simply discard the pte, vmscan
1629 			 * will take care of the rest.
1630 			 * A future reference will then fault in a new zero
1631 			 * page. When userfaultfd is active, we must not drop
1632 			 * this page though, as its main user (postcopy
1633 			 * migration) will not expect userfaults on already
1634 			 * copied pages.
1635 			 */
1636 			dec_mm_counter(mm, mm_counter(&folio->page));
1637 			/* We have to invalidate as we cleared the pte */
1638 			mmu_notifier_invalidate_range(mm, address,
1639 						      address + PAGE_SIZE);
1640 		} else if (folio_test_anon(folio)) {
1641 			swp_entry_t entry = { .val = page_private(subpage) };
1642 			pte_t swp_pte;
1643 			/*
1644 			 * Store the swap location in the pte.
1645 			 * See handle_pte_fault() ...
1646 			 */
1647 			if (unlikely(folio_test_swapbacked(folio) !=
1648 					folio_test_swapcache(folio))) {
1649 				WARN_ON_ONCE(1);
1650 				ret = false;
1651 				/* We have to invalidate as we cleared the pte */
1652 				mmu_notifier_invalidate_range(mm, address,
1653 							address + PAGE_SIZE);
1654 				page_vma_mapped_walk_done(&pvmw);
1655 				break;
1656 			}
1657 
1658 			/* MADV_FREE page check */
1659 			if (!folio_test_swapbacked(folio)) {
1660 				int ref_count, map_count;
1661 
1662 				/*
1663 				 * Synchronize with gup_pte_range():
1664 				 * - clear PTE; barrier; read refcount
1665 				 * - inc refcount; barrier; read PTE
1666 				 */
1667 				smp_mb();
1668 
1669 				ref_count = folio_ref_count(folio);
1670 				map_count = folio_mapcount(folio);
1671 
1672 				/*
1673 				 * Order reads for page refcount and dirty flag
1674 				 * (see comments in __remove_mapping()).
1675 				 */
1676 				smp_rmb();
1677 
1678 				/*
1679 				 * The only page refs must be one from isolation
1680 				 * plus the rmap(s) (dropped by discard:).
1681 				 */
1682 				if (ref_count == 1 + map_count &&
1683 				    !folio_test_dirty(folio)) {
1684 					/* Invalidate as we cleared the pte */
1685 					mmu_notifier_invalidate_range(mm,
1686 						address, address + PAGE_SIZE);
1687 					dec_mm_counter(mm, MM_ANONPAGES);
1688 					goto discard;
1689 				}
1690 
1691 				/*
1692 				 * If the folio was redirtied, it cannot be
1693 				 * discarded. Remap the page into the page table.
1694 				 */
1695 				set_pte_at(mm, address, pvmw.pte, pteval);
1696 				folio_set_swapbacked(folio);
1697 				ret = false;
1698 				page_vma_mapped_walk_done(&pvmw);
1699 				break;
1700 			}
1701 
1702 			if (swap_duplicate(entry) < 0) {
1703 				set_pte_at(mm, address, pvmw.pte, pteval);
1704 				ret = false;
1705 				page_vma_mapped_walk_done(&pvmw);
1706 				break;
1707 			}
1708 			if (arch_unmap_one(mm, vma, address, pteval) < 0) {
1709 				swap_free(entry);
1710 				set_pte_at(mm, address, pvmw.pte, pteval);
1711 				ret = false;
1712 				page_vma_mapped_walk_done(&pvmw);
1713 				break;
1714 			}
1715 
1716 			/* See page_try_share_anon_rmap(): clear PTE first. */
1717 			if (anon_exclusive &&
1718 			    page_try_share_anon_rmap(subpage)) {
1719 				swap_free(entry);
1720 				set_pte_at(mm, address, pvmw.pte, pteval);
1721 				ret = false;
1722 				page_vma_mapped_walk_done(&pvmw);
1723 				break;
1724 			}
1725 			/*
1726 			 * Note: We *don't* remember if the page was mapped
1727 			 * exclusively in the swap pte if the architecture
1728 			 * doesn't support __HAVE_ARCH_PTE_SWP_EXCLUSIVE. In
1729 			 * that case, swapin code has to re-determine that
1730 			 * manually and might detect the page as possibly
1731 			 * shared, for example, if there are other references on
1732 			 * the page or if the page is under writeback. We made
1733 			 * sure that there are no GUP pins on the page that
1734 			 * would rely on it, so for GUP pins this is fine.
1735 			 */
1736 			if (list_empty(&mm->mmlist)) {
1737 				spin_lock(&mmlist_lock);
1738 				if (list_empty(&mm->mmlist))
1739 					list_add(&mm->mmlist, &init_mm.mmlist);
1740 				spin_unlock(&mmlist_lock);
1741 			}
1742 			dec_mm_counter(mm, MM_ANONPAGES);
1743 			inc_mm_counter(mm, MM_SWAPENTS);
1744 			swp_pte = swp_entry_to_pte(entry);
1745 			if (anon_exclusive)
1746 				swp_pte = pte_swp_mkexclusive(swp_pte);
1747 			if (pte_soft_dirty(pteval))
1748 				swp_pte = pte_swp_mksoft_dirty(swp_pte);
1749 			if (pte_uffd_wp(pteval))
1750 				swp_pte = pte_swp_mkuffd_wp(swp_pte);
1751 			set_pte_at(mm, address, pvmw.pte, swp_pte);
1752 			/* Invalidate as we cleared the pte */
1753 			mmu_notifier_invalidate_range(mm, address,
1754 						      address + PAGE_SIZE);
1755 		} else {
1756 			/*
1757 			 * This is a locked file-backed folio,
1758 			 * so it cannot be removed from the page
1759 			 * cache and replaced by a new folio before
1760 			 * mmu_notifier_invalidate_range_end, so no
1761 			 * concurrent thread might update its page table
1762 			 * to point at a new folio while a device is
1763 			 * still using this folio.
1764 			 *
1765 			 * See Documentation/mm/mmu_notifier.rst
1766 			 */
1767 			dec_mm_counter(mm, mm_counter_file(&folio->page));
1768 		}
1769 discard:
1770 		/*
1771 		 * No need to call mmu_notifier_invalidate_range(): it has been
1772 		 * done above for all cases requiring it to happen under the
1773 		 * page table lock, before mmu_notifier_invalidate_range_end().
1774 		 *
1775 		 * See Documentation/mm/mmu_notifier.rst
1776 		 */
1777 		page_remove_rmap(subpage, vma, folio_test_hugetlb(folio));
1778 		if (vma->vm_flags & VM_LOCKED)
1779 			mlock_page_drain_local();
1780 		folio_put(folio);
1781 	}
1782 
1783 	mmu_notifier_invalidate_range_end(&range);
1784 
1785 	return ret;
1786 }
1787 
1788 static bool invalid_migration_vma(struct vm_area_struct *vma, void *arg)
1789 {
1790 	return vma_is_temporary_stack(vma);
1791 }
1792 
1793 static int page_not_mapped(struct folio *folio)
1794 {
1795 	return !folio_mapped(folio);
1796 }
1797 
1798 /**
1799  * try_to_unmap - Try to remove all page table mappings to a folio.
1800  * @folio: The folio to unmap.
1801  * @flags: action and flags
1802  *
1803  * Tries to remove all the page table entries which are mapping this
1804  * folio.  It is the caller's responsibility to check if the folio is
1805  * still mapped if needed (use TTU_SYNC to prevent accounting races).
1806  *
1807  * Context: Caller must hold the folio lock.
1808  */
1809 void try_to_unmap(struct folio *folio, enum ttu_flags flags)
1810 {
1811 	struct rmap_walk_control rwc = {
1812 		.rmap_one = try_to_unmap_one,
1813 		.arg = (void *)flags,
1814 		.done = page_not_mapped,
1815 		.anon_lock = folio_lock_anon_vma_read,
1816 	};
1817 
1818 	if (flags & TTU_RMAP_LOCKED)
1819 		rmap_walk_locked(folio, &rwc);
1820 	else
1821 		rmap_walk(folio, &rwc);
1822 }
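
/*
 * Hypothetical usage sketch (illustrative only, not called anywhere in this
 * file): how a reclaim-style caller might drive try_to_unmap() on a folio it
 * holds a reference to.  The helper name is made up; TTU_SYNC is chosen here
 * only because the kerneldoc above mentions it for avoiding accounting races.
 *
 *	static bool example_unmap_folio(struct folio *folio)
 *	{
 *		if (!folio_trylock(folio))
 *			return false;
 *		try_to_unmap(folio, TTU_SYNC);
 *		folio_unlock(folio);
 *		return !folio_mapped(folio);
 *	}
 */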
1823 
1824 /*
1825  * @arg: enum ttu_flags will be passed to this argument.
1826  *
1827  * If TTU_SPLIT_HUGE_PMD is specified any PMD mappings will be split into PTEs
1828  * containing migration entries.
1829  */
1830 static bool try_to_migrate_one(struct folio *folio, struct vm_area_struct *vma,
1831 		     unsigned long address, void *arg)
1832 {
1833 	struct mm_struct *mm = vma->vm_mm;
1834 	DEFINE_FOLIO_VMA_WALK(pvmw, folio, vma, address, 0);
1835 	pte_t pteval;
1836 	struct page *subpage;
1837 	bool anon_exclusive, ret = true;
1838 	struct mmu_notifier_range range;
1839 	enum ttu_flags flags = (enum ttu_flags)(long)arg;
1840 
1841 	/*
1842 	 * When racing against e.g. zap_pte_range() on another cpu,
1843 	 * in between its ptep_get_and_clear_full() and page_remove_rmap(),
1844 	 * try_to_migrate() may return before page_mapped() has become false,
1845 	 * if page table locking is skipped: use TTU_SYNC to wait for that.
1846 	 */
1847 	if (flags & TTU_SYNC)
1848 		pvmw.flags = PVMW_SYNC;
1849 
1850 	/*
1851 	 * unmap_page() in mm/huge_memory.c is the only user of migration with
1852 	 * TTU_SPLIT_HUGE_PMD and it wants to freeze.
1853 	 */
1854 	if (flags & TTU_SPLIT_HUGE_PMD)
1855 		split_huge_pmd_address(vma, address, true, folio);
1856 
1857 	/*
1858 	 * For THP, we have to assume the worst case, i.e. pmd, for invalidation.
1859 	 * For hugetlb, it could be much worse if we need to do pud
1860 	 * invalidation in the case of pmd sharing.
1861 	 *
1862 	 * Note that the folio cannot be freed in this function as the caller
1863 	 * of try_to_migrate() must hold a reference on it.
1864 	 */
1865 	range.end = vma_address_end(&pvmw);
1866 	mmu_notifier_range_init(&range, MMU_NOTIFY_CLEAR, 0, vma, vma->vm_mm,
1867 				address, range.end);
1868 	if (folio_test_hugetlb(folio)) {
1869 		/*
1870 		 * If sharing is possible, start and end will be adjusted
1871 		 * accordingly.
1872 		 */
1873 		adjust_range_if_pmd_sharing_possible(vma, &range.start,
1874 						     &range.end);
1875 	}
1876 	mmu_notifier_invalidate_range_start(&range);
1877 
1878 	while (page_vma_mapped_walk(&pvmw)) {
1879 #ifdef CONFIG_ARCH_ENABLE_THP_MIGRATION
1880 		/* PMD-mapped THP migration entry */
1881 		if (!pvmw.pte) {
1882 			subpage = folio_page(folio,
1883 				pmd_pfn(*pvmw.pmd) - folio_pfn(folio));
1884 			VM_BUG_ON_FOLIO(folio_test_hugetlb(folio) ||
1885 					!folio_test_pmd_mappable(folio), folio);
1886 
1887 			if (set_pmd_migration_entry(&pvmw, subpage)) {
1888 				ret = false;
1889 				page_vma_mapped_walk_done(&pvmw);
1890 				break;
1891 			}
1892 			continue;
1893 		}
1894 #endif
1895 
1896 		/* Unexpected PMD-mapped THP? */
1897 		VM_BUG_ON_FOLIO(!pvmw.pte, folio);
1898 
1899 		if (folio_is_zone_device(folio)) {
1900 			/*
1901 			 * Our PTE is a non-present device private entry and
1902 			 * calculating the subpage as for the common case would
1903 			 * result in an invalid pointer.
1904 			 *
1905 			 * Since only PAGE_SIZE pages can currently be
1906 			 * migrated, just set it to the folio's page. This will need to be
1907 			 * changed when hugepage migrations to device private
1908 			 * memory are supported.
1909 			 */
1910 			VM_BUG_ON_FOLIO(folio_nr_pages(folio) > 1, folio);
1911 			subpage = &folio->page;
1912 		} else {
1913 			subpage = folio_page(folio,
1914 					pte_pfn(*pvmw.pte) - folio_pfn(folio));
1915 		}
1916 		address = pvmw.address;
1917 		anon_exclusive = folio_test_anon(folio) &&
1918 				 PageAnonExclusive(subpage);
1919 
1920 		if (folio_test_hugetlb(folio)) {
1921 			bool anon = folio_test_anon(folio);
1922 
1923 			/*
1924 			 * huge_pmd_unshare may unmap an entire PMD page.
1925 			 * There is no way of knowing exactly which PMDs may
1926 			 * be cached for this mm, so we must flush them all.
1927 			 * start/end were already adjusted above to cover this
1928 			 * range.
1929 			 */
1930 			flush_cache_range(vma, range.start, range.end);
1931 
1932 			/*
1933 			 * To call huge_pmd_unshare, i_mmap_rwsem must be
1934 			 * held in write mode.  Caller needs to explicitly
1935 			 * do this outside rmap routines.
1936 			 */
1937 			VM_BUG_ON(!anon && !(flags & TTU_RMAP_LOCKED));
1938 			if (!anon && huge_pmd_unshare(mm, vma, address, pvmw.pte)) {
1939 				flush_tlb_range(vma, range.start, range.end);
1940 				mmu_notifier_invalidate_range(mm, range.start,
1941 							      range.end);
1942 
1943 				/*
1944 				 * The ref count of the PMD page was dropped
1945 				 * which is part of the way map counting
1946 				 * is done for shared PMDs.  Return 'true'
1947 				 * here.  When there is no other sharing,
1948 				 * huge_pmd_unshare returns false and we will
1949 				 * unmap the actual page and drop map count
1950 				 * to zero.
1951 				 */
1952 				page_vma_mapped_walk_done(&pvmw);
1953 				break;
1954 			}
1955 
1956 			/* Nuke the hugetlb page table entry */
1957 			pteval = huge_ptep_clear_flush(vma, address, pvmw.pte);
1958 		} else {
1959 			flush_cache_page(vma, address, pte_pfn(*pvmw.pte));
1960 			/* Nuke the page table entry. */
1961 			pteval = ptep_clear_flush(vma, address, pvmw.pte);
1962 		}
1963 
1964 		/* Set the dirty flag on the folio now the pte is gone. */
1965 		if (pte_dirty(pteval))
1966 			folio_mark_dirty(folio);
1967 
1968 		/* Update high watermark before we lower rss */
1969 		update_hiwater_rss(mm);
1970 
1971 		if (folio_is_device_private(folio)) {
1972 			unsigned long pfn = folio_pfn(folio);
1973 			swp_entry_t entry;
1974 			pte_t swp_pte;
1975 
1976 			if (anon_exclusive)
1977 				BUG_ON(page_try_share_anon_rmap(subpage));
1978 
1979 			/*
1980 			 * Store the pfn of the page in a special migration
1981 			 * pte. do_swap_page() will wait until the migration
1982 			 * pte is removed and then restart fault handling.
1983 			 */
1984 			entry = pte_to_swp_entry(pteval);
1985 			if (is_writable_device_private_entry(entry))
1986 				entry = make_writable_migration_entry(pfn);
1987 			else if (anon_exclusive)
1988 				entry = make_readable_exclusive_migration_entry(pfn);
1989 			else
1990 				entry = make_readable_migration_entry(pfn);
1991 			swp_pte = swp_entry_to_pte(entry);
1992 
1993 			/*
1994 			 * pteval maps a zone device page and is therefore
1995 			 * a swap pte.
1996 			 */
1997 			if (pte_swp_soft_dirty(pteval))
1998 				swp_pte = pte_swp_mksoft_dirty(swp_pte);
1999 			if (pte_swp_uffd_wp(pteval))
2000 				swp_pte = pte_swp_mkuffd_wp(swp_pte);
2001 			set_pte_at(mm, pvmw.address, pvmw.pte, swp_pte);
2002 			trace_set_migration_pte(pvmw.address, pte_val(swp_pte),
2003 						compound_order(&folio->page));
2004 			/*
2005 			 * No need to invalidate here: the fault path will
2006 			 * synchronize against the special swap migration pte.
2007 			 */
2008 		} else if (PageHWPoison(subpage)) {
2009 			pteval = swp_entry_to_pte(make_hwpoison_entry(subpage));
2010 			if (folio_test_hugetlb(folio)) {
2011 				hugetlb_count_sub(folio_nr_pages(folio), mm);
2012 				set_huge_pte_at(mm, address, pvmw.pte, pteval);
2013 			} else {
2014 				dec_mm_counter(mm, mm_counter(&folio->page));
2015 				set_pte_at(mm, address, pvmw.pte, pteval);
2016 			}
2017 
2018 		} else if (pte_unused(pteval) && !userfaultfd_armed(vma)) {
2019 			/*
2020 			 * The guest indicated that the page content is of no
2021 			 * interest anymore. Simply discard the pte, vmscan
2022 			 * will take care of the rest.
2023 			 * A future reference will then fault in a new zero
2024 			 * page. When userfaultfd is active, we must not drop
2025 			 * this page though, as its main user (postcopy
2026 			 * migration) will not expect userfaults on already
2027 			 * copied pages.
2028 			 */
2029 			dec_mm_counter(mm, mm_counter(&folio->page));
2030 			/* We have to invalidate as we cleared the pte */
2031 			mmu_notifier_invalidate_range(mm, address,
2032 						      address + PAGE_SIZE);
2033 		} else {
2034 			swp_entry_t entry;
2035 			pte_t swp_pte;
2036 
2037 			if (arch_unmap_one(mm, vma, address, pteval) < 0) {
2038 				if (folio_test_hugetlb(folio))
2039 					set_huge_pte_at(mm, address, pvmw.pte, pteval);
2040 				else
2041 					set_pte_at(mm, address, pvmw.pte, pteval);
2042 				ret = false;
2043 				page_vma_mapped_walk_done(&pvmw);
2044 				break;
2045 			}
2046 			VM_BUG_ON_PAGE(pte_write(pteval) && folio_test_anon(folio) &&
2047 				       !anon_exclusive, subpage);
2048 
2049 			/* See page_try_share_anon_rmap(): clear PTE first. */
2050 			if (anon_exclusive &&
2051 			    page_try_share_anon_rmap(subpage)) {
2052 				if (folio_test_hugetlb(folio))
2053 					set_huge_pte_at(mm, address, pvmw.pte, pteval);
2054 				else
2055 					set_pte_at(mm, address, pvmw.pte, pteval);
2056 				ret = false;
2057 				page_vma_mapped_walk_done(&pvmw);
2058 				break;
2059 			}
2060 
2061 			/*
2062 			 * Store the pfn of the page in a special migration
2063 			 * pte. do_swap_page() will wait until the migration
2064 			 * pte is removed and then restart fault handling.
2065 			 */
2066 			if (pte_write(pteval))
2067 				entry = make_writable_migration_entry(
2068 							page_to_pfn(subpage));
2069 			else if (anon_exclusive)
2070 				entry = make_readable_exclusive_migration_entry(
2071 							page_to_pfn(subpage));
2072 			else
2073 				entry = make_readable_migration_entry(
2074 							page_to_pfn(subpage));
2075 			if (pte_young(pteval))
2076 				entry = make_migration_entry_young(entry);
2077 			if (pte_dirty(pteval))
2078 				entry = make_migration_entry_dirty(entry);
2079 			swp_pte = swp_entry_to_pte(entry);
2080 			if (pte_soft_dirty(pteval))
2081 				swp_pte = pte_swp_mksoft_dirty(swp_pte);
2082 			if (pte_uffd_wp(pteval))
2083 				swp_pte = pte_swp_mkuffd_wp(swp_pte);
2084 			if (folio_test_hugetlb(folio))
2085 				set_huge_pte_at(mm, address, pvmw.pte, swp_pte);
2086 			else
2087 				set_pte_at(mm, address, pvmw.pte, swp_pte);
2088 			trace_set_migration_pte(address, pte_val(swp_pte),
2089 						compound_order(&folio->page));
2090 			/*
2091 			 * No need to invalidate here: the fault path will
2092 			 * synchronize against the special swap migration pte.
2093 			 */
2094 		}
2095 
2096 		/*
2097 		 * No need to call mmu_notifier_invalidate_range(): it has been
2098 		 * done above for all cases requiring it to happen under the
2099 		 * page table lock, before mmu_notifier_invalidate_range_end().
2100 		 *
2101 		 * See Documentation/mm/mmu_notifier.rst
2102 		 */
2103 		page_remove_rmap(subpage, vma, folio_test_hugetlb(folio));
2104 		if (vma->vm_flags & VM_LOCKED)
2105 			mlock_page_drain_local();
2106 		folio_put(folio);
2107 	}
2108 
2109 	mmu_notifier_invalidate_range_end(&range);
2110 
2111 	return ret;
2112 }
2113 
2114 /**
2115  * try_to_migrate - try to replace all page table mappings with swap entries
2116  * @folio: the folio to replace page table entries for
2117  * @flags: action and flags
2118  *
2119  * Tries to remove all the page table entries which are mapping this folio and
2120  * replace them with special swap entries. Caller must hold the folio lock.
2121  */
2122 void try_to_migrate(struct folio *folio, enum ttu_flags flags)
2123 {
2124 	struct rmap_walk_control rwc = {
2125 		.rmap_one = try_to_migrate_one,
2126 		.arg = (void *)flags,
2127 		.done = page_not_mapped,
2128 		.anon_lock = folio_lock_anon_vma_read,
2129 	};
2130 
2131 	/*
2132 	 * Migration always ignores mlock and only supports the TTU_RMAP_LOCKED,
2133 	 * TTU_SPLIT_HUGE_PMD and TTU_SYNC flags.
2134 	 */
2135 	if (WARN_ON_ONCE(flags & ~(TTU_RMAP_LOCKED | TTU_SPLIT_HUGE_PMD |
2136 					TTU_SYNC)))
2137 		return;
2138 
2139 	if (folio_is_zone_device(folio) &&
2140 	    (!folio_is_device_private(folio) && !folio_is_device_coherent(folio)))
2141 		return;
2142 
2143 	/*
2144 	 * During exec, a temporary VMA is setup and later moved.
2145 	 * The VMA is moved under the anon_vma lock but not the
2146 	 * page tables leading to a race where migration cannot
2147 	 * find the migration ptes. Rather than increasing the
2148 	 * locking requirements of exec(), migration skips
2149 	 * temporary VMAs until after exec() completes.
2150 	 */
2151 	if (!folio_test_ksm(folio) && folio_test_anon(folio))
2152 		rwc.invalid_vma = invalid_migration_vma;
2153 
2154 	if (flags & TTU_RMAP_LOCKED)
2155 		rmap_walk_locked(folio, &rwc);
2156 	else
2157 		rmap_walk(folio, &rwc);
2158 }
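
/*
 * Hypothetical usage sketch (illustrative only): folio migration in
 * mm/migrate.c roughly follows this pattern around try_to_migrate().
 * The src/dst names are made up and all error handling is omitted.
 *
 *	folio_lock(src);
 *	try_to_migrate(src, 0);
 *	if (!folio_mapped(src)) {
 *		... copy src's contents and state over to dst ...
 *		remove_migration_ptes(src, dst, false);
 *	} else {
 *		remove_migration_ptes(src, src, false);
 *	}
 *	folio_unlock(src);
 */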
2159 
2160 #ifdef CONFIG_DEVICE_PRIVATE
2161 struct make_exclusive_args {
2162 	struct mm_struct *mm;
2163 	unsigned long address;
2164 	void *owner;
2165 	bool valid;
2166 };
2167 
2168 static bool page_make_device_exclusive_one(struct folio *folio,
2169 		struct vm_area_struct *vma, unsigned long address, void *priv)
2170 {
2171 	struct mm_struct *mm = vma->vm_mm;
2172 	DEFINE_FOLIO_VMA_WALK(pvmw, folio, vma, address, 0);
2173 	struct make_exclusive_args *args = priv;
2174 	pte_t pteval;
2175 	struct page *subpage;
2176 	bool ret = true;
2177 	struct mmu_notifier_range range;
2178 	swp_entry_t entry;
2179 	pte_t swp_pte;
2180 
2181 	mmu_notifier_range_init_owner(&range, MMU_NOTIFY_EXCLUSIVE, 0, vma,
2182 				      vma->vm_mm, address, min(vma->vm_end,
2183 				      address + folio_size(folio)),
2184 				      args->owner);
2185 	mmu_notifier_invalidate_range_start(&range);
2186 
2187 	while (page_vma_mapped_walk(&pvmw)) {
2188 		/* Unexpected PMD-mapped THP? */
2189 		VM_BUG_ON_FOLIO(!pvmw.pte, folio);
2190 
2191 		if (!pte_present(*pvmw.pte)) {
2192 			ret = false;
2193 			page_vma_mapped_walk_done(&pvmw);
2194 			break;
2195 		}
2196 
2197 		subpage = folio_page(folio,
2198 				pte_pfn(*pvmw.pte) - folio_pfn(folio));
2199 		address = pvmw.address;
2200 
2201 		/* Nuke the page table entry. */
2202 		flush_cache_page(vma, address, pte_pfn(*pvmw.pte));
2203 		pteval = ptep_clear_flush(vma, address, pvmw.pte);
2204 
2205 		/* Set the dirty flag on the folio now the pte is gone. */
2206 		if (pte_dirty(pteval))
2207 			folio_mark_dirty(folio);
2208 
2209 		/*
2210 		 * Check that our target page is still mapped at the expected
2211 		 * address.
2212 		 */
2213 		if (args->mm == mm && args->address == address &&
2214 		    pte_write(pteval))
2215 			args->valid = true;
2216 
2217 		/*
2218 		 * Store the pfn of the page in a special device exclusive
2219 		 * swap pte. On the next CPU fault, do_swap_page() will notify
2220 		 * the device via MMU notifiers and restore the original pte.
2221 		 */
2222 		if (pte_write(pteval))
2223 			entry = make_writable_device_exclusive_entry(
2224 							page_to_pfn(subpage));
2225 		else
2226 			entry = make_readable_device_exclusive_entry(
2227 							page_to_pfn(subpage));
2228 		swp_pte = swp_entry_to_pte(entry);
2229 		if (pte_soft_dirty(pteval))
2230 			swp_pte = pte_swp_mksoft_dirty(swp_pte);
2231 		if (pte_uffd_wp(pteval))
2232 			swp_pte = pte_swp_mkuffd_wp(swp_pte);
2233 
2234 		set_pte_at(mm, address, pvmw.pte, swp_pte);
2235 
2236 		/*
2237 		 * The page reference previously held for the mapping now backs
2238 		 * the device exclusive swap entry, so don't take another one.
2239 		 */
2240 		page_remove_rmap(subpage, vma, false);
2241 	}
2242 
2243 	mmu_notifier_invalidate_range_end(&range);
2244 
2245 	return ret;
2246 }
2247 
2248 /**
2249  * folio_make_device_exclusive - Mark the folio exclusively owned by a device.
2250  * @folio: The folio to replace page table entries for.
2251  * @mm: The mm_struct where the folio is expected to be mapped.
2252  * @address: Address where the folio is expected to be mapped.
2253  * @owner: passed to MMU_NOTIFY_EXCLUSIVE range notifier callbacks
2254  *
2255  * Tries to remove all the page table entries which are mapping this
2256  * folio and replace them with special device exclusive swap entries to
2257  * grant a device exclusive access to the folio.
2258  *
2259  * Context: Caller must hold the folio lock.
2260  * Return: false if the page is still mapped, or if it could not be unmapped
2261  * from the expected address. Otherwise returns true (success).
2262  */
2263 static bool folio_make_device_exclusive(struct folio *folio,
2264 		struct mm_struct *mm, unsigned long address, void *owner)
2265 {
2266 	struct make_exclusive_args args = {
2267 		.mm = mm,
2268 		.address = address,
2269 		.owner = owner,
2270 		.valid = false,
2271 	};
2272 	struct rmap_walk_control rwc = {
2273 		.rmap_one = page_make_device_exclusive_one,
2274 		.done = page_not_mapped,
2275 		.anon_lock = folio_lock_anon_vma_read,
2276 		.arg = &args,
2277 	};
2278 
2279 	/*
2280 	 * Restrict to anonymous folios for now to avoid potential writeback
2281 	 * issues.
2282 	 */
2283 	if (!folio_test_anon(folio))
2284 		return false;
2285 
2286 	rmap_walk(folio, &rwc);
2287 
2288 	return args.valid && !folio_mapcount(folio);
2289 }
2290 
2291 /**
2292  * make_device_exclusive_range() - Mark a range for exclusive use by a device
2293  * @mm: mm_struct of associated target process
2294  * @start: start of the region to mark for exclusive device access
2295  * @end: end address of region
2296  * @pages: returns the pages which were successfully marked for exclusive access
2297  * @owner: passed to MMU_NOTIFY_EXCLUSIVE range notifier to allow filtering
2298  *
2299  * Returns: number of pages found in the range by GUP. A page is marked for
2300  * exclusive access only if the page pointer is non-NULL.
2301  *
2302  * This function finds ptes mapping page(s) to the given address range, locks
2303  * them and replaces mappings with special swap entries preventing userspace CPU
2304  * access. On fault these entries are replaced with the original mapping after
2305  * calling MMU notifiers.
2306  *
2307  * A driver using this to program access from a device must use an mmu notifier
2308  * critical section to hold a device-specific lock during programming. Once
2309  * programming is complete it should drop the page lock and reference, after
2310  * which point CPU access to the page will revoke the exclusive access.
2311  */
2312 int make_device_exclusive_range(struct mm_struct *mm, unsigned long start,
2313 				unsigned long end, struct page **pages,
2314 				void *owner)
2315 {
2316 	long npages = (end - start) >> PAGE_SHIFT;
2317 	long i;
2318 
2319 	npages = get_user_pages_remote(mm, start, npages,
2320 				       FOLL_GET | FOLL_WRITE | FOLL_SPLIT_PMD,
2321 				       pages, NULL, NULL);
2322 	if (npages < 0)
2323 		return npages;
2324 
2325 	for (i = 0; i < npages; i++, start += PAGE_SIZE) {
2326 		struct folio *folio = page_folio(pages[i]);
2327 		if (PageTail(pages[i]) || !folio_trylock(folio)) {
2328 			folio_put(folio);
2329 			pages[i] = NULL;
2330 			continue;
2331 		}
2332 
2333 		if (!folio_make_device_exclusive(folio, mm, start, owner)) {
2334 			folio_unlock(folio);
2335 			folio_put(folio);
2336 			pages[i] = NULL;
2337 		}
2338 	}
2339 
2340 	return npages;
2341 }
2342 EXPORT_SYMBOL_GPL(make_device_exclusive_range);
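
/*
 * Hypothetical driver-side sketch (illustrative only): how a driver might
 * call make_device_exclusive_range() for a single page.  "my_driver_data"
 * and the device programming step are assumptions; a real driver must also
 * register a notifier that recognises MMU_NOTIFY_EXCLUSIVE events carrying
 * this owner pointer, as described in the kerneldoc above.
 *
 *	struct page *page = NULL;
 *
 *	mmap_read_lock(mm);
 *	make_device_exclusive_range(mm, addr, addr + PAGE_SIZE, &page,
 *				    my_driver_data);
 *	mmap_read_unlock(mm);
 *	if (page) {
 *		... program the device to access page_to_pfn(page) ...
 *		unlock_page(page);
 *		put_page(page);
 *	}
 */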
2343 #endif
2344 
2345 void __put_anon_vma(struct anon_vma *anon_vma)
2346 {
2347 	struct anon_vma *root = anon_vma->root;
2348 
2349 	anon_vma_free(anon_vma);
2350 	if (root != anon_vma && atomic_dec_and_test(&root->refcount))
2351 		anon_vma_free(root);
2352 }
2353 
2354 static struct anon_vma *rmap_walk_anon_lock(struct folio *folio,
2355 					    struct rmap_walk_control *rwc)
2356 {
2357 	struct anon_vma *anon_vma;
2358 
2359 	if (rwc->anon_lock)
2360 		return rwc->anon_lock(folio, rwc);
2361 
2362 	/*
2363 	 * Note: remove_migration_ptes() cannot use folio_lock_anon_vma_read()
2364 	 * because that depends on page_mapped(); but not all its usages
2365 	 * are holding mmap_lock. Users without mmap_lock are required to
2366 	 * take a reference count to prevent the anon_vma from disappearing.
2367 	 */
2368 	anon_vma = folio_anon_vma(folio);
2369 	if (!anon_vma)
2370 		return NULL;
2371 
2372 	if (anon_vma_trylock_read(anon_vma))
2373 		goto out;
2374 
2375 	if (rwc->try_lock) {
2376 		anon_vma = NULL;
2377 		rwc->contended = true;
2378 		goto out;
2379 	}
2380 
2381 	anon_vma_lock_read(anon_vma);
2382 out:
2383 	return anon_vma;
2384 }
2385 
2386 /*
2387  * rmap_walk_anon - do something to an anonymous folio using the object-based
2388  * rmap method
2389  * @folio: the folio to be handled
2390  * @rwc: control variable according to each walk type
2391  *
2392  * Find all the mappings of a folio using the mapping pointer and the vma
2393  * chains contained in the anon_vma struct it points to.
2394  */
2395 static void rmap_walk_anon(struct folio *folio,
2396 		struct rmap_walk_control *rwc, bool locked)
2397 {
2398 	struct anon_vma *anon_vma;
2399 	pgoff_t pgoff_start, pgoff_end;
2400 	struct anon_vma_chain *avc;
2401 
2402 	if (locked) {
2403 		anon_vma = folio_anon_vma(folio);
2404 		/* did the anon_vma disappear under us? */
2405 		VM_BUG_ON_FOLIO(!anon_vma, folio);
2406 	} else {
2407 		anon_vma = rmap_walk_anon_lock(folio, rwc);
2408 	}
2409 	if (!anon_vma)
2410 		return;
2411 
2412 	pgoff_start = folio_pgoff(folio);
2413 	pgoff_end = pgoff_start + folio_nr_pages(folio) - 1;
2414 	anon_vma_interval_tree_foreach(avc, &anon_vma->rb_root,
2415 			pgoff_start, pgoff_end) {
2416 		struct vm_area_struct *vma = avc->vma;
2417 		unsigned long address = vma_address(&folio->page, vma);
2418 
2419 		VM_BUG_ON_VMA(address == -EFAULT, vma);
2420 		cond_resched();
2421 
2422 		if (rwc->invalid_vma && rwc->invalid_vma(vma, rwc->arg))
2423 			continue;
2424 
2425 		if (!rwc->rmap_one(folio, vma, address, rwc->arg))
2426 			break;
2427 		if (rwc->done && rwc->done(folio))
2428 			break;
2429 	}
2430 
2431 	if (!locked)
2432 		anon_vma_unlock_read(anon_vma);
2433 }
2434 
2435 /*
2436  * rmap_walk_file - do something to a file-backed folio using the object-based rmap method
2437  * @folio: the folio to be handled
2438  * @rwc: control variable according to each walk type
2439  *
2440  * Find all the mappings of a folio using the mapping pointer and the vma chains
2441  * contained in the address_space struct it points to.
2442  */
2443 static void rmap_walk_file(struct folio *folio,
2444 		struct rmap_walk_control *rwc, bool locked)
2445 {
2446 	struct address_space *mapping = folio_mapping(folio);
2447 	pgoff_t pgoff_start, pgoff_end;
2448 	struct vm_area_struct *vma;
2449 
2450 	/*
2451 	 * The page lock not only makes sure that page->mapping cannot
2452 	 * suddenly be NULLified by truncation, it makes sure that the
2453 	 * structure at mapping cannot be freed and reused yet,
2454 	 * so we can safely take mapping->i_mmap_rwsem.
2455 	 */
2456 	VM_BUG_ON_FOLIO(!folio_test_locked(folio), folio);
2457 
2458 	if (!mapping)
2459 		return;
2460 
2461 	pgoff_start = folio_pgoff(folio);
2462 	pgoff_end = pgoff_start + folio_nr_pages(folio) - 1;
2463 	if (!locked) {
2464 		if (i_mmap_trylock_read(mapping))
2465 			goto lookup;
2466 
2467 		if (rwc->try_lock) {
2468 			rwc->contended = true;
2469 			return;
2470 		}
2471 
2472 		i_mmap_lock_read(mapping);
2473 	}
2474 lookup:
2475 	vma_interval_tree_foreach(vma, &mapping->i_mmap,
2476 			pgoff_start, pgoff_end) {
2477 		unsigned long address = vma_address(&folio->page, vma);
2478 
2479 		VM_BUG_ON_VMA(address == -EFAULT, vma);
2480 		cond_resched();
2481 
2482 		if (rwc->invalid_vma && rwc->invalid_vma(vma, rwc->arg))
2483 			continue;
2484 
2485 		if (!rwc->rmap_one(folio, vma, address, rwc->arg))
2486 			goto done;
2487 		if (rwc->done && rwc->done(folio))
2488 			goto done;
2489 	}
2490 
2491 done:
2492 	if (!locked)
2493 		i_mmap_unlock_read(mapping);
2494 }
2495 
2496 void rmap_walk(struct folio *folio, struct rmap_walk_control *rwc)
2497 {
2498 	if (unlikely(folio_test_ksm(folio)))
2499 		rmap_walk_ksm(folio, rwc);
2500 	else if (folio_test_anon(folio))
2501 		rmap_walk_anon(folio, rwc, false);
2502 	else
2503 		rmap_walk_file(folio, rwc, false);
2504 }
2505 
2506 /* Like rmap_walk, but caller holds relevant rmap lock */
2507 void rmap_walk_locked(struct folio *folio, struct rmap_walk_control *rwc)
2508 {
2509 	/* no ksm support for now */
2510 	VM_BUG_ON_FOLIO(folio_test_ksm(folio), folio);
2511 	if (folio_test_anon(folio))
2512 		rmap_walk_anon(folio, rwc, true);
2513 	else
2514 		rmap_walk_file(folio, rwc, true);
2515 }
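
/*
 * Hypothetical sketch (illustrative only, not used anywhere): a minimal
 * rmap_walk() user that counts the VMAs currently mapping a locked folio.
 * The callback and helper names are made up; real users such as
 * try_to_unmap() above follow the same rmap_walk_control pattern.
 *
 *	static bool count_one_vma(struct folio *folio, struct vm_area_struct *vma,
 *				  unsigned long address, void *arg)
 *	{
 *		(*(int *)arg)++;
 *		return true;	// keep walking the remaining VMAs
 *	}
 *
 *	static int count_mapping_vmas(struct folio *folio)
 *	{
 *		int count = 0;
 *		struct rmap_walk_control rwc = {
 *			.rmap_one = count_one_vma,
 *			.arg = &count,
 *		};
 *
 *		rmap_walk(folio, &rwc);
 *		return count;
 *	}
 */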
2516 
2517 #ifdef CONFIG_HUGETLB_PAGE
2518 /*
2519  * The following two functions are for anonymous (private mapped) hugepages.
2520  * Unlike common anonymous pages, anonymous hugepages have no accounting code
2521  * and no lru code, because we handle hugepages differently from common pages.
2522  *
2523  * RMAP_COMPOUND is ignored.
2524  */
2525 void hugepage_add_anon_rmap(struct page *page, struct vm_area_struct *vma,
2526 			    unsigned long address, rmap_t flags)
2527 {
2528 	struct anon_vma *anon_vma = vma->anon_vma;
2529 	int first;
2530 
2531 	BUG_ON(!PageLocked(page));
2532 	BUG_ON(!anon_vma);
2533 	/* address might be in next vma when migration races vma_adjust */
2534 	first = atomic_inc_and_test(compound_mapcount_ptr(page));
2535 	VM_BUG_ON_PAGE(!first && (flags & RMAP_EXCLUSIVE), page);
2536 	VM_BUG_ON_PAGE(!first && PageAnonExclusive(page), page);
2537 	if (first)
2538 		__page_set_anon_rmap(page, vma, address,
2539 				     !!(flags & RMAP_EXCLUSIVE));
2540 }
2541 
2542 void hugepage_add_new_anon_rmap(struct page *page,
2543 			struct vm_area_struct *vma, unsigned long address)
2544 {
2545 	BUG_ON(address < vma->vm_start || address >= vma->vm_end);
2546 	atomic_set(compound_mapcount_ptr(page), 0);
2547 	atomic_set(compound_pincount_ptr(page), 0);
2548 
2549 	__page_set_anon_rmap(page, vma, address, 1);
2550 }
2551 #endif /* CONFIG_HUGETLB_PAGE */
2552